xref: /titanic_50/usr/src/lib/libc/port/threads/thr.c (revision c0889d7a91fa87e1cb7ef4457629b0cb51d47b50)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <pthread.h>
32 #include <procfs.h>
33 #include <sys/uio.h>
34 #include <ctype.h>
35 #include "libc.h"
36 
37 #undef errno
38 extern int errno;
39 
40 /*
41  * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
42  * "we are linked with libthread".  The Sun Workshop 6 update 1 compilation
43  * system used it illegally (it is a consolidation-private symbol).
44  * To accommodate this and possibly other abusers of the symbol,
45  * we make it always equal to 1 now that libthread has been folded
46  * into libc.  The new __libc_threaded symbol is used to indicate
47  * the new meaning, "more than one thread exists".
48  */
49 int __threaded = 1;		/* always equal to 1 */
50 int __libc_threaded = 0;	/* zero until first thr_create() */
51 
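/*
 * Illustrative sketch, not part of this file: a consumer such as stdio
 * can test __libc_threaded to skip locking on the single-threaded fast
 * path.  The consumer function below is hypothetical; only the
 * __libc_threaded symbol itself comes from the code above.
 */
#include <synch.h>
#include <stddef.h>

extern int __libc_threaded;

static void
hypothetical_append(char c, char *buf, size_t *lenp, mutex_t *lockp)
{
	int threaded = __libc_threaded;	/* sample once; 0 -> 1 only */

	if (threaded)			/* other threads may exist */
		(void) mutex_lock(lockp);
	buf[(*lenp)++] = c;
	if (threaded)
		(void) mutex_unlock(lockp);
}
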
52 /*
53  * thr_concurrency and pthread_concurrency are not used by the library.
54  * They exist solely to hold and return the values set by calls to
55  * thr_setconcurrency() and pthread_setconcurrency().
56  * Because thr_concurrency is affected by the THR_NEW_LWP flag
57  * to thr_create(), thr_concurrency is protected by link_lock.
58  */
59 static	int	thr_concurrency = 1;
60 static	int	pthread_concurrency;
61 
62 #define	HASHTBLSZ	1024	/* must be a power of two */
63 #define	TIDHASH(tid, udp)	(tid & (udp)->hash_mask)
64 
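/*
 * A minimal standalone sketch (not part of this file) of the mask
 * idiom behind TIDHASH(): because the table size is a power of two,
 * masking with hash_mask (size - 1) is equivalent to the more
 * expensive modulo operation.
 */
#include <assert.h>

int
main(void)
{
	unsigned int size = 1024;	/* HASHTBLSZ: a power of two */
	unsigned int mask = size - 1;	/* udp->hash_mask */
	unsigned int tid;

	for (tid = 0; tid < 5000; tid++)
		assert((tid & mask) == (tid % size));
	return (0);
}
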
65 /* initial allocation, just enough for one lwp */
66 #pragma align 64(init_hash_table)
67 thr_hash_table_t init_hash_table[1] = {
68 	{ DEFAULTMUTEX, DEFAULTCV, NULL },
69 };
70 
71 extern const Lc_interface rtld_funcs[];
72 
73 /*
74  * The weak version is known to libc_db and mdb.
75  */
76 #pragma weak _uberdata = __uberdata
77 uberdata_t __uberdata = {
78 	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
79 	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
80 	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
81 	{ RECURSIVEMUTEX, NULL, 0 },	/* callout_lock */
82 	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
83 	{ 0, },				/* tdb_hash_lock_stats */
84 	{ { 0 }, },			/* siguaction[NSIG] */
85 	{ { DEFAULTMUTEX, NULL, 0 },		/* bucket[NBUCKETS] */
86 	{ DEFAULTMUTEX, NULL, 0 },
87 	{ DEFAULTMUTEX, NULL, 0 },
88 	{ DEFAULTMUTEX, NULL, 0 },
89 	{ DEFAULTMUTEX, NULL, 0 },
90 	{ DEFAULTMUTEX, NULL, 0 },
91 	{ DEFAULTMUTEX, NULL, 0 },
92 	{ DEFAULTMUTEX, NULL, 0 },
93 	{ DEFAULTMUTEX, NULL, 0 },
94 	{ DEFAULTMUTEX, NULL, 0 } },
95 	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
96 	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
97 	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
98 	0,			/* primary_map */
99 	0,			/* bucket_init */
100 	0,			/* pad[0] */
101 	0,			/* pad[1] */
102 	{ 0 },			/* uberflags */
103 	NULL,			/* queue_head */
104 	init_hash_table,	/* thr_hash_table */
105 	1,			/* hash_size: size of the hash table */
106 	0,			/* hash_mask: hash_size - 1 */
107 	NULL,			/* ulwp_one */
108 	NULL,			/* all_lwps */
109 	NULL,			/* all_zombies */
110 	0,			/* nthreads */
111 	0,			/* nzombies */
112 	0,			/* ndaemons */
113 	0,			/* pid */
114 	sigacthandler,		/* sigacthandler */
115 	NULL,			/* lwp_stacks */
116 	NULL,			/* lwp_laststack */
117 	0,			/* nfreestack */
118 	10,			/* thread_stack_cache */
119 	NULL,			/* ulwp_freelist */
120 	NULL,			/* ulwp_lastfree */
121 	NULL,			/* ulwp_replace_free */
122 	NULL,			/* ulwp_replace_last */
123 	NULL,			/* atforklist */
124 	NULL,			/* robustlocks */
125 	NULL,			/* __tdb_bootstrap */
126 	{			/* tdb */
127 		NULL,		/* tdb_sync_addr_hash */
128 		0,		/* tdb_register_count */
129 		0,		/* tdb_hash_alloc_failed */
130 		NULL,		/* tdb_sync_addr_free */
131 		NULL,		/* tdb_sync_addr_last */
132 		0,		/* tdb_sync_alloc */
133 		{ 0, 0 },	/* tdb_ev_global_mask */
134 		tdb_events,	/* tdb_events array */
135 	},
136 };
137 
138 /*
139  * The weak version is known to libc_db and mdb.
140  */
141 #pragma weak _tdb_bootstrap = __tdb_bootstrap
142 uberdata_t **__tdb_bootstrap = NULL;
143 
144 int	thread_queue_fifo = 4;
145 int	thread_queue_dump = 0;
146 int	thread_cond_wait_defer = 0;
147 int	thread_error_detection = 0;
148 int	thread_async_safe = 0;
149 int	thread_stack_cache = 10;
150 
151 int	thread_door_noreserve = 0;
152 
153 static	ulwp_t	*ulwp_alloc(void);
154 static	void	ulwp_free(ulwp_t *);
155 
156 /*
157  * Insert the lwp into the hash table.
158  */
159 void
160 hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
161 {
162 	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
163 	udp->thr_hash_table[ix].hash_bucket = ulwp;
164 	ulwp->ul_ix = ix;
165 }
166 
167 void
168 hash_in(ulwp_t *ulwp, uberdata_t *udp)
169 {
170 	int ix = TIDHASH(ulwp->ul_lwpid, udp);
171 	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
172 
173 	lmutex_lock(mp);
174 	hash_in_unlocked(ulwp, ix, udp);
175 	lmutex_unlock(mp);
176 }
177 
178 /*
179  * Delete the lwp from the hash table.
180  */
181 void
182 hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
183 {
184 	ulwp_t **ulwpp;
185 
186 	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
187 	    ulwp != *ulwpp;
188 	    ulwpp = &(*ulwpp)->ul_hash)
189 		;
190 	*ulwpp = ulwp->ul_hash;
191 	ulwp->ul_hash = NULL;
192 	ulwp->ul_ix = -1;
193 }
194 
195 void
196 hash_out(ulwp_t *ulwp, uberdata_t *udp)
197 {
198 	int ix;
199 
200 	if ((ix = ulwp->ul_ix) >= 0) {
201 		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
202 
203 		lmutex_lock(mp);
204 		hash_out_unlocked(ulwp, ix, udp);
205 		lmutex_unlock(mp);
206 	}
207 }
208 
209 /*
210  * Retain stack information for thread structures that are being recycled for
211  * new threads.  All other members of the thread structure should be zeroed.
212  */
213 static void
214 ulwp_clean(ulwp_t *ulwp)
215 {
216 	caddr_t stk = ulwp->ul_stk;
217 	size_t mapsiz = ulwp->ul_mapsiz;
218 	size_t guardsize = ulwp->ul_guardsize;
219 	uintptr_t stktop = ulwp->ul_stktop;
220 	size_t stksiz = ulwp->ul_stksiz;
221 
222 	(void) _private_memset(ulwp, 0, sizeof (*ulwp));
223 
224 	ulwp->ul_stk = stk;
225 	ulwp->ul_mapsiz = mapsiz;
226 	ulwp->ul_guardsize = guardsize;
227 	ulwp->ul_stktop = stktop;
228 	ulwp->ul_stksiz = stksiz;
229 }
230 
231 static int stackprot;
232 
233 /*
234  * Answer the question, "Is the lwp in question really dead?"
235  * We must inquire of the operating system to be really sure
236  * because the lwp may have called lwp_exit() but it has not
237  * yet completed the exit.
238  */
239 static int
240 dead_and_buried(ulwp_t *ulwp)
241 {
242 	if (ulwp->ul_lwpid == (lwpid_t)(-1))
243 		return (1);
244 	if (ulwp->ul_dead && ulwp->ul_detached &&
245 	    __lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
246 		ulwp->ul_lwpid = (lwpid_t)(-1);
247 		return (1);
248 	}
249 	return (0);
250 }
251 
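/*
 * Sketch of the "signal 0" liveness probe used above, recast in terms
 * of the public API (an assumption: application code would use
 * pthread_kill() rather than the private __lwp_kill()).  Signal 0
 * performs validity checking only, with no delivery, so on Solaris an
 * ESRCH return identifies an lwp that has fully completed its exit.
 */
#include <pthread.h>
#include <signal.h>
#include <errno.h>

static int
thread_fully_exited(pthread_t t)
{
	return (pthread_kill(t, 0) == ESRCH);
}
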
252 /*
253  * Attempt to keep the stack cache within the specified cache limit.
254  */
255 static void
256 trim_stack_cache(int cache_limit)
257 {
258 	ulwp_t *self = curthread;
259 	uberdata_t *udp = self->ul_uberdata;
260 	ulwp_t *prev = NULL;
261 	ulwp_t **ulwpp = &udp->lwp_stacks;
262 	ulwp_t *ulwp;
263 
264 	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));
265 
266 	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
267 		if (dead_and_buried(ulwp)) {
268 			*ulwpp = ulwp->ul_next;
269 			if (ulwp == udp->lwp_laststack)
270 				udp->lwp_laststack = prev;
271 			hash_out(ulwp, udp);
272 			udp->nfreestack--;
273 			(void) _private_munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
274 			/*
275 			 * Now put the free ulwp on the ulwp freelist.
276 			 */
277 			ulwp->ul_mapsiz = 0;
278 			ulwp->ul_next = NULL;
279 			if (udp->ulwp_freelist == NULL)
280 				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
281 			else {
282 				udp->ulwp_lastfree->ul_next = ulwp;
283 				udp->ulwp_lastfree = ulwp;
284 			}
285 		} else {
286 			prev = ulwp;
287 			ulwpp = &ulwp->ul_next;
288 		}
289 	}
290 }
291 
292 /*
293  * Find an unused stack of the requested size
294  * or create a new stack of the requested size.
295  * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
296  * thr_exit() stores 1 in the ul_dead member.
297  * thr_join() stores -1 in the ul_lwpid member.
298  */
299 ulwp_t *
300 find_stack(size_t stksize, size_t guardsize)
301 {
302 	static size_t pagesize = 0;
303 
304 	uberdata_t *udp = curthread->ul_uberdata;
305 	size_t mapsize;
306 	ulwp_t *prev;
307 	ulwp_t *ulwp;
308 	ulwp_t **ulwpp;
309 	void *stk;
310 
311 	/*
312 	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
313 	 * unless overridden by the system's configuration.
314 	 */
315 	if (stackprot == 0) {	/* do this once */
316 		long lprot = _sysconf(_SC_STACK_PROT);
317 		if (lprot <= 0)
318 			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
319 		stackprot = (int)lprot;
320 	}
321 	if (pagesize == 0)	/* do this once */
322 		pagesize = _sysconf(_SC_PAGESIZE);
323 
324 	/*
325 	 * One megabyte stacks by default, but subtract off
326 	 * two pages for the system-created red zones.
327 	 * Round up a non-zero stack size to a pagesize multiple.
328 	 */
329 	if (stksize == 0)
330 		stksize = DEFAULTSTACK - 2 * pagesize;
331 	else
332 		stksize = ((stksize + pagesize - 1) & -pagesize);
333 
334 	/*
335 	 * Round up the mapping size to a multiple of pagesize.
336 	 * Note: mmap() provides at least one page of red zone
337 	 * so we deduct that from the value of guardsize.
338 	 */
339 	if (guardsize != 0)
340 		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
341 	mapsize = stksize + guardsize;
342 
343 	lmutex_lock(&udp->link_lock);
344 	for (prev = NULL, ulwpp = &udp->lwp_stacks;
345 	    (ulwp = *ulwpp) != NULL;
346 	    prev = ulwp, ulwpp = &ulwp->ul_next) {
347 		if (ulwp->ul_mapsiz == mapsize &&
348 		    ulwp->ul_guardsize == guardsize &&
349 		    dead_and_buried(ulwp)) {
350 			/*
351 			 * The previous lwp is gone; reuse the stack.
352 			 * Remove the ulwp from the stack list.
353 			 */
354 			*ulwpp = ulwp->ul_next;
355 			ulwp->ul_next = NULL;
356 			if (ulwp == udp->lwp_laststack)
357 				udp->lwp_laststack = prev;
358 			hash_out(ulwp, udp);
359 			udp->nfreestack--;
360 			lmutex_unlock(&udp->link_lock);
361 			ulwp_clean(ulwp);
362 			return (ulwp);
363 		}
364 	}
365 
366 	/*
367 	 * None of the cached stacks matched our mapping size.
368 	 * Reduce the stack cache to get rid of possibly
369 	 * very old stacks that will never be reused.
370 	 */
371 	if (udp->nfreestack > udp->thread_stack_cache)
372 		trim_stack_cache(udp->thread_stack_cache);
373 	else if (udp->nfreestack > 0)
374 		trim_stack_cache(udp->nfreestack - 1);
375 	lmutex_unlock(&udp->link_lock);
376 
377 	/*
378 	 * Create a new stack.
379 	 */
380 	if ((stk = _private_mmap(NULL, mapsize, stackprot,
381 	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
382 		/*
383 		 * We have allocated our stack.  Now allocate the ulwp.
384 		 */
385 		ulwp = ulwp_alloc();
386 		if (ulwp == NULL)
387 			(void) _private_munmap(stk, mapsize);
388 		else {
389 			ulwp->ul_stk = stk;
390 			ulwp->ul_mapsiz = mapsize;
391 			ulwp->ul_guardsize = guardsize;
392 			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
393 			ulwp->ul_stksiz = stksize;
394 			ulwp->ul_ix = -1;
395 			if (guardsize)	/* protect the extra red zone */
396 				(void) _private_mprotect(stk,
397 				    guardsize, PROT_NONE);
398 		}
399 	}
400 	return (ulwp);
401 }
402 
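/*
 * Standalone sketch of the rounding arithmetic used in find_stack():
 * for a power-of-two pagesize, -pagesize (in unsigned arithmetic) is
 * a mask of the high bits, so (x + pagesize - 1) & -pagesize rounds
 * x up to the next multiple of pagesize.
 */
#include <assert.h>
#include <stddef.h>

int
main(void)
{
	size_t pagesize = 4096;		/* assumed page size */

	assert(((0 + pagesize - 1) & -pagesize) == 0);
	assert(((1 + pagesize - 1) & -pagesize) == 4096);
	assert(((4096 + pagesize - 1) & -pagesize) == 4096);
	assert(((4097 + pagesize - 1) & -pagesize) == 8192);
	return (0);
}
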
403 /*
404  * Get a ulwp_t structure from the free list or allocate a new one.
405  * Such ulwp_t's do not have a stack allocated by the library.
406  */
407 static ulwp_t *
408 ulwp_alloc(void)
409 {
410 	ulwp_t *self = curthread;
411 	uberdata_t *udp = self->ul_uberdata;
412 	size_t tls_size;
413 	ulwp_t *prev;
414 	ulwp_t *ulwp;
415 	ulwp_t **ulwpp;
416 	caddr_t data;
417 
418 	lmutex_lock(&udp->link_lock);
419 	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
420 	    (ulwp = *ulwpp) != NULL;
421 	    prev = ulwp, ulwpp = &ulwp->ul_next) {
422 		if (dead_and_buried(ulwp)) {
423 			*ulwpp = ulwp->ul_next;
424 			ulwp->ul_next = NULL;
425 			if (ulwp == udp->ulwp_lastfree)
426 				udp->ulwp_lastfree = prev;
427 			hash_out(ulwp, udp);
428 			lmutex_unlock(&udp->link_lock);
429 			ulwp_clean(ulwp);
430 			return (ulwp);
431 		}
432 	}
433 	lmutex_unlock(&udp->link_lock);
434 
435 	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
436 	data = lmalloc(sizeof (*ulwp) + tls_size);
437 	if (data != NULL) {
438 		/* LINTED pointer cast may result in improper alignment */
439 		ulwp = (ulwp_t *)(data + tls_size);
440 	}
441 	return (ulwp);
442 }
443 
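/*
 * Layout produced by ulwp_alloc() above: static TLS is placed
 * immediately below the ulwp_t, so thread-local storage can be
 * addressed at negative offsets from the thread pointer.
 *
 *	data  ---> +------------------+
 *	           |    static TLS    |  tls_size bytes (rounded up)
 *	ulwp  ---> +------------------+
 *	           |      ulwp_t      |
 *	           +------------------+  data + tls_size + sizeof (*ulwp)
 */
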
444 /*
445  * Free a ulwp structure.
446  * If there is an associated stack, put it on the stack list and
447  * munmap() previously freed stacks up to the residual cache limit.
448  * Else put it on the ulwp free list and never call lfree() on it.
449  */
450 static void
451 ulwp_free(ulwp_t *ulwp)
452 {
453 	uberdata_t *udp = curthread->ul_uberdata;
454 
455 	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
456 	ulwp->ul_next = NULL;
457 	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
458 		/*EMPTY*/;
459 	else if (ulwp->ul_mapsiz != 0) {
460 		if (udp->lwp_stacks == NULL)
461 			udp->lwp_stacks = udp->lwp_laststack = ulwp;
462 		else {
463 			udp->lwp_laststack->ul_next = ulwp;
464 			udp->lwp_laststack = ulwp;
465 		}
466 		if (++udp->nfreestack > udp->thread_stack_cache)
467 			trim_stack_cache(udp->thread_stack_cache);
468 	} else {
469 		if (udp->ulwp_freelist == NULL)
470 			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
471 		else {
472 			udp->ulwp_lastfree->ul_next = ulwp;
473 			udp->ulwp_lastfree = ulwp;
474 		}
475 	}
476 }
477 
478 /*
479  * Find a named lwp and return a pointer to its hash list location.
480  * On success, returns with the hash lock held.
481  */
482 ulwp_t **
483 find_lwpp(thread_t tid)
484 {
485 	uberdata_t *udp = curthread->ul_uberdata;
486 	int ix = TIDHASH(tid, udp);
487 	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
488 	ulwp_t *ulwp;
489 	ulwp_t **ulwpp;
490 
491 	if (tid == 0)
492 		return (NULL);
493 
494 	lmutex_lock(mp);
495 	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
496 	    (ulwp = *ulwpp) != NULL;
497 	    ulwpp = &ulwp->ul_hash) {
498 		if (ulwp->ul_lwpid == tid)
499 			return (ulwpp);
500 	}
501 	lmutex_unlock(mp);
502 	return (NULL);
503 }
504 
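/*
 * Caller-side protocol sketch for find_lwpp(): on success the bucket's
 * hash lock is returned held and must be dropped by the caller, as in
 * _thr_detach() below:
 *
 *	if ((ulwpp = find_lwpp(tid)) == NULL)
 *		return (ESRCH);		(no such thread; lock not held)
 *	ulwp = *ulwpp;
 *	... examine ulwp under the bucket's hash lock ...
 *	ulwp_unlock(ulwp, udp);		(drops the bucket's hash lock)
 */
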
505 /*
506  * Wake up all lwps waiting on this lwp for some reason.
507  */
508 void
509 ulwp_broadcast(ulwp_t *ulwp)
510 {
511 	ulwp_t *self = curthread;
512 	uberdata_t *udp = self->ul_uberdata;
513 
514 	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
515 	(void) cond_broadcast_internal(ulwp_condvar(ulwp, udp));
516 }
517 
518 /*
519  * Find a named lwp and return a pointer to it.
520  * Returns with the hash lock held.
521  */
522 ulwp_t *
523 find_lwp(thread_t tid)
524 {
525 	ulwp_t *self = curthread;
526 	uberdata_t *udp = self->ul_uberdata;
527 	ulwp_t *ulwp = NULL;
528 	ulwp_t **ulwpp;
529 
530 	if (self->ul_lwpid == tid) {
531 		ulwp = self;
532 		ulwp_lock(ulwp, udp);
533 	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
534 		ulwp = *ulwpp;
535 	}
536 
537 	if (ulwp && ulwp->ul_dead) {
538 		ulwp_unlock(ulwp, udp);
539 		ulwp = NULL;
540 	}
541 
542 	return (ulwp);
543 }
544 
545 int
546 _thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
547 	long flags, thread_t *new_thread, pri_t priority, int policy,
548 	size_t guardsize)
549 {
550 	ulwp_t *self = curthread;
551 	uberdata_t *udp = self->ul_uberdata;
552 	ucontext_t uc;
553 	uint_t lwp_flags;
554 	thread_t tid;
555 	int error = 0;
556 	ulwp_t *ulwp;
557 
558 	/*
559 	 * Enforce the restriction of not creating any threads
560 	 * until the primary link map has been initialized.
561 	 * Also, disallow thread creation in a child of vfork().
562 	 */
563 	if (!self->ul_primarymap || self->ul_vfork)
564 		return (ENOTSUP);
565 
566 	if (udp->hash_size == 1)
567 		finish_init();
568 
569 	if (((stk || stksize) && stksize < MINSTACK) ||
570 	    priority < THREAD_MIN_PRIORITY || priority > THREAD_MAX_PRIORITY)
571 		return (EINVAL);
572 
573 	if (stk == NULL) {
574 		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
575 			return (ENOMEM);
576 		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
577 	} else {
578 		/* initialize the private stack */
579 		if ((ulwp = ulwp_alloc()) == NULL)
580 			return (ENOMEM);
581 		ulwp->ul_stk = stk;
582 		ulwp->ul_stktop = (uintptr_t)stk + stksize;
583 		ulwp->ul_stksiz = stksize;
584 		ulwp->ul_ix = -1;
585 	}
586 	ulwp->ul_errnop = &ulwp->ul_errno;
587 
588 	lwp_flags = LWP_SUSPENDED;
589 	if (flags & (THR_DETACHED|THR_DAEMON)) {
590 		flags |= THR_DETACHED;
591 		lwp_flags |= LWP_DETACHED;
592 	}
593 	if (flags & THR_DAEMON)
594 		lwp_flags |= LWP_DAEMON;
595 
596 	/* creating a thread: enforce mt-correctness in _mutex_lock() */
597 	self->ul_async_safe = 1;
598 
599 	/* per-thread copies of global variables, for speed */
600 	ulwp->ul_queue_fifo = self->ul_queue_fifo;
601 	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
602 	ulwp->ul_error_detection = self->ul_error_detection;
603 	ulwp->ul_async_safe = self->ul_async_safe;
604 	ulwp->ul_max_spinners = self->ul_max_spinners;
605 	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
606 	ulwp->ul_queue_spin = self->ul_queue_spin;
607 	ulwp->ul_door_noreserve = self->ul_door_noreserve;
608 
609 	ulwp->ul_primarymap = self->ul_primarymap;
610 	ulwp->ul_self = ulwp;
611 	ulwp->ul_uberdata = udp;
612 
613 	/* debugger support */
614 	ulwp->ul_usropts = flags;
615 
616 #ifdef __sparc
617 	/*
618 	 * We cache several instructions in the thread structure for use
619 	 * by the fasttrap DTrace provider. When changing this, read the
620 	 * comment in fasttrap.h for all the other places that must
621 	 * be changed.
622 	 */
623 	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
624 	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
625 	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
626 	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
627 #endif
628 
629 	ulwp->ul_startpc = func;
630 	ulwp->ul_startarg = arg;
631 	_fpinherit(ulwp);
632 	/*
633 	 * Defer signals on the new thread until its TLS constructors
634 	 * have been called.  _thr_setup() will call sigon() after
635 	 * it has called tls_setup().
636 	 */
637 	ulwp->ul_sigdefer = 1;
638 
639 	if (setup_context(&uc, _thr_setup, ulwp,
640 	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize) != 0)
641 		error = EAGAIN;
642 
643 	/*
644 	 * Call enter_critical() to avoid being suspended until we
645 	 * have linked the new thread into the proper lists.
646 	 * This is necessary because forkall() and fork1() must
647 	 * suspend all threads and they must see a complete list.
648 	 */
649 	enter_critical(self);
650 	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
651 	if (error != 0 ||
652 	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
653 		exit_critical(self);
654 		ulwp->ul_lwpid = (lwpid_t)(-1);
655 		ulwp->ul_dead = 1;
656 		ulwp->ul_detached = 1;
657 		lmutex_lock(&udp->link_lock);
658 		ulwp_free(ulwp);
659 		lmutex_unlock(&udp->link_lock);
660 		return (error);
661 	}
662 	self->ul_nocancel = 0;	/* cancellation is now possible */
663 	udp->uberflags.uf_mt = 1;
664 	if (new_thread)
665 		*new_thread = tid;
666 	if (flags & THR_DETACHED)
667 		ulwp->ul_detached = 1;
668 	ulwp->ul_lwpid = tid;
669 	ulwp->ul_stop = TSTP_REGULAR;
670 	if (flags & THR_SUSPENDED)
671 		ulwp->ul_created = 1;
672 	ulwp->ul_policy = policy;
673 	ulwp->ul_pri = priority;
674 
675 	lmutex_lock(&udp->link_lock);
676 	ulwp->ul_forw = udp->all_lwps;
677 	ulwp->ul_back = udp->all_lwps->ul_back;
678 	ulwp->ul_back->ul_forw = ulwp;
679 	ulwp->ul_forw->ul_back = ulwp;
680 	hash_in(ulwp, udp);
681 	udp->nthreads++;
682 	if (flags & THR_DAEMON)
683 		udp->ndaemons++;
684 	if (flags & THR_NEW_LWP)
685 		thr_concurrency++;
686 	__libc_threaded = 1;		/* inform stdio */
687 	lmutex_unlock(&udp->link_lock);
688 
689 	if (__td_event_report(self, TD_CREATE, udp)) {
690 		self->ul_td_evbuf.eventnum = TD_CREATE;
691 		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
692 		tdb_event(TD_CREATE, udp);
693 	}
694 
695 	exit_critical(self);
696 
697 	if (!(flags & THR_SUSPENDED))
698 		(void) _thrp_continue(tid, TSTP_REGULAR);
699 
700 	return (0);
701 }
702 
703 #pragma weak thr_create = _thr_create
704 int
705 _thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
706 	long flags, thread_t *new_thread)
707 {
708 	return (_thrp_create(stk, stksize, func, arg, flags, new_thread,
709 	    curthread->ul_pri, curthread->ul_policy, 0));
710 }
711 
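/*
 * Standalone usage sketch of the public thr_create()/thr_join() entry
 * points defined above (the UI threads API; pthread_create() is the
 * POSIX equivalent):
 */
#include <thread.h>
#include <stdio.h>

static void *
start(void *arg)
{
	(void) printf("running as tid %u\n", thr_self());
	return (arg);
}

int
main(void)
{
	thread_t tid;

	if (thr_create(NULL, 0, start, NULL, 0, &tid) != 0)
		return (1);
	(void) thr_join(tid, NULL, NULL);	/* reap the departed thread */
	return (0);
}
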
712 /*
713  * A special cancellation cleanup hook for DCE.
714  * cleanuphndlr, when it is not NULL, will contain a callback
715  * function to be called before a thread is terminated in
716  * _thr_exit() as a result of being cancelled.
717  */
718 static void (*cleanuphndlr)(void) = NULL;
719 
720 /*
721  * _pthread_setcleanupinit: sets the cleanup hook.
722  */
723 int
724 _pthread_setcleanupinit(void (*func)(void))
725 {
726 	cleanuphndlr = func;
727 	return (0);
728 }
729 
730 void
731 _thrp_exit()
732 {
733 	ulwp_t *self = curthread;
734 	uberdata_t *udp = self->ul_uberdata;
735 	ulwp_t *replace = NULL;
736 
737 	if (__td_event_report(self, TD_DEATH, udp)) {
738 		self->ul_td_evbuf.eventnum = TD_DEATH;
739 		tdb_event(TD_DEATH, udp);
740 	}
741 
742 	ASSERT(self->ul_sigdefer != 0);
743 
744 	lmutex_lock(&udp->link_lock);
745 	udp->nthreads--;
746 	if (self->ul_usropts & THR_NEW_LWP)
747 		thr_concurrency--;
748 	if (self->ul_usropts & THR_DAEMON)
749 		udp->ndaemons--;
750 	else if (udp->nthreads == udp->ndaemons) {
751 		/*
752 		 * We are the last non-daemon thread exiting.
753 		 * Exit the process.  We retain our TSD and TLS so
754 		 * that atexit() application functions can use them.
755 		 */
756 		lmutex_unlock(&udp->link_lock);
757 		exit(0);
758 		thr_panic("_thrp_exit(): exit(0) returned");
759 	}
760 	lmutex_unlock(&udp->link_lock);
761 
762 	tsd_exit();		/* deallocate thread-specific data */
763 	tls_exit();		/* deallocate thread-local storage */
764 	heldlock_exit();	/* deal with left-over held locks */
765 
766 	/* block all signals to finish exiting */
767 	block_all_signals(self);
768 	/* also prevent ourself from being suspended */
769 	enter_critical(self);
770 	rwl_free(self);
771 	lmutex_lock(&udp->link_lock);
772 	ulwp_free(self);
773 	(void) ulwp_lock(self, udp);
774 
775 	if (self->ul_mapsiz && !self->ul_detached) {
776 		/*
777 		 * We want to free the stack for reuse but must keep
778 		 * the ulwp_t struct for the benefit of thr_join().
779 		 * For this purpose we allocate a replacement ulwp_t.
780 		 */
781 		if ((replace = udp->ulwp_replace_free) == NULL)
782 			replace = lmalloc(REPLACEMENT_SIZE);
783 		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
784 			udp->ulwp_replace_last = NULL;
785 	}
786 
787 	if (udp->all_lwps == self)
788 		udp->all_lwps = self->ul_forw;
789 	if (udp->all_lwps == self)
790 		udp->all_lwps = NULL;
791 	else {
792 		self->ul_forw->ul_back = self->ul_back;
793 		self->ul_back->ul_forw = self->ul_forw;
794 	}
795 	self->ul_forw = self->ul_back = NULL;
796 	/* collect queue lock statistics before marking ourself dead */
797 	record_spin_locks(self);
798 	self->ul_dead = 1;
799 	self->ul_pleasestop = 0;
800 	if (replace != NULL) {
801 		int ix = self->ul_ix;		/* the hash index */
802 		(void) _private_memcpy(replace, self, REPLACEMENT_SIZE);
803 		replace->ul_self = replace;
804 		replace->ul_next = NULL;	/* clone not on stack list */
805 		replace->ul_mapsiz = 0;		/* allows clone to be freed */
806 		replace->ul_replace = 1;	/* requires clone to be freed */
807 		hash_out_unlocked(self, ix, udp);
808 		hash_in_unlocked(replace, ix, udp);
809 		ASSERT(!(self->ul_detached));
810 		self->ul_detached = 1;		/* this frees the stack */
811 		self->ul_schedctl = NULL;
812 		self->ul_schedctl_called = &udp->uberflags;
813 		set_curthread(self = replace);
814 		/*
815 		 * Having just changed the address of curthread, we
816 		 * must reset the ownership of the locks we hold so
817 		 * that assertions will not fire when we release them.
818 		 */
819 		udp->link_lock.mutex_owner = (uintptr_t)self;
820 		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
821 		/*
822 		 * NOTE:
823 		 * On i386, %gs still references the original, not the
824 		 * replacement, ulwp structure.  Fetching the replacement
825 		 * curthread pointer via %gs:0 works correctly since the
826 		 * original ulwp structure will not be reallocated until
827 		 * this lwp has completed its lwp_exit() system call (see
828 		 * dead_and_buried()), but from here on out, we must make
829 		 * no references to %gs:<offset> other than %gs:0.
830 		 */
831 	}
832 	/*
833 	 * Put non-detached terminated threads in the all_zombies list.
834 	 */
835 	if (!self->ul_detached) {
836 		udp->nzombies++;
837 		if (udp->all_zombies == NULL) {
838 			ASSERT(udp->nzombies == 1);
839 			udp->all_zombies = self->ul_forw = self->ul_back = self;
840 		} else {
841 			self->ul_forw = udp->all_zombies;
842 			self->ul_back = udp->all_zombies->ul_back;
843 			self->ul_back->ul_forw = self;
844 			self->ul_forw->ul_back = self;
845 		}
846 	}
847 	/*
848 	 * Notify everyone waiting for this thread.
849 	 */
850 	ulwp_broadcast(self);
851 	(void) ulwp_unlock(self, udp);
852 	/*
853 	 * Prevent any more references to the schedctl data.
854 	 * We are exiting and continue_fork() may not find us.
855 	 * Do this just before dropping link_lock, since fork
856 	 * serializes on link_lock.
857 	 */
858 	self->ul_schedctl = NULL;
859 	self->ul_schedctl_called = &udp->uberflags;
860 	lmutex_unlock(&udp->link_lock);
861 
862 	ASSERT(self->ul_critical == 1);
863 	ASSERT(self->ul_preempt == 0);
864 	_lwp_terminate();	/* never returns */
865 	thr_panic("_thrp_exit(): _lwp_terminate() returned");
866 }
867 
868 void
869 collect_queue_statistics()
870 {
871 	uberdata_t *udp = curthread->ul_uberdata;
872 	ulwp_t *ulwp;
873 
874 	if (thread_queue_dump) {
875 		lmutex_lock(&udp->link_lock);
876 		if ((ulwp = udp->all_lwps) != NULL) {
877 			do {
878 				record_spin_locks(ulwp);
879 			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
880 		}
881 		lmutex_unlock(&udp->link_lock);
882 	}
883 }
884 
885 void
886 _thr_exit_common(void *status, int unwind)
887 {
888 	ulwp_t *self = curthread;
889 	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);
890 
891 	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);
892 
893 	/*
894 	 * Disable cancellation and call the special DCE cancellation
895 	 * cleanup hook if it is enabled.  Do nothing else before calling
896 	 * the DCE cancellation cleanup hook; it may call longjmp() and
897 	 * never return here.
898 	 */
899 	self->ul_cancel_disabled = 1;
900 	self->ul_cancel_async = 0;
901 	self->ul_save_async = 0;
902 	self->ul_cancelable = 0;
903 	self->ul_cancel_pending = 0;
904 	set_cancel_pending_flag(self, 1);
905 	if (cancelled && cleanuphndlr != NULL)
906 		(*cleanuphndlr)();
907 
908 	/*
909 	 * Block application signals while we are exiting.
910 	 * We call out to C++, TSD, and TLS destructors while exiting
911 	 * and these are application-defined, so we cannot be assured
912 	 * that they won't reset the signal mask.  We use sigoff() to
913 	 * defer any signals that may be received as a result of this
914 	 * bad behavior.  Such signals will be lost to the process
915 	 * when the thread finishes exiting.
916 	 */
917 	(void) _thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
918 	sigoff(self);
919 
920 	self->ul_rval = status;
921 
922 	/*
923 	 * If thr_exit() is being called from a context in which
924 	 * C++ destructors must be run, such as a cancellation point,
925 	 * set this flag.  It is checked in _t_cancel() to decide
926 	 * whether or not _ex_unwind() is to be called.
927 	 */
928 	if (unwind)
929 		self->ul_unwind = 1;
930 
931 	/*
932 	 * _thrp_unwind() will eventually call _thrp_exit().
933 	 * It never returns.
934 	 */
935 	_thrp_unwind(NULL);
936 	thr_panic("_thr_exit_common(): _thrp_unwind() returned");
937 }
938 
939 /*
940  * Called when a thread returns from its start function.
941  * We are at the top of the stack; no unwinding is necessary.
942  */
943 void
944 _thr_terminate(void *status)
945 {
946 	_thr_exit_common(status, 0);
947 }
948 
949 #pragma weak thr_exit = _thr_exit
950 #pragma weak pthread_exit = _thr_exit
951 #pragma weak _pthread_exit = _thr_exit
952 void
953 _thr_exit(void *status)
954 {
955 	_thr_exit_common(status, 1);
956 }
957 
958 int
959 _thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
960 {
961 	uberdata_t *udp = curthread->ul_uberdata;
962 	mutex_t *mp;
963 	void *rval;
964 	thread_t found;
965 	ulwp_t *ulwp;
966 	ulwp_t **ulwpp;
967 	int replace;
968 	int error;
969 
970 	if (do_cancel)
971 		error = lwp_wait(tid, &found);
972 	else {
973 		while ((error = __lwp_wait(tid, &found)) == EINTR)
974 			;
975 	}
976 	if (error)
977 		return (error);
978 
979 	/*
980 	 * We must hold link_lock to avoid a race condition with find_stack().
981 	 */
982 	lmutex_lock(&udp->link_lock);
983 	if ((ulwpp = find_lwpp(found)) == NULL) {
984 		/*
985 		 * lwp_wait() found an lwp that the library doesn't know
986 		 * about.  It must have been created with _lwp_create().
987 		 * Just return its lwpid; we can't know its status.
988 		 */
989 		lmutex_unlock(&udp->link_lock);
990 		rval = NULL;
991 	} else {
992 		/*
993 		 * Remove ulwp from the hash table.
994 		 */
995 		ulwp = *ulwpp;
996 		*ulwpp = ulwp->ul_hash;
997 		ulwp->ul_hash = NULL;
998 		/*
999 		 * Remove ulwp from all_zombies list.
1000 		 */
1001 		ASSERT(udp->nzombies >= 1);
1002 		if (udp->all_zombies == ulwp)
1003 			udp->all_zombies = ulwp->ul_forw;
1004 		if (udp->all_zombies == ulwp)
1005 			udp->all_zombies = NULL;
1006 		else {
1007 			ulwp->ul_forw->ul_back = ulwp->ul_back;
1008 			ulwp->ul_back->ul_forw = ulwp->ul_forw;
1009 		}
1010 		ulwp->ul_forw = ulwp->ul_back = NULL;
1011 		udp->nzombies--;
1012 		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
1013 		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
1014 		/*
1015 		 * We can't call ulwp_unlock(ulwp) after we set
1016 		 * ulwp->ul_ix = -1 so we have to get a pointer to the
1017 		 * ulwp's hash table mutex now in order to unlock it below.
1018 		 */
1019 		mp = ulwp_mutex(ulwp, udp);
1020 		ulwp->ul_lwpid = (lwpid_t)(-1);
1021 		ulwp->ul_ix = -1;
1022 		rval = ulwp->ul_rval;
1023 		replace = ulwp->ul_replace;
1024 		lmutex_unlock(mp);
1025 		if (replace) {
1026 			ulwp->ul_next = NULL;
1027 			if (udp->ulwp_replace_free == NULL)
1028 				udp->ulwp_replace_free =
1029 				    udp->ulwp_replace_last = ulwp;
1030 			else {
1031 				udp->ulwp_replace_last->ul_next = ulwp;
1032 				udp->ulwp_replace_last = ulwp;
1033 			}
1034 		}
1035 		lmutex_unlock(&udp->link_lock);
1036 	}
1037 
1038 	if (departed != NULL)
1039 		*departed = found;
1040 	if (status != NULL)
1041 		*status = rval;
1042 	return (0);
1043 }
1044 
1045 #pragma weak thr_join = _thr_join
1046 int
1047 _thr_join(thread_t tid, thread_t *departed, void **status)
1048 {
1049 	int error = _thrp_join(tid, departed, status, 1);
1050 	return ((error == EINVAL)? ESRCH : error);
1051 }
1052 
1053 /*
1054  * pthread_join() differs from Solaris thr_join():
1055  * It does not return the departed thread's id
1056  * and hence does not have a "departed" argument.
1057  * It returns EINVAL if tid refers to a detached thread.
1058  */
1059 #pragma weak pthread_join = _pthread_join
1060 int
1061 _pthread_join(pthread_t tid, void **status)
1062 {
1063 	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
1064 }
1065 
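/*
 * POSIX usage sketch matching the semantics described above:
 * pthread_join() returns only the exit status, with no "departed"
 * argument.
 */
#include <pthread.h>
#include <stdio.h>

static int value = 42;

static void *
worker(void *arg)
{
	(void) arg;
	return (&value);		/* becomes the join status */
}

int
main(void)
{
	pthread_t tid;
	void *status;

	if (pthread_create(&tid, NULL, worker, NULL) != 0)
		return (1);
	(void) pthread_join(tid, &status);
	(void) printf("worker returned %d\n", *(int *)status);
	return (0);
}
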
1066 #pragma weak pthread_detach = _thr_detach
1067 #pragma weak _pthread_detach = _thr_detach
1068 int
1069 _thr_detach(thread_t tid)
1070 {
1071 	uberdata_t *udp = curthread->ul_uberdata;
1072 	ulwp_t *ulwp;
1073 	ulwp_t **ulwpp;
1074 	int error = 0;
1075 
1076 	if ((ulwpp = find_lwpp(tid)) == NULL)
1077 		return (ESRCH);
1078 	ulwp = *ulwpp;
1079 
1080 	if (ulwp->ul_dead) {
1081 		ulwp_unlock(ulwp, udp);
1082 		error = _thrp_join(tid, NULL, NULL, 0);
1083 	} else {
1084 		error = __lwp_detach(tid);
1085 		ulwp->ul_detached = 1;
1086 		ulwp->ul_usropts |= THR_DETACHED;
1087 		ulwp_unlock(ulwp, udp);
1088 	}
1089 	return (error);
1090 }
1091 
1092 /*
1093  * Static local string compare function to avoid calling strncmp()
1094  * (and hence the dynamic linker) during library initialization.
1095  */
1096 static int
1097 sncmp(const char *s1, const char *s2, size_t n)
1098 {
1099 	n++;
1100 	while (--n != 0 && *s1 == *s2++)
1101 		if (*s1++ == '\0')
1102 			return (0);
1103 	return (n == 0 ? 0 : *(uchar_t *)s1 - *(uchar_t *)--s2);
1104 }
1105 
1106 static const char *
1107 ematch(const char *ev, const char *match)
1108 {
1109 	int c;
1110 
1111 	while ((c = *match++) != '\0') {
1112 		if (*ev++ != c)
1113 			return (NULL);
1114 	}
1115 	if (*ev++ != '=')
1116 		return (NULL);
1117 	return (ev);
1118 }
1119 
1120 static int
1121 envvar(const char *ev, const char *match, int limit)
1122 {
1123 	int val = -1;
1124 	const char *ename;
1125 
1126 	if ((ename = ematch(ev, match)) != NULL) {
1127 		int c;
1128 		for (val = 0; (c = *ename) != '\0'; ename++) {
1129 			if (!isdigit(c)) {
1130 				val = -1;
1131 				break;
1132 			}
1133 			val = val * 10 + (c - '0');
1134 			if (val > limit) {
1135 				val = limit;
1136 				break;
1137 			}
1138 		}
1139 	}
1140 	return (val);
1141 }
1142 
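/*
 * Worked examples for envvar() (illustrative; the limit values are
 * those passed by etest() below):
 *
 *	envvar("QUEUE_SPIN=500", "QUEUE_SPIN", 1000000)     -> 500
 *	envvar("QUEUE_SPIN=9999999", "QUEUE_SPIN", 1000000) -> 1000000 (clamped)
 *	envvar("QUEUE_SPIN=5x", "QUEUE_SPIN", 1000000)      -> -1 (non-digit)
 *	envvar("QUEUE_SPINX=5", "QUEUE_SPIN", 1000000)      -> -1 (no '=' match)
 */
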
1143 static void
1144 etest(const char *ev)
1145 {
1146 	int value;
1147 
1148 	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
1149 		thread_queue_spin = value;
1150 	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0)
1151 		thread_adaptive_spin = value;
1152 	if ((value = envvar(ev, "MAX_SPINNERS", 255)) >= 0)
1153 		thread_max_spinners = value;
1154 	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
1155 		thread_queue_fifo = value;
1156 #if defined(THREAD_DEBUG)
1157 	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
1158 		thread_queue_verify = value;
1159 #endif
1160 	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
1161 		thread_queue_dump = value;
1162 	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
1163 		thread_stack_cache = value;
1164 	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
1165 		thread_cond_wait_defer = value;
1166 	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
1167 		thread_error_detection = value;
1168 	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
1169 		thread_async_safe = value;
1170 	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
1171 		thread_door_noreserve = value;
1172 }
1173 
1174 /*
1175  * Look for and evaluate environment variables of the form "_THREAD_*".
1176  * For compatibility with the past, we also look for environment
1177  * names of the form "LIBTHREAD_*".
1178  */
1179 static void
1180 set_thread_vars()
1181 {
1182 	extern const char **_environ;
1183 	const char **pev;
1184 	const char *ev;
1185 	char c;
1186 
1187 	if ((pev = _environ) == NULL)
1188 		return;
1189 	while ((ev = *pev++) != NULL) {
1190 		c = *ev;
1191 		if (c == '_' && sncmp(ev, "_THREAD_", 8) == 0)
1192 			etest(ev + 8);
1193 		if (c == 'L' && sncmp(ev, "LIBTHREAD_", 10) == 0)
1194 			etest(ev + 10);
1195 	}
1196 }
1197 
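/*
 * Example (illustrative): either spelling below causes etest() to be
 * invoked with "STACK_CACHE=40" at program startup:
 *
 *	$ _THREAD_STACK_CACHE=40 ./a.out
 *	$ LIBTHREAD_STACK_CACHE=40 ./a.out
 */
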
1198 /* PROBE_SUPPORT begin */
1199 #pragma weak __tnf_probe_notify
1200 extern void __tnf_probe_notify(void);
1201 /* PROBE_SUPPORT end */
1202 
1203 /* same as atexit() but private to the library */
1204 extern int _atexit(void (*)(void));
1205 
1206 /* same as _cleanup() but private to the library */
1207 extern void __cleanup(void);
1208 
1209 extern void atfork_init(void);
1210 
1211 #ifdef __amd64
1212 extern void __amd64id(void);
1213 #endif
1214 
1215 /*
1216  * libc_init() is called by ld.so.1 for library initialization.
1217  * We perform minimal initialization; enough to work with the main thread.
1218  */
1219 void
1220 libc_init(void)
1221 {
1222 	uberdata_t *udp = &__uberdata;
1223 	ulwp_t *oldself = __curthread();
1224 	ucontext_t uc;
1225 	ulwp_t *self;
1226 	struct rlimit rl;
1227 	caddr_t data;
1228 	size_t tls_size;
1229 	int setmask;
1230 
1231 	/*
1232 	 * For the initial stage of initialization, we must be careful
1233 	 * not to call any function that could possibly call _cerror().
1234 	 * For this purpose, we call only the raw system call wrappers.
1235 	 */
1236 
1237 #ifdef __amd64
1238 	/*
1239 	 * Gather information about cache layouts for optimized
1240 	 * AMD assembler strfoo() and memfoo() functions.
1241 	 */
1242 	__amd64id();
1243 #endif
1244 
1245 	/*
1246 	 * Every libc, regardless of which link map, must register __cleanup().
1247 	 */
1248 	(void) _atexit(__cleanup);
1249 
1250 	/*
1251 	 * We keep our uberdata on one of (a) the first alternate link map
1252 	 * or (b) the primary link map.  We switch to the primary link map
1253 	 * and stay there once we see it.  All intermediate link maps are
1254 	 * subject to being unloaded at any time.
1255 	 */
1256 	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
1257 		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
1258 		mutex_setup();
1259 		atfork_init();	/* every link map needs atfork() processing */
1260 		return;
1261 	}
1262 
1263 	/*
1264 	 * To establish the main stack information, we have to get our context.
1265 	 * This is also convenient to use for getting our signal mask.
1266 	 */
1267 	uc.uc_flags = UC_ALL;
1268 	(void) __getcontext_syscall(&uc);
1269 	ASSERT(uc.uc_link == NULL);
1270 
1271 	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
1272 	ASSERT(primary_link_map || tls_size == 0);
1273 	data = lmalloc(sizeof (ulwp_t) + tls_size);
1274 	if (data == NULL)
1275 		thr_panic("cannot allocate thread structure for main thread");
1276 	/* LINTED pointer cast may result in improper alignment */
1277 	self = (ulwp_t *)(data + tls_size);
1278 	init_hash_table[0].hash_bucket = self;
1279 
1280 	self->ul_sigmask = uc.uc_sigmask;
1281 	delete_reserved_signals(&self->ul_sigmask);
1282 	/*
1283 	 * Are the old and new sets different?
1284 	 * (This can happen if we are currently blocking SIGCANCEL.)
1285 	 * If so, we must explicitly set our signal mask, below.
1286 	 */
1287 	setmask =
1288 	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
1289 	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]));
1290 
1291 #ifdef __sparc
1292 	/*
1293 	 * We cache several instructions in the thread structure for use
1294 	 * by the fasttrap DTrace provider. When changing this, read the
1295 	 * comment in fasttrap.h for all the other places that must
1296 	 * be changed.
1297 	 */
1298 	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
1299 	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
1300 	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
1301 	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
1302 #endif
1303 
1304 	self->ul_stktop = (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
1305 	(void) _private_getrlimit(RLIMIT_STACK, &rl);
1306 	self->ul_stksiz = rl.rlim_cur;
1307 	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);
1308 
1309 	self->ul_forw = self->ul_back = self;
1310 	self->ul_hash = NULL;
1311 	self->ul_ix = 0;
1312 	self->ul_lwpid = 1; /* __lwp_self() */
1313 	self->ul_main = 1;
1314 	self->ul_self = self;
1315 	self->ul_uberdata = udp;
1316 	if (oldself != NULL) {
1317 		int i;
1318 
1319 		ASSERT(primary_link_map);
1320 		ASSERT(oldself->ul_main == 1);
1321 		self->ul_stsd = oldself->ul_stsd;
1322 		for (i = 0; i < TSD_NFAST; i++)
1323 			self->ul_ftsd[i] = oldself->ul_ftsd[i];
1324 		self->ul_tls = oldself->ul_tls;
1325 		/*
1326 		 * Retrieve all pointers to uberdata allocated
1327 		 * while running on previous link maps.
1328 		 * We would like to do a structure assignment here, but
1329 		 * gcc turns structure assignments into calls to memcpy(),
1330 		 * a function exported from libc.  We can't call any such
1331 		 * external functions until we establish curthread, below,
1332 		 * so we just call our private version of memcpy().
1333 		 */
1334 		(void) _private_memcpy(udp,
1335 		    oldself->ul_uberdata, sizeof (*udp));
1336 		/*
1337 		 * These items point to global data on the primary link map.
1338 		 */
1339 		udp->thr_hash_table = init_hash_table;
1340 		udp->sigacthandler = sigacthandler;
1341 		udp->tdb.tdb_events = tdb_events;
1342 		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
1343 		ASSERT(udp->lwp_stacks == NULL);
1344 		ASSERT(udp->ulwp_freelist == NULL);
1345 		ASSERT(udp->ulwp_replace_free == NULL);
1346 		ASSERT(udp->hash_size == 1);
1347 	}
1348 	udp->all_lwps = self;
1349 	udp->ulwp_one = self;
1350 	udp->pid = _private_getpid();
1351 	udp->nthreads = 1;
1352 	/*
1353 	 * In every link map, tdb_bootstrap points to the same piece of
1354 	 * allocated memory.  When the primary link map is initialized,
1355 	 * the allocated memory is assigned a pointer to the one true
1356 	 * uberdata.  This allows libc_db to initialize itself regardless
1357 	 * of which instance of libc it finds in the address space.
1358 	 */
1359 	if (udp->tdb_bootstrap == NULL)
1360 		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
1361 	__tdb_bootstrap = udp->tdb_bootstrap;
1362 	if (primary_link_map) {
1363 		self->ul_primarymap = 1;
1364 		udp->primary_map = 1;
1365 		*udp->tdb_bootstrap = udp;
1366 	}
1367 	/*
1368 	 * Cancellation can't happen until:
1369 	 *	pthread_cancel() is called
1370 	 * or:
1371 	 *	another thread is created
1372 	 * For now, as a single-threaded process, set the flag that tells
1373 	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
1374 	 */
1375 	self->ul_nocancel = 1;
1376 
1377 #if defined(__amd64)
1378 	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
1379 #elif defined(__i386)
1380 	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
1381 #endif	/* __i386 || __amd64 */
1382 	set_curthread(self);		/* redundant on i386 */
1383 	/*
1384 	 * Now curthread is established and it is safe to call any
1385 	 * function in libc except one that uses thread-local storage.
1386 	 */
1387 	self->ul_errnop = &errno;
1388 	if (oldself != NULL) {
1389 		/* tls_size was zero when oldself was allocated */
1390 		lfree(oldself, sizeof (ulwp_t));
1391 	}
1392 	mutex_setup();
1393 	atfork_init();
1394 	signal_init();
1395 
1396 	/*
1397 	 * If the stack is unlimited, we set the size to zero to disable
1398 	 * stack checking.
1399 	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
1400 	 */
1401 	if (self->ul_stksiz == RLIM_INFINITY) {
1402 		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
1403 		self->ul_ustack.ss_size = 0;
1404 	} else {
1405 		self->ul_ustack.ss_sp = self->ul_stk;
1406 		self->ul_ustack.ss_size = self->ul_stksiz;
1407 	}
1408 	self->ul_ustack.ss_flags = 0;
1409 	(void) _private_setustack(&self->ul_ustack);
1410 
1411 	/*
1412 	 * Get the variables that affect thread behavior from the environment.
1413 	 */
1414 	set_thread_vars();
1415 	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
1416 	udp->thread_stack_cache = thread_stack_cache;
1417 
1418 	/*
1419 	 * Make per-thread copies of global variables, for speed.
1420 	 */
1421 	self->ul_queue_fifo = (char)thread_queue_fifo;
1422 	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
1423 	self->ul_error_detection = (char)thread_error_detection;
1424 	self->ul_async_safe = (char)thread_async_safe;
1425 	self->ul_door_noreserve = (char)thread_door_noreserve;
1426 	self->ul_max_spinners = (uint8_t)thread_max_spinners;
1427 	self->ul_adaptive_spin = thread_adaptive_spin;
1428 	self->ul_queue_spin = thread_queue_spin;
1429 
1430 	/*
1431 	 * When we have initialized the primary link map, inform
1432 	 * the dynamic linker about our interface functions.
1433 	 */
1434 	if (self->ul_primarymap)
1435 		_ld_libc((void *)rtld_funcs);
1436 
1437 	/*
1438 	 * Defer signals until TLS constructors have been called.
1439 	 */
1440 	sigoff(self);
1441 	tls_setup();
1442 	sigon(self);
1443 	if (setmask)
1444 		(void) restore_signals(self);
1445 
1446 	/*
1447 	 * Make private copies of __xpg4 and __xpg6 so libc can test
1448 	 * them after this point without invoking the dynamic linker.
1449 	 */
1450 	libc__xpg4 = __xpg4;
1451 	libc__xpg6 = __xpg6;
1452 
1453 	/* PROBE_SUPPORT begin */
1454 	if (self->ul_primarymap && __tnf_probe_notify != NULL)
1455 		__tnf_probe_notify();
1456 	/* PROBE_SUPPORT end */
1457 
1458 	init_sigev_thread();
1459 	init_aio();
1460 
1461 	/*
1462 	 * We need to reset __threaded dynamically at runtime, so that a
1463 	 * reference bound to a definition of __threaded outside libc (not
1464 	 * initialized to 1, absent a copy relocation in a.out) still sees 1.
1465 	 */
1466 	__threaded = 1;
1467 }
1468 
1469 #pragma fini(libc_fini)
1470 void
1471 libc_fini()
1472 {
1473 	/*
1474 	 * If we are doing fini processing for the instance of libc
1475 	 * on the first alternate link map (this happens only when
1476 	 * the dynamic linker rejects a bad audit library), then clear
1477 	 * __curthread().  We abandon whatever memory was allocated by
1478 	 * lmalloc() while running on this alternate link-map but we
1479 	 * don't care (and can't find the memory in any case); we just
1480 	 * want to protect the application from this bad audit library.
1481 	 * No fini processing is done by libc in the normal case.
1482 	 */
1483 
1484 	uberdata_t *udp = curthread->ul_uberdata;
1485 
1486 	if (udp->primary_map == 0 && udp == &__uberdata)
1487 		set_curthread(NULL);
1488 }
1489 
1490 /*
1491  * finish_init is called when we are about to become multi-threaded,
1492  * that is, on the first call to thr_create().
1493  */
1494 void
1495 finish_init()
1496 {
1497 	ulwp_t *self = curthread;
1498 	uberdata_t *udp = self->ul_uberdata;
1499 	thr_hash_table_t *htp;
1500 	void *data;
1501 	int i;
1502 
1503 	/*
1504 	 * No locks needed here; we are single-threaded on the first call.
1505 	 * We can be called only after the primary link map has been set up.
1506 	 */
1507 	ASSERT(self->ul_primarymap);
1508 	ASSERT(self == udp->ulwp_one);
1509 	ASSERT(!udp->uberflags.uf_mt);
1510 	ASSERT(udp->hash_size == 1);
1511 
1512 	/*
1513 	 * First allocate the queue_head array if not already allocated.
1514 	 */
1515 	if (udp->queue_head == NULL)
1516 		queue_alloc();
1517 
1518 	/*
1519 	 * Now allocate the thread hash table.
1520 	 */
1521 	if ((data = _private_mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
1522 	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
1523 	    == MAP_FAILED)
1524 		thr_panic("cannot allocate thread hash table");
1525 
1526 	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
1527 	udp->hash_size = HASHTBLSZ;
1528 	udp->hash_mask = HASHTBLSZ - 1;
1529 
1530 	for (i = 0; i < HASHTBLSZ; i++, htp++) {
1531 		htp->hash_lock.mutex_flag = LOCK_INITED;
1532 		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
1533 		htp->hash_cond.cond_magic = COND_MAGIC;
1534 	}
1535 	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);
1536 
1537 	/*
1538 	 * Set up the SIGCANCEL handler for thread cancellation.
1539 	 */
1540 	setup_cancelsig(SIGCANCEL);
1541 
1542 	/*
1543 	 * Arrange to do special things on exit --
1544 	 * - collect queue statistics from all remaining active threads.
1545 	 * - grab assert_lock to ensure that assertion failures
1546 	 *   and a core dump take precedence over _exit().
1547 	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
1548 	 * (Functions are called in the reverse order of their registration.)
1549 	 */
1550 	(void) _atexit(dump_queue_statistics);
1551 	(void) _atexit(grab_assert_lock);
1552 	(void) _atexit(collect_queue_statistics);
1553 }
1554 
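/*
 * Standalone sketch of the registration-order property relied on
 * above: atexit() handlers run in the reverse order of registration,
 * so registering dump_queue_statistics() first makes it run last.
 */
#include <stdio.h>
#include <stdlib.h>

static void first(void)  { (void) puts("registered first, runs last"); }
static void second(void) { (void) puts("registered second, runs second"); }
static void third(void)  { (void) puts("registered third, runs first"); }

int
main(void)
{
	(void) atexit(first);
	(void) atexit(second);
	(void) atexit(third);
	return (0);	/* handlers now run: third, second, first */
}
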
1555 /*
1556  * Used only by postfork1_child(), below.
1557  */
1558 static void
1559 mark_dead_and_buried(ulwp_t *ulwp)
1560 {
1561 	ulwp->ul_dead = 1;
1562 	ulwp->ul_lwpid = (lwpid_t)(-1);
1563 	ulwp->ul_hash = NULL;
1564 	ulwp->ul_ix = -1;
1565 	ulwp->ul_schedctl = NULL;
1566 	ulwp->ul_schedctl_called = NULL;
1567 }
1568 
1569 /*
1570  * This is called from fork1() in the child.
1571  * Reset our data structures to reflect one lwp.
1572  */
1573 void
1574 postfork1_child()
1575 {
1576 	ulwp_t *self = curthread;
1577 	uberdata_t *udp = self->ul_uberdata;
1578 	mutex_t *mp;
1579 	ulwp_t *next;
1580 	ulwp_t *ulwp;
1581 	int i;
1582 
1583 	/* daemon threads shouldn't call fork1(), but oh well... */
1584 	self->ul_usropts &= ~THR_DAEMON;
1585 	udp->nthreads = 1;
1586 	udp->ndaemons = 0;
1587 	udp->uberflags.uf_mt = 0;
1588 	__libc_threaded = 0;
1589 	for (i = 0; i < udp->hash_size; i++)
1590 		udp->thr_hash_table[i].hash_bucket = NULL;
1591 	self->ul_lwpid = __lwp_self();
1592 	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);
1593 
1594 	/*
1595 	 * Some thread in the parent might have been suspended while
1596 	 * holding udp->callout_lock.  Reinitialize the child's copy.
1597 	 */
1598 	_private_mutex_init(&udp->callout_lock,
1599 	    USYNC_THREAD | LOCK_RECURSIVE, NULL);
1600 
1601 	/* no one in the child is on a sleep queue; reinitialize */
1602 	if (udp->queue_head) {
1603 		(void) _private_memset(udp->queue_head, 0,
1604 		    2 * QHASHSIZE * sizeof (queue_head_t));
1605 		for (i = 0; i < 2 * QHASHSIZE; i++) {
1606 			mp = &udp->queue_head[i].qh_lock;
1607 			mp->mutex_flag = LOCK_INITED;
1608 			mp->mutex_magic = MUTEX_MAGIC;
1609 		}
1610 	}
1611 
1612 	/*
1613 	 * All lwps except ourself are gone.  Mark them so.
1614 	 * First mark all of the lwps that have already been freed.
1615 	 * Then mark and free all of the active lwps except ourself.
1616 	 * Since we are single-threaded, no locks are required here.
1617 	 */
1618 	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
1619 		mark_dead_and_buried(ulwp);
1620 	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
1621 		mark_dead_and_buried(ulwp);
1622 	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
1623 		next = ulwp->ul_forw;
1624 		ulwp->ul_forw = ulwp->ul_back = NULL;
1625 		mark_dead_and_buried(ulwp);
1626 		tsd_free(ulwp);
1627 		tls_free(ulwp);
1628 		rwl_free(ulwp);
1629 		heldlock_free(ulwp);
1630 		ulwp_free(ulwp);
1631 	}
1632 	self->ul_forw = self->ul_back = udp->all_lwps = self;
1633 	if (self != udp->ulwp_one)
1634 		mark_dead_and_buried(udp->ulwp_one);
1635 	if ((ulwp = udp->all_zombies) != NULL) {
1636 		ASSERT(udp->nzombies != 0);
1637 		do {
1638 			next = ulwp->ul_forw;
1639 			ulwp->ul_forw = ulwp->ul_back = NULL;
1640 			mark_dead_and_buried(ulwp);
1641 			udp->nzombies--;
1642 			if (ulwp->ul_replace) {
1643 				ulwp->ul_next = NULL;
1644 				if (udp->ulwp_replace_free == NULL) {
1645 					udp->ulwp_replace_free =
1646 					    udp->ulwp_replace_last = ulwp;
1647 				} else {
1648 					udp->ulwp_replace_last->ul_next = ulwp;
1649 					udp->ulwp_replace_last = ulwp;
1650 				}
1651 			}
1652 		} while ((ulwp = next) != udp->all_zombies);
1653 		ASSERT(udp->nzombies == 0);
1654 		udp->all_zombies = NULL;
1655 		udp->nzombies = 0;
1656 	}
1657 	trim_stack_cache(0);
1658 
1659 	/*
1660 	 * Do post-fork1 processing for subsystems that need it.
1661 	 */
1662 	postfork1_child_tpool();
1663 	postfork1_child_sigev_aio();
1664 	postfork1_child_sigev_mq();
1665 	postfork1_child_sigev_timer();
1666 	postfork1_child_aio();
1667 }
1668 
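/*
 * Standalone sketch of the invariant postfork1_child() restores: after
 * fork(), only the calling thread exists in the child, so the child's
 * thread bookkeeping must collapse to a single lwp.
 */
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/wait.h>

static void *
parked(void *arg)
{
	for (;;)
		(void) pause();		/* exists only in the parent */
	return (arg);
}

int
main(void)
{
	pthread_t t;
	pid_t pid;

	(void) pthread_create(&t, NULL, parked, NULL);
	if ((pid = fork()) == 0) {
		/* 'parked' was never replicated into this process */
		(void) printf("child is single-threaded\n");
		_exit(0);
	}
	(void) waitpid(pid, NULL, 0);
	return (0);
}
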
1669 #pragma weak thr_setprio = _thr_setprio
1670 #pragma weak pthread_setschedprio = _thr_setprio
1671 #pragma weak _pthread_setschedprio = _thr_setprio
1672 int
1673 _thr_setprio(thread_t tid, int priority)
1674 {
1675 	struct sched_param param;
1676 
1677 	(void) _memset(&param, 0, sizeof (param));
1678 	param.sched_priority = priority;
1679 	return (_thread_setschedparam_main(tid, 0, &param, PRIO_SET_PRIO));
1680 }
1681 
1682 #pragma weak thr_getprio = _thr_getprio
1683 int
1684 _thr_getprio(thread_t tid, int *priority)
1685 {
1686 	uberdata_t *udp = curthread->ul_uberdata;
1687 	ulwp_t *ulwp;
1688 	int error = 0;
1689 
1690 	if ((ulwp = find_lwp(tid)) == NULL)
1691 		error = ESRCH;
1692 	else {
1693 		*priority = ulwp->ul_pri;
1694 		ulwp_unlock(ulwp, udp);
1695 	}
1696 	return (error);
1697 }
1698 
1699 lwpid_t
1700 lwp_self(void)
1701 {
1702 	return (curthread->ul_lwpid);
1703 }
1704 
1705 #pragma weak _ti_thr_self = _thr_self
1706 #pragma weak thr_self = _thr_self
1707 #pragma weak pthread_self = _thr_self
1708 #pragma weak _pthread_self = _thr_self
1709 thread_t
1710 _thr_self()
1711 {
1712 	return (curthread->ul_lwpid);
1713 }
1714 
1715 #pragma weak thr_main = _thr_main
1716 int
1717 _thr_main()
1718 {
1719 	ulwp_t *self = __curthread();
1720 
1721 	return ((self == NULL)? -1 : self->ul_main);
1722 }
1723 
1724 int
1725 _thrp_cancelled(void)
1726 {
1727 	return (curthread->ul_rval == PTHREAD_CANCELED);
1728 }
1729 
1730 int
1731 _thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
1732 {
1733 	stk->ss_sp = (void *)ulwp->ul_stktop;
1734 	stk->ss_size = ulwp->ul_stksiz;
1735 	stk->ss_flags = 0;
1736 	return (0);
1737 }
1738 
1739 #pragma weak thr_stksegment = _thr_stksegment
1740 int
1741 _thr_stksegment(stack_t *stk)
1742 {
1743 	return (_thrp_stksegment(curthread, stk));
1744 }
1745 
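/*
 * Usage sketch for the public thr_stksegment() entry point; note that
 * ss_sp is the upper limit (top) of the stack, per the code above.
 */
#include <thread.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	stack_t stk;

	if (thr_stksegment(&stk) == 0)
		(void) printf("stack top %p, size %zu\n",
		    stk.ss_sp, stk.ss_size);
	return (0);
}
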
1746 void
1747 force_continue(ulwp_t *ulwp)
1748 {
1749 #if defined(THREAD_DEBUG)
1750 	ulwp_t *self = curthread;
1751 	uberdata_t *udp = self->ul_uberdata;
1752 #endif
1753 	int error;
1754 	timespec_t ts;
1755 
1756 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
1757 	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
1758 
1759 	for (;;) {
1760 		error = __lwp_continue(ulwp->ul_lwpid);
1761 		if (error != 0 && error != EINTR)
1762 			break;
1763 		error = 0;
1764 		if (ulwp->ul_stopping) {	/* he is stopping himself */
1765 			ts.tv_sec = 0;		/* give him a chance to run */
1766 			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
1767 			(void) __nanosleep(&ts, NULL);
1768 		}
1769 		if (!ulwp->ul_stopping)		/* he is running now */
1770 			break;			/* so we are done */
1771 		/*
1772 		 * He is marked as being in the process of stopping
1773 		 * himself.  Loop around and continue him again.
1774 		 * He may not have been stopped the first time.
1775 		 */
1776 	}
1777 }
1778 
1779 /*
1780  * Suspend an lwp with lwp_suspend(), then move it to a safe
1781  * point, that is, to a point where ul_critical is zero.
1782  * On return, the ulwp_lock() is dropped as with ulwp_unlock().
1783  * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
1784  * If we have to drop link_lock, we store 1 through link_dropped.
1785  * If the lwp exits before it can be suspended, we return ESRCH.
1786  */
1787 int
1788 safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
1789 {
1790 	ulwp_t *self = curthread;
1791 	uberdata_t *udp = self->ul_uberdata;
1792 	cond_t *cvp = ulwp_condvar(ulwp, udp);
1793 	mutex_t *mp = ulwp_mutex(ulwp, udp);
1794 	thread_t tid = ulwp->ul_lwpid;
1795 	int ix = ulwp->ul_ix;
1796 	int error = 0;
1797 
1798 	ASSERT(whystopped == TSTP_REGULAR ||
1799 	    whystopped == TSTP_MUTATOR ||
1800 	    whystopped == TSTP_FORK);
1801 	ASSERT(ulwp != self);
1802 	ASSERT(!ulwp->ul_stop);
1803 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
1804 	ASSERT(MUTEX_OWNED(mp, self));
1805 
1806 	if (link_dropped != NULL)
1807 		*link_dropped = 0;
1808 
1809 	/*
1810 	 * We must grab the target's spin lock before suspending it.
1811 	 * See the comments below and in _thrp_suspend() for why.
1812 	 */
1813 	spin_lock_set(&ulwp->ul_spinlock);
1814 	(void) ___lwp_suspend(tid);
1815 	spin_lock_clear(&ulwp->ul_spinlock);
1816 
1817 top:
1818 	if (ulwp->ul_critical == 0 || ulwp->ul_stopping) {
1819 		/* thread is already safe */
1820 		ulwp->ul_stop |= whystopped;
1821 	} else {
1822 		/*
1823 		 * Setting ul_pleasestop causes the target thread to stop
1824 		 * itself in _thrp_suspend(), below, after we drop its lock.
1825 		 * We must continue the critical thread before dropping
1826 		 * link_lock because the critical thread may be holding
1827 		 * the queue lock for link_lock.  This is delicate.
1828 		 */
1829 		ulwp->ul_pleasestop |= whystopped;
1830 		force_continue(ulwp);
1831 		if (link_dropped != NULL) {
1832 			*link_dropped = 1;
1833 			lmutex_unlock(&udp->link_lock);
1834 			/* be sure to drop link_lock only once */
1835 			link_dropped = NULL;
1836 		}
1837 
1838 		/*
1839 		 * The thread may disappear by calling thr_exit() so we
1840 		 * cannot rely on the ulwp pointer after dropping the lock.
1841 		 * Instead, we search the hash table to find it again.
1842 		 * When we return, we may find that the thread has been
1843 		 * continued by some other thread.  The suspend/continue
1844 		 * interfaces are prone to such race conditions by design.
1845 		 */
1846 		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
1847 		    (ulwp->ul_pleasestop & whystopped)) {
1848 			(void) __cond_wait(cvp, mp);
1849 			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
1850 			    ulwp != NULL; ulwp = ulwp->ul_hash) {
1851 				if (ulwp->ul_lwpid == tid)
1852 					break;
1853 			}
1854 		}
1855 
1856 		if (ulwp == NULL || ulwp->ul_dead)
1857 			error = ESRCH;
1858 		else {
1859 			/*
1860 			 * Do another lwp_suspend() to make sure we don't
1861 			 * return until the target thread is fully stopped
1862 			 * in the kernel.  Don't apply lwp_suspend() until
1863 			 * we know that the target is not holding any
1864 			 * queue locks, that is, that it has completed
1865 			 * ulwp_unlock(self) and has, or at least is
1866 			 * about to, call lwp_suspend() on itself.  We do
1867 			 * this by grabbing the target's spin lock.
1868 			 */
1869 			ASSERT(ulwp->ul_lwpid == tid);
1870 			spin_lock_set(&ulwp->ul_spinlock);
1871 			(void) ___lwp_suspend(tid);
1872 			spin_lock_clear(&ulwp->ul_spinlock);
1873 			/*
1874 			 * If some other thread did a thr_continue()
1875 			 * on the target thread we have to start over.
1876 			 */
1877 			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
1878 				goto top;
1879 		}
1880 	}
1881 
1882 	(void) cond_broadcast_internal(cvp);
1883 	lmutex_unlock(mp);
1884 	return (error);
1885 }
1886 
1887 int
1888 _thrp_suspend(thread_t tid, uchar_t whystopped)
1889 {
1890 	ulwp_t *self = curthread;
1891 	uberdata_t *udp = self->ul_uberdata;
1892 	ulwp_t *ulwp;
1893 	int error = 0;
1894 
1895 	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
1896 	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);
1897 
1898 	/*
1899 	 * We can't suspend anyone except ourself while
1900 	 * some other thread is performing a fork.
1901 	 * This also allows only one suspension at a time.
1902 	 */
1903 	if (tid != self->ul_lwpid)
1904 		fork_lock_enter();
1905 
1906 	if ((ulwp = find_lwp(tid)) == NULL)
1907 		error = ESRCH;
1908 	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
1909 		ulwp_unlock(ulwp, udp);
1910 		error = EINVAL;
1911 	} else if (ulwp->ul_stop) {	/* already stopped */
1912 		ulwp->ul_stop |= whystopped;
1913 		ulwp_broadcast(ulwp);
1914 		ulwp_unlock(ulwp, udp);
1915 	} else if (ulwp != self) {
1916 		/*
1917 		 * After suspending the other thread, move it out of a
1918 		 * critical section and deal with the schedctl mappings.
1919 		 * safe_suspend() suspends the other thread, calls
1920 		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
1921 		 */
1922 		error = safe_suspend(ulwp, whystopped, NULL);
1923 	} else {
1924 		int schedctl_after_fork = 0;
1925 
1926 		/*
1927 		 * We are suspending ourself.  We must not take a signal
1928 		 * until we return from lwp_suspend() and clear ul_stopping.
1929 		 * This is to guard against siglongjmp().
1930 		 */
1931 		enter_critical(self);
1932 		self->ul_sp = stkptr();
1933 		_flush_windows();	/* sparc */
1934 		self->ul_pleasestop = 0;
1935 		self->ul_stop |= whystopped;
1936 		/*
1937 		 * Grab our spin lock before dropping ulwp_mutex(self).
1938 		 * This prevents the suspending thread from applying
1939 		 * lwp_suspend() to us before we emerge from
1940 		 * lmutex_unlock(mp) and have dropped mp's queue lock.
1941 		 */
1942 		spin_lock_set(&self->ul_spinlock);
1943 		self->ul_stopping = 1;
1944 		ulwp_broadcast(self);
1945 		ulwp_unlock(self, udp);
1946 		/*
1947 		 * From this point until we return from lwp_suspend(),
1948 		 * we must not call any function that might invoke the
1949 		 * dynamic linker, that is, we can only call functions
1950 		 * private to the library.
1951 		 *
1952 		 * Also, this is a nasty race condition for a process
1953 		 * that is undergoing a forkall() operation:
1954 		 * Once we clear our spinlock (below), we are vulnerable
1955 		 * to being suspended by the forkall() thread before
1956 		 * we manage to suspend ourself in ___lwp_suspend().
1957 		 * See safe_suspend() and force_continue().
1958 		 *
1959 		 * To avoid a SIGSEGV due to the disappearance
1960 		 * of the schedctl mappings in the child process,
1961 		 * which can happen in spin_lock_clear() if we
1962 		 * are suspended while we are in the middle of
1963 		 * its call to preempt(), we preemptively clear
1964 		 * our own schedctl pointer before dropping our
1965 		 * spinlock.  We reinstate it, in both the parent
1966 		 * and (if this really is a forkall()) the child.
1967 		 */
1968 		if (whystopped & TSTP_FORK) {
1969 			schedctl_after_fork = 1;
1970 			self->ul_schedctl = NULL;
1971 			self->ul_schedctl_called = &udp->uberflags;
1972 		}
1973 		spin_lock_clear(&self->ul_spinlock);
1974 		(void) ___lwp_suspend(tid);
1975 		/*
1976 		 * Somebody else continued us.
1977 		 * We can't grab ulwp_lock(self)
1978 		 * until after clearing ul_stopping.
1979 		 * force_continue() relies on this.
1980 		 */
1981 		self->ul_stopping = 0;
1982 		self->ul_sp = 0;
1983 		if (schedctl_after_fork) {
1984 			self->ul_schedctl_called = NULL;
1985 			self->ul_schedctl = NULL;
1986 			(void) setup_schedctl();
1987 		}
1988 		ulwp_lock(self, udp);
1989 		ulwp_broadcast(self);
1990 		ulwp_unlock(self, udp);
1991 		exit_critical(self);
1992 	}
1993 
1994 	if (tid != self->ul_lwpid)
1995 		fork_lock_exit();
1996 
1997 	return (error);
1998 }
1999 
2000 /*
2001  * Suspend all lwps other than ourself in preparation for fork.
2002  */
2003 void
2004 suspend_fork()
2005 {
2006 	ulwp_t *self = curthread;
2007 	uberdata_t *udp = self->ul_uberdata;
2008 	ulwp_t *ulwp;
2009 	int link_dropped;
2010 
2011 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
2012 top:
2013 	lmutex_lock(&udp->link_lock);
2014 
2015 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2016 		ulwp_lock(ulwp, udp);
2017 		if (ulwp->ul_stop) {	/* already stopped */
2018 			ulwp->ul_stop |= TSTP_FORK;
2019 			ulwp_broadcast(ulwp);
2020 			ulwp_unlock(ulwp, udp);
2021 		} else {
2022 			/*
2023 			 * Move the stopped lwp out of a critical section.
2024 			 */
2025 			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
2026 			    link_dropped)
2027 				goto top;
2028 		}
2029 	}
2030 
2031 	lmutex_unlock(&udp->link_lock);
2032 }
2033 
2034 void
2035 continue_fork(int child)
2036 {
2037 	ulwp_t *self = curthread;
2038 	uberdata_t *udp = self->ul_uberdata;
2039 	ulwp_t *ulwp;
2040 
2041 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
2042 
2043 	/*
2044 	 * Clear the schedctl pointers in the child of forkall().
2045 	 */
2046 	if (child) {
2047 		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2048 			ulwp->ul_schedctl_called =
2049 			    ulwp->ul_dead? &udp->uberflags : NULL;
2050 			ulwp->ul_schedctl = NULL;
2051 		}
2052 	}
2053 
2054 	/*
2055 	 * Set all lwps that were stopped for fork() running again.
2056 	 */
2057 	lmutex_lock(&udp->link_lock);
2058 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2059 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2060 		lmutex_lock(mp);
2061 		ASSERT(ulwp->ul_stop & TSTP_FORK);
2062 		ulwp->ul_stop &= ~TSTP_FORK;
2063 		ulwp_broadcast(ulwp);
2064 		if (!ulwp->ul_stop)
2065 			force_continue(ulwp);
2066 		lmutex_unlock(mp);
2067 	}
2068 	lmutex_unlock(&udp->link_lock);
2069 }
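
/*
 * Sketch of the intended calling sequence for forkall() (illustrative;
 * the real fork code lives elsewhere in libc, and the system call
 * wrapper named below is an assumption for this example):
 *
 *	fork_lock_enter();
 *	suspend_fork();			stop all other lwps safely
 *	pid = __forkallx(0);		duplicate the whole process
 *	continue_fork(pid == 0);	restart lwps; the child also
 *					rebuilds its schedctl state
 *	fork_lock_exit();
 */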
2070 
2071 int
2072 _thrp_continue(thread_t tid, uchar_t whystopped)
2073 {
2074 	uberdata_t *udp = curthread->ul_uberdata;
2075 	ulwp_t *ulwp;
2076 	mutex_t *mp;
2077 	int error = 0;
2078 
2079 	ASSERT(whystopped == TSTP_REGULAR ||
2080 	    whystopped == TSTP_MUTATOR);
2081 
2082 	/*
2083 	 * We single-thread the entire thread suspend/continue mechanism.
2084 	 */
2085 	fork_lock_enter();
2086 
2087 	if ((ulwp = find_lwp(tid)) == NULL) {
2088 		fork_lock_exit();
2089 		return (ESRCH);
2090 	}
2091 
2092 	mp = ulwp_mutex(ulwp, udp);
2093 	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
2094 		error = EINVAL;
2095 	} else if (ulwp->ul_stop & whystopped) {
2096 		ulwp->ul_stop &= ~whystopped;
2097 		ulwp_broadcast(ulwp);
2098 		if (!ulwp->ul_stop) {
2099 			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
2100 				ulwp->ul_sp = 0;
2101 				ulwp->ul_created = 0;
2102 			}
2103 			force_continue(ulwp);
2104 		}
2105 	}
2106 	lmutex_unlock(mp);
2107 
2108 	fork_lock_exit();
2109 	return (error);
2110 }
2111 
2112 #pragma weak thr_suspend = _thr_suspend
2113 int
2114 _thr_suspend(thread_t tid)
2115 {
2116 	return (_thrp_suspend(tid, TSTP_REGULAR));
2117 }
2118 
2119 #pragma weak thr_continue = _thr_continue
2120 int
2121 _thr_continue(thread_t tid)
2122 {
2123 	return (_thrp_continue(tid, TSTP_REGULAR));
2124 }
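
/*
 * Usage sketch (illustrative): a thread may suspend itself and rely
 * on a partner to restart it; thr_suspend() on the caller's own tid
 * does not return until some other thread calls thr_continue():
 *
 *	thread_t me = thr_self();
 *
 *	(void) thr_suspend(me);		blocks until continued
 *
 * and, in another thread:
 *
 *	(void) thr_continue(me);
 */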
2125 
2126 #pragma weak thr_yield = _thr_yield
2127 void
2128 _thr_yield()
2129 {
2130 	lwp_yield();
2131 }
2132 
2133 #pragma weak thr_kill = _thr_kill
2134 #pragma weak pthread_kill = _thr_kill
2135 #pragma weak _pthread_kill = _thr_kill
2136 int
2137 _thr_kill(thread_t tid, int sig)
2138 {
2139 	if (sig == SIGCANCEL)
2140 		return (EINVAL);
2141 	return (__lwp_kill(tid, sig));
2142 }
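
/*
 * Usage sketch (illustrative): directing a signal at a single thread.
 * SIGCANCEL is reserved for pthread cancellation, which is why the
 * code above rejects it with EINVAL:
 *
 *	int err = thr_kill(tid, SIGUSR1);
 *
 *	if (err == ESRCH)
 *		... tid does not name a thread in this process ...
 */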
2143 
2144 /*
2145  * Exit a critical section, taking deferred actions if necessary.
2146  */
2147 void
2148 do_exit_critical()
2149 {
2150 	ulwp_t *self = curthread;
2151 	int sig;
2152 
2153 	ASSERT(self->ul_critical == 0);
2154 	if (self->ul_dead)
2155 		return;
2156 
2157 	while (self->ul_pleasestop ||
2158 	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
2159 		/*
2160 		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
2161 		 * by keeping self->ul_critical == 1 here.
2162 		 */
2163 		self->ul_critical++;
2164 		while (self->ul_pleasestop) {
2165 			/*
2166 			 * Guard against suspending ourself while on a sleep
2167 			 * queue.  See the comments in call_user_handler().
2168 			 */
2169 			unsleep_self();
2170 			set_parking_flag(self, 0);
2171 			(void) _thrp_suspend(self->ul_lwpid,
2172 			    self->ul_pleasestop);
2173 		}
2174 		self->ul_critical--;
2175 
2176 		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
2177 			/*
2178 			 * Clear ul_cursig before proceeding.
2179 			 * This protects us from the dynamic linker's
2180 			 * calls to bind_guard()/bind_clear() in the
2181 			 * event that it is invoked to resolve a symbol
2182 			 * like take_deferred_signal() below.
2183 			 */
2184 			self->ul_cursig = 0;
2185 			take_deferred_signal(sig);
2186 			ASSERT(self->ul_cursig == 0);
2187 		}
2188 	}
2189 	ASSERT(self->ul_critical == 0);
2190 }
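
/*
 * Sketch of the pattern that pairs with do_exit_critical()
 * (illustrative): code that must not be suspended or take a signal
 * brackets itself with enter_critical()/exit_critical(); when the
 * outermost exit_critical() drops ul_critical to zero, it funnels
 * into do_exit_critical() to honor any stop request or deferred
 * signal that arrived in the meantime:
 *
 *	ulwp_t *self = curthread;
 *
 *	enter_critical(self);
 *	... state that must not be seen half-changed by a suspender ...
 *	exit_critical(self);
 */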
2191 
2192 /*
2193  * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic linker
2194  * (ld.so.1) when it has to do something, like resolve a symbol to be called
2195  * by the application or one of its libraries.  _ti_bind_guard() is called
2196  * on entry to ld.so.1, _ti_bind_clear() on exit from ld.so.1 back to the
2197  * application.  The dynamic linker gets special dispensation from libc to
2198  * run in a critical region (all signals deferred and no thread suspension
2199  * or forking allowed), and to be immune from cancellation for the duration.
2200  */
2201 int
2202 _ti_bind_guard(int bindflag)
2203 {
2204 	ulwp_t *self = curthread;
2205 
2206 	if ((self->ul_bindflags & bindflag) == bindflag)
2207 		return (0);
2208 	enter_critical(self);
2209 	self->ul_save_state = self->ul_cancel_disabled;
2210 	self->ul_cancel_disabled = 1;
2211 	set_cancel_pending_flag(self, 0);
2212 	self->ul_bindflags |= bindflag;
2213 	return (1);
2214 }
2215 
2216 int
2217 _ti_bind_clear(int bindflag)
2218 {
2219 	ulwp_t *self = curthread;
2220 
2221 	if ((self->ul_bindflags & bindflag) == 0)
2222 		return (self->ul_bindflags);
2223 	self->ul_bindflags &= ~bindflag;
2224 	self->ul_cancel_disabled = self->ul_save_state;
2225 	set_cancel_pending_flag(self, 0);
2226 	exit_critical(self);
2227 	return (self->ul_bindflags);
2228 }
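
/*
 * Sketch of how ld.so.1 is expected to use this pair (illustrative;
 * the actual rtld code is elsewhere): the guard is cleared only by
 * the entry that set it, so recursive entries into ld.so.1 nest:
 *
 *	int entered = _ti_bind_guard(flag);
 *	... resolve the symbol ...
 *	if (entered)
 *		(void) _ti_bind_clear(flag);
 */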
2229 
2230 /*
2231  * sigoff() and sigon() enable cond_wait() to behave (optionally) like
2232  * it does in the old libthread (see the comments in cond_wait_queue()).
2233  * Also, signals are deferred at thread startup until TLS constructors
2234  * have all been called, at which time _thr_setup() calls sigon().
2235  *
2236  * _sigoff() and _sigon() are external consolidation-private interfaces to
2237  * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
2238  * Also, _sigoff() and _sigon() are called from dbx's run-time checking
2239  * (librtc.so) to defer signals during its critical sections (not to be
2240  * confused with libc critical sections [see exit_critical() above]).
2241  */
2242 void
2243 _sigoff(void)
2244 {
2245 	sigoff(curthread);
2246 }
2247 
2248 void
2249 _sigon(void)
2250 {
2251 	sigon(curthread);
2252 }
2253 
2254 void
2255 sigon(ulwp_t *self)
2256 {
2257 	int sig;
2258 
2259 	ASSERT(self->ul_sigdefer > 0);
2260 	if (--self->ul_sigdefer == 0) {
2261 		if ((sig = self->ul_cursig) != 0 && self->ul_critical == 0) {
2262 			self->ul_cursig = 0;
2263 			take_deferred_signal(sig);
2264 			ASSERT(self->ul_cursig == 0);
2265 		}
2266 	}
2267 }
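
/*
 * Usage sketch (illustrative): deferring signals across a short
 * region.  The calls nest via ul_sigdefer, and any signal that
 * arrived while deferred is taken by the final sigon():
 *
 *	sigoff(curthread);
 *	... region that must not run a signal handler ...
 *	sigon(curthread);
 */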
2268 
2269 #pragma weak thr_getconcurrency = _thr_getconcurrency
2270 int
2271 _thr_getconcurrency()
2272 {
2273 	return (thr_concurrency);
2274 }
2275 
2276 #pragma weak pthread_getconcurrency = _pthread_getconcurrency
2277 int
2278 _pthread_getconcurrency()
2279 {
2280 	return (pthread_concurrency);
2281 }
2282 
2283 #pragma weak thr_setconcurrency = _thr_setconcurrency
2284 int
2285 _thr_setconcurrency(int new_level)
2286 {
2287 	uberdata_t *udp = curthread->ul_uberdata;
2288 
2289 	if (new_level < 0)
2290 		return (EINVAL);
2291 	if (new_level > 65536)		/* 65536 is totally arbitrary */
2292 		return (EAGAIN);
2293 	lmutex_lock(&udp->link_lock);
2294 	if (new_level > thr_concurrency)
2295 		thr_concurrency = new_level;
2296 	lmutex_unlock(&udp->link_lock);
2297 	return (0);
2298 }
2299 
2300 #pragma weak pthread_setconcurrency = _pthread_setconcurrency
2301 int
2302 _pthread_setconcurrency(int new_level)
2303 {
2304 	if (new_level < 0)
2305 		return (EINVAL);
2306 	if (new_level > 65536)		/* 65536 is totally arbitrary */
2307 		return (EAGAIN);
2308 	pthread_concurrency = new_level;
2309 	return (0);
2310 }
2311 
2312 #pragma weak thr_min_stack = _thr_min_stack
2313 #pragma weak __pthread_min_stack = _thr_min_stack
2314 size_t
2315 _thr_min_stack(void)
2316 {
2317 	return (MINSTACK);
2318 }
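
/*
 * Usage sketch (illustrative): thr_min_stack() is a floor, not a
 * recommendation; a caller adds its own requirements on top of it
 * (MY_STACK_NEEDS below is a hypothetical application constant):
 *
 *	pthread_attr_t attr;
 *	size_t sz = thr_min_stack() + MY_STACK_NEEDS;
 *
 *	(void) pthread_attr_init(&attr);
 *	(void) pthread_attr_setstacksize(&attr, sz);
 */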
2319 
2320 int
2321 __nthreads(void)
2322 {
2323 	return (curthread->ul_uberdata->nthreads);
2324 }
2325 
2326 /*
2327  * XXX
2328  * The remainder of this file implements the private interfaces to java for
2329  * garbage collection.  It is no longer used, at least by java 1.2.
2330  * It can all go away once all old JVMs have disappeared.
2331  */
2332 
2333 int	suspendingallmutators;	/* when non-zero, suspending all mutators. */
2334 int	suspendedallmutators;	/* when non-zero, all mutators suspended. */
2335 int	mutatorsbarrier;	/* when non-zero, mutators barrier imposed. */
2336 mutex_t	mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
2337 cond_t	mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */
2338 
2339 /*
2340  * Get the available register state for the target thread.
2341  * Return non-volatile registers: TRS_NONVOLATILE
2342  */
2343 #pragma weak thr_getstate = _thr_getstate
2344 int
2345 _thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
2346 {
2347 	ulwp_t *self = curthread;
2348 	uberdata_t *udp = self->ul_uberdata;
2349 	ulwp_t **ulwpp;
2350 	ulwp_t *ulwp;
2351 	int error = 0;
2352 	int trs_flag = TRS_LWPID;
2353 
2354 	if (tid == 0 || self->ul_lwpid == tid) {
2355 		ulwp = self;
2356 		ulwp_lock(ulwp, udp);
2357 	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
2358 		ulwp = *ulwpp;
2359 	} else {
2360 		if (flag)
2361 			*flag = TRS_INVALID;
2362 		return (ESRCH);
2363 	}
2364 
2365 	if (ulwp->ul_dead) {
2366 		trs_flag = TRS_INVALID;
2367 	} else if (!ulwp->ul_stop && !suspendedallmutators) {
2368 		error = EINVAL;
2369 		trs_flag = TRS_INVALID;
2370 	} else if (ulwp->ul_stop) {
2371 		trs_flag = TRS_NONVOLATILE;
2372 		getgregs(ulwp, rs);
2373 	}
2374 
2375 	if (flag)
2376 		*flag = trs_flag;
2377 	if (lwp)
2378 		*lwp = tid;
2379 	if (ss != NULL)
2380 		(void) _thrp_stksegment(ulwp, ss);
2381 
2382 	ulwp_unlock(ulwp, udp);
2383 	return (error);
2384 }
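
/*
 * Usage sketch (illustrative): a collector that has suspended all
 * mutators reads each one's saved state to scan for roots:
 *
 *	gregset_t regs;
 *	stack_t ss;
 *	int flag;
 *
 *	if (thr_getstate(tid, &flag, NULL, &ss, regs) == 0 &&
 *	    flag == TRS_NONVOLATILE) {
 *		... scan regs and the stack segment ss ...
 *	}
 */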
2385 
2386 /*
2387  * Set the appropriate register state for the target thread.
2388  * This is not used by java.  It exists solely for the MSTC test suite.
2389  */
2390 #pragma weak thr_setstate = _thr_setstate
2391 int
2392 _thr_setstate(thread_t tid, int flag, gregset_t rs)
2393 {
2394 	uberdata_t *udp = curthread->ul_uberdata;
2395 	ulwp_t *ulwp;
2396 	int error = 0;
2397 
2398 	if ((ulwp = find_lwp(tid)) == NULL)
2399 		return (ESRCH);
2400 
2401 	if (!ulwp->ul_stop && !suspendedallmutators)
2402 		error = EINVAL;
2403 	else if (rs != NULL) {
2404 		switch (flag) {
2405 		case TRS_NONVOLATILE:
2406 			/* do /proc stuff here? */
2407 			if (ulwp->ul_stop)
2408 				setgregs(ulwp, rs);
2409 			else
2410 				error = EINVAL;
2411 			break;
2412 		case TRS_LWPID:		/* do /proc stuff here? */
2413 		default:
2414 			error = EINVAL;
2415 			break;
2416 		}
2417 	}
2418 
2419 	ulwp_unlock(ulwp, udp);
2420 	return (error);
2421 }
2422 
2423 int
2424 getlwpstatus(thread_t tid, struct lwpstatus *sp)
2425 {
2426 	extern ssize_t __pread(int, void *, size_t, off_t);
2427 	char buf[100];
2428 	int fd;
2429 
2430 	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
2431 	(void) strcpy(buf, "/proc/self/lwp/");
2432 	ultos((uint64_t)tid, 10, buf + strlen(buf));
2433 	(void) strcat(buf, "/lwpstatus");
2434 	if ((fd = _private_open(buf, O_RDONLY, 0)) >= 0) {
2435 		while (__pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
2436 			if (sp->pr_flags & PR_STOPPED) {
2437 				(void) _private_close(fd);
2438 				return (0);
2439 			}
2440 			lwp_yield();	/* give him a chance to stop */
2441 		}
2442 		(void) _private_close(fd);
2443 	}
2444 	return (-1);
2445 }
2446 
2447 int
2448 putlwpregs(thread_t tid, prgregset_t prp)
2449 {
2450 	extern ssize_t __writev(int, const struct iovec *, int);
2451 	char buf[100];
2452 	int fd;
2453 	long dstop_sreg[2];
2454 	long run_null[2];
2455 	iovec_t iov[3];
2456 
2457 	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
2458 	(void) strcpy(buf, "/proc/self/lwp/");
2459 	ultos((uint64_t)tid, 10, buf + strlen(buf));
2460 	(void) strcat(buf, "/lwpctl");
2461 	if ((fd = _private_open(buf, O_WRONLY, 0)) >= 0) {
2462 		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
2463 		dstop_sreg[1] = PCSREG;		/* set the registers */
2464 		iov[0].iov_base = (caddr_t)dstop_sreg;
2465 		iov[0].iov_len = sizeof (dstop_sreg);
2466 		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
2467 		iov[1].iov_len = sizeof (prgregset_t);
2468 		run_null[0] = PCRUN;		/* make it runnable again */
2469 		run_null[1] = 0;
2470 		iov[2].iov_base = (caddr_t)run_null;
2471 		iov[2].iov_len = sizeof (run_null);
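		/*
		 * Batch the directed stop, the register write, and
		 * the run directive into one writev(); /proc executes
		 * the concatenated control messages in order, so a
		 * single system call performs all three steps.
		 */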
2472 		if (__writev(fd, iov, 3) >= 0) {
2473 			(void) _private_close(fd);
2474 			return (0);
2475 		}
2476 		(void) _private_close(fd);
2477 	}
2478 	return (-1);
2479 }
2480 
2481 static ulong_t
2482 gettsp_slow(thread_t tid)
2483 {
2484 	char buf[100];
2485 	struct lwpstatus status;
2486 
2487 	if (getlwpstatus(tid, &status) != 0) {
2488 		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
2489 		(void) strcpy(buf, "__gettsp(");
2490 		ultos((uint64_t)tid, 10, buf + strlen(buf));
2491 		(void) strcat(buf, "): can't read lwpstatus");
2492 		thr_panic(buf);
2493 	}
2494 	return (status.pr_reg[R_SP]);
2495 }
2496 
2497 ulong_t
2498 __gettsp(thread_t tid)
2499 {
2500 	uberdata_t *udp = curthread->ul_uberdata;
2501 	ulwp_t *ulwp;
2502 	ulong_t result;
2503 
2504 	if ((ulwp = find_lwp(tid)) == NULL)
2505 		return (0);
2506 
2507 	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
2508 		ulwp_unlock(ulwp, udp);
2509 		return (result);
2510 	}
2511 
2512 	result = gettsp_slow(tid);
2513 	ulwp_unlock(ulwp, udp);
2514 	return (result);
2515 }
2516 
2517 /*
2518  * This tells java stack walkers how to find the ucontext
2519  * structure passed to signal handlers.
2520  */
2521 #pragma weak thr_sighndlrinfo = _thr_sighndlrinfo
2522 void
2523 _thr_sighndlrinfo(void (**func)(), int *funcsize)
2524 {
2525 	*func = &__sighndlr;
2526 	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
2527 }
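
/*
 * Usage sketch (illustrative): a stack walker uses the returned
 * range to recognize frames built by the signal handler trampoline;
 * exactly where the ucontext pointer lives in such a frame is
 * platform-specific:
 *
 *	void (*func)();
 *	int size;
 *
 *	thr_sighndlrinfo(&func, &size);
 *	if (pc >= (uintptr_t)func && pc < (uintptr_t)func + size)
 *		... pc is inside __sighndlr; recover the ucontext
 *		    from this frame ...
 */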
2528 
2529 /*
2530  * Mark a thread a mutator or reset a mutator to being a default,
2531  * non-mutator thread.
2532  */
2533 #pragma weak thr_setmutator = _thr_setmutator
2534 int
2535 _thr_setmutator(thread_t tid, int enabled)
2536 {
2537 	ulwp_t *self = curthread;
2538 	uberdata_t *udp = self->ul_uberdata;
2539 	ulwp_t *ulwp;
2540 	int error;
2541 	int cancel_state;
2542 
2543 	enabled = enabled? 1 : 0;
2544 top:
2545 	if (tid == 0) {
2546 		ulwp = self;
2547 		ulwp_lock(ulwp, udp);
2548 	} else if ((ulwp = find_lwp(tid)) == NULL) {
2549 		return (ESRCH);
2550 	}
2551 
2552 	/*
2553 	 * The target thread should be the caller itself or a suspended thread.
2554 	 * This prevents the target from also changing its ul_mutator field.
2555 	 */
2556 	error = 0;
2557 	if (ulwp != self && !ulwp->ul_stop && enabled)
2558 		error = EINVAL;
2559 	else if (ulwp->ul_mutator != enabled) {
2560 		lmutex_lock(&mutatorslock);
2561 		if (mutatorsbarrier) {
2562 			ulwp_unlock(ulwp, udp);
2563 			(void) _pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
2564 			    &cancel_state);
2565 			while (mutatorsbarrier)
2566 				(void) _cond_wait(&mutatorscv, &mutatorslock);
2567 			(void) _pthread_setcancelstate(cancel_state, NULL);
2568 			lmutex_unlock(&mutatorslock);
2569 			goto top;
2570 		}
2571 		ulwp->ul_mutator = enabled;
2572 		lmutex_unlock(&mutatorslock);
2573 	}
2574 
2575 	ulwp_unlock(ulwp, udp);
2576 	return (error);
2577 }
2578 
2579 /*
2580  * Establish a barrier against new mutators.  Any non-mutator trying
2581  * to become a mutator is suspended until the barrier is removed.
2582  */
2583 #pragma weak thr_mutators_barrier = _thr_mutators_barrier
2584 void
2585 _thr_mutators_barrier(int enabled)
2586 {
2587 	int oldvalue;
2588 	int cancel_state;
2589 
2590 	lmutex_lock(&mutatorslock);
2591 
2592 	/*
2593 	 * Wait if trying to set the barrier while it is already set.
2594 	 */
2595 	(void) _pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
2596 	while (mutatorsbarrier && enabled)
2597 		(void) _cond_wait(&mutatorscv, &mutatorslock);
2598 	(void) _pthread_setcancelstate(cancel_state, NULL);
2599 
2600 	oldvalue = mutatorsbarrier;
2601 	mutatorsbarrier = enabled;
2602 	/*
2603 	 * Wake up any blocked non-mutators when the barrier is removed.
2604 	 */
2605 	if (oldvalue && !enabled)
2606 		(void) cond_broadcast_internal(&mutatorscv);
2607 	lmutex_unlock(&mutatorslock);
2608 }
2609 
2610 /*
2611  * Suspend the set of all mutators except for the caller.  The list
2612  * of actively running threads is searched and only the mutators
2613  * in this list are suspended.  Actively running non-mutators remain
2614  * running.  Any other thread is suspended.
2615  */
2616 #pragma weak thr_suspend_allmutators = _thr_suspend_allmutators
2617 int
2618 _thr_suspend_allmutators(void)
2619 {
2620 	ulwp_t *self = curthread;
2621 	uberdata_t *udp = self->ul_uberdata;
2622 	ulwp_t *ulwp;
2623 	int link_dropped;
2624 
2625 	/*
2626 	 * We single-thread the entire thread suspend/continue mechanism.
2627 	 */
2628 	fork_lock_enter();
2629 
2630 top:
2631 	lmutex_lock(&udp->link_lock);
2632 
2633 	if (suspendingallmutators || suspendedallmutators) {
2634 		lmutex_unlock(&udp->link_lock);
2635 		fork_lock_exit();
2636 		return (EINVAL);
2637 	}
2638 	suspendingallmutators = 1;
2639 
2640 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2641 		ulwp_lock(ulwp, udp);
2642 		if (!ulwp->ul_mutator) {
2643 			ulwp_unlock(ulwp, udp);
2644 		} else if (ulwp->ul_stop) {	/* already stopped */
2645 			ulwp->ul_stop |= TSTP_MUTATOR;
2646 			ulwp_broadcast(ulwp);
2647 			ulwp_unlock(ulwp, udp);
2648 		} else {
2649 			/*
2650 			 * Move the stopped lwp out of a critical section.
2651 			 */
2652 			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
2653 			    link_dropped) {
2654 				suspendingallmutators = 0;
2655 				goto top;
2656 			}
2657 		}
2658 	}
2659 
2660 	suspendedallmutators = 1;
2661 	suspendingallmutators = 0;
2662 	lmutex_unlock(&udp->link_lock);
2663 	fork_lock_exit();
2664 	return (0);
2665 }
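
/*
 * Usage sketch (illustrative): the stop-the-world sequence an old
 * JVM would drive with these interfaces:
 *
 *	thr_mutators_barrier(1);	no new mutators may appear
 *	(void) thr_suspend_allmutators();
 *	... scan the stopped mutators (thr_getstate(), etc.) ...
 *	(void) thr_continue_allmutators();
 *	thr_mutators_barrier(0);
 */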
2666 
2667 /*
2668  * Suspend the target mutator.  The caller is permitted to suspend
2669  * itself.  If a mutator barrier is enabled, the caller will suspend
2670  * itself as though it had been suspended by thr_suspend_allmutators().
2671  * When the barrier is removed, this thread will be resumed.  Any
2672  * suspended mutator, whether suspended by thr_suspend_mutator(), or by
2673  * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
2674  */
2675 #pragma weak thr_suspend_mutator = _thr_suspend_mutator
2676 int
2677 _thr_suspend_mutator(thread_t tid)
2678 {
2679 	if (tid == 0)
2680 		tid = curthread->ul_lwpid;
2681 	return (_thrp_suspend(tid, TSTP_MUTATOR));
2682 }
2683 
2684 /*
2685  * Resume the set of all suspended mutators.
2686  */
2687 #pragma weak thr_continue_allmutators = _thr_continue_allmutators
2688 int
2689 _thr_continue_allmutators()
2690 {
2691 	ulwp_t *self = curthread;
2692 	uberdata_t *udp = self->ul_uberdata;
2693 	ulwp_t *ulwp;
2694 
2695 	/*
2696 	 * We single-thread the entire thread suspend/continue mechanism.
2697 	 */
2698 	fork_lock_enter();
2699 
2700 	lmutex_lock(&udp->link_lock);
2701 	if (!suspendedallmutators) {
2702 		lmutex_unlock(&udp->link_lock);
2703 		fork_lock_exit();
2704 		return (EINVAL);
2705 	}
2706 	suspendedallmutators = 0;
2707 
2708 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2709 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2710 		lmutex_lock(mp);
2711 		if (ulwp->ul_stop & TSTP_MUTATOR) {
2712 			ulwp->ul_stop &= ~TSTP_MUTATOR;
2713 			ulwp_broadcast(ulwp);
2714 			if (!ulwp->ul_stop)
2715 				force_continue(ulwp);
2716 		}
2717 		lmutex_unlock(mp);
2718 	}
2719 
2720 	lmutex_unlock(&udp->link_lock);
2721 	fork_lock_exit();
2722 	return (0);
2723 }
2724 
2725 /*
2726  * Resume a suspended mutator.
2727  */
2728 #pragma weak thr_continue_mutator = _thr_continue_mutator
2729 int
2730 _thr_continue_mutator(thread_t tid)
2731 {
2732 	return (_thrp_continue(tid, TSTP_MUTATOR));
2733 }
2734 
2735 #pragma weak thr_wait_mutator = _thr_wait_mutator
2736 int
2737 _thr_wait_mutator(thread_t tid, int dontwait)
2738 {
2739 	uberdata_t *udp = curthread->ul_uberdata;
2740 	ulwp_t *ulwp;
2741 	int cancel_state;
2742 	int error = 0;
2743 
2744 	(void) _pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
2745 top:
2746 	if ((ulwp = find_lwp(tid)) == NULL) {
2747 		(void) _pthread_setcancelstate(cancel_state, NULL);
2748 		return (ESRCH);
2749 	}
2750 
2751 	if (!ulwp->ul_mutator)
2752 		error = EINVAL;
2753 	else if (dontwait) {
2754 		if (!(ulwp->ul_stop & TSTP_MUTATOR))
2755 			error = EWOULDBLOCK;
2756 	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
2757 		cond_t *cvp = ulwp_condvar(ulwp, udp);
2758 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2759 
2760 		(void) _cond_wait(cvp, mp);
2761 		(void) lmutex_unlock(mp);
2762 		goto top;
2763 	}
2764 
2765 	ulwp_unlock(ulwp, udp);
2766 	(void) _pthread_setcancelstate(cancel_state, NULL);
2767 	return (error);
2768 }
2769 
2770 /* PROBE_SUPPORT begin */
2771 
2772 void
2773 thr_probe_setup(void *data)
2774 {
2775 	curthread->ul_tpdp = data;
2776 }
2777 
2778 static void *
2779 _thread_probe_getfunc()
2780 {
2781 	return (curthread->ul_tpdp);
2782 }
2783 
2784 void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;
2785 
2786 /* ARGSUSED */
2787 void
2788 _resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
2789 {
2790 	/* never called */
2791 }
2792 
2793 /* ARGSUSED */
2794 void
2795 _resume_ret(ulwp_t *oldlwp)
2796 {
2797 	/* never called */
2798 }
2799 
2800 /* PROBE_SUPPORT end */
2801