xref: /illumos-gate/usr/src/lib/libc/port/threads/thr.c (revision 4e93fb0f6383eaac21897dcdae56b87118131e4d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <procfs.h>
32 #include <sys/uio.h>
33 #include <ctype.h>
34 
35 #undef errno
36 extern int errno;
37 
38 /*
39  * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
40  * "we are linked with libthread".  The Sun Workshop 6 update 1 compilation
41  * system used it illegally (it is a consolidation private symbol).
42  * To accommodate this and possibly other abusers of the symbol,
43  * we make it always equal to 1 now that libthread has been folded
44  * into libc.  The new __libc_threaded symbol is used to indicate
45  * the new meaning, "more than one thread exists".
46  */
47 int __threaded = 1;		/* always equal to 1 */
48 int __libc_threaded = 0;	/* zero until first thr_create() */
49 
50 /*
51  * thr_concurrency and pthread_concurrency are not used by the library.
52  * They exist solely to hold and return the values set by calls to
53  * thr_setconcurrency() and pthread_setconcurrency().
54  * Because thr_concurrency is affected by the THR_NEW_LWP flag
55  * to thr_create(), thr_concurrency is protected by link_lock.
56  */
57 static	int	thr_concurrency = 1;
58 static	int	pthread_concurrency;
59 
60 #define	HASHTBLSZ	1024	/* must be a power of two */
61 #define	TIDHASH(tid, udp)	((tid) & (udp)->hash_mask)
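/*
 * Example: with the full-sized table, hash_mask is HASHTBLSZ - 1 == 1023,
 * so tid 4100 maps to bucket (4100 & 1023) == 4.  Before finish_init()
 * installs the full table, hash_mask is 0 and every tid maps to bucket 0
 * of the one-entry init_hash_table.
 */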
62 
63 /* initial allocation, just enough for one lwp */
64 #pragma align 64(init_hash_table)
65 thr_hash_table_t init_hash_table[1] = {
66 	{ DEFAULTMUTEX, DEFAULTCV, NULL },
67 };
68 
69 extern const Lc_interface rtld_funcs[];
70 
71 /*
72  * The weak version is known to libc_db and mdb.
73  */
74 #pragma weak _uberdata = __uberdata
75 uberdata_t __uberdata = {
76 	{ DEFAULTMUTEX, DEFAULTCV },	/* link_lock */
77 	{ DEFAULTMUTEX, DEFAULTCV },	/* fork_lock */
78 	{ DEFAULTMUTEX, DEFAULTCV },	/* tdb_hash_lock */
79 	{ 0, },			/* tdb_hash_lock_stats */
80 	{ { 0 }, },		/* siguaction[NSIG] */
81 	{{ DEFAULTMUTEX, NULL, 0 },		/* bucket[NBUCKETS] */
82 	{ DEFAULTMUTEX, NULL, 0 },
83 	{ DEFAULTMUTEX, NULL, 0 },
84 	{ DEFAULTMUTEX, NULL, 0 },
85 	{ DEFAULTMUTEX, NULL, 0 },
86 	{ DEFAULTMUTEX, NULL, 0 },
87 	{ DEFAULTMUTEX, NULL, 0 },
88 	{ DEFAULTMUTEX, NULL, 0 },
89 	{ DEFAULTMUTEX, NULL, 0 },
90 	{ DEFAULTMUTEX, NULL, 0 }},
91 	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
92 	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
93 	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
94 	0,			/* primary_map */
95 	0,			/* bucket_init */
96 	0,			/* pad[0] */
97 	0,			/* pad[1] */
98 	{ 0 },			/* uberflags */
99 	NULL,			/* queue_head */
100 	init_hash_table,	/* thr_hash_table */
101 	1,			/* hash_size: size of the hash table */
102 	0,			/* hash_mask: hash_size - 1 */
103 	NULL,			/* ulwp_one */
104 	NULL,			/* all_lwps */
105 	NULL,			/* all_zombies */
106 	0,			/* nthreads */
107 	0,			/* nzombies */
108 	0,			/* ndaemons */
109 	0,			/* pid */
110 	sigacthandler,		/* sigacthandler */
111 	NULL,			/* lwp_stacks */
112 	NULL,			/* lwp_laststack */
113 	0,			/* nfreestack */
114 	10,			/* thread_stack_cache */
115 	NULL,			/* ulwp_freelist */
116 	NULL,			/* ulwp_lastfree */
117 	NULL,			/* ulwp_replace_free */
118 	NULL,			/* ulwp_replace_last */
119 	NULL,			/* atforklist */
120 	NULL,			/* __tdb_bootstrap */
121 	{			/* tdb */
122 		NULL,		/* tdb_sync_addr_hash */
123 		0,		/* tdb_register_count */
124 		0,		/* tdb_hash_alloc_failed */
125 		NULL,		/* tdb_sync_addr_free */
126 		NULL,		/* tdb_sync_addr_last */
127 		0,		/* tdb_sync_alloc */
128 		{ 0, 0 },	/* tdb_ev_global_mask */
129 		tdb_events,	/* tdb_events array */
130 	},
131 };
132 
133 /*
134  * The weak version is known to libc_db and mdb.
135  */
136 #pragma weak _tdb_bootstrap = __tdb_bootstrap
137 uberdata_t **__tdb_bootstrap = NULL;
138 
139 int	thread_queue_fifo = 4;
140 int	thread_queue_dump = 0;
141 int	thread_cond_wait_defer = 0;
142 int	thread_error_detection = 0;
143 int	thread_async_safe = 0;
144 int	thread_stack_cache = 10;
145 
146 int	thread_door_noreserve = 0;
147 
148 static	ulwp_t	*ulwp_alloc(void);
149 static	void	ulwp_free(ulwp_t *);
150 
151 /*
152  * Insert the lwp into the hash table.
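 * The lwp is pushed on the front of its bucket's singly-linked
 * ul_hash chain, so the most recently inserted lwp is found first.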
153  */
154 void
155 hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
156 {
157 	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
158 	udp->thr_hash_table[ix].hash_bucket = ulwp;
159 	ulwp->ul_ix = ix;
160 }
161 
162 void
163 hash_in(ulwp_t *ulwp, uberdata_t *udp)
164 {
165 	int ix = TIDHASH(ulwp->ul_lwpid, udp);
166 	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
167 
168 	lmutex_lock(mp);
169 	hash_in_unlocked(ulwp, ix, udp);
170 	lmutex_unlock(mp);
171 }
172 
173 /*
174  * Delete the lwp from the hash table.
175  */
176 void
177 hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
178 {
179 	ulwp_t **ulwpp;
180 
181 	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
182 	    ulwp != *ulwpp;
183 	    ulwpp = &(*ulwpp)->ul_hash)
184 		;
185 	*ulwpp = ulwp->ul_hash;
186 	ulwp->ul_hash = NULL;
187 	ulwp->ul_ix = -1;
188 }
189 
190 void
191 hash_out(ulwp_t *ulwp, uberdata_t *udp)
192 {
193 	int ix;
194 
195 	if ((ix = ulwp->ul_ix) >= 0) {
196 		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
197 
198 		lmutex_lock(mp);
199 		hash_out_unlocked(ulwp, ix, udp);
200 		lmutex_unlock(mp);
201 	}
202 }
203 
204 static void
205 ulwp_clean(ulwp_t *ulwp)
206 {
207 	ulwp->ul_self = NULL;
208 	ulwp->ul_rval = NULL;
209 	ulwp->ul_lwpid = 0;
210 	ulwp->ul_pri = 0;
211 	ulwp->ul_mappedpri = 0;
212 	ulwp->ul_policy = 0;
213 	ulwp->ul_pri_mapped = 0;
214 	ulwp->ul_mutator = 0;
215 	ulwp->ul_pleasestop = 0;
216 	ulwp->ul_stop = 0;
217 	ulwp->ul_dead = 0;
218 	ulwp->ul_unwind = 0;
219 	ulwp->ul_detached = 0;
220 	ulwp->ul_stopping = 0;
221 	ulwp->ul_sp = 0;
222 	ulwp->ul_critical = 0;
223 	ulwp->ul_cancelable = 0;
224 	ulwp->ul_preempt = 0;
225 	ulwp->ul_sigsuspend = 0;
226 	ulwp->ul_cancel_pending = 0;
227 	ulwp->ul_cancel_disabled = 0;
228 	ulwp->ul_cancel_async = 0;
229 	ulwp->ul_save_async = 0;
230 	ulwp->ul_cursig = 0;
231 	ulwp->ul_created = 0;
232 	ulwp->ul_replace = 0;
233 	ulwp->ul_schedctl_called = NULL;
234 	ulwp->ul_errno = 0;
235 	ulwp->ul_errnop = NULL;
236 	ulwp->ul_clnup_hdr = NULL;
237 	ulwp->ul_schedctl = NULL;
238 	ulwp->ul_bindflags = 0;
239 	(void) _private_memset(&ulwp->ul_td_evbuf, 0,
240 		sizeof (ulwp->ul_td_evbuf));
241 	ulwp->ul_td_events_enable = 0;
242 	ulwp->ul_qtype = 0;
243 	ulwp->ul_usropts = 0;
244 	ulwp->ul_startpc = NULL;
245 	ulwp->ul_startarg = NULL;
246 	ulwp->ul_wchan = NULL;
247 	ulwp->ul_link = NULL;
248 	ulwp->ul_sleepq = NULL;
249 	ulwp->ul_mxchain = NULL;
250 	ulwp->ul_epri = 0;
251 	ulwp->ul_emappedpri = 0;
252 	/* PROBE_SUPPORT begin */
253 	ulwp->ul_tpdp = NULL;
254 	/* PROBE_SUPPORT end */
255 	ulwp->ul_siglink = NULL;
256 	(void) _private_memset(ulwp->ul_ftsd, 0,
257 		sizeof (void *) * TSD_NFAST);
258 	ulwp->ul_stsd = NULL;
259 	(void) _private_memset(&ulwp->ul_spinlock, 0,
260 		sizeof (ulwp->ul_spinlock));
261 	ulwp->ul_spin_lock_spin = 0;
262 	ulwp->ul_spin_lock_spin2 = 0;
263 	ulwp->ul_spin_lock_sleep = 0;
264 	ulwp->ul_spin_lock_wakeup = 0;
265 	ulwp->ul_ex_unwind = NULL;
266 }
267 
268 static int stackprot;
269 
270 /*
271  * Answer the question, "Is the lwp in question really dead?"
272  * We must inquire of the operating system to be really sure
273  * because the lwp may have called lwp_exit() but it has not
274  * yet completed the exit.
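 * The probe below, __lwp_kill(lwpid, 0), behaves like kill(pid, 0):
 * no signal is delivered, and an ESRCH return tells us the lwp has
 * finished exiting.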
275  */
276 static int
277 dead_and_buried(ulwp_t *ulwp)
278 {
279 	if (ulwp->ul_lwpid == (lwpid_t)(-1))
280 		return (1);
281 	if (ulwp->ul_dead && ulwp->ul_detached &&
282 	    __lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
283 		ulwp->ul_lwpid = (lwpid_t)(-1);
284 		return (1);
285 	}
286 	return (0);
287 }
288 
289 /*
290  * Attempt to keep the stack cache within the specified cache limit.
291  */
292 static void
293 trim_stack_cache(int cache_limit)
294 {
295 	ulwp_t *self = curthread;
296 	uberdata_t *udp = self->ul_uberdata;
297 	ulwp_t *prev = NULL;
298 	ulwp_t **ulwpp = &udp->lwp_stacks;
299 	ulwp_t *ulwp;
300 
301 	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));
302 
303 	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
304 		if (dead_and_buried(ulwp)) {
305 			*ulwpp = ulwp->ul_next;
306 			if (ulwp == udp->lwp_laststack)
307 				udp->lwp_laststack = prev;
308 			hash_out(ulwp, udp);
309 			udp->nfreestack--;
310 			(void) _private_munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
311 			/*
312 			 * Now put the free ulwp on the ulwp freelist.
313 			 */
314 			ulwp->ul_mapsiz = 0;
315 			ulwp->ul_next = NULL;
316 			if (udp->ulwp_freelist == NULL)
317 				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
318 			else {
319 				udp->ulwp_lastfree->ul_next = ulwp;
320 				udp->ulwp_lastfree = ulwp;
321 			}
322 		} else {
323 			prev = ulwp;
324 			ulwpp = &ulwp->ul_next;
325 		}
326 	}
327 }
328 
329 /*
330  * Find an unused stack of the requested size
331  * or create a new stack of the requested size.
332  * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
333  * thr_exit() stores 1 in the ul_dead member.
334  * thr_join() stores -1 in the ul_lwpid member.
335  */
336 ulwp_t *
337 find_stack(size_t stksize, size_t guardsize)
338 {
339 	static size_t pagesize = 0;
340 
341 	uberdata_t *udp = curthread->ul_uberdata;
342 	size_t mapsize;
343 	ulwp_t *prev;
344 	ulwp_t *ulwp;
345 	ulwp_t **ulwpp;
346 	void *stk;
347 
348 	/*
349 	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
350 	 * unless overridden by the system's configuration.
351 	 */
352 	if (stackprot == 0) {	/* do this once */
353 		long lprot = _sysconf(_SC_STACK_PROT);
354 		if (lprot <= 0)
355 			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
356 		stackprot = (int)lprot;
357 	}
358 	if (pagesize == 0)	/* do this once */
359 		pagesize = _sysconf(_SC_PAGESIZE);
360 
361 	/*
362 	 * One megabyte stacks by default, but subtract off
363 	 * two pages for the system-created red zones.
364 	 * Round up a non-zero stack size to a pagesize multiple.
365 	 */
366 	if (stksize == 0)
367 		stksize = DEFAULTSTACK - 2 * pagesize;
368 	else
369 		stksize = ((stksize + pagesize - 1) & -pagesize);
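	/*
	 * Example of the rounding: with pagesize 0x1000, a request for
	 * 0x1801 bytes becomes 0x2000; -pagesize is the two's-complement
	 * mask with the page-offset bits clear.
	 */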
370 
371 	/*
372 	 * Round up the mapping size to a multiple of pagesize.
373 	 * Note: mmap() provides at least one page of red zone
374 	 * so we deduct that from the value of guardsize.
375 	 */
376 	if (guardsize != 0)
377 		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
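	/*
	 * Example: a one-page guardsize request rounds to one page and
	 * then becomes 0, because mmap()'s red zone already supplies it.
	 */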
378 	mapsize = stksize + guardsize;
379 
380 	lmutex_lock(&udp->link_lock);
381 	for (prev = NULL, ulwpp = &udp->lwp_stacks;
382 	    (ulwp = *ulwpp) != NULL;
383 	    prev = ulwp, ulwpp = &ulwp->ul_next) {
384 		if (ulwp->ul_mapsiz == mapsize &&
385 		    ulwp->ul_guardsize == guardsize &&
386 		    dead_and_buried(ulwp)) {
387 			/*
388 			 * The previous lwp is gone; reuse the stack.
389 			 * Remove the ulwp from the stack list.
390 			 */
391 			*ulwpp = ulwp->ul_next;
392 			ulwp->ul_next = NULL;
393 			if (ulwp == udp->lwp_laststack)
394 				udp->lwp_laststack = prev;
395 			hash_out(ulwp, udp);
396 			udp->nfreestack--;
397 			lmutex_unlock(&udp->link_lock);
398 			ulwp_clean(ulwp);
399 			return (ulwp);
400 		}
401 	}
402 
403 	/*
404 	 * None of the cached stacks matched our mapping size.
405 	 * Reduce the stack cache to get rid of possibly
406 	 * very old stacks that will never be reused.
407 	 */
408 	if (udp->nfreestack > udp->thread_stack_cache)
409 		trim_stack_cache(udp->thread_stack_cache);
410 	else if (udp->nfreestack > 0)
411 		trim_stack_cache(udp->nfreestack - 1);
412 	lmutex_unlock(&udp->link_lock);
413 
414 	/*
415 	 * Create a new stack.
416 	 */
417 	if ((stk = _private_mmap(NULL, mapsize, stackprot,
418 	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
419 		/*
420 		 * We have allocated our stack.  Now allocate the ulwp.
421 		 */
422 		ulwp = ulwp_alloc();
423 		if (ulwp == NULL)
424 			(void) _private_munmap(stk, mapsize);
425 		else {
426 			ulwp->ul_stk = stk;
427 			ulwp->ul_mapsiz = mapsize;
428 			ulwp->ul_guardsize = guardsize;
429 			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
430 			ulwp->ul_stksiz = stksize;
431 			ulwp->ul_ix = -1;
432 			if (guardsize)	/* protect the extra red zone */
433 				(void) _private_mprotect(stk,
434 					guardsize, PROT_NONE);
435 		}
436 	}
437 	return (ulwp);
438 }
439 
440 /*
441  * Get a ulwp_t structure from the free list or allocate a new one.
442  * Such ulwp_t's do not have a stack allocated by the library.
443  */
444 static ulwp_t *
445 ulwp_alloc(void)
446 {
447 	ulwp_t *self = curthread;
448 	uberdata_t *udp = self->ul_uberdata;
449 	size_t tls_size;
450 	ulwp_t *prev;
451 	ulwp_t *ulwp;
452 	ulwp_t **ulwpp;
453 	caddr_t data;
454 
455 	lmutex_lock(&udp->link_lock);
456 	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
457 	    (ulwp = *ulwpp) != NULL;
458 	    prev = ulwp, ulwpp = &ulwp->ul_next) {
459 		if (dead_and_buried(ulwp)) {
460 			*ulwpp = ulwp->ul_next;
461 			ulwp->ul_next = NULL;
462 			if (ulwp == udp->ulwp_lastfree)
463 				udp->ulwp_lastfree = prev;
464 			hash_out(ulwp, udp);
465 			lmutex_unlock(&udp->link_lock);
466 			ulwp_clean(ulwp);
467 			return (ulwp);
468 		}
469 	}
470 	lmutex_unlock(&udp->link_lock);
471 
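	/*
	 * The static TLS block and the ulwp_t are allocated together;
	 * the TLS data sits immediately below the ulwp_t, which is why
	 * the ulwp pointer is data + tls_size.
	 */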
472 	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
473 	data = lmalloc(sizeof (*ulwp) + tls_size);
474 	if (data != NULL) {
475 		/* LINTED pointer cast may result in improper alignment */
476 		ulwp = (ulwp_t *)(data + tls_size);
477 	}
478 	return (ulwp);
479 }
480 
481 /*
482  * Free a ulwp structure.
483  * If there is an associated stack, put it on the stack list and
484  * munmap() previously freed stacks up to the residual cache limit.
485  * Else put it on the ulwp free list and never call lfree() on it.
486  */
487 static void
488 ulwp_free(ulwp_t *ulwp)
489 {
490 	uberdata_t *udp = curthread->ul_uberdata;
491 
492 	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
493 	ulwp->ul_next = NULL;
494 	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
495 		/*EMPTY*/;
496 	else if (ulwp->ul_mapsiz != 0) {
497 		if (udp->lwp_stacks == NULL)
498 			udp->lwp_stacks = udp->lwp_laststack = ulwp;
499 		else {
500 			udp->lwp_laststack->ul_next = ulwp;
501 			udp->lwp_laststack = ulwp;
502 		}
503 		if (++udp->nfreestack > udp->thread_stack_cache)
504 			trim_stack_cache(udp->thread_stack_cache);
505 	} else {
506 		if (udp->ulwp_freelist == NULL)
507 			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
508 		else {
509 			udp->ulwp_lastfree->ul_next = ulwp;
510 			udp->ulwp_lastfree = ulwp;
511 		}
512 	}
513 }
514 
515 /*
516  * Find a named lwp and return a pointer to its hash list location.
517  * On success, returns with the hash lock held.
518  */
519 ulwp_t **
520 find_lwpp(thread_t tid)
521 {
522 	uberdata_t *udp = curthread->ul_uberdata;
523 	int ix = TIDHASH(tid, udp);
524 	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
525 	ulwp_t *ulwp;
526 	ulwp_t **ulwpp;
527 
528 	if (tid == 0)
529 		return (NULL);
530 
531 	lmutex_lock(mp);
532 	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
533 	    (ulwp = *ulwpp) != NULL;
534 	    ulwpp = &ulwp->ul_hash) {
535 		if (ulwp->ul_lwpid == tid)
536 			return (ulwpp);
537 	}
538 	lmutex_unlock(mp);
539 	return (NULL);
540 }
541 
542 /*
543  * Wake up all lwps waiting on this lwp for some reason.
544  */
545 void
546 ulwp_broadcast(ulwp_t *ulwp)
547 {
548 	ulwp_t *self = curthread;
549 	uberdata_t *udp = self->ul_uberdata;
550 
551 	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
552 	(void) cond_broadcast_internal(ulwp_condvar(ulwp, udp));
553 }
554 
555 /*
556  * Find a named lwp and return a pointer to it.
557  * Returns with the hash lock held.
558  */
559 ulwp_t *
560 find_lwp(thread_t tid)
561 {
562 	ulwp_t *self = curthread;
563 	uberdata_t *udp = self->ul_uberdata;
564 	ulwp_t *ulwp = NULL;
565 	ulwp_t **ulwpp;
566 
567 	if (self->ul_lwpid == tid) {
568 		ulwp = self;
569 		ulwp_lock(ulwp, udp);
570 	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
571 		ulwp = *ulwpp;
572 	}
573 
574 	if (ulwp && ulwp->ul_dead) {
575 		ulwp_unlock(ulwp, udp);
576 		ulwp = NULL;
577 	}
578 
579 	return (ulwp);
580 }
581 
582 int
583 _thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
584 	long flags, thread_t *new_thread, pri_t priority, int policy,
585 	size_t guardsize)
586 {
587 	ulwp_t *self = curthread;
588 	uberdata_t *udp = self->ul_uberdata;
589 	ucontext_t uc;
590 	uint_t lwp_flags;
591 	thread_t tid;
592 	int error = 0;
593 	ulwp_t *ulwp;
594 
595 	/*
596 	 * Enforce the restriction of not creating any threads
597 	 * until the primary link map has been initialized.
598 	 * Also, disallow thread creation to a child of vfork().
599 	 */
600 	if (!self->ul_primarymap || self->ul_vfork)
601 		return (ENOTSUP);
602 
603 	if (udp->hash_size == 1)
604 		finish_init();
605 
606 	if (((stk || stksize) && stksize < MINSTACK) ||
607 	    priority < THREAD_MIN_PRIORITY || priority > THREAD_MAX_PRIORITY)
608 		return (EINVAL);
609 
610 	if (stk == NULL) {
611 		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
612 			return (ENOMEM);
613 		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
614 	} else {
615 		/* initialize the private stack */
616 		if ((ulwp = ulwp_alloc()) == NULL)
617 			return (ENOMEM);
618 		ulwp->ul_stk = stk;
619 		ulwp->ul_stktop = (uintptr_t)stk + stksize;
620 		ulwp->ul_stksiz = stksize;
621 		ulwp->ul_ix = -1;
622 	}
623 	ulwp->ul_errnop = &ulwp->ul_errno;
624 
625 	lwp_flags = LWP_SUSPENDED;
626 	if (flags & (THR_DETACHED|THR_DAEMON)) {
627 		flags |= THR_DETACHED;
628 		lwp_flags |= LWP_DETACHED;
629 	}
630 	if (flags & THR_DAEMON)
631 		lwp_flags |= LWP_DAEMON;
632 
633 	/* creating a thread: enforce mt-correctness in _mutex_lock() */
634 	self->ul_async_safe = 1;
635 
636 	/* per-thread copies of global variables, for speed */
637 	ulwp->ul_queue_fifo = self->ul_queue_fifo;
638 	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
639 	ulwp->ul_error_detection = self->ul_error_detection;
640 	ulwp->ul_async_safe = self->ul_async_safe;
641 	ulwp->ul_max_spinners = self->ul_max_spinners;
642 	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
643 	ulwp->ul_release_spin = self->ul_release_spin;
644 	ulwp->ul_queue_spin = self->ul_queue_spin;
645 	ulwp->ul_door_noreserve = self->ul_door_noreserve;
646 
647 	ulwp->ul_primarymap = self->ul_primarymap;
648 	ulwp->ul_self = ulwp;
649 	ulwp->ul_uberdata = udp;
650 
651 	/* debugger support */
652 	ulwp->ul_usropts = flags;
653 
654 #ifdef __sparc
655 	/*
656 	 * We cache several instructions in the thread structure for use
657 	 * by the fasttrap DTrace provider. When changing this, read the
658 	 * comment in fasttrap.h for all the other places that must
659 	 * be changed.
660 	 */
661 	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
662 	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
663 	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
664 	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
665 #endif
666 
667 	ulwp->ul_startpc = func;
668 	ulwp->ul_startarg = arg;
669 	_fpinherit(ulwp);
670 	/*
671 	 * Defer signals on the new thread until its TLS constructors
672 	 * have been called.  _thr_setup() will call sigon() after
673 	 * it has called tls_setup().
674 	 */
675 	ulwp->ul_sigdefer = 1;
676 
677 	if (setup_context(&uc, _thr_setup, ulwp,
678 	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize) != 0)
679 		error = EAGAIN;
680 
681 	/*
682 	 * Call enter_critical() to avoid being suspended until we
683 	 * have linked the new thread into the proper lists.
684 	 * This is necessary because forkall() and fork1() must
685 	 * suspend all threads and they must see a complete list.
686 	 */
687 	enter_critical(self);
688 	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
689 	if (error != 0 ||
690 	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
691 		exit_critical(self);
692 		ulwp->ul_lwpid = (lwpid_t)(-1);
693 		ulwp->ul_dead = 1;
694 		ulwp->ul_detached = 1;
695 		lmutex_lock(&udp->link_lock);
696 		ulwp_free(ulwp);
697 		lmutex_unlock(&udp->link_lock);
698 		return (error);
699 	}
700 	self->ul_nocancel = 0;	/* cancellation is now possible */
701 	ulwp->ul_nocancel = 0;
702 	udp->uberflags.uf_mt = 1;
703 	if (new_thread)
704 		*new_thread = tid;
705 	if (flags & THR_DETACHED)
706 		ulwp->ul_detached = 1;
707 	ulwp->ul_lwpid = tid;
708 	ulwp->ul_stop = TSTP_REGULAR;
709 	ulwp->ul_created = 1;
710 	ulwp->ul_policy = policy;
711 	ulwp->ul_pri = priority;
712 
713 	lmutex_lock(&udp->link_lock);
714 	ulwp->ul_forw = udp->all_lwps;
715 	ulwp->ul_back = udp->all_lwps->ul_back;
716 	ulwp->ul_back->ul_forw = ulwp;
717 	ulwp->ul_forw->ul_back = ulwp;
718 	hash_in(ulwp, udp);
719 	udp->nthreads++;
720 	if (flags & THR_DAEMON)
721 		udp->ndaemons++;
722 	if (flags & THR_NEW_LWP)
723 		thr_concurrency++;
724 	__libc_threaded = 1;		/* inform stdio */
725 	lmutex_unlock(&udp->link_lock);
726 
727 	if (__td_event_report(self, TD_CREATE, udp)) {
728 		self->ul_td_evbuf.eventnum = TD_CREATE;
729 		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
730 		tdb_event(TD_CREATE, udp);
731 	}
732 	if (!(flags & THR_SUSPENDED)) {
733 		ulwp->ul_created = 0;
734 		(void) _thrp_continue(tid, TSTP_REGULAR);
735 	}
736 
737 	exit_critical(self);
738 	return (0);
739 }
740 
741 #pragma weak thr_create = _thr_create
742 int
743 _thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
744 	long flags, thread_t *new_thread)
745 {
746 	return (_thrp_create(stk, stksize, func, arg, flags, new_thread,
747 		curthread->ul_pri, curthread->ul_policy, 0));
748 }
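
#if 0
/*
 * Illustrative sketch only (not compiled): typical use of the public
 * thr_create(3C) entry point defined above.  demo_start() and demo()
 * are hypothetical names, not part of this file.
 */
static void *
demo_start(void *arg)
{
	return (arg);		/* becomes the value reaped by thr_join() */
}

static int
demo(void)
{
	thread_t tid;
	int error;

	/* create the thread suspended, then let it run */
	error = thr_create(NULL, 0, demo_start, (void *)1,
	    THR_SUSPENDED, &tid);
	if (error == 0)
		error = thr_continue(tid);
	return (error);
}
#endif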
749 
750 /*
751  * A special cancellation cleanup hook for DCE.
752  * cleanuphndlr, when it is not NULL, will contain a callback
753  * function to be called before a thread is terminated in
754  * _thr_exit() as a result of being cancelled.
755  */
756 static void (*cleanuphndlr)(void) = NULL;
757 
758 /*
759  * _pthread_setcleanupinit: sets the cleanup hook.
760  */
761 int
762 _pthread_setcleanupinit(void (*func)(void))
763 {
764 	cleanuphndlr = func;
765 	return (0);
766 }
767 
768 void
769 _thrp_exit()
770 {
771 	ulwp_t *self = curthread;
772 	uberdata_t *udp = self->ul_uberdata;
773 	ulwp_t *replace = NULL;
774 
775 	if (__td_event_report(self, TD_DEATH, udp)) {
776 		self->ul_td_evbuf.eventnum = TD_DEATH;
777 		tdb_event(TD_DEATH, udp);
778 	}
779 
780 	ASSERT(self->ul_sigdefer != 0);
781 
782 	lmutex_lock(&udp->link_lock);
783 	udp->nthreads--;
784 	if (self->ul_usropts & THR_NEW_LWP)
785 		thr_concurrency--;
786 	if (self->ul_usropts & THR_DAEMON)
787 		udp->ndaemons--;
788 	else if (udp->nthreads == udp->ndaemons) {
789 		/*
790 		 * We are the last non-daemon thread exiting.
791 		 * Exit the process.  We retain our TSD and TLS so
792 		 * that atexit() application functions can use them.
793 		 */
794 		lmutex_unlock(&udp->link_lock);
795 		exit(0);
796 		thr_panic("_thrp_exit(): exit(0) returned");
797 	}
798 	lmutex_unlock(&udp->link_lock);
799 
800 	tsd_exit();	/* deallocate thread-specific data */
801 	tls_exit();	/* deallocate thread-local storage */
802 
803 	/* block all signals to finish exiting */
804 	block_all_signals(self);
805 	/* also prevent ourself from being suspended */
806 	enter_critical(self);
807 	rwl_free(self);
808 	lmutex_lock(&udp->link_lock);
809 	ulwp_free(self);
810 	(void) ulwp_lock(self, udp);
811 
812 	if (self->ul_mapsiz && !self->ul_detached) {
813 		/*
814 		 * We want to free the stack for reuse but must keep
815 		 * the ulwp_t struct for the benefit of thr_join().
816 		 * For this purpose we allocate a replacement ulwp_t.
817 		 */
818 		if ((replace = udp->ulwp_replace_free) == NULL)
819 			replace = lmalloc(REPLACEMENT_SIZE);
820 		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
821 			udp->ulwp_replace_last = NULL;
822 	}
823 
824 	if (udp->all_lwps == self)
825 		udp->all_lwps = self->ul_forw;
826 	if (udp->all_lwps == self)
827 		udp->all_lwps = NULL;
828 	else {
829 		self->ul_forw->ul_back = self->ul_back;
830 		self->ul_back->ul_forw = self->ul_forw;
831 	}
832 	self->ul_forw = self->ul_back = NULL;
833 	/* collect queue lock statistics before marking ourself dead */
834 	record_spin_locks(self);
835 	self->ul_dead = 1;
836 	self->ul_pleasestop = 0;
837 	if (replace != NULL) {
838 		int ix = self->ul_ix;		/* the hash index */
839 		(void) _private_memcpy(replace, self, REPLACEMENT_SIZE);
840 		replace->ul_self = replace;
841 		replace->ul_next = NULL;	/* clone not on stack list */
842 		replace->ul_mapsiz = 0;		/* allows clone to be freed */
843 		replace->ul_replace = 1;	/* requires clone to be freed */
844 		hash_out_unlocked(self, ix, udp);
845 		hash_in_unlocked(replace, ix, udp);
846 		ASSERT(!(self->ul_detached));
847 		self->ul_detached = 1;		/* this frees the stack */
848 		self->ul_schedctl = NULL;
849 		self->ul_schedctl_called = &udp->uberflags;
850 		set_curthread(self = replace);
851 		/*
852 		 * Having just changed the address of curthread, we
853 		 * must reset the ownership of the locks we hold so
854 		 * that assertions will not fire when we release them.
855 		 */
856 		udp->link_lock.mutex_owner = (uintptr_t)self;
857 		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
858 		/*
859 		 * NOTE:
860 		 * On i386, %gs still references the original, not the
861 		 * replacement, ulwp structure.  Fetching the replacement
862 		 * curthread pointer via %gs:0 works correctly since the
863 		 * original ulwp structure will not be reallocated until
864 		 * this lwp has completed its lwp_exit() system call (see
865 		 * dead_and_buried()), but from here on out, we must make
866 		 * no references to %gs:<offset> other than %gs:0.
867 		 */
868 	}
869 	/*
870 	 * Put non-detached terminated threads in the all_zombies list.
871 	 */
872 	if (!self->ul_detached) {
873 		udp->nzombies++;
874 		if (udp->all_zombies == NULL) {
875 			ASSERT(udp->nzombies == 1);
876 			udp->all_zombies = self->ul_forw = self->ul_back = self;
877 		} else {
878 			self->ul_forw = udp->all_zombies;
879 			self->ul_back = udp->all_zombies->ul_back;
880 			self->ul_back->ul_forw = self;
881 			self->ul_forw->ul_back = self;
882 		}
883 	}
884 	/*
885 	 * Notify everyone waiting for this thread.
886 	 */
887 	ulwp_broadcast(self);
888 	(void) ulwp_unlock(self, udp);
889 	/*
890 	 * Prevent any more references to the schedctl data.
891 	 * We are exiting and continue_fork() may not find us.
892 	 * Do this just before dropping link_lock, since fork
893 	 * serializes on link_lock.
894 	 */
895 	self->ul_schedctl = NULL;
896 	self->ul_schedctl_called = &udp->uberflags;
897 	lmutex_unlock(&udp->link_lock);
898 
899 	ASSERT(self->ul_critical == 1);
900 	ASSERT(self->ul_preempt == 0);
901 	_lwp_terminate();	/* never returns */
902 	thr_panic("_thrp_exit(): _lwp_terminate() returned");
903 }
904 
905 void
906 collect_queue_statistics()
907 {
908 	uberdata_t *udp = curthread->ul_uberdata;
909 	ulwp_t *ulwp;
910 
911 	if (thread_queue_dump) {
912 		lmutex_lock(&udp->link_lock);
913 		if ((ulwp = udp->all_lwps) != NULL) {
914 			do {
915 				record_spin_locks(ulwp);
916 			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
917 		}
918 		lmutex_unlock(&udp->link_lock);
919 	}
920 }
921 
922 void
923 _thr_exit_common(void *status, int unwind)
924 {
925 	ulwp_t *self = curthread;
926 	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);
927 
928 	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);
929 
930 	/*
931 	 * Disable cancellation and call the special DCE cancellation
932 	 * cleanup hook if it is enabled.  Do nothing else before calling
933 	 * the DCE cancellation cleanup hook; it may call longjmp() and
934 	 * never return here.
935 	 */
936 	self->ul_cancel_disabled = 1;
937 	self->ul_cancel_async = 0;
938 	self->ul_save_async = 0;
939 	self->ul_cancelable = 0;
940 	self->ul_cancel_pending = 0;
941 	if (cancelled && cleanuphndlr != NULL)
942 		(*cleanuphndlr)();
943 
944 	/*
945 	 * Block application signals while we are exiting.
946 	 * We call out to C++, TSD, and TLS destructors while exiting
947 	 * and these are application-defined, so we cannot be assured
948 	 * that they won't reset the signal mask.  We use sigoff() to
949 	 * defer any signals that may be received as a result of this
950 	 * bad behavior.  Such signals will be lost to the process
951 	 * when the thread finishes exiting.
952 	 */
953 	(void) _thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
954 	sigoff(self);
955 
956 	self->ul_rval = status;
957 
958 	/*
959 	 * If thr_exit() is being called from a place where C++
960 	 * destructors are to be run, such as a cancellation point,
961 	 * then set this flag.  It is checked in _t_cancel() to decide
962 	 * whether _ex_unwind() is to be called or not.
963 	 */
964 	if (unwind)
965 		self->ul_unwind = 1;
966 
967 	/*
968 	 * _thrp_unwind() will eventually call _thrp_exit().
969 	 * It never returns.
970 	 */
971 	_thrp_unwind(NULL);
972 	thr_panic("_thr_exit_common(): _thrp_unwind() returned");
973 }
974 
975 /*
976  * Called when a thread returns from its start function.
977  * We are at the top of the stack; no unwinding is necessary.
978  */
979 void
980 _thr_terminate(void *status)
981 {
982 	_thr_exit_common(status, 0);
983 }
984 
985 #pragma weak thr_exit = _thr_exit
986 #pragma weak pthread_exit = _thr_exit
987 #pragma weak _pthread_exit = _thr_exit
988 void
989 _thr_exit(void *status)
990 {
991 	_thr_exit_common(status, 1);
992 }
993 
994 int
995 _thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
996 {
997 	uberdata_t *udp = curthread->ul_uberdata;
998 	mutex_t *mp;
999 	void *rval;
1000 	thread_t found;
1001 	ulwp_t *ulwp;
1002 	ulwp_t **ulwpp;
1003 	int replace;
1004 	int error;
1005 
1006 	if (do_cancel)
1007 		error = lwp_wait(tid, &found);
1008 	else {
1009 		while ((error = __lwp_wait(tid, &found)) == EINTR)
1010 			;
1011 	}
1012 	if (error)
1013 		return (error);
1014 
1015 	/*
1016 	 * We must hold link_lock to avoid a race condition with find_stack().
1017 	 */
1018 	lmutex_lock(&udp->link_lock);
1019 	if ((ulwpp = find_lwpp(found)) == NULL) {
1020 		/*
1021 		 * lwp_wait() found an lwp that the library doesn't know
1022 		 * about.  It must have been created with _lwp_create().
1023 		 * Just return its lwpid; we can't know its status.
1024 		 */
1025 		lmutex_unlock(&udp->link_lock);
1026 		rval = NULL;
1027 	} else {
1028 		/*
1029 		 * Remove ulwp from the hash table.
1030 		 */
1031 		ulwp = *ulwpp;
1032 		*ulwpp = ulwp->ul_hash;
1033 		ulwp->ul_hash = NULL;
1034 		/*
1035 		 * Remove ulwp from all_zombies list.
1036 		 */
1037 		ASSERT(udp->nzombies >= 1);
1038 		if (udp->all_zombies == ulwp)
1039 			udp->all_zombies = ulwp->ul_forw;
1040 		if (udp->all_zombies == ulwp)
1041 			udp->all_zombies = NULL;
1042 		else {
1043 			ulwp->ul_forw->ul_back = ulwp->ul_back;
1044 			ulwp->ul_back->ul_forw = ulwp->ul_forw;
1045 		}
1046 		ulwp->ul_forw = ulwp->ul_back = NULL;
1047 		udp->nzombies--;
1048 		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
1049 			!(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
1050 		/*
1051 		 * We can't call ulwp_unlock(ulwp) after we set
1052 		 * ulwp->ul_ix = -1 so we have to get a pointer to the
1053 		 * ulwp's hash table mutex now in order to unlock it below.
1054 		 */
1055 		mp = ulwp_mutex(ulwp, udp);
1056 		ulwp->ul_lwpid = (lwpid_t)(-1);
1057 		ulwp->ul_ix = -1;
1058 		rval = ulwp->ul_rval;
1059 		replace = ulwp->ul_replace;
1060 		lmutex_unlock(mp);
1061 		if (replace) {
1062 			ulwp->ul_next = NULL;
1063 			if (udp->ulwp_replace_free == NULL)
1064 				udp->ulwp_replace_free =
1065 					udp->ulwp_replace_last = ulwp;
1066 			else {
1067 				udp->ulwp_replace_last->ul_next = ulwp;
1068 				udp->ulwp_replace_last = ulwp;
1069 			}
1070 		}
1071 		lmutex_unlock(&udp->link_lock);
1072 	}
1073 
1074 	if (departed != NULL)
1075 		*departed = found;
1076 	if (status != NULL)
1077 		*status = rval;
1078 	return (0);
1079 }
1080 
1081 #pragma weak thr_join = _thr_join
1082 int
1083 _thr_join(thread_t tid, thread_t *departed, void **status)
1084 {
1085 	int error = _thrp_join(tid, departed, status, 1);
1086 	return ((error == EINVAL)? ESRCH : error);
1087 }
1088 
1089 /*
1090  * pthread_join() differs from Solaris thr_join():
1091  * It does not return the departed thread's id
1092  * and hence does not have a "departed" argument.
1093  * It returns EINVAL if tid refers to a detached thread.
1094  */
1095 #pragma weak pthread_join = _pthread_join
1096 int
1097 _pthread_join(pthread_t tid, void **status)
1098 {
1099 	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
1100 }
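
#if 0
/*
 * Illustrative sketch only (not compiled): thr_join() can reap any
 * joinable thread and report which one departed; pthread_join() joins
 * one specific thread.  demo_reap() and 'tid' are hypothetical.
 */
static void *
demo_reap(thread_t tid)
{
	thread_t who;
	void *rval;

	(void) thr_join(0, &who, &rval);	/* any joinable thread */
	(void) pthread_join(tid, &rval);	/* exactly the thread 'tid' */
	return (rval);
}
#endif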
1101 
1102 #pragma weak pthread_detach = _thr_detach
1103 #pragma weak _pthread_detach = _thr_detach
1104 int
1105 _thr_detach(thread_t tid)
1106 {
1107 	uberdata_t *udp = curthread->ul_uberdata;
1108 	ulwp_t *ulwp;
1109 	ulwp_t **ulwpp;
1110 	int error = 0;
1111 
1112 	if ((ulwpp = find_lwpp(tid)) == NULL)
1113 		return (ESRCH);
1114 	ulwp = *ulwpp;
1115 
1116 	if (ulwp->ul_dead) {
1117 		ulwp_unlock(ulwp, udp);
1118 		error = _thrp_join(tid, NULL, NULL, 0);
1119 	} else {
1120 		error = __lwp_detach(tid);
1121 		ulwp->ul_detached = 1;
1122 		ulwp->ul_usropts |= THR_DETACHED;
1123 		ulwp_unlock(ulwp, udp);
1124 	}
1125 	return (error);
1126 }
1127 
1128 /*
1129  * Static local string compare function to avoid calling strncmp()
1130  * (and hence the dynamic linker) during library initialization.
1131  */
1132 static int
1133 sncmp(const char *s1, const char *s2, size_t n)
1134 {
1135 	n++;
1136 	while (--n != 0 && *s1 == *s2++)
1137 		if (*s1++ == '\0')
1138 			return (0);
1139 	return (n == 0 ? 0 : *(uchar_t *)s1 - *(uchar_t *)--s2);
1140 }
1141 
1142 static const char *
1143 ematch(const char *ev, const char *match)
1144 {
1145 	int c;
1146 
1147 	while ((c = *match++) != '\0') {
1148 		if (*ev++ != c)
1149 			return (NULL);
1150 	}
1151 	if (*ev++ != '=')
1152 		return (NULL);
1153 	return (ev);
1154 }
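
/*
 * Example: ematch("QUEUE_SPIN=5000", "QUEUE_SPIN") returns a pointer
 * to "5000"; ematch("QUEUE_SPINS=1", "QUEUE_SPIN") returns NULL
 * because the character after the matched prefix is not '='.
 */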
1155 
1156 static int
1157 envvar(const char *ev, const char *match, int limit)
1158 {
1159 	int val = -1;
1160 	const char *ename;
1161 
1162 	if ((ename = ematch(ev, match)) != NULL) {
1163 		int c;
1164 		for (val = 0; (c = *ename) != '\0'; ename++) {
1165 			if (!isdigit(c)) {
1166 				val = -1;
1167 				break;
1168 			}
1169 			val = val * 10 + (c - '0');
1170 			if (val > limit) {
1171 				val = limit;
1172 				break;
1173 			}
1174 		}
1175 	}
1176 	return (val);
1177 }
1178 
1179 static void
1180 etest(const char *ev)
1181 {
1182 	int value;
1183 
1184 	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
1185 		thread_queue_spin = value;
1186 	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0) {
1187 		thread_adaptive_spin = value;
1188 		thread_release_spin = (value + 1) / 2;
1189 	}
1190 	if ((value = envvar(ev, "RELEASE_SPIN", 1000000)) >= 0)
1191 		thread_release_spin = value;
1192 	if ((value = envvar(ev, "MAX_SPINNERS", 100)) >= 0)
1193 		thread_max_spinners = value;
1194 	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
1195 		thread_queue_fifo = value;
1196 #if defined(THREAD_DEBUG)
1197 	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
1198 		thread_queue_verify = value;
1199 #endif
1200 	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
1201 		thread_queue_dump = value;
1202 	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
1203 		thread_stack_cache = value;
1204 	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
1205 		thread_cond_wait_defer = value;
1206 	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
1207 		thread_error_detection = value;
1208 	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
1209 		thread_async_safe = value;
1210 	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
1211 		thread_door_noreserve = value;
1212 }
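
/*
 * Example: setting _THREAD_ADAPTIVE_SPIN=1000 in the environment makes
 * etest() set thread_adaptive_spin to 1000 and thread_release_spin to
 * 500; a value above an option's limit is clamped to the limit.
 */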
1213 
1214 /*
1215  * Look for and evaluate environment variables of the form "_THREAD_*".
1216  * For compatibility with the past, we also look for environment
1217  * names of the form "LIBTHREAD_*".
1218  */
1219 static void
1220 set_thread_vars()
1221 {
1222 	extern const char **_environ;
1223 	const char **pev;
1224 	const char *ev;
1225 	char c;
1226 
1227 	if ((pev = _environ) == NULL)
1228 		return;
1229 	while ((ev = *pev++) != NULL) {
1230 		c = *ev;
1231 		if (c == '_' && sncmp(ev, "_THREAD_", 8) == 0)
1232 			etest(ev + 8);
1233 		if (c == 'L' && sncmp(ev, "LIBTHREAD_", 10) == 0)
1234 			etest(ev + 10);
1235 	}
1236 }
1237 
1238 /* PROBE_SUPPORT begin */
1239 #pragma weak __tnf_probe_notify
1240 extern void __tnf_probe_notify(void);
1241 /* PROBE_SUPPORT end */
1242 
1243 /* same as atexit() but private to the library */
1244 extern int _atexit(void (*)(void));
1245 
1246 /* same as _cleanup() but private to the library */
1247 extern void __cleanup(void);
1248 
1249 extern void atfork_init(void);
1250 
1251 #ifdef __amd64
1252 extern void __amd64id(void);
1253 #endif
1254 
1255 /*
1256  * libc_init() is called by ld.so.1 for library initialization.
1257  * We perform minimal initialization; enough to work with the main thread.
1258  */
1259 void
1260 libc_init(void)
1261 {
1262 	uberdata_t *udp = &__uberdata;
1263 	ulwp_t *oldself = __curthread();
1264 	ucontext_t uc;
1265 	ulwp_t *self;
1266 	struct rlimit rl;
1267 	caddr_t data;
1268 	size_t tls_size;
1269 	int setmask;
1270 
1271 	/*
1272 	 * For the initial stage of initialization, we must be careful
1273 	 * not to call any function that could possibly call _cerror().
1274 	 * For this purpose, we call only the raw system call wrappers.
1275 	 */
1276 
1277 #ifdef __amd64
1278 	/*
1279 	 * Gather information about cache layouts for optimized
1280 	 * AMD assembler strfoo() and memfoo() functions.
1281 	 */
1282 	__amd64id();
1283 #endif
1284 
1285 	/*
1286 	 * Every libc, regardless of which link map, must register __cleanup().
1287 	 */
1288 	(void) _atexit(__cleanup);
1289 
1290 	/*
1291 	 * We keep our uberdata on one of (a) the first alternate link map
1292 	 * or (b) the primary link map.  We switch to the primary link map
1293 	 * and stay there once we see it.  All intermediate link maps are
1294 	 * subject to being unloaded at any time.
1295 	 */
1296 	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
1297 		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
1298 		mutex_setup();
1299 		atfork_init();	/* every link map needs atfork() processing */
1300 		return;
1301 	}
1302 
1303 	/*
1304 	 * To establish the main stack information, we have to get our context.
1305 	 * This is also convenient to use for getting our signal mask.
1306 	 */
1307 	uc.uc_flags = UC_ALL;
1308 	(void) __getcontext_syscall(&uc);
1309 	ASSERT(uc.uc_link == NULL);
1310 
1311 	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
1312 	ASSERT(primary_link_map || tls_size == 0);
1313 	data = lmalloc(sizeof (ulwp_t) + tls_size);
1314 	if (data == NULL)
1315 		thr_panic("cannot allocate thread structure for main thread");
1316 	/* LINTED pointer cast may result in improper alignment */
1317 	self = (ulwp_t *)(data + tls_size);
1318 	init_hash_table[0].hash_bucket = self;
1319 
1320 	self->ul_sigmask = uc.uc_sigmask;
1321 	delete_reserved_signals(&self->ul_sigmask);
1322 	/*
1323 	 * Are the old and new sets different?
1324 	 * (This can happen if we are currently blocking SIGCANCEL.)
1325 	 * If so, we must explicitly set our signal mask, below.
1326 	 */
1327 	setmask =
1328 	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
1329 	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]));
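	/*
	 * A nonzero XOR in either word means delete_reserved_signals()
	 * changed the mask, so restore_signals() must be called below.
	 */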
1330 
1331 #ifdef __sparc
1332 	/*
1333 	 * We cache several instructions in the thread structure for use
1334 	 * by the fasttrap DTrace provider. When changing this, read the
1335 	 * comment in fasttrap.h for all the other places that must
1336 	 * be changed.
1337 	 */
1338 	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
1339 	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
1340 	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
1341 	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
1342 #endif
1343 
1344 	self->ul_stktop =
1345 		(uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
1346 	(void) _private_getrlimit(RLIMIT_STACK, &rl);
1347 	self->ul_stksiz = rl.rlim_cur;
1348 	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);
1349 
1350 	self->ul_forw = self->ul_back = self;
1351 	self->ul_hash = NULL;
1352 	self->ul_ix = 0;
1353 	self->ul_lwpid = 1; /* __lwp_self() */
1354 	self->ul_main = 1;
1355 	self->ul_self = self;
1356 	self->ul_uberdata = udp;
1357 	if (oldself != NULL) {
1358 		int i;
1359 
1360 		ASSERT(primary_link_map);
1361 		ASSERT(oldself->ul_main == 1);
1362 		self->ul_stsd = oldself->ul_stsd;
1363 		for (i = 0; i < TSD_NFAST; i++)
1364 			self->ul_ftsd[i] = oldself->ul_ftsd[i];
1365 		self->ul_tls = oldself->ul_tls;
1366 		/*
1367 		 * Retrieve all pointers to uberdata allocated
1368 		 * while running on previous link maps.
1369 		 * We would like to do a structure assignment here, but
1370 		 * gcc turns structure assignments into calls to memcpy(),
1371 		 * a function exported from libc.  We can't call any such
1372 		 * external functions until we establish curthread, below,
1373 		 * so we just call our private version of memcpy().
1374 		 */
1375 		(void) _private_memcpy(udp,
1376 		    oldself->ul_uberdata, sizeof (*udp));
1377 		/*
1378 		 * These items point to global data on the primary link map.
1379 		 */
1380 		udp->thr_hash_table = init_hash_table;
1381 		udp->sigacthandler = sigacthandler;
1382 		udp->tdb.tdb_events = tdb_events;
1383 		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
1384 		ASSERT(udp->lwp_stacks == NULL);
1385 		ASSERT(udp->ulwp_freelist == NULL);
1386 		ASSERT(udp->ulwp_replace_free == NULL);
1387 		ASSERT(udp->hash_size == 1);
1388 	}
1389 	udp->all_lwps = self;
1390 	udp->ulwp_one = self;
1391 	udp->pid = _private_getpid();
1392 	udp->nthreads = 1;
1393 	/*
1394 	 * In every link map, tdb_bootstrap points to the same piece of
1395 	 * allocated memory.  When the primary link map is initialized,
1396 	 * the allocated memory is assigned a pointer to the one true
1397 	 * uberdata.  This allows libc_db to initialize itself regardless
1398 	 * of which instance of libc it finds in the address space.
1399 	 */
1400 	if (udp->tdb_bootstrap == NULL)
1401 		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
1402 	__tdb_bootstrap = udp->tdb_bootstrap;
1403 	if (primary_link_map) {
1404 		self->ul_primarymap = 1;
1405 		udp->primary_map = 1;
1406 		*udp->tdb_bootstrap = udp;
1407 	}
1408 	/*
1409 	 * Cancellation can't happen until:
1410 	 *	pthread_cancel() is called
1411 	 * or:
1412 	 *	another thread is created
1413 	 * For now, as a single-threaded process, set the flag that tells
1414 	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
1415 	 */
1416 	self->ul_nocancel = 1;
1417 
1418 #if defined(__amd64)
1419 	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
1420 #elif defined(__i386)
1421 	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
1422 #endif	/* __i386 || __amd64 */
1423 	set_curthread(self);		/* redundant on i386 */
1424 	/*
1425 	 * Now curthread is established and it is safe to call any
1426 	 * function in libc except one that uses thread-local storage.
1427 	 */
1428 	self->ul_errnop = &errno;
1429 	if (oldself != NULL) {
1430 		/* tls_size was zero when oldself was allocated */
1431 		lfree(oldself, sizeof (ulwp_t));
1432 	}
1433 	mutex_setup();
1434 	atfork_init();
1435 	signal_init();
1436 
1437 	/*
1438 	 * If the stack is unlimited, we set the size to zero to disable
1439 	 * stack checking.
1440 	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
1441 	 */
1442 	if (self->ul_stksiz == RLIM_INFINITY) {
1443 		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
1444 		self->ul_ustack.ss_size = 0;
1445 	} else {
1446 		self->ul_ustack.ss_sp = self->ul_stk;
1447 		self->ul_ustack.ss_size = self->ul_stksiz;
1448 	}
1449 	self->ul_ustack.ss_flags = 0;
1450 	(void) _private_setustack(&self->ul_ustack);
1451 
1452 	/*
1453 	 * Get the variables that affect thread behavior from the environment.
1454 	 */
1455 	set_thread_vars();
1456 	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
1457 	udp->thread_stack_cache = thread_stack_cache;
1458 
1459 	/*
1460 	 * Make per-thread copies of global variables, for speed.
1461 	 */
1462 	self->ul_queue_fifo = (char)thread_queue_fifo;
1463 	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
1464 	self->ul_error_detection = (char)thread_error_detection;
1465 	self->ul_async_safe = (char)thread_async_safe;
1466 	self->ul_door_noreserve = (char)thread_door_noreserve;
1467 	self->ul_max_spinners = (uchar_t)thread_max_spinners;
1468 	self->ul_adaptive_spin = thread_adaptive_spin;
1469 	self->ul_release_spin = thread_release_spin;
1470 	self->ul_queue_spin = thread_queue_spin;
1471 
1472 	/*
1473 	 * When we have initialized the primary link map, inform
1474 	 * the dynamic linker about our interface functions.
1475 	 */
1476 	if (self->ul_primarymap)
1477 		_ld_libc((void *)rtld_funcs);
1478 
1479 	/*
1480 	 * Defer signals until TLS constructors have been called.
1481 	 */
1482 	sigoff(self);
1483 	tls_setup();
1484 	sigon(self);
1485 	if (setmask)
1486 		(void) restore_signals(self);
1487 
1488 	/* PROBE_SUPPORT begin */
1489 	if (self->ul_primarymap && __tnf_probe_notify != NULL)
1490 		__tnf_probe_notify();
1491 	/* PROBE_SUPPORT end */
1492 
1493 	init_sigev_thread();
1494 	init_aio();
1495 
1496 	/*
1497 	 * We need to reset __threaded dynamically at runtime, so that it
1498 	 * can be bound to a __threaded defined outside libc, which may not
1499 	 * have an initial value of 1 (absent a copy relocation in a.out).
1500 	 */
1501 	__threaded = 1;
1502 }
1503 
1504 #pragma fini(libc_fini)
1505 void
1506 libc_fini()
1507 {
1508 	/*
1509 	 * If we are doing fini processing for the instance of libc
1510 	 * on the first alternate link map (this happens only when
1511 	 * the dynamic linker rejects a bad audit library), then clear
1512 	 * __curthread().  We abandon whatever memory was allocated by
1513 	 * lmalloc() while running on this alternate link-map but we
1514 	 * don't care (and can't find the memory in any case); we just
1515 	 * want to protect the application from this bad audit library.
1516 	 * No fini processing is done by libc in the normal case.
1517 	 */
1518 
1519 	uberdata_t *udp = curthread->ul_uberdata;
1520 
1521 	if (udp->primary_map == 0 && udp == &__uberdata)
1522 		set_curthread(NULL);
1523 }
1524 
1525 /*
1526  * finish_init is called when we are about to become multi-threaded,
1527  * that is, on the first call to thr_create().
1528  */
1529 void
1530 finish_init()
1531 {
1532 	ulwp_t *self = curthread;
1533 	uberdata_t *udp = self->ul_uberdata;
1534 	thr_hash_table_t *htp;
1535 	void *data;
1536 	int i;
1537 
1538 	/*
1539 	 * No locks needed here; we are single-threaded on the first call.
1540 	 * We can be called only after the primary link map has been set up.
1541 	 */
1542 	ASSERT(self->ul_primarymap);
1543 	ASSERT(self == udp->ulwp_one);
1544 	ASSERT(!udp->uberflags.uf_mt);
1545 	ASSERT(udp->hash_size == 1);
1546 
1547 	/*
1548 	 * First allocate the queue_head array if not already allocated.
1549 	 */
1550 	if (udp->queue_head == NULL)
1551 		queue_alloc();
1552 
1553 	/*
1554 	 * Now allocate the thread hash table.
1555 	 */
1556 	if ((data = _private_mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
1557 	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
1558 	    == MAP_FAILED)
1559 		thr_panic("cannot allocate thread hash table");
1560 
1561 	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
1562 	udp->hash_size = HASHTBLSZ;
1563 	udp->hash_mask = HASHTBLSZ - 1;
1564 
1565 	for (i = 0; i < HASHTBLSZ; i++, htp++) {
1566 		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
1567 		htp->hash_cond.cond_magic = COND_MAGIC;
1568 	}
1569 	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);
1570 
1571 	/*
1572 	 * Set up the SIGCANCEL handler for threads cancellation.
1573 	 * Set up the SIGCANCEL handler for thread cancellation.
1574 	setup_cancelsig(SIGCANCEL);
1575 
1576 	/*
1577 	 * Arrange to do special things on exit --
1578 	 * - collect queue statistics from all remaining active threads.
1579 	 * - grab assert_lock to ensure that assertion failures
1580 	 *   and a core dump take precedence over _exit().
1581 	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
1582 	 * (Functions are called in the reverse order of their registration.)
1583 	 */
1584 	(void) _atexit(dump_queue_statistics);
1585 	(void) _atexit(grab_assert_lock);
1586 	(void) _atexit(collect_queue_statistics);
1587 }
1588 
1589 /*
1590  * Used only by postfork1_child(), below.
1591  */
1592 static void
1593 mark_dead_and_buried(ulwp_t *ulwp)
1594 {
1595 	ulwp->ul_dead = 1;
1596 	ulwp->ul_lwpid = (lwpid_t)(-1);
1597 	ulwp->ul_hash = NULL;
1598 	ulwp->ul_ix = -1;
1599 	ulwp->ul_schedctl = NULL;
1600 	ulwp->ul_schedctl_called = NULL;
1601 }
1602 
1603 /*
1604  * This is called from fork1() in the child.
1605  * Reset our data structures to reflect one lwp.
1606  */
1607 void
1608 postfork1_child()
1609 {
1610 	ulwp_t *self = curthread;
1611 	uberdata_t *udp = self->ul_uberdata;
1612 	ulwp_t *next;
1613 	ulwp_t *ulwp;
1614 	int i;
1615 
1616 	/* daemon threads shouldn't call fork1(), but oh well... */
1617 	self->ul_usropts &= ~THR_DAEMON;
1618 	udp->nthreads = 1;
1619 	udp->ndaemons = 0;
1620 	udp->uberflags.uf_mt = 0;
1621 	__libc_threaded = 0;
1622 	for (i = 0; i < udp->hash_size; i++)
1623 		udp->thr_hash_table[i].hash_bucket = NULL;
1624 	self->ul_lwpid = __lwp_self();
1625 	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);
1626 
1627 	/* no one in the child is on a sleep queue; reinitialize */
1628 	if (udp->queue_head) {
1629 		(void) _private_memset(udp->queue_head, 0,
1630 			2 * QHASHSIZE * sizeof (queue_head_t));
1631 		for (i = 0; i < 2 * QHASHSIZE; i++)
1632 			udp->queue_head[i].qh_lock.mutex_magic = MUTEX_MAGIC;
1633 	}
1634 
1635 	/*
1636 	 * All lwps except ourself are gone.  Mark them so.
1637 	 * First mark all of the lwps that have already been freed.
1638 	 * Then mark and free all of the active lwps except ourself.
1639 	 * Since we are single-threaded, no locks are required here.
1640 	 */
1641 	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
1642 		mark_dead_and_buried(ulwp);
1643 	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
1644 		mark_dead_and_buried(ulwp);
1645 	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
1646 		next = ulwp->ul_forw;
1647 		ulwp->ul_forw = ulwp->ul_back = NULL;
1648 		mark_dead_and_buried(ulwp);
1649 		tsd_free(ulwp);
1650 		tls_free(ulwp);
1651 		rwl_free(ulwp);
1652 		ulwp_free(ulwp);
1653 	}
1654 	self->ul_forw = self->ul_back = udp->all_lwps = self;
1655 	if (self != udp->ulwp_one)
1656 		mark_dead_and_buried(udp->ulwp_one);
1657 	if ((ulwp = udp->all_zombies) != NULL) {
1658 		ASSERT(udp->nzombies != 0);
1659 		do {
1660 			next = ulwp->ul_forw;
1661 			ulwp->ul_forw = ulwp->ul_back = NULL;
1662 			mark_dead_and_buried(ulwp);
1663 			udp->nzombies--;
1664 			if (ulwp->ul_replace) {
1665 				ulwp->ul_next = NULL;
1666 				if (udp->ulwp_replace_free == NULL) {
1667 					udp->ulwp_replace_free =
1668 						udp->ulwp_replace_last = ulwp;
1669 				} else {
1670 					udp->ulwp_replace_last->ul_next = ulwp;
1671 					udp->ulwp_replace_last = ulwp;
1672 				}
1673 			}
1674 		} while ((ulwp = next) != udp->all_zombies);
1675 		ASSERT(udp->nzombies == 0);
1676 		udp->all_zombies = NULL;
1677 		udp->nzombies = 0;
1678 	}
1679 	trim_stack_cache(0);
1680 
1681 	/*
1682 	 * Do post-fork1 processing for subsystems that need it.
1683 	 */
1684 	postfork1_child_tpool();
1685 	postfork1_child_sigev_aio();
1686 	postfork1_child_sigev_mq();
1687 	postfork1_child_sigev_timer();
1688 	postfork1_child_aio();
1689 }
1690 
1691 #pragma weak thr_setprio = _thr_setprio
1692 #pragma weak pthread_setschedprio = _thr_setprio
1693 #pragma weak _pthread_setschedprio = _thr_setprio
1694 int
1695 _thr_setprio(thread_t tid, int priority)
1696 {
1697 	struct sched_param param;
1698 
1699 	(void) _memset(&param, 0, sizeof (param));
1700 	param.sched_priority = priority;
1701 	return (_thread_setschedparam_main(tid, 0, &param, PRIO_SET_PRIO));
1702 }
1703 
1704 #pragma weak thr_getprio = _thr_getprio
1705 int
1706 _thr_getprio(thread_t tid, int *priority)
1707 {
1708 	uberdata_t *udp = curthread->ul_uberdata;
1709 	ulwp_t *ulwp;
1710 	int error = 0;
1711 
1712 	if ((ulwp = find_lwp(tid)) == NULL)
1713 		error = ESRCH;
1714 	else {
1715 		*priority = ulwp->ul_pri;
1716 		ulwp_unlock(ulwp, udp);
1717 	}
1718 	return (error);
1719 }
1720 
1721 lwpid_t
1722 lwp_self(void)
1723 {
1724 	return (curthread->ul_lwpid);
1725 }
1726 
1727 #pragma weak _ti_thr_self = _thr_self
1728 #pragma weak thr_self = _thr_self
1729 #pragma weak pthread_self = _thr_self
1730 #pragma weak _pthread_self = _thr_self
1731 thread_t
1732 _thr_self()
1733 {
1734 	return (curthread->ul_lwpid);
1735 }
1736 
1737 #pragma weak thr_main = _thr_main
1738 int
1739 _thr_main()
1740 {
1741 	ulwp_t *self = __curthread();
1742 
1743 	return ((self == NULL)? -1 : self->ul_main);
1744 }
1745 
1746 int
1747 _thrp_cancelled(void)
1748 {
1749 	return (curthread->ul_rval == PTHREAD_CANCELED);
1750 }
1751 
1752 int
1753 _thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
1754 {
1755 	stk->ss_sp = (void *)ulwp->ul_stktop;
1756 	stk->ss_size = ulwp->ul_stksiz;
1757 	stk->ss_flags = 0;
1758 	return (0);
1759 }
1760 
1761 #pragma weak thr_stksegment = _thr_stksegment
1762 int
1763 _thr_stksegment(stack_t *stk)
1764 {
1765 	return (_thrp_stksegment(curthread, stk));
1766 }
1767 
1768 void
1769 force_continue(ulwp_t *ulwp)
1770 {
1771 #if defined(THREAD_DEBUG)
1772 	ulwp_t *self = curthread;
1773 	uberdata_t *udp = self->ul_uberdata;
1774 #endif
1775 	int error;
1776 	timespec_t ts;
1777 
1778 	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
1779 
1780 	for (;;) {
1781 		error = __lwp_continue(ulwp->ul_lwpid);
1782 		if (error != 0 && error != EINTR)
1783 			break;
1784 		error = 0;
1785 		if (ulwp->ul_stopping) {	/* he is stopping himself */
1786 			ts.tv_sec = 0;		/* give him a chance to run */
1787 			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
1788 			(void) __nanosleep(&ts, NULL);
1789 		}
1790 		if (!ulwp->ul_stopping)		/* he is running now */
1791 			break;			/* so we are done */
1792 		/*
1793 		 * He is marked as being in the process of stopping
1794 		 * himself.  Loop around and continue him again.
1795 		 * He may not have been stopped the first time.
1796 		 */
1797 	}
1798 }
1799 
1800 /*
1801  * Suspend an lwp with lwp_suspend(), then move it to a safe
1802  * point, that is, to a point where ul_critical is zero.
1803  * On return, the ulwp_lock() is dropped as with ulwp_unlock().
1804  * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
1805  * If we have to drop link_lock, we store 1 through link_dropped.
1806  * If the lwp exits before it can be suspended, we return ESRCH.
1807  */
1808 int
1809 safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
1810 {
1811 	ulwp_t *self = curthread;
1812 	uberdata_t *udp = self->ul_uberdata;
1813 	cond_t *cvp = ulwp_condvar(ulwp, udp);
1814 	mutex_t *mp = ulwp_mutex(ulwp, udp);
1815 	thread_t tid = ulwp->ul_lwpid;
1816 	int ix = ulwp->ul_ix;
1817 	int error = 0;
1818 
1819 	ASSERT(whystopped == TSTP_REGULAR ||
1820 	    whystopped == TSTP_MUTATOR ||
1821 	    whystopped == TSTP_FORK);
1822 	ASSERT(ulwp != self);
1823 	ASSERT(!ulwp->ul_stop);
1824 	ASSERT(MUTEX_OWNED(mp, self));
1825 
1826 	if (link_dropped != NULL)
1827 		*link_dropped = 0;
1828 
1829 	/*
1830 	 * We must grab the target's spin lock before suspending it.
1831 	 * See the comments below and in _thrp_suspend() for why.
1832 	 */
1833 	spin_lock_set(&ulwp->ul_spinlock);
1834 	(void) ___lwp_suspend(tid);
1835 	spin_lock_clear(&ulwp->ul_spinlock);
1836 
1837 top:
1838 	if (ulwp->ul_critical == 0 || ulwp->ul_stopping) {
1839 		/* thread is already safe */
1840 		ulwp->ul_stop |= whystopped;
1841 	} else {
1842 		/*
1843 		 * Setting ul_pleasestop causes the target thread to stop
1844 		 * itself in _thrp_suspend(), below, after we drop its lock.
1845 		 * We must continue the critical thread before dropping
1846 		 * link_lock because the critical thread may be holding
1847 		 * the queue lock for link_lock.  This is delicate.
1848 		 */
1849 		ulwp->ul_pleasestop |= whystopped;
1850 		force_continue(ulwp);
1851 		if (link_dropped != NULL) {
1852 			*link_dropped = 1;
1853 			lmutex_unlock(&udp->link_lock);
1854 			/* be sure to drop link_lock only once */
1855 			link_dropped = NULL;
1856 		}
1857 
1858 		/*
1859 		 * The thread may disappear by calling thr_exit() so we
1860 		 * cannot rely on the ulwp pointer after dropping the lock.
1861 		 * Instead, we search the hash table to find it again.
1862 		 * When we return, we may find that the thread has been
1863 		 * continued by some other thread.  The suspend/continue
1864 		 * interfaces are prone to such race conditions by design.
1865 		 */
1866 		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
1867 		    (ulwp->ul_pleasestop & whystopped)) {
1868 			(void) _cond_wait(cvp, mp);
1869 			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
1870 			    ulwp != NULL; ulwp = ulwp->ul_hash) {
1871 				if (ulwp->ul_lwpid == tid)
1872 					break;
1873 			}
1874 		}
1875 
1876 		if (ulwp == NULL || ulwp->ul_dead)
1877 			error = ESRCH;
1878 		else {
1879 			/*
1880 			 * Do another lwp_suspend() to make sure we don't
1881 			 * return until the target thread is fully stopped
1882 			 * in the kernel.  Don't apply lwp_suspend() until
1883 			 * we know that the target is not holding any
1884 			 * queue locks, that is, that it has completed
1885 			 * ulwp_unlock(self) and has, or at least is
1886 			 * about to, call lwp_suspend() on itself.  We do
1887 			 * this by grabbing the target's spin lock.
1888 			 */
1889 			ASSERT(ulwp->ul_lwpid == tid);
1890 			spin_lock_set(&ulwp->ul_spinlock);
1891 			(void) ___lwp_suspend(tid);
1892 			spin_lock_clear(&ulwp->ul_spinlock);
1893 			/*
1894 			 * If some other thread did a thr_continue()
1895 			 * on the target thread we have to start over.
1896 			 */
1897 			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
1898 				goto top;
1899 		}
1900 	}
1901 
1902 	(void) cond_broadcast_internal(cvp);
1903 	lmutex_unlock(mp);
1904 	return (error);
1905 }
1906 
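/*
 * Common code for thr_suspend(), thr_suspend_mutator() and fork
 * suspension (TSTP_FORK).  Suspending another thread is delegated
 * to safe_suspend(); suspending ourself requires the careful
 * sequencing described in the comments below.
 */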
1907 int
1908 _thrp_suspend(thread_t tid, uchar_t whystopped)
1909 {
1910 	ulwp_t *self = curthread;
1911 	uberdata_t *udp = self->ul_uberdata;
1912 	ulwp_t *ulwp;
1913 	int error = 0;
1914 
1915 	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
1916 	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);
1917 
1918 	/*
1919 	 * We can't suspend anyone except ourself while a fork is happening.
1920 	 * This also has the effect of allowing only one suspension at a time.
1921 	 */
1922 	if (tid != self->ul_lwpid)
1923 		(void) fork_lock_enter(NULL);
1924 
1925 	if ((ulwp = find_lwp(tid)) == NULL)
1926 		error = ESRCH;
1927 	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
1928 		ulwp_unlock(ulwp, udp);
1929 		error = EINVAL;
1930 	} else if (ulwp->ul_stop) {	/* already stopped */
1931 		ulwp->ul_stop |= whystopped;
1932 		ulwp_broadcast(ulwp);
1933 		ulwp_unlock(ulwp, udp);
1934 	} else if (ulwp != self) {
1935 		/*
1936 		 * After suspending the other thread, move it out of a
1937 		 * critical section and deal with the schedctl mappings.
1938 		 * safe_suspend() suspends the other thread, calls
1939 		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
1940 		 */
1941 		error = safe_suspend(ulwp, whystopped, NULL);
1942 	} else {
1943 		int schedctl_after_fork = 0;
1944 
1945 		/*
1946 		 * We are suspending ourself.  We must not take a signal
1947 		 * until we return from lwp_suspend() and clear ul_stopping.
1948 		 * This is to guard against siglongjmp().
1949 		 */
1950 		enter_critical(self);
1951 		self->ul_sp = stkptr();
1952 		_flush_windows();	/* sparc */
1953 		self->ul_pleasestop = 0;
1954 		self->ul_stop |= whystopped;
1955 		/*
1956 		 * Grab our spin lock before dropping ulwp_mutex(self).
1957 		 * This prevents the suspending thread from applying
1958 		 * lwp_suspend() to us before we emerge from
1959 		 * lmutex_unlock(mp) and have dropped mp's queue lock.
1960 		 */
1961 		spin_lock_set(&self->ul_spinlock);
1962 		self->ul_stopping = 1;
1963 		ulwp_broadcast(self);
1964 		ulwp_unlock(self, udp);
1965 		/*
1966 		 * From this point until we return from lwp_suspend(),
1967 		 * we must not call any function that might invoke the
1968 		 * dynamic linker, that is, we can only call functions
1969 		 * private to the library.
1970 		 *
1971 		 * Also, this is a nasty race condition for a process
1972 		 * that is undergoing a forkall() operation:
1973 		 * Once we clear our spinlock (below), we are vulnerable
1974 		 * to being suspended by the forkall() thread before
1975 		 * we manage to suspend ourself in ___lwp_suspend().
1976 		 * See safe_suspend() and force_continue().
1977 		 *
1978 		 * To avoid a SIGSEGV due to the disappearance
1979 		 * of the schedctl mappings in the child process,
1980 		 * which can happen in spin_lock_clear() if we
1981 		 * are suspended while we are in the middle of
1982 		 * its call to preempt(), we preemptively clear
1983 		 * our own schedctl pointer before dropping our
1984 		 * spinlock.  We reinstate it, in both the parent
1985 		 * and (if this really is a forkall()) the child.
1986 		 */
1987 		if (whystopped & TSTP_FORK) {
1988 			schedctl_after_fork = 1;
1989 			self->ul_schedctl = NULL;
1990 			self->ul_schedctl_called = &udp->uberflags;
1991 		}
1992 		spin_lock_clear(&self->ul_spinlock);
1993 		(void) ___lwp_suspend(tid);
1994 		/*
1995 		 * Somebody else continued us.
1996 		 * We can't grab ulwp_lock(self)
1997 		 * until after clearing ul_stopping.
1998 		 * force_continue() relies on this.
1999 		 */
2000 		self->ul_stopping = 0;
2001 		self->ul_sp = 0;
2002 		if (schedctl_after_fork) {
2003 			self->ul_schedctl_called = NULL;
2004 			self->ul_schedctl = NULL;
2005 			(void) setup_schedctl();
2006 		}
2007 		ulwp_lock(self, udp);
2008 		ulwp_broadcast(self);
2009 		ulwp_unlock(self, udp);
2010 		exit_critical(self);
2011 	}
2012 
2013 	if (tid != self->ul_lwpid)
2014 		fork_lock_exit();
2015 
2016 	return (error);
2017 }
2018 
2019 /*
2020  * Suspend all lwps other than ourself in preparation for fork.
2021  */
2022 void
2023 suspend_fork()
2024 {
2025 	ulwp_t *self = curthread;
2026 	uberdata_t *udp = self->ul_uberdata;
2027 	ulwp_t *ulwp;
2028 	int link_dropped;
2029 
2030 top:
2031 	lmutex_lock(&udp->link_lock);
2032 
2033 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2034 		ulwp_lock(ulwp, udp);
2035 		if (ulwp->ul_stop) {	/* already stopped */
2036 			ulwp->ul_stop |= TSTP_FORK;
2037 			ulwp_broadcast(ulwp);
2038 			ulwp_unlock(ulwp, udp);
2039 		} else {
2040 			/*
2041 			 * Move the stopped lwp out of a critical section.
2042 			 */
2043 			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
2044 			    link_dropped)
2045 				goto top;
2046 		}
2047 	}
2048 
2049 	lmutex_unlock(&udp->link_lock);
2050 }
2051 
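/*
 * Undo the work of suspend_fork(), in both the parent and the child
 * of forkall().  In the child, the schedctl pointers must be cleared
 * first; see the TSTP_FORK commentary in _thrp_suspend().
 */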
2052 void
2053 continue_fork(int child)
2054 {
2055 	ulwp_t *self = curthread;
2056 	uberdata_t *udp = self->ul_uberdata;
2057 	ulwp_t *ulwp;
2058 
2059 	/*
2060 	 * Clear the schedctl pointers in the child of forkall().
2061 	 */
2062 	if (child) {
2063 		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2064 			ulwp->ul_schedctl_called =
2065 			    ulwp->ul_dead ? &udp->uberflags : NULL;
2066 			ulwp->ul_schedctl = NULL;
2067 		}
2068 	}
2069 
2070 	/*
2071 	 * Set all lwps that were stopped for fork() running again.
2072 	 */
2073 	lmutex_lock(&udp->link_lock);
2074 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2075 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2076 		lmutex_lock(mp);
2077 		ASSERT(ulwp->ul_stop & TSTP_FORK);
2078 		ulwp->ul_stop &= ~TSTP_FORK;
2079 		ulwp_broadcast(ulwp);
2080 		if (!ulwp->ul_stop)
2081 			force_continue(ulwp);
2082 		lmutex_unlock(mp);
2083 	}
2084 	lmutex_unlock(&udp->link_lock);
2085 }
2086 
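/*
 * Common code for thr_continue() and thr_continue_mutator():
 * clear the 'whystopped' stop bit and, if the target is no longer
 * stopped for any reason, set it running again.
 */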
2087 int
2088 _thrp_continue(thread_t tid, uchar_t whystopped)
2089 {
2090 	uberdata_t *udp = curthread->ul_uberdata;
2091 	ulwp_t *ulwp;
2092 	mutex_t *mp;
2093 	int error = 0;
2094 
2095 	ASSERT(whystopped == TSTP_REGULAR ||
2096 	    whystopped == TSTP_MUTATOR);
2097 
2098 	if ((ulwp = find_lwp(tid)) == NULL)
2099 		return (ESRCH);
2100 
2101 	mp = ulwp_mutex(ulwp, udp);
2102 	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
2103 		error = EINVAL;
2104 	} else if (ulwp->ul_stop & whystopped) {
2105 		ulwp->ul_stop &= ~whystopped;
2106 		ulwp_broadcast(ulwp);
2107 		if (!ulwp->ul_stop) {
2108 			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
2109 				ulwp->ul_sp = 0;
2110 				ulwp->ul_created = 0;
2111 			}
2112 			force_continue(ulwp);
2113 		}
2114 	}
2115 
2116 	lmutex_unlock(mp);
2117 	return (error);
2118 }
2119 
2120 #pragma weak thr_suspend = _thr_suspend
2121 int
2122 _thr_suspend(thread_t tid)
2123 {
2124 	return (_thrp_suspend(tid, TSTP_REGULAR));
2125 }
2126 
2127 #pragma weak thr_continue = _thr_continue
2128 int
2129 _thr_continue(thread_t tid)
2130 {
2131 	return (_thrp_continue(tid, TSTP_REGULAR));
2132 }
2133 
2134 #pragma weak thr_yield = _thr_yield
2135 void
2136 _thr_yield()
2137 {
2138 	lwp_yield();
2139 }
2140 
2141 #pragma weak thr_kill = _thr_kill
2142 #pragma weak pthread_kill = _thr_kill
2143 #pragma weak _pthread_kill = _thr_kill
2144 int
2145 _thr_kill(thread_t tid, int sig)
2146 {
2147 	if (sig == SIGCANCEL)
2148 		return (EINVAL);
2149 	return (__lwp_kill(tid, sig));
2150 }
2151 
2152 /*
2153  * Exit a critical section, take deferred actions if necessary.
2154  */
2155 void
2156 do_exit_critical()
2157 {
2158 	ulwp_t *self = curthread;
2159 	int sig;
2160 
2161 	ASSERT(self->ul_critical == 0);
2162 	if (self->ul_dead)
2163 		return;
2164 
2165 	while (self->ul_pleasestop ||
2166 	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
2167 		/*
2168 		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
2169 		 * by keeping self->ul_critical == 1 here.
2170 		 */
2171 		self->ul_critical++;
2172 		while (self->ul_pleasestop) {
2173 			/*
2174 			 * Guard against suspending ourself while on a sleep
2175 			 * queue.  See the comments in call_user_handler().
2176 			 */
2177 			unsleep_self();
2178 			set_parking_flag(self, 0);
2179 			(void) _thrp_suspend(self->ul_lwpid,
2180 			    self->ul_pleasestop);
2181 		}
2182 		self->ul_critical--;
2183 
2184 		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
2185 			/*
2186 			 * Clear ul_cursig before proceeding.
2187 			 * This protects us from the dynamic linker's
2188 			 * calls to bind_guard()/bind_clear() in the
2189 			 * event that it is invoked to resolve a symbol
2190 			 * like take_deferred_signal() below.
2191 			 */
2192 			self->ul_cursig = 0;
2193 			take_deferred_signal(sig);
2194 			ASSERT(self->ul_cursig == 0);
2195 		}
2196 	}
2197 	ASSERT(self->ul_critical == 0);
2198 }
2199 
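/*
 * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic
 * linker to bracket its symbol-binding operations.  Binding is done
 * as a libc critical section (enter_critical()/exit_critical()), so
 * suspension and deferred signals are postponed until the binding
 * is complete; see also the commentary in do_exit_critical(), above.
 */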
2200 int
2201 _ti_bind_guard(int bindflag)
2202 {
2203 	ulwp_t *self = curthread;
2204 
2205 	if ((self->ul_bindflags & bindflag) == bindflag)
2206 		return (0);
2207 	enter_critical(self);
2208 	self->ul_bindflags |= bindflag;
2209 	return (1);
2210 }
2211 
2212 int
2213 _ti_bind_clear(int bindflag)
2214 {
2215 	ulwp_t *self = curthread;
2216 
2217 	if ((self->ul_bindflags & bindflag) == 0)
2218 		return (self->ul_bindflags);
2219 	self->ul_bindflags &= ~bindflag;
2220 	exit_critical(self);
2221 	return (self->ul_bindflags);
2222 }
2223 
2224 /*
2225  * sigoff() and sigon() enable cond_wait() to behave (optionally) like
2226  * it does in the old libthread (see the comments in cond_wait_queue()).
2227  * Also, signals are deferred at thread startup until TLS constructors
2228  * have all been called, at which time _thr_setup() calls sigon().
2229  *
2230  * _sigoff() and _sigon() are external consolidation-private interfaces to
2231  * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
2232  * Also, _sigoff() and _sigon() are called from dbx's run-time checking
2233  * (librtc.so) to defer signals during its critical sections (not to be
2234  * confused with libc critical sections [see exit_critical() above]).
2235  */
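/*
 * A minimal sketch of the intended usage (hypothetical caller):
 *
 *	_sigoff();		(defer asynchronous signal delivery)
 *	... code that must not be interrupted by a signal handler ...
 *	_sigon();		(take any signal deferred above)
 */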
2236 void
2237 _sigoff(void)
2238 {
2239 	sigoff(curthread);
2240 }
2241 
2242 void
2243 _sigon(void)
2244 {
2245 	sigon(curthread);
2246 }
2247 
2248 void
2249 sigon(ulwp_t *self)
2250 {
2251 	int sig;
2252 
2253 	ASSERT(self->ul_sigdefer > 0);
2254 	if (--self->ul_sigdefer == 0) {
2255 		if ((sig = self->ul_cursig) != 0 && self->ul_critical == 0) {
2256 			self->ul_cursig = 0;
2257 			take_deferred_signal(sig);
2258 			ASSERT(self->ul_cursig == 0);
2259 		}
2260 	}
2261 }
2262 
2263 #pragma weak thr_getconcurrency = _thr_getconcurrency
2264 int
2265 _thr_getconcurrency()
2266 {
2267 	return (thr_concurrency);
2268 }
2269 
2270 #pragma weak pthread_getconcurrency = _pthread_getconcurrency
2271 int
2272 _pthread_getconcurrency()
2273 {
2274 	return (pthread_concurrency);
2275 }
2276 
2277 #pragma weak thr_setconcurrency = _thr_setconcurrency
2278 int
2279 _thr_setconcurrency(int new_level)
2280 {
2281 	uberdata_t *udp = curthread->ul_uberdata;
2282 
2283 	if (new_level < 0)
2284 		return (EINVAL);
2285 	if (new_level > 65536)		/* 65536 is totally arbitrary */
2286 		return (EAGAIN);
2287 	lmutex_lock(&udp->link_lock);
2288 	if (new_level > thr_concurrency)
2289 		thr_concurrency = new_level;
2290 	lmutex_unlock(&udp->link_lock);
2291 	return (0);
2292 }
2293 
2294 #pragma weak pthread_setconcurrency = _pthread_setconcurrency
2295 int
2296 _pthread_setconcurrency(int new_level)
2297 {
2298 	if (new_level < 0)
2299 		return (EINVAL);
2300 	if (new_level > 65536)		/* 65536 is totally arbitrary */
2301 		return (EAGAIN);
2302 	pthread_concurrency = new_level;
2303 	return (0);
2304 }
2305 
2306 #pragma weak thr_min_stack = _thr_min_stack
2307 #pragma weak __pthread_min_stack = _thr_min_stack
2308 size_t
2309 _thr_min_stack(void)
2310 {
2311 	return (MINSTACK);
2312 }
2313 
2314 int
2315 __nthreads(void)
2316 {
2317 	return (curthread->ul_uberdata->nthreads);
2318 }
2319 
2320 /*
2321  * XXX
2322  * The remainder of this file implements the private interfaces to java for
2323  * garbage collection.  It is no longer used, at least not by java 1.2.
2324  * It can all go away once all old JVMs have disappeared.
2325  */
2326 
2327 int	suspendingallmutators;	/* when non-zero, suspending all mutators. */
2328 int	suspendedallmutators;	/* when non-zero, all mutators suspended. */
2329 int	mutatorsbarrier;	/* when non-zero, mutators barrier imposed. */
2330 mutex_t	mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
2331 cond_t	mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */
2332 
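/*
 * A sketch of how an old JVM's garbage collector was expected to
 * drive these interfaces (hypothetical caller):
 *
 *	thr_mutators_barrier(1);		(hold new mutators back)
 *	(void) thr_suspend_allmutators();	(stop all running mutators)
 *	... examine the stopped mutators via thr_getstate() ...
 *	(void) thr_continue_allmutators();	(set the mutators running)
 *	thr_mutators_barrier(0);		(drop the barrier)
 */
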
2333 /*
2334  * Get the available register state for the target thread.
2335  * Return non-volatile registers: TRS_NONVOLATILE
2336  */
2337 #pragma weak thr_getstate = _thr_getstate
2338 int
2339 _thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
2340 {
2341 	ulwp_t *self = curthread;
2342 	uberdata_t *udp = self->ul_uberdata;
2343 	ulwp_t **ulwpp;
2344 	ulwp_t *ulwp;
2345 	int error = 0;
2346 	int trs_flag = TRS_LWPID;
2347 
2348 	if (tid == 0 || self->ul_lwpid == tid) {
2349 		ulwp = self;
2350 		ulwp_lock(ulwp, udp);
2351 	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
2352 		ulwp = *ulwpp;
2353 	} else {
2354 		if (flag)
2355 			*flag = TRS_INVALID;
2356 		return (ESRCH);
2357 	}
2358 
2359 	if (ulwp->ul_dead) {
2360 		trs_flag = TRS_INVALID;
2361 	} else if (!ulwp->ul_stop && !suspendedallmutators) {
2362 		error = EINVAL;
2363 		trs_flag = TRS_INVALID;
2364 	} else if (ulwp->ul_stop) {
2365 		trs_flag = TRS_NONVOLATILE;
2366 		getgregs(ulwp, rs);
2367 	}
2368 
2369 	if (flag)
2370 		*flag = trs_flag;
2371 	if (lwp)
2372 		*lwp = tid;
2373 	if (ss != NULL)
2374 		(void) _thrp_stksegment(ulwp, ss);
2375 
2376 	ulwp_unlock(ulwp, udp);
2377 	return (error);
2378 }
2379 
2380 /*
2381  * Set the appropriate register state for the target thread.
2382  * This is not used by java.  It exists solely for the MSTC test suite.
2383  */
2384 #pragma weak thr_setstate = _thr_setstate
2385 int
2386 _thr_setstate(thread_t tid, int flag, gregset_t rs)
2387 {
2388 	uberdata_t *udp = curthread->ul_uberdata;
2389 	ulwp_t *ulwp;
2390 	int error = 0;
2391 
2392 	if ((ulwp = find_lwp(tid)) == NULL)
2393 		return (ESRCH);
2394 
2395 	if (!ulwp->ul_stop && !suspendedallmutators)
2396 		error = EINVAL;
2397 	else if (rs != NULL) {
2398 		switch (flag) {
2399 		case TRS_NONVOLATILE:
2400 			/* do /proc stuff here? */
2401 			if (ulwp->ul_stop)
2402 				setgregs(ulwp, rs);
2403 			else
2404 				error = EINVAL;
2405 			break;
2406 		case TRS_LWPID:		/* do /proc stuff here? */
2407 		default:
2408 			error = EINVAL;
2409 			break;
2410 		}
2411 	}
2412 
2413 	ulwp_unlock(ulwp, udp);
2414 	return (error);
2415 }
2416 
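/*
 * Read the target lwp's lwpstatus from /proc, yielding until the
 * lwp is seen stopped.  Returns 0 with *sp filled in, -1 on failure.
 */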
2417 int
2418 getlwpstatus(thread_t tid, struct lwpstatus *sp)
2419 {
2420 	extern ssize_t _pread(int, void *, size_t, off_t);
2421 	char buf[100];
2422 	int fd;
2423 
2424 	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
2425 	(void) strcpy(buf, "/proc/self/lwp/");
2426 	ultos((uint64_t)tid, 10, buf + strlen(buf));
2427 	(void) strcat(buf, "/lwpstatus");
2428 	if ((fd = _open(buf, O_RDONLY, 0)) >= 0) {
2429 		while (_pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
2430 			if (sp->pr_flags & PR_STOPPED) {
2431 				(void) _close(fd);
2432 				return (0);
2433 			}
2434 			lwp_yield();	/* give it a chance to stop */
2435 		}
2436 		(void) _close(fd);
2437 	}
2438 	return (-1);
2439 }
2440 
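/*
 * Set the target lwp's registers via its /proc lwpctl file.
 * A single writev() directs the lwp to stop (PCDSTOP), writes the
 * new registers (PCSREG) and sets it running again (PCRUN).
 * Returns 0 on success, -1 on failure.
 */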
2441 int
2442 putlwpregs(thread_t tid, prgregset_t prp)
2443 {
2444 	extern ssize_t _writev(int, const struct iovec *, int);
2445 	char buf[100];
2446 	int fd;
2447 	long dstop_sreg[2];
2448 	long run_null[2];
2449 	iovec_t iov[3];
2450 
2451 	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
2452 	(void) strcpy(buf, "/proc/self/lwp/");
2453 	ultos((uint64_t)tid, 10, buf + strlen(buf));
2454 	(void) strcat(buf, "/lwpctl");
2455 	if ((fd = _open(buf, O_WRONLY, 0)) >= 0) {
2456 		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
2457 		dstop_sreg[1] = PCSREG;		/* set the registers */
2458 		iov[0].iov_base = (caddr_t)dstop_sreg;
2459 		iov[0].iov_len = sizeof (dstop_sreg);
2460 		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
2461 		iov[1].iov_len = sizeof (prgregset_t);
2462 		run_null[0] = PCRUN;		/* make it runnable again */
2463 		run_null[1] = 0;
2464 		iov[2].iov_base = (caddr_t)run_null;
2465 		iov[2].iov_len = sizeof (run_null);
2466 		if (_writev(fd, iov, 3) >= 0) {
2467 			(void) _close(fd);
2468 			return (0);
2469 		}
2470 		(void) _close(fd);
2471 	}
2472 	return (-1);
2473 }
2474 
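/*
 * Slow path for __gettsp(): read the stopped lwp's stack pointer
 * from its /proc lwpstatus, or panic if that cannot be done.
 */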
2475 static ulong_t
2476 gettsp_slow(thread_t tid)
2477 {
2478 	char buf[100];
2479 	struct lwpstatus status;
2480 
2481 	if (getlwpstatus(tid, &status) != 0) {
2482 		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
2483 		(void) strcpy(buf, "__gettsp(");
2484 		ultos((uint64_t)tid, 10, buf + strlen(buf));
2485 		(void) strcat(buf, "): can't read lwpstatus");
2486 		thr_panic(buf);
2487 	}
2488 	return (status.pr_reg[R_SP]);
2489 }
2490 
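/*
 * Return the target lwp's stack pointer: use the saved ul_sp if the
 * lwp is stopped with a valid one, else fall back to /proc.
 */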
2491 ulong_t
2492 __gettsp(thread_t tid)
2493 {
2494 	uberdata_t *udp = curthread->ul_uberdata;
2495 	ulwp_t *ulwp;
2496 	ulong_t result;
2497 
2498 	if ((ulwp = find_lwp(tid)) == NULL)
2499 		return (0);
2500 
2501 	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
2502 		ulwp_unlock(ulwp, udp);
2503 		return (result);
2504 	}
2505 
2506 	result = gettsp_slow(tid);
2507 	ulwp_unlock(ulwp, udp);
2508 	return (result);
2509 }
2510 
2511 /*
2512  * This tells java stack walkers how to find the ucontext
2513  * structure passed to signal handlers.
2514  */
2515 #pragma weak thr_sighndlrinfo = _thr_sighndlrinfo
2516 void
2517 _thr_sighndlrinfo(void (**func)(), int *funcsize)
2518 {
2519 	*func = &__sighndlr;
2520 	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
2521 }
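
/*
 * The idea, in sketch form: a frame whose saved PC lies within
 * [*func, *func + *funcsize) belongs to __sighndlr(), so a stack
 * walker can recover the ucontext argument from that frame.
 */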
2522 
2523 /*
2524  * Mark a thread a mutator or reset a mutator to being a default,
2525  * non-mutator thread.
2526  */
2527 #pragma weak thr_setmutator = _thr_setmutator
2528 int
2529 _thr_setmutator(thread_t tid, int enabled)
2530 {
2531 	ulwp_t *self = curthread;
2532 	uberdata_t *udp = self->ul_uberdata;
2533 	ulwp_t *ulwp;
2534 	int error;
2535 
2536 	enabled = enabled ? 1 : 0;
2537 top:
2538 	if (tid == 0) {
2539 		ulwp = self;
2540 		ulwp_lock(ulwp, udp);
2541 	} else if ((ulwp = find_lwp(tid)) == NULL) {
2542 		return (ESRCH);
2543 	}
2544 
2545 	/*
2546 	 * The target thread should be the caller itself or a suspended thread.
2547 	 * This prevents the target from also changing its ul_mutator field.
2548 	 */
2549 	error = 0;
2550 	if (ulwp != self && !ulwp->ul_stop && enabled)
2551 		error = EINVAL;
2552 	else if (ulwp->ul_mutator != enabled) {
2553 		lmutex_lock(&mutatorslock);
2554 		if (mutatorsbarrier) {
2555 			ulwp_unlock(ulwp, udp);
2556 			while (mutatorsbarrier)
2557 				(void) _cond_wait(&mutatorscv, &mutatorslock);
2558 			lmutex_unlock(&mutatorslock);
2559 			goto top;
2560 		}
2561 		ulwp->ul_mutator = enabled;
2562 		lmutex_unlock(&mutatorslock);
2563 	}
2564 
2565 	ulwp_unlock(ulwp, udp);
2566 	return (error);
2567 }
2568 
2569 /*
2570  * Establish a barrier against new mutators.  Any non-mutator trying
2571  * to become a mutator is suspended until the barrier is removed.
2572  */
2573 #pragma weak thr_mutators_barrier = _thr_mutators_barrier
2574 void
2575 _thr_mutators_barrier(int enabled)
2576 {
2577 	int oldvalue;
2578 
2579 	lmutex_lock(&mutatorslock);
2580 
2581 	/*
2582 	 * Wait if trying to set the barrier while it is already set.
2583 	 */
2584 	while (mutatorsbarrier && enabled)
2585 		(void) _cond_wait(&mutatorscv, &mutatorslock);
2586 
2587 	oldvalue = mutatorsbarrier;
2588 	mutatorsbarrier = enabled;
2589 	/*
2590 	 * Wake up any blocked non-mutators when the barrier is removed.
2591 	 */
2592 	if (oldvalue && !enabled)
2593 		(void) cond_broadcast_internal(&mutatorscv);
2594 	lmutex_unlock(&mutatorslock);
2595 }
2596 
2597 /*
2598  * Suspend the set of all mutators except for the caller.  The list
2599  * of actively running threads is searched and only the mutators
2600  * in this list are suspended.  Actively running non-mutators remain
2601  * running.  Any other thread is suspended.
2602  */
2603 #pragma weak thr_suspend_allmutators = _thr_suspend_allmutators
2604 int
2605 _thr_suspend_allmutators(void)
2606 {
2607 	ulwp_t *self = curthread;
2608 	uberdata_t *udp = self->ul_uberdata;
2609 	ulwp_t *ulwp;
2610 	int link_dropped;
2611 
2612 	/*
2613 	 * We single-thread the entire thread suspend mechanism.
2614 	 */
2615 	(void) fork_lock_enter(NULL);
2616 top:
2617 	lmutex_lock(&udp->link_lock);
2618 
2619 	if (suspendingallmutators || suspendedallmutators) {
2620 		lmutex_unlock(&udp->link_lock);
2621 		fork_lock_exit();
2622 		return (EINVAL);
2623 	}
2624 	suspendingallmutators = 1;
2625 
2626 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2627 		ulwp_lock(ulwp, udp);
2628 		if (!ulwp->ul_mutator) {
2629 			ulwp_unlock(ulwp, udp);
2630 		} else if (ulwp->ul_stop) {	/* already stopped */
2631 			ulwp->ul_stop |= TSTP_MUTATOR;
2632 			ulwp_broadcast(ulwp);
2633 			ulwp_unlock(ulwp, udp);
2634 		} else {
2635 			/*
2636 			 * Move the stopped lwp out of a critical section.
2637 			 */
2638 			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
2639 			    link_dropped) {
2640 				suspendingallmutators = 0;
2641 				goto top;
2642 			}
2643 		}
2644 	}
2645 
2646 	suspendedallmutators = 1;
2647 	suspendingallmutators = 0;
2648 	lmutex_unlock(&udp->link_lock);
2649 	fork_lock_exit();
2650 	return (0);
2651 }
2652 
2653 /*
2654  * Suspend the target mutator.  The caller is permitted to suspend
2655  * itself.  If a mutator barrier is enabled, the caller will suspend
2656  * itself as though it had been suspended by thr_suspend_allmutators().
2657  * When the barrier is removed, this thread will be resumed.  Any
2658  * suspended mutator, whether suspended by thr_suspend_mutator(), or by
2659  * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
2660  */
2661 #pragma weak thr_suspend_mutator = _thr_suspend_mutator
2662 int
2663 _thr_suspend_mutator(thread_t tid)
2664 {
2665 	if (tid == 0)
2666 		tid = curthread->ul_lwpid;
2667 	return (_thrp_suspend(tid, TSTP_MUTATOR));
2668 }
2669 
2670 /*
2671  * Resume the set of all suspended mutators.
2672  */
2673 #pragma weak thr_continue_allmutators = _thr_continue_allmutators
2674 int
2675 _thr_continue_allmutators()
2676 {
2677 	ulwp_t *self = curthread;
2678 	uberdata_t *udp = self->ul_uberdata;
2679 	ulwp_t *ulwp;
2680 
2681 	lmutex_lock(&udp->link_lock);
2682 	if (!suspendedallmutators) {
2683 		lmutex_unlock(&udp->link_lock);
2684 		return (EINVAL);
2685 	}
2686 	suspendedallmutators = 0;
2687 
2688 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2689 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2690 		lmutex_lock(mp);
2691 		if (ulwp->ul_stop & TSTP_MUTATOR) {
2692 			ulwp->ul_stop &= ~TSTP_MUTATOR;
2693 			ulwp_broadcast(ulwp);
2694 			if (!ulwp->ul_stop)
2695 				force_continue(ulwp);
2696 		}
2697 		lmutex_unlock(mp);
2698 	}
2699 
2700 	lmutex_unlock(&udp->link_lock);
2701 	return (0);
2702 }
2703 
2704 /*
2705  * Resume a suspended mutator.
2706  */
2707 #pragma weak thr_continue_mutator = _thr_continue_mutator
2708 int
2709 _thr_continue_mutator(thread_t tid)
2710 {
2711 	return (_thrp_continue(tid, TSTP_MUTATOR));
2712 }
2713 
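/*
 * Wait for the target mutator to be suspended.  If 'dontwait' is set,
 * return EWOULDBLOCK rather than waiting when the target is not
 * already stopped as a mutator.
 */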
2714 #pragma weak thr_wait_mutator = _thr_wait_mutator
2715 int
2716 _thr_wait_mutator(thread_t tid, int dontwait)
2717 {
2718 	uberdata_t *udp = curthread->ul_uberdata;
2719 	ulwp_t *ulwp;
2720 	int error = 0;
2721 
2722 top:
2723 	if ((ulwp = find_lwp(tid)) == NULL)
2724 		return (ESRCH);
2725 
2726 	if (!ulwp->ul_mutator)
2727 		error = EINVAL;
2728 	else if (dontwait) {
2729 		if (!(ulwp->ul_stop & TSTP_MUTATOR))
2730 			error = EWOULDBLOCK;
2731 	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
2732 		cond_t *cvp = ulwp_condvar(ulwp, udp);
2733 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2734 
2735 		(void) _cond_wait(cvp, mp);
2736 		(void) lmutex_unlock(mp);
2737 		goto top;
2738 	}
2739 
2740 	ulwp_unlock(ulwp, udp);
2741 	return (error);
2742 }
2743 
2744 /* PROBE_SUPPORT begin */
2745 
2746 void
2747 thr_probe_setup(void *data)
2748 {
2749 	curthread->ul_tpdp = data;
2750 }
2751 
2752 static void *
2753 _thread_probe_getfunc()
2754 {
2755 	return (curthread->ul_tpdp);
2756 }
2757 
2758 void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;
2759 
2760 /* ARGSUSED */
2761 void
2762 _resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
2763 {
2764 	/* never called */
2765 }
2766 
2767 /* ARGSUSED */
2768 void
2769 _resume_ret(ulwp_t *oldlwp)
2770 {
2771 	/* never called */
2772 }
2773 
2774 /* PROBE_SUPPORT end */
2775