xref: /freebsd/lib/libthr/thread/thr_private.h (revision 243e928310d073338c5ec089f0dce238a80b9866)
/*
 * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/queue.h>
#include <sys/param.h>
#include <sys/cpuset.h>
#include <machine/atomic.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <ucontext.h>
#include <sys/thr.h>
#include <pthread.h>

#define	SYM_FB10(sym)			__CONCAT(sym, _fb10)
#define	SYM_FBP10(sym)			__CONCAT(sym, _fbp10)
#define	WEAK_REF(sym, alias)		__weak_reference(sym, alias)
#define	SYM_COMPAT(sym, impl, ver)	__sym_compat(sym, impl, ver)
#define	SYM_DEFAULT(sym, impl, ver)	__sym_default(sym, impl, ver)

#define	FB10_COMPAT(func, sym)				\
	WEAK_REF(func, SYM_FB10(sym));			\
	SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0)

#define	FB10_COMPAT_PRIVATE(func, sym)			\
	WEAK_REF(func, SYM_FBP10(sym));			\
	SYM_DEFAULT(sym, SYM_FBP10(sym), FBSDprivate_1.0)
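
/*
 * Illustrative sketch (not taken from the FreeBSD sources): a .c file
 * that wants both the current binding and the FBSD_1.0 compat binding
 * of a symbol would typically write
 *
 *	FB10_COMPAT(_thr_example_func, pthread_example_func);
 *
 * which expands to a weak reference pthread_example_func_fb10 aliasing
 * _thr_example_func, plus a pthread_example_func@FBSD_1.0 version bound
 * to that _fb10 name.  (pthread_example_func is a hypothetical symbol
 * used only for this example.)
 */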

#include "pthread_md.h"
#include "thr_umtx.h"
#include "thread_db.h"

#ifdef _PTHREAD_FORCED_UNWIND
#define _BSD_SOURCE
#include <unwind.h>
#endif

typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist;
typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head;
TAILQ_HEAD(mutex_queue, pthread_mutex);

/* Signal used for cancellation */
#define	SIGCANCEL		SIGTHR

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(string)		_thread_exit(__FILE__,__LINE__,string)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (__predict_false(!(cond)))	\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif

#ifdef PIC
# define STATIC_LIB_REQUIRE(name)
#else
# define STATIC_LIB_REQUIRE(name) __asm (".globl " #name)
#endif

#define	TIMESPEC_ADD(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

#define	TIMESPEC_SUB(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)
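
/*
 * Illustrative sketch: turning a relative timeout into an absolute
 * deadline and back into the time remaining.  Both operands are
 * assumed to be normalized (0 <= tv_nsec < 1000000000).
 *
 *	struct timespec now, rel, abstime, left;
 *
 *	clock_gettime(CLOCK_REALTIME, &now);
 *	rel.tv_sec = 1;
 *	rel.tv_nsec = 500000000;		// 1.5 seconds
 *	TIMESPEC_ADD(&abstime, &now, &rel);	// deadline = now + rel
 *	// ... later ...
 *	clock_gettime(CLOCK_REALTIME, &now);
 *	TIMESPEC_SUB(&left, &abstime, &now);	// time still remaining
 *	if (left.tv_sec < 0)
 *		;				// deadline already passed
 */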

/* Magic cookie stored in the pointers of shared pthread locks and CVs */
#define	THR_PSHARED_PTR						\
    ((void *)(uintptr_t)((1ULL << (NBBY * sizeof(long) - 1)) | 1))

/* XXX These values should be the same as those defined in pthread.h */
#define	THR_MUTEX_INITIALIZER		((struct pthread_mutex *)NULL)
#define	THR_ADAPTIVE_MUTEX_INITIALIZER	((struct pthread_mutex *)1)
#define	THR_MUTEX_DESTROYED		((struct pthread_mutex *)2)
#define	THR_COND_INITIALIZER		((struct pthread_cond *)NULL)
#define	THR_COND_DESTROYED		((struct pthread_cond *)1)
#define	THR_RWLOCK_INITIALIZER		((struct pthread_rwlock *)NULL)
#define	THR_RWLOCK_DESTROYED		((struct pthread_rwlock *)1)

#define PMUTEX_FLAG_TYPE_MASK	0x0ff
#define PMUTEX_FLAG_PRIVATE	0x100
#define PMUTEX_FLAG_DEFERRED	0x200
#define PMUTEX_TYPE(mtxflags)	((mtxflags) & PMUTEX_FLAG_TYPE_MASK)

#define	PMUTEX_OWNER_ID(m)	((m)->m_lock.m_owner & ~UMUTEX_CONTESTED)
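
/*
 * Illustrative sketch of how m_flags is composed: the low byte holds
 * the mutex type and the high bits hold modifier flags, so for a
 * hypothetical process-private recursive mutex m,
 *
 *	m->m_flags = PTHREAD_MUTEX_RECURSIVE | PMUTEX_FLAG_PRIVATE;
 *	PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE
 *	(m->m_flags & PMUTEX_FLAG_PRIVATE) != 0
 */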

#define MAX_DEFER_WAITERS       50

/*
 * Values for pthread_mutex m_ps indicator.
 */
#define	PMUTEX_INITSTAGE_ALLOC	0
#define	PMUTEX_INITSTAGE_BUSY	1
#define	PMUTEX_INITSTAGE_DONE	2

struct pthread_mutex {
	/*
	 * Lock for accesses to this structure.
	 */
	struct umutex			m_lock;
	int				m_flags;
	int				m_count;
	int				m_spinloops;
	int				m_yieldloops;
	int				m_ps;	/* pshared init stage */
	/*
	 * Link for all mutexes a thread currently owns, of the same
	 * prio type.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;
	/* Link for all private mutexes a thread currently owns. */
	TAILQ_ENTRY(pthread_mutex)	m_pqe;
	struct pthread_mutex		*m_rb_prev;
};

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	int			m_pshared;
	int			m_robust;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE, \
	    PTHREAD_MUTEX_STALLED }

struct pthread_cond {
	__uint32_t	__has_user_waiters;
	__uint32_t	__has_kern_waiters;
	__uint32_t	__flags;
	__uint32_t	__clock_id;
};

struct pthread_cond_attr {
	int		c_pshared;
	int		c_clockid;
};

struct pthread_barrier {
	struct umutex		b_lock;
	struct ucond		b_cv;
	int64_t			b_cycle;
	int			b_count;
	int			b_waiters;
	int			b_refcount;
	int			b_destroying;
};

struct pthread_barrierattr {
	int		pshared;
};

struct pthread_spinlock {
	struct umutex	s_lock;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02
#define COND_FLAGS_BUSY		0x04

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*prev;
	void			(*routine)(void *);
	void			*routine_arg;
	int			onheap;
};

#define	THR_CLEANUP_PUSH(td, func, arg) {		\
	struct pthread_cleanup __cup;			\
							\
	__cup.routine = func;				\
	__cup.routine_arg = arg;			\
	__cup.onheap = 0;				\
	__cup.prev = (td)->cleanup;			\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)			\
	(td)->cleanup = __cup.prev;			\
	if ((exec) != 0)				\
		__cup.routine(__cup.routine_arg);	\
}
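
/*
 * Illustrative sketch: THR_CLEANUP_PUSH opens a block that
 * THR_CLEANUP_POP closes, so the two must appear as a pair in the same
 * function.  A hypothetical blocking helper could look like:
 *
 *	static void unlock_helper(void *arg);	// releases arg
 *
 *	struct pthread *curthread = _get_curthread();
 *
 *	THR_CLEANUP_PUSH(curthread, unlock_helper, &some_lock);
 *	// ... code that may exit or be cancelled ...
 *	THR_CLEANUP_POP(curthread, 1);	// 1: run unlock_helper() now
 */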

struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

struct pthread_attr {
#define pthread_attr_start_copy	sched_policy
	int	sched_policy;
	int	sched_inherit;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
	int	flags;
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
#define pthread_attr_end_copy	cpuset
	cpuset_t	*cpuset;
	size_t	cpusetsize;
};

struct wake_addr {
	struct wake_addr *link;
	unsigned int	value;
	char		pad[12];
};

struct sleepqueue {
	TAILQ_HEAD(, pthread)    sq_blocked;
	SLIST_HEAD(, sleepqueue) sq_freeq;
	LIST_ENTRY(sleepqueue)   sq_hash;
	SLIST_ENTRY(sleepqueue)  sq_flink;
	void			 *sq_wchan;
	int			 sq_type;
};

/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
#define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of the initial thread's stack.  This perhaps deserves to
 * be larger than the stacks of other threads, since many applications
 * are likely to run almost entirely on this stack.
 */
#define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)
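
/*
 * Worked example of the two sizes above: on an LP64 platform
 * sizeof(void *) is 8, so THR_STACK_DEFAULT is 8 / 4 * 1024 * 1024
 * (2MB) and THR_STACK_INITIAL is 4MB; on a 32-bit platform the values
 * are 1MB and 2MB respectively.
 */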

/*
 * Define priorities returned by the kernel.
 */
#define THR_MIN_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_min)
#define THR_MAX_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_max)
#define THR_DEF_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_default)

#define THR_MIN_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_min)
#define THR_MAX_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_max)
#define THR_DEF_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_default)

/* XXX SCHED_FIFO should have the same priority range as SCHED_RR */
#define THR_MIN_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_min)
#define THR_MAX_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_max)
#define THR_DEF_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_default)

struct pthread_prio {
	int	pri_min;
	int	pri_max;
	int	pri_default;
};

struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	struct urwlock 	lock;
	uint32_t	owner;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_DEAD
};

struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};

struct pthread_key {
	volatile int	allocated;
	int		seqno;
	void            (*destructor)(void *);
};

/*
 * lwpid_t is 32-bit, but the kernel thr API exports the tid as a long
 * to preserve the ABI of the early M:N implementation (r131431).
 */
#define TID(thread)	((uint32_t) ((thread)->tid))

/*
 * Thread structure.
 */
struct pthread {
#define _pthread_startzero	tid
	/* Kernel thread id. */
	long			tid;
#define	TID_TERMINATED		1

	/*
	 * Lock for accesses to this thread structure.
	 */
	struct umutex		lock;

	/* Internal condition variable cycle number. */
	uint32_t		cycle;

	/* Number of low-level locks the thread holds. */
	int			locklevel;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/* Signal blocked counter. */
	int			sigblock;

	/* Queue entry for list of all threads. */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */

	/* Queue entry for GC lists. */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry. */
	LIST_ENTRY(pthread)	hle;

	/* Sleep queue entry */
	TAILQ_ENTRY(pthread)    wle;

	/* Thread's reference count. */
	int			refcount;

	/*
	 * Thread start routine, argument and thread attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

#define	SHOULD_CANCEL(thr)					\
	((thr)->cancel_pending && (thr)->cancel_enable &&	\
	 (thr)->no_cancel == 0)

	/* Cancellation is enabled */
	int			cancel_enable;

	/* Cancellation request is pending */
	int			cancel_pending;

	/* Thread is at a cancellation point */
	int			cancel_point;

	/* Cancellation is temporarily disabled */
	int			no_cancel;

	/* Asynchronous cancellation is enabled */
	int			cancel_async;

	/* Cancellation is in progress */
	int			cancelling;

	/* Thread temporary signal mask. */
	sigset_t		sigmask;

	/* Thread should unblock SIGCANCEL. */
	int			unblock_sigcancel;

	/* In sigsuspend state */
	int			in_sigsuspend;

	/* Deferred signal info */
	siginfo_t		deferred_siginfo;

	/* Signal mask to restore. */
	sigset_t		deferred_sigmask;

	/* The sigaction to use for the deferred signal. */
	struct sigaction	deferred_sigact;

	/* Deferred signal delivery is in progress; do not reenter. */
	int			deferred_run;

	/* Force new thread to exit. */
	int			force_exit;

	/* Thread state: */
	enum pthread_state 	state;

	/*
	 * Error variable used instead of errno. The function __error()
	 * returns a pointer to this.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
#define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */
#define	THR_FLAGS_DETACHED	0x0008	/* thread is detached */

	/* Thread list flags; only set with thread list lock held. */
	int			tlflags;
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */

	/*
	 * Queues of the owned mutexes.  A private queue must have an
	 * index one greater than that of the corresponding full queue.
	 */
#define	TMQ_NORM		0	/* NORMAL or PRIO_INHERIT normal */
#define	TMQ_NORM_PRIV		1	/* NORMAL or PRIO_INHERIT normal priv */
#define	TMQ_NORM_PP		2	/* PRIO_PROTECT normal mutexes */
#define	TMQ_NORM_PP_PRIV	3	/* PRIO_PROTECT normal priv */
#define	TMQ_ROBUST_PP		4	/* PRIO_PROTECT robust mutexes */
#define	TMQ_ROBUST_PP_PRIV	5	/* PRIO_PROTECT robust priv */
#define	TMQ_NITEMS		6
	struct mutex_queue	mq[TMQ_NITEMS];

	void				*ret;
	struct pthread_specific_elem	*specific;
	int				specific_data_count;

	/* Number of rwlock read locks held. */
	int			rdlock_count;

	/* Current locks bitmap for rtld. */
	int			rtld_bits;

	/* Thread control block */
	struct tcb		*tcb;

	/* Cleanup handlers linked list */
	struct pthread_cleanup	*cleanup;

#ifdef _PTHREAD_FORCED_UNWIND
	struct _Unwind_Exception	ex;
	void			*unwind_stackend;
	int			unwind_disabled;
#endif

	/*
	 * Magic value to help distinguish a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;

	/* Enable event reporting */
	int			report_events;

	/* Event mask */
	int			event_mask;

	/* Event */
	td_event_msg_t		event_buf;

	/* Wait channel */
	void			*wchan;

	/* Referenced mutex. */
	struct pthread_mutex	*mutex_obj;

	/* Thread will sleep. */
	int			will_sleep;

	/* Number of threads deferred. */
	int			nwaiter_defer;

	int			robust_inited;
	uintptr_t		robust_list;
	uintptr_t		priv_robust_list;
	uintptr_t		inact_mtx;

	/* Deferred threads from pthread_cond_signal. */
	unsigned int 		*defer_waiters[MAX_DEFER_WAITERS];
#define _pthread_endzero	wake_addr

	struct wake_addr	*wake_addr;
#define WAKE_ADDR(td)           ((td)->wake_addr)

	/* Sleep queue */
	struct	sleepqueue	*sleepqueue;

};

#define THR_SHOULD_GC(thrd) 						\
	((thrd)->refcount == 0 && (thrd)->state == PS_DEAD &&		\
	 ((thrd)->flags & THR_FLAGS_DETACHED) != 0)

#define	THR_IN_CRITICAL(thrd)				\
	(((thrd)->locklevel > 0) ||			\
	((thrd)->critical_count > 0))

#define	THR_CRITICAL_ENTER(thrd)			\
	(thrd)->critical_count++

#define	THR_CRITICAL_LEAVE(thrd)			\
	do {						\
		(thrd)->critical_count--;		\
		_thr_ast(thrd);				\
	} while (0)
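
/*
 * Illustrative sketch: a critical region defers the asynchronous work
 * done by _thr_ast() (deferred signals, suspension checks) until
 * THR_CRITICAL_LEAVE runs it again; entries may nest.
 *
 *	struct pthread *curthread = _get_curthread();
 *
 *	THR_CRITICAL_ENTER(curthread);
 *	// ... manipulate state that must not be interrupted ...
 *	THR_CRITICAL_LEAVE(curthread);
 */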

#define THR_UMUTEX_TRYLOCK(thrd, lck)			\
	_thr_umutex_trylock((lck), TID(thrd))

#define	THR_UMUTEX_LOCK(thrd, lck)			\
	_thr_umutex_lock((lck), TID(thrd))

#define	THR_UMUTEX_TIMEDLOCK(thrd, lck, timo)		\
	_thr_umutex_timedlock((lck), TID(thrd), (timo))

#define	THR_UMUTEX_UNLOCK(thrd, lck)			\
	_thr_umutex_unlock((lck), TID(thrd))

#define	THR_LOCK_ACQUIRE(thrd, lck)			\
do {							\
	(thrd)->locklevel++;				\
	_thr_umutex_lock(lck, TID(thrd));		\
} while (0)

#define	THR_LOCK_ACQUIRE_SPIN(thrd, lck)		\
do {							\
	(thrd)->locklevel++;				\
	_thr_umutex_lock_spin(lck, TID(thrd));		\
} while (0)

#ifdef	_PTHREADS_INVARIANTS
#define	THR_ASSERT_LOCKLEVEL(thrd)			\
do {							\
	if (__predict_false((thrd)->locklevel <= 0))	\
		_thr_assert_lock_level();		\
} while (0)
#else
#define THR_ASSERT_LOCKLEVEL(thrd)
#endif

#define	THR_LOCK_RELEASE(thrd, lck)			\
do {							\
	THR_ASSERT_LOCKLEVEL(thrd);			\
	_thr_umutex_unlock((lck), TID(thrd));		\
	(thrd)->locklevel--;				\
	_thr_ast(thrd);					\
} while (0)

#define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
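
/*
 * Illustrative sketch: THR_LOCK/THR_UNLOCK take the calling thread's
 * own low-level lock, THR_THREAD_LOCK/THR_THREAD_UNLOCK take another
 * thread's; both bump locklevel so the AST stays deferred while the
 * lock is held.
 *
 *	struct pthread *curthread = _get_curthread();
 *
 *	THR_THREAD_LOCK(curthread, peer);	// peer: some struct pthread *
 *	// ... examine or update peer ...
 *	THR_THREAD_UNLOCK(curthread, peer);
 */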

#define	THREAD_LIST_RDLOCK(curthrd)				\
do {								\
	(curthrd)->locklevel++;					\
	_thr_rwl_rdlock(&_thr_list_lock);			\
} while (0)

#define	THREAD_LIST_WRLOCK(curthrd)				\
do {								\
	(curthrd)->locklevel++;					\
	_thr_rwl_wrlock(&_thr_list_lock);			\
} while (0)

#define	THREAD_LIST_UNLOCK(curthrd)				\
do {								\
	_thr_rwl_unlock(&_thr_list_lock);			\
	(curthrd)->locklevel--;					\
	_thr_ast(curthrd);					\
} while (0)
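
/*
 * Illustrative sketch: walking the global thread list under the list
 * read lock (the write lock is required to add or remove entries).
 *
 *	struct pthread *curthread = _get_curthread(), *td;
 *
 *	THREAD_LIST_RDLOCK(curthread);
 *	TAILQ_FOREACH(td, &_thread_list, tle) {
 *		// read-only inspection of td
 *	}
 *	THREAD_LIST_UNLOCK(curthread);
 */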

/*
 * Macros to insert/remove threads into/from the all-threads list and
 * the GC list.
 */
#define	THR_LIST_ADD(thrd) do {					\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
		_thr_hash_add(thrd);				\
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_LIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
		_thr_hash_remove(thrd);				\
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_GCLIST_ADD(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
		_gc_count++;					\
	}							\
} while (0)
#define	THR_GCLIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
		_gc_count--;					\
	}							\
} while (0)

#define THR_REF_ADD(curthread, pthread) do {			\
	THR_CRITICAL_ENTER(curthread);				\
	(pthread)->refcount++;					\
} while (0)

#define THR_REF_DEL(curthread, pthread) do {			\
	(pthread)->refcount--;					\
	THR_CRITICAL_LEAVE(curthread);				\
} while (0)
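
/*
 * Illustrative sketch: a reference keeps a thread structure from being
 * garbage collected while it is being examined, and the pair also
 * brackets a critical region.  Callers normally take the reference via
 * _thr_ref_add()/_thr_ref_delete() (declared below) while holding the
 * thread list lock, roughly:
 *
 *	THR_REF_ADD(curthread, target);
 *	// ... use target without holding the list lock ...
 *	THR_REF_DEL(curthread, target);
 */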

#define GC_NEEDED()	(_gc_count >= 5)

#define SHOULD_REPORT_EVENT(curthr, e)			\
	((curthr)->report_events &&			\
	 (((curthr)->event_mask | _thread_event_mask) & (e)) != 0)

extern int __isthreaded;

/*
 * Global variables for the pthread kernel.
 */

extern char		*_usrstack __hidden;
extern struct pthread	*_thr_initial __hidden;

/* For debugger */
extern int		_libthr_debug;
extern int		_thread_event_mask;
extern struct pthread	*_thread_last_event;

/* List of all threads: */
extern pthreadlist	_thread_list;

/* List of threads needing GC: */
extern pthreadlist	_thread_gc_list __hidden;

extern int		_thread_active_threads;
extern atfork_head	_thr_atfork_list __hidden;
extern struct urwlock	_thr_atfork_lock __hidden;

/* Default thread attributes: */
extern struct pthread_attr _pthread_attr_default __hidden;

/* Default mutex attributes: */
extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden;
extern struct pthread_mutex_attr _pthread_mutexattr_adaptive_default __hidden;

/* Default condition variable attributes: */
extern struct pthread_cond_attr _pthread_condattr_default __hidden;

extern struct pthread_prio _thr_priorities[] __hidden;

extern int	_thr_is_smp __hidden;

extern size_t	_thr_guard_default __hidden;
extern size_t	_thr_stack_default __hidden;
extern size_t	_thr_stack_initial __hidden;
extern int	_thr_page_size __hidden;
extern int	_thr_spinloops __hidden;
extern int	_thr_yieldloops __hidden;
extern int	_thr_queuefifo __hidden;

/* Garbage thread count. */
extern int	_gc_count __hidden;

extern struct umutex	_mutex_static_lock __hidden;
extern struct umutex	_cond_static_lock __hidden;
extern struct umutex	_rwlock_static_lock __hidden;
extern struct umutex	_keytable_lock __hidden;
extern struct urwlock	_thr_list_lock __hidden;
extern struct umutex	_thr_event_lock __hidden;
extern struct umutex	_suspend_all_lock __hidden;
extern int		_suspend_all_waiters __hidden;
extern int		_suspend_all_cycle __hidden;
extern struct pthread	*_single_thread __hidden;
/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int	_thr_setthreaded(int) __hidden;
int	_mutex_cv_lock(struct pthread_mutex *, int, bool) __hidden;
int	_mutex_cv_unlock(struct pthread_mutex *, int *, int *) __hidden;
int     _mutex_cv_attach(struct pthread_mutex *, int) __hidden;
int     _mutex_cv_detach(struct pthread_mutex *, int *) __hidden;
int     _mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden;
int	_mutex_reinit(pthread_mutex_t *) __hidden;
void	_mutex_fork(struct pthread *curthread) __hidden;
int	_mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
	    __hidden;
void	_mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m)
	    __hidden;
void	_libpthread_init(struct pthread *) __hidden;
struct pthread *_thr_alloc(struct pthread *) __hidden;
void	_thread_exit(const char *, int, const char *) __hidden __dead2;
int	_thr_ref_add(struct pthread *, struct pthread *, int) __hidden;
void	_thr_ref_delete(struct pthread *, struct pthread *) __hidden;
void	_thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden;
int	_thr_find_thread(struct pthread *, struct pthread *, int) __hidden;
void	_thr_rtld_init(void) __hidden;
void	_thr_rtld_postfork_child(void) __hidden;
int	_thr_stack_alloc(struct pthread_attr *) __hidden;
void	_thr_stack_free(struct pthread_attr *) __hidden;
void	_thr_free(struct pthread *, struct pthread *) __hidden;
void	_thr_gc(struct pthread *) __hidden;
void    _thread_cleanupspecific(void) __hidden;
void	_thread_printf(int, const char *, ...) __hidden;
void	_thr_spinlock_init(void) __hidden;
void	_thr_cancel_enter(struct pthread *) __hidden;
void	_thr_cancel_enter2(struct pthread *, int) __hidden;
void	_thr_cancel_leave(struct pthread *, int) __hidden;
void	_thr_testcancel(struct pthread *) __hidden;
void	_thr_signal_block(struct pthread *) __hidden;
void	_thr_signal_unblock(struct pthread *) __hidden;
void	_thr_signal_init(int) __hidden;
void	_thr_signal_deinit(void) __hidden;
int	_thr_send_sig(struct pthread *, int sig) __hidden;
void	_thr_list_init(void) __hidden;
void	_thr_hash_add(struct pthread *) __hidden;
void	_thr_hash_remove(struct pthread *) __hidden;
struct pthread *_thr_hash_find(struct pthread *) __hidden;
void	_thr_link(struct pthread *, struct pthread *) __hidden;
void	_thr_unlink(struct pthread *, struct pthread *) __hidden;
void	_thr_assert_lock_level(void) __hidden __dead2;
void	_thr_ast(struct pthread *) __hidden;
void	_thr_once_init(void) __hidden;
void	_thr_report_creation(struct pthread *curthread,
	    struct pthread *newthread) __hidden;
void	_thr_report_death(struct pthread *curthread) __hidden;
int	_thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden;
int	_thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden;
void	_thr_signal_prefork(void) __hidden;
void	_thr_signal_postfork(void) __hidden;
void	_thr_signal_postfork_child(void) __hidden;
void	_thr_suspend_all_lock(struct pthread *) __hidden;
void	_thr_suspend_all_unlock(struct pthread *) __hidden;
void	_thr_try_gc(struct pthread *, struct pthread *) __hidden;
int	_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
		struct sched_param *param) __hidden;
int	_schedparam_to_rtp(int policy, const struct sched_param *param,
		struct rtprio *rtp) __hidden;
void	_thread_bp_create(void);
void	_thread_bp_death(void);
int	_sched_yield(void);

void	_pthread_cleanup_push(void (*)(void *), void *);
void	_pthread_cleanup_pop(int);
void	_pthread_exit_mask(void *status, sigset_t *mask) __dead2 __hidden;
void	_pthread_cancel_enter(int maycancel);
void 	_pthread_cancel_leave(int maycancel);
int	_pthread_mutex_consistent(pthread_mutex_t *) __nonnull(1);
int	_pthread_mutexattr_getrobust(pthread_mutexattr_t *__restrict,
	    int *__restrict) __nonnull_all;
int	_pthread_mutexattr_setrobust(pthread_mutexattr_t *, int)
	    __nonnull(1);

/* #include <fcntl.h> */
#ifdef  _SYS_FCNTL_H_
int     __sys_fcntl(int, int, ...);
int     __sys_openat(int, const char *, int, ...);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int	__sys_kill(pid_t, int);
int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
int     __sys_sigpending(sigset_t *);
int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
int     __sys_sigsuspend(const sigset_t *);
int     __sys_sigreturn(const ucontext_t *);
int     __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
int	__sys_sigwait(const sigset_t *, int *);
int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
		const struct timespec *);
int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
#endif

/* #include <time.h> */
#ifdef	_TIME_H_
int	__sys_nanosleep(const struct timespec *, struct timespec *);
#endif

/* #include <sys/ucontext.h> */
#ifdef _SYS_UCONTEXT_H_
int	__sys_setcontext(const ucontext_t *ucp);
int	__sys_swapcontext(ucontext_t *oucp, const ucontext_t *ucp);
#endif

/* #include <unistd.h> */
#ifdef  _UNISTD_H_
int     __sys_close(int);
int	__sys_fork(void);
pid_t	__sys_getpid(void);
ssize_t __sys_read(int, void *, size_t);
void	__sys_exit(int);
#endif

static inline int
_thr_isthreaded(void)
{
	return (__isthreaded != 0);
}

static inline int
_thr_is_inited(void)
{
	return (_thr_initial != NULL);
}

static inline void
_thr_check_init(void)
{
	if (_thr_initial == NULL)
		_libpthread_init(NULL);
}
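
/*
 * Illustrative sketch: exported entry points that can run before any
 * thread has been created typically begin with
 *
 *	_thr_check_init();
 *	curthread = _get_curthread();
 *
 * so the library initializes itself lazily on first use.
 */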

struct wake_addr *_thr_alloc_wake_addr(void);
void	_thr_release_wake_addr(struct wake_addr *);
int	_thr_sleep(struct pthread *, int, const struct timespec *);

void _thr_wake_addr_init(void) __hidden;

static inline void
_thr_clear_wake(struct pthread *td)
{
	td->wake_addr->value = 0;
}

static inline int
_thr_is_woken(struct pthread *td)
{
	return td->wake_addr->value != 0;
}

static inline void
_thr_set_wake(unsigned int *waddr)
{
	*waddr = 1;
	_thr_umtx_wake(waddr, INT_MAX, 0);
}

void _thr_wake_all(unsigned int *waddrs[], int) __hidden;
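
/*
 * Illustrative sketch of the wake-word handshake used by the sleep
 * queue code (simplified; error handling and queue locking omitted):
 *
 *	// sleeping side (td is the current thread)
 *	_thr_clear_wake(td);
 *	// ... publish td on a sleep queue ...
 *	_thr_sleep(td, CLOCK_REALTIME, abstime);
 *	if (_thr_is_woken(td))
 *		;	// a waker stored 1 in the wake word
 *
 *	// waking side
 *	_thr_set_wake(&td->wake_addr->value);
 */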

static inline struct pthread *
_sleepq_first(struct sleepqueue *sq)
{
	return TAILQ_FIRST(&sq->sq_blocked);
}

void	_sleepq_init(void) __hidden;
struct sleepqueue *_sleepq_alloc(void) __hidden;
void	_sleepq_free(struct sleepqueue *) __hidden;
void	_sleepq_lock(void *) __hidden;
void	_sleepq_unlock(void *) __hidden;
struct sleepqueue *_sleepq_lookup(void *) __hidden;
void	_sleepq_add(void *, struct pthread *) __hidden;
int	_sleepq_remove(struct sleepqueue *, struct pthread *) __hidden;
void	_sleepq_drop(struct sleepqueue *,
		void (*cb)(struct pthread *, void *arg), void *) __hidden;

int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
	    void *(calloc_cb)(size_t, size_t));

struct dl_phdr_info;
void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info);
void _thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden;
void _thr_sigact_unload(struct dl_phdr_info *phdr_info) __hidden;
void _thr_stack_fix_protection(struct pthread *thrd);

int *__error_threaded(void) __hidden;
void __thr_interpose_libc(void) __hidden;
pid_t __thr_fork(void);
int __thr_setcontext(const ucontext_t *ucp);
int __thr_sigaction(int sig, const struct sigaction *act,
    struct sigaction *oact) __hidden;
int __thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset);
int __thr_sigsuspend(const sigset_t * set);
int __thr_sigtimedwait(const sigset_t *set, siginfo_t *info,
    const struct timespec * timeout);
int __thr_sigwait(const sigset_t *set, int *sig);
int __thr_sigwaitinfo(const sigset_t *set, siginfo_t *info);
int __thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp);

void __thr_map_stacks_exec(void);

struct _spinlock;
void __thr_spinunlock(struct _spinlock *lck);
void __thr_spinlock(struct _spinlock *lck);

struct tcb *_tcb_ctor(struct pthread *, int);
void	_tcb_dtor(struct tcb *);

void __thr_pshared_init(void) __hidden;
void *__thr_pshared_offpage(void *key, int doalloc) __hidden;
void __thr_pshared_destroy(void *key) __hidden;
void __thr_pshared_atfork_pre(void) __hidden;
void __thr_pshared_atfork_post(void) __hidden;
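
/*
 * Illustrative sketch: a process-shared pthread object stores
 * THR_PSHARED_PTR in the user-visible handle, and the real object
 * lives in a shared "offpage" found via __thr_pshared_offpage().
 * For a hypothetical pthread_mutex_t *mtxp:
 *
 *	if (*(void **)mtxp == THR_PSHARED_PTR) {
 *		struct pthread_mutex *m =
 *		    __thr_pshared_offpage(mtxp, 0);
 *		if (m == NULL)
 *			;	// not initialized in this process yet
 *	}
 */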

__END_DECLS

#endif  /* !_THR_PRIVATE_H */