/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Private thread definitions for the uthread kernel.
 *
 * $FreeBSD$
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Evaluate the storage class specifier.
 */
#ifdef GLOBAL_PTHREAD_PRIVATE
#define SCLASS
#else
#define SCLASS extern
#endif

/*
 * Include files.
 */
#include <sys/types.h>
#include <sys/cdefs.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <pthread_np.h>
#include <sched.h>
#include <signal.h>
#include <spinlock.h>
#include <stdio.h>
#include <ucontext.h>

#include <machine/atomic.h>
#include <sys/thr.h>
#include <sys/umtx.h>

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(string)	_thread_exit(__FILE__, __LINE__, string)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, args)

/*
 * Currently executing thread.
 */
#define curthread	_get_curthread()

/*
 * State change macro without scheduling queue change:
 */
#define PTHREAD_SET_STATE(thrd, newstate) do {	\
	(thrd)->state = newstate;		\
	(thrd)->fname = __FILE__;		\
	(thrd)->lineno = __LINE__;		\
} while (0)
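
/*
 * Illustrative sketch (hypothetical helper, not part of the interface
 * declared in this header; PTHREAD_MAGIC, PS_SLEEP_WAIT and struct
 * pthread are defined further below): how PANIC and PTHREAD_SET_STATE
 * are meant to be used from the uthread internals.
 */
#if 0
static void
example_mark_sleeping(struct pthread *thread)
{
	if (thread->magic != PTHREAD_MAGIC)
		PANIC("Invalid thread pointer");
	/* Record the new state along with the file/line that set it. */
	PTHREAD_SET_STATE(thread, PS_SLEEP_WAIT);
}
#endif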

/*
 * State change macro with scheduling queue change - This must be
 * called with GIANT held.
 */
#if defined(_PTHREADS_INVARIANTS)
#include <assert.h>
#define PTHREAD_ASSERT(cond, msg) do {				\
	if (!(cond))						\
		PANIC(msg);					\
} while (0)
#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)			\
	PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
	    "Illegal call from signal handler");
#define PTHREAD_NEW_STATE(thrd, newstate) do {			\
	if ((thrd)->state != newstate) {			\
		if ((thrd)->state == PS_RUNNING) {		\
			PTHREAD_SET_STATE(thrd, newstate);	\
		} else if (newstate == PS_RUNNING) {		\
			if (thr_kill(thrd->thr_id, SIGTHR))	\
				abort();			\
			PTHREAD_SET_STATE(thrd, newstate);	\
		}						\
	}							\
} while (0)
#else
#define PTHREAD_ASSERT(cond, msg)
#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
#define PTHREAD_NEW_STATE(thrd, newstate) do {			\
	if (thr_kill(thrd->thr_id, SIGTHR))			\
		abort();					\
	PTHREAD_SET_STATE(thrd, newstate);			\
} while (0)
#if 0
#define PTHREAD_NEW_STATE(thrd, newstate) do {			\
	if ((thrd)->state != newstate) {			\
		if ((thrd)->state == PS_RUNNING) {		\
		} else if (newstate == PS_RUNNING) {		\
			if (thr_kill(thrd->thr_id, SIGTHR))	\
				abort();			\
		}						\
	}							\
	PTHREAD_SET_STATE(thrd, newstate);			\
} while (0)
#endif
#endif

/*
 * TailQ initialization values.
 */
#define TAILQ_INITIALIZER	{ NULL, NULL }

#define UMTX_INITIALIZER	{ NULL, NULL }

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	long			m_flags;
};

/*
 * Static mutex initialization values.
 */
#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER	\
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }

#define PTHREAD_MUTEX_STATIC_INITIALIZER	\
	{ PTHREAD_MUTEXATTR_STATIC_INITIALIZER, UMTX_INITIALIZER, NULL,	\
	    0, 0, TAILQ_INITIALIZER }

union pthread_mutex_data {
	void	*m_ptr;
	int	m_count;
};

struct pthread_mutex {
	enum pthread_mutextype		m_type;
	int				m_protocol;
	TAILQ_HEAD(mutex_head, pthread)	m_queue;
	struct pthread			*m_owner;
	union pthread_mutex_data	m_data;
	long				m_flags;
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *                  priority (threads locking the mutex inherit
	 *                  this priority).  For priority protection, the
	 *                  ceiling priority of this mutex.
	 *   m_saved_prio - The mutex owner's inherited priority before
	 *                  taking the mutex, restored when the owner
	 *                  unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for the list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;

	/*
	 * Lock for accesses to this structure.
	 */
	spinlock_t			lock;
};

/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01
#define MUTEX_FLAGS_INITED	0x02
#define MUTEX_FLAGS_BUSY	0x04
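
/*
 * Illustrative sketch (hypothetical helper, not the library's actual
 * locking code): how m_prio, m_saved_prio, priority_mutex_count and the
 * owner's mutexq list are intended to be used when a thread acquires a
 * priority-protection mutex.
 */
#if 0
static void
example_prio_protect_acquire(struct pthread *thread, struct pthread_mutex *m)
{
	m->m_owner = thread;
	/* Save the owner's inherited priority; it is restored on unlock. */
	m->m_saved_prio = thread->inherited_priority;
	/* The owner runs at (at least) the ceiling while the mutex is held. */
	if (m->m_prio > thread->inherited_priority)
		thread->inherited_priority = m->m_prio;
	if (thread->inherited_priority > thread->active_priority)
		thread->active_priority = thread->inherited_priority;
	thread->priority_mutex_count++;
	/* Link the mutex onto the owner's list of held mutexes. */
	TAILQ_INSERT_TAIL(&thread->mutexq, m, m_qe);
}
#endif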

/*
 * Condition variable definitions.
 */
enum pthread_cond_type {
	COND_TYPE_FAST,
	COND_TYPE_MAX
};

struct pthread_cond {
	enum pthread_cond_type		c_type;
	TAILQ_HEAD(cond_head, pthread)	c_queue;
	pthread_mutex_t			c_mutex;
	void				*c_data;
	long				c_flags;
	int				c_seqno;

	/*
	 * Lock for accesses to this structure.
	 */
	struct umtx			c_lock;
};

struct pthread_cond_attr {
	enum pthread_cond_type	c_type;
	long			c_flags;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_INITED	0x01

/*
 * Static cond initialization values.
 */
#define PTHREAD_COND_STATIC_INITIALIZER		\
	{ COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL,	\
	    0, 0, UMTX_INITIALIZER }

/*
 * Semaphore definitions.
 */
struct sem {
#define	SEM_MAGIC	((u_int32_t)0x09fa4012)
	u_int32_t	magic;
	pthread_mutex_t	lock;
	pthread_cond_t	gtzero;
	u_int32_t	count;
	u_int32_t	nwaiters;
};

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine) ();
	void			*routine_arg;
};

struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	sched_interval;
	int	prio;
	int	suspend;
	int	flags;
	void	*arg_attr;
	void	(*cleanup_attr) ();
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
};

/*
 * Thread creation state attributes.
 */
#define PTHREAD_CREATE_RUNNING		0
#define PTHREAD_CREATE_SUSPENDED	1

/*
 * Miscellaneous definitions.
 */
#define PTHREAD_STACK_DEFAULT	65536

/*
 * Size of default red zone at the end of each stack.  In actuality, this
 * "red zone" is merely an unmapped region, except in the case of the
 * initial stack.  Since mmap() makes it possible to specify the maximum
 * growth of a MAP_STACK region, an unmapped gap between thread stacks
 * achieves the same effect as explicitly mapped red zones.
 * This is declared and initialized in uthread_init.c.
 */
extern int	_pthread_guard_default;

extern int	_pthread_page_size;

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be
 * larger than the stacks of other threads, since many applications are
 * likely to run almost entirely on this stack.
 */
#define PTHREAD_STACK_INITIAL	0x100000

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define PTHREAD_DEFAULT_PRIORITY	15
#define PTHREAD_MIN_PRIORITY		0
#define PTHREAD_MAX_PRIORITY		31	/* 0x1F */
#define PTHREAD_SIGNAL_PRIORITY		32	/* 0x20 */
#define PTHREAD_RT_PRIORITY		64	/* 0x40 */
#define PTHREAD_FIRST_PRIORITY		PTHREAD_MIN_PRIORITY
#define PTHREAD_LAST_PRIORITY	\
	(PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
#define PTHREAD_BASE_PRIORITY(prio)	((prio) & PTHREAD_MAX_PRIORITY)
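
/*
 * Illustrative sketch (hypothetical values): how a scheduling-queue
 * priority is composed from a base priority plus the SIGNAL boost, and
 * decomposed again with PTHREAD_BASE_PRIORITY().
 */
#if 0
static void
example_priority_ranges(void)
{
	int qprio, base;

	/* A default-priority thread with a pending signal to deliver. */
	qprio = PTHREAD_DEFAULT_PRIORITY + PTHREAD_SIGNAL_PRIORITY;	/* 47 */
	base = PTHREAD_BASE_PRIORITY(qprio);				/* 15 */
	(void)qprio;
	(void)base;
}
#endif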

/*
 * Clock resolution in microseconds.
 */
#define CLOCK_RES_USEC		10000
#define CLOCK_RES_USEC_MIN	1000

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC		20000

/*
 * XXX Define a thread-safe macro to get the current time of day
 *     which is updated at regular intervals by the scheduling signal
 *     handler.
 */
#define GET_CURRENT_TOD(tv)	gettimeofday(&(tv), NULL)

struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	pthread_mutex_t	lock;		/* monitor lock */
	int		state;		/* 0 = idle, >0 = # of readers, -1 = writer */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		blocked_writers;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_MUTEX_WAIT,
	PS_COND_WAIT,
	PS_SLEEP_WAIT,	/* XXX We need to wrap syscalls to set this state */
	PS_WAIT_WAIT,
	PS_JOIN,
	PS_DEAD,
	PS_DEADLOCK,
	PS_STATE_MAX
};

/*
 * File descriptor locking definitions.
 */
#define FD_READ		0x1
#define FD_WRITE	0x2
#define FD_RDWR		(FD_READ | FD_WRITE)

union pthread_wait_data {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	spinlock_t	*spinlock;
	struct pthread	*thread;
};

struct join_status {
	struct pthread	*thread;
	void		*ret;
	int		error;
};

struct pthread_state_data {
	union pthread_wait_data	psd_wait_data;
	enum pthread_state	psd_state;
	int			psd_flags;
};

struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};
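
/*
 * Illustrative sketch (hypothetical helper): decoding the "state" field
 * of struct pthread_rwlock above.
 */
#if 0
static const char *
example_rwlock_state_name(struct pthread_rwlock *rwlp)
{
	if (rwlp->state == 0)
		return ("idle");
	else if (rwlp->state > 0)
		return ("read locked");	/* state is the number of readers */
	else
		return ("write locked");
}
#endif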

/*
 * Thread structure.
 */
struct pthread {
	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	PTHREAD_MAGIC		((u_int32_t)0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid;	/* for gdb */
	thr_id_t		thr_id;
	sigset_t		savedsig;
	int			crit_ref;	/* crit. section nesting level */

	/*
	 * Lock for accesses to this thread structure.
	 */
	spinlock_t		lock;

	/* Queue entry for list of all threads: */
	TAILQ_ENTRY(pthread)	tle;

	/* Queue entry for list of dead threads: */
	TAILQ_ENTRY(pthread)	dle;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	void			*stack;
	struct pthread_attr	attr;

	/*
	 * Machine context, including signal state.
	 */
	ucontext_t		ctx;

	/*
	 * Cancelability flags - the lower 2 bits are used by cancel
	 * definitions in pthread.h
	 */
#define PTHREAD_AT_CANCEL_POINT	0x0004
#define PTHREAD_CANCELLING	0x0008

	/*
	 * Protected by Giant.
	 */
	int			cancelflags;

	/* Thread state: */
	enum pthread_state	state;

	/*
	 * Error variable used instead of errno.  The function __error()
	 * returns a pointer to this.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;
	struct join_status	join_status;

	/*
	 * A thread can belong to:
	 *
	 *   o A queue of threads waiting for a mutex
	 *   o A queue of threads waiting for a condition variable
	 *
	 * A thread can also be joining a thread (the joiner field above).
	 *
	 * It must not be possible for a thread to belong to any of the
	 * above queues while it is handling a signal.  Signal handlers
	 * may longjmp back to previous stack frames, circumventing normal
	 * control flow.  This could corrupt queue integrity if the thread
	 * retains membership in the queue.  Therefore, if a thread is a
	 * member of one of these queues when a signal handler is invoked,
	 * it must remove itself from the queue before calling the signal
	 * handler and reinsert itself after normal return of the handler.
	 *
	 * Use sqe for synchronization (mutex and condition variable)
	 * queue links.
	 */
	TAILQ_ENTRY(pthread)	sqe;	/* synchronization queue link */

	/* Wait data. */
	union pthread_wait_data	data;

	/* Miscellaneous flags; only set with signals deferred. */
	int			flags;
#define PTHREAD_FLAGS_PRIVATE	0x0001
#define PTHREAD_EXITING		0x0002
#define PTHREAD_FLAGS_IN_CONDQ	0x0080	/* in condition queue using sqe link */
#define PTHREAD_FLAGS_IN_MUTEXQ	0x0100	/* in mutex queue using sqe link */
#define PTHREAD_FLAGS_SUSPENDED	0x0200	/* thread is suspended */
#define PTHREAD_FLAGS_TRACE	0x0400	/* for debugging purposes */
#define PTHREAD_FLAGS_IN_SYNCQ	\
	(PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)

	/*
	 * Base priority is the user settable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the thread's base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char			active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/*
	 * Queue of currently owned mutexes.
	 */
	TAILQ_HEAD(, pthread_mutex) mutexq;

	void			*ret;
	struct pthread_specific_elem *specific;
	int			specific_data_count;

	/*
	 * Architecture-specific id field used for the _{get, set}_curthread()
	 * interface.
	 */
	void			*arch_id;

	/* Cleanup handlers link list. */
	struct pthread_cleanup	*cleanup;
	char			*fname;		/* Ptr to source file name */
	int			lineno;		/* Source line number. */
};
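
/*
 * Illustrative sketch (hypothetical helper; the actual recalculation is
 * done by the mutex and scheduling code): the invariant described above,
 * namely that the active priority is the maximum of the base and
 * inherited priorities.
 */
#if 0
static void
example_recalc_active_priority(struct pthread *thread)
{
	if (thread->base_priority > thread->inherited_priority)
		thread->active_priority = thread->base_priority;
	else
		thread->active_priority = thread->inherited_priority;
}
#endif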

/*
 * Global variables for the uthread kernel.
 */

SCLASS void *_usrstack
#ifdef GLOBAL_PTHREAD_PRIVATE
= (void *)USRSTACK;
#else
;
#endif

SCLASS spinlock_t stack_lock
#ifdef GLOBAL_PTHREAD_PRIVATE
= _SPINLOCK_INITIALIZER
#endif
;
#define STACK_LOCK	_SPINLOCK(&stack_lock);
#define STACK_UNLOCK	_SPINUNLOCK(&stack_lock);

/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread) _thread_list
#ifdef GLOBAL_PTHREAD_PRIVATE
= TAILQ_HEAD_INITIALIZER(_thread_list);
#else
;
#endif

/* Dead threads: */
SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
= TAILQ_HEAD_INITIALIZER(_dead_list);
#else
;
#endif

/*
 * These two locks protect the global active threads list and
 * the global dead threads list, respectively.  Combining these
 * into one lock for both lists doesn't seem wise, since it
 * would likely increase contention during busy thread creation
 * and destruction for very little savings in space.
 *
 * The lock for the "dead threads list" must be a pthread mutex
 * because it is used with condition variables to synchronize
 * the gc thread with active threads in the process of exiting, or
 * dead threads that have just been joined.
 */
SCLASS spinlock_t thread_list_lock
#ifdef GLOBAL_PTHREAD_PRIVATE
= _SPINLOCK_INITIALIZER
#endif
;
SCLASS pthread_mutex_t dead_list_lock
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

#define THREAD_LIST_LOCK	_SPINLOCK(&thread_list_lock)
#define THREAD_LIST_UNLOCK	_SPINUNLOCK(&thread_list_lock)
#define DEAD_LIST_LOCK		_pthread_mutex_lock(&dead_list_lock)
#define DEAD_LIST_UNLOCK	_pthread_mutex_unlock(&dead_list_lock)

/* Initial thread: */
SCLASS struct pthread *_thread_initial
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

/* Default thread attributes: */
SCLASS struct pthread_attr pthread_attr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
    PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
    PTHREAD_STACK_DEFAULT, -1 };
#else
;
#endif

/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr pthread_mutexattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
#else
;
#endif

/* Default condition variable attributes: */
SCLASS struct pthread_cond_attr pthread_condattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { COND_TYPE_FAST, 0 };
#else
;
#endif

SCLASS int _clock_res_usec	/* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
= CLOCK_RES_USEC;
#else
;
#endif

/* Giant lock. */
SCLASS struct umtx _giant_mutex
#ifdef GLOBAL_PTHREAD_PRIVATE
= UMTX_INITIALIZER
#endif
;

SCLASS int _giant_count;

/* Garbage collector condition variable. */
SCLASS pthread_cond_t _gc_cond
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

/*
 * Array of signal actions for this process.
 */
SCLASS struct sigaction _thread_sigact[NSIG];

/* Tracks the number of threads blocked while waiting for a spinlock. */
SCLASS volatile int _spinblock_count
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/*
 * And, should we climb the beanstalk,
 * We'll meet his brother, Giant.
 */
void	GIANT_LOCK(pthread_t);
void	GIANT_UNLOCK(pthread_t);

/* Undefine the storage class specifier: */
#undef SCLASS
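
/*
 * Illustrative sketch: exactly one translation unit is expected to define
 * GLOBAL_PTHREAD_PRIVATE before including this header, so that the SCLASS
 * variables above are defined there rather than merely declared (the file
 * and header names below are only examples).
 */
#if 0
/* uthread_init.c */
#define GLOBAL_PTHREAD_PRIVATE
#include "thr_private.h"
#endif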

/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
char	*__ttyname_basic(int);
char	*__ttyname_r_basic(int, char *, size_t);
char	*ttyname_r(int, char *, size_t);
void	_cond_wait_backout(pthread_t);
int	_find_thread(pthread_t);
pthread_t _get_curthread(void);
void	*_set_curthread(ucontext_t *, struct pthread *, int *);
void	_retire_thread(void *arch_id);
void	*_thread_stack_alloc(size_t, size_t);
void	_thread_stack_free(void *, size_t, size_t);
int	_thread_create(pthread_t *, const pthread_attr_t *,
	    void *(*start_routine)(void *), void *, pthread_t);
int	_mutex_cv_lock(pthread_mutex_t *);
int	_mutex_cv_unlock(pthread_mutex_t *);
void	_mutex_lock_backout(pthread_t);
void	_mutex_notify_priochange(pthread_t);
int	_mutex_reinit(pthread_mutex_t *);
void	_mutex_unlock_private(pthread_t);
int	_cond_reinit(pthread_cond_t *);
void	*_pthread_getspecific(pthread_key_t);
int	_pthread_key_create(pthread_key_t *, void (*)(void *));
int	_pthread_key_delete(pthread_key_t);
int	_pthread_mutex_destroy(pthread_mutex_t *);
int	_pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
int	_pthread_mutex_lock(pthread_mutex_t *);
int	_pthread_mutex_trylock(pthread_mutex_t *);
int	_pthread_mutex_unlock(pthread_mutex_t *);
int	_pthread_mutexattr_init(pthread_mutexattr_t *);
int	_pthread_mutexattr_destroy(pthread_mutexattr_t *);
int	_pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int	_pthread_once(pthread_once_t *, void (*)(void));
pthread_t _pthread_self(void);
int	_pthread_setspecific(pthread_key_t, const void *);
int	_spintrylock(spinlock_t *);
inline void _spinlock_pthread(pthread_t, spinlock_t *);
inline void _spinunlock_pthread(pthread_t, spinlock_t *);
void	_thread_exit(char *, int, char *);
void	_thread_exit_cleanup(void);
void	*_thread_cleanup(pthread_t);
void	_thread_cleanupspecific(void);
void	_thread_dump_info(void);
void	_thread_init(void);
void	_thread_sig_wrapper(int sig, siginfo_t *info, void *context);
void	_thread_printf(int fd, const char *, ...);
void	_thread_start(void);
void	_thread_seterrno(pthread_t, int);
pthread_addr_t _thread_gc(pthread_addr_t);
void	_thread_enter_cancellation_point(void);
void	_thread_leave_cancellation_point(void);
void	_thread_cancellation_point(void);
int	_thread_suspend(pthread_t thread, struct timespec *abstime);
void	_thread_critical_enter(pthread_t);
void	_thread_critical_exit(pthread_t);
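
/*
 * Illustrative sketch (hypothetical wrapper; the real wrappers live in
 * the library's uthread_*.c source files): cancellation-point syscall
 * wrappers bracket the underlying __sys_*() call with the
 * cancellation-point functions declared above.
 */
#if 0
ssize_t
example_read(int fd, void *buf, size_t nbytes)
{
	ssize_t	ret;

	_thread_enter_cancellation_point();
	ret = __sys_read(fd, buf, nbytes);
	_thread_leave_cancellation_point();
	return (ret);
}
#endif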

/* #include <sys/aio.h> */
#ifdef _SYS_AIO_H_
int	__sys_aio_suspend(const struct aiocb * const[], int,
	    const struct timespec *);
#endif

/* #include <sys/event.h> */
#ifdef _SYS_EVENT_H_
int	__sys_kevent(int, const struct kevent *, int, struct kevent *,
	    int, const struct timespec *);
#endif

/* #include <sys/ioctl.h> */
#ifdef _SYS_IOCTL_H_
int	__sys_ioctl(int, unsigned long, ...);
#endif

/* #include <sys/mman.h> */
#ifdef _SYS_MMAN_H_
int	__sys_msync(void *, size_t, int);
#endif

/* #include <sys/mount.h> */
#ifdef _SYS_MOUNT_H_
int	__sys_fstatfs(int, struct statfs *);
#endif

/* #include <sys/socket.h> */
#ifdef _SYS_SOCKET_H_
int	__sys_accept(int, struct sockaddr *, socklen_t *);
int	__sys_bind(int, const struct sockaddr *, socklen_t);
int	__sys_connect(int, const struct sockaddr *, socklen_t);
int	__sys_getpeername(int, struct sockaddr *, socklen_t *);
int	__sys_getsockname(int, struct sockaddr *, socklen_t *);
int	__sys_getsockopt(int, int, int, void *, socklen_t *);
int	__sys_listen(int, int);
ssize_t	__sys_recvfrom(int, void *, size_t, int, struct sockaddr *,
	    socklen_t *);
ssize_t	__sys_recvmsg(int, struct msghdr *, int);
int	__sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
	    off_t *, int);
ssize_t	__sys_sendmsg(int, const struct msghdr *, int);
ssize_t	__sys_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, socklen_t);
int	__sys_setsockopt(int, int, int, const void *, socklen_t);
int	__sys_shutdown(int, int);
int	__sys_socket(int, int, int);
int	__sys_socketpair(int, int, int, int *);
#endif

/* #include <sys/stat.h> */
#ifdef _SYS_STAT_H_
int	__sys_fchflags(int, u_long);
int	__sys_fchmod(int, mode_t);
int	__sys_fstat(int, struct stat *);
#endif

/* #include <sys/uio.h> */
#ifdef _SYS_UIO_H_
ssize_t	__sys_readv(int, const struct iovec *, int);
ssize_t	__sys_writev(int, const struct iovec *, int);
#endif

/* #include <sys/wait.h> */
#ifdef WNOHANG
pid_t	__sys_wait4(pid_t, int *, int, struct rusage *);
#endif

/* #include <dirent.h> */
#ifdef _DIRENT_H_
int	__sys_getdirentries(int, char *, int, long *);
#endif

/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_
int	__sys_fcntl(int, int, ...);
int	__sys_flock(int, int);
int	__sys_open(const char *, int, ...);
#endif

/* #include <poll.h> */
#ifdef _SYS_POLL_H_
int	__sys_poll(struct pollfd *, unsigned, int);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int	__sys_sigaction(int, const struct sigaction *, struct sigaction *);
int	__sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
int	__sys_sigprocmask(int, const sigset_t *, sigset_t *);
int	__sys_sigreturn(ucontext_t *);
#endif

/* #include <unistd.h> */
#ifdef _UNISTD_H_
int	__sys_close(int);
int	__sys_dup(int);
int	__sys_dup2(int, int);
int	__sys_execve(const char *, char * const *, char * const *);
void	__sys_exit(int);
int	__sys_fchown(int, uid_t, gid_t);
pid_t	__sys_fork(void);
long	__sys_fpathconf(int, int);
int	__sys_fsync(int);
int	__sys_pipe(int *);
ssize_t	__sys_read(int, void *, size_t);
ssize_t	__sys_write(int, const void *, size_t);
#endif

__END_DECLS

#endif	/* !_THR_PRIVATE_H */