xref: /titanic_50/usr/src/lib/libc/port/threads/scalls.c (revision f38cb554a534c6df738be3f4d23327e69888e634)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /* Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <stdarg.h>
32 #include <poll.h>
33 #include <stropts.h>
34 #include <dlfcn.h>
35 #include <wait.h>
36 #include <sys/socket.h>
37 #include <sys/uio.h>
38 #include <sys/file.h>
39 #include <sys/door.h>
40 
41 /*
42  * These leading-underbar symbols exist because mistakes were made
43  * in the past that put them into non-SUNWprivate versions of
44  * the libc mapfiles.  They should be eliminated, but oh well...
45  */
46 #pragma weak _fork = fork
47 #pragma weak _read = read
48 #pragma weak _write = write
49 #pragma weak _getmsg = getmsg
50 #pragma weak _getpmsg = getpmsg
51 #pragma weak _putmsg = putmsg
52 #pragma weak _putpmsg = putpmsg
53 #pragma weak _sleep = sleep
54 #pragma weak _close = close
55 #pragma weak _creat = creat
56 #pragma weak _fcntl = fcntl
57 #pragma weak _fsync = fsync
58 #pragma weak _lockf = lockf
59 #pragma weak _msgrcv = msgrcv
60 #pragma weak _msgsnd = msgsnd
61 #pragma weak _msync = msync
62 #pragma weak _open = open
63 #pragma weak _openat = openat
64 #pragma weak _pause = pause
65 #pragma weak _readv = readv
66 #pragma weak _sigpause = sigpause
67 #pragma weak _sigsuspend = sigsuspend
68 #pragma weak _tcdrain = tcdrain
69 #pragma weak _waitid = waitid
70 #pragma weak _writev = writev
71 
72 #if !defined(_LP64)
73 #pragma weak _creat64 = creat64
74 #pragma weak _lockf64 = lockf64
75 #pragma weak _open64 = open64
76 #pragma weak _openat64 = openat64
77 #pragma weak _pread64 = pread64
78 #pragma weak _pwrite64 = pwrite64
79 #endif
80 
81 /*
82  * These are SUNWprivate, but they are being used by Sun Studio libcollector.
83  */
84 #pragma weak _fork1 = fork1
85 #pragma weak _forkall = forkall
86 
87 /*
88  * atfork_lock protects the pthread_atfork() data structures.
89  *
90  * fork_lock does double-duty.  Not only does it (and atfork_lock)
91  * serialize calls to fork() and forkall(), but it also serializes calls
92  * to thr_suspend() and thr_continue() (because fork() and forkall() also
93  * suspend and continue other threads and they want no competition).
94  *
95  * Functions called in dlopen()ed L10N objects can do anything, including
96  * call malloc() and free().  Such calls are not fork-safe when protected
97  * by an ordinary mutex that is acquired in libc's prefork processing
98  * because, with an interposed malloc library present, there would be a
99  * lock ordering violation due to the pthread_atfork() prefork function
100  * in the interposition library acquiring its malloc lock(s) before the
101  * ordinary mutex in libc being acquired by libc's prefork functions.
102  *
103  * Within libc, calls to malloc() and free() are fork-safe if the calls
104  * are made while holding no other libc locks.  This covers almost all
105  * of libc's malloc() and free() calls.  For those libc code paths, such
106  * as the above-mentioned L10N calls, that require serialization and that
107  * may call malloc() or free(), libc uses callout_lock_enter() to perform
108  * the serialization.  This works because callout_lock is not acquired as
109  * part of running the pthread_atfork() prefork handlers (to avoid the
110  * lock ordering violation described above).  Rather, it is simply
111  * reinitialized in postfork1_child() to cover the case that some
112  * now-defunct thread might have been suspended while holding it.
113  */
114 
/*
 * fork_lock_enter()/fork_lock_exit(): serialize fork(), forkall(),
 * thr_suspend() and thr_continue() (see the block comment above).
 * Must never be called from within a critical region.
 */
void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

/*
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
 */
void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}
148 
/*
 * forkx(): create a child process with fork1() semantics (only the
 * calling thread is replicated), passing 'flags' through to the
 * __forkx() system call trap.  Returns the child's pid in the parent,
 * 0 in the child, or -1 with errno set (ENOTSUP for a vfork() child
 * of a multithreaded parent, EDEADLK when called from a fork handler).
 */
pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);	/* defer signals for the duration of the fork */
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);

	/*
	 * Posix (SUSv3) requires fork() to be async-signal-safe.
	 * This cannot be made to happen with fork handlers in place
	 * (they grab locks).  To be in nominal compliance, don't run
	 * any fork handlers if we are called within a signal context.
	 * This leaves the child process in a questionable state with
	 * respect to its locks, but at least the parent process does
	 * not become deadlocked due to the calling thread attempting
	 * to acquire a lock that it already owns.
	 */
	if (self->ul_siglink == NULL)
		_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		/* as above: run no fork handlers in a signal context */
		if (self->ul_siglink == NULL)
			_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_parent_handler();
	}

	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
269 
270 /*
271  * fork() is fork1() for both Posix threads and Solaris threads.
272  * The forkall() interface exists for applications that require
273  * the semantics of replicating all threads.
274  */
#pragma weak fork1 = fork
pid_t
fork(void)
{
	/* fork() is simply forkx() with no flags */
	return (forkx(0));
}
281 
282 /*
283  * Much of the logic here is the same as in forkx().
284  * See the comments in forkx(), above.
285  */
/*
 * forkallx(): like forkx(), but the child replicates all of the
 * parent's threads.  Note that the pthread_atfork() handlers are
 * not run here; they apply only to fork1() semantics.
 */
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/* vfork() child: go straight to the system call trap */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/* cannot fork from a fork handler */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {		/* child */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		/* the forkall() child restarts its copies of the threads */
		continue_fork(1);
	} else {
		/* restart all threads that were suspended for forkall() */
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
339 
pid_t
forkall(void)
{
	/* forkall() is simply forkallx() with no flags */
	return (forkallx(0));
}
345 
346 /*
347  * For the implementation of cancellation at cancellation points.
348  */
/*
 * PROLOGUE opens a scope that declares:
 *	self	 - the current thread's ulwp_t
 *	nocancel - non-zero if cancellation must not be acted upon here
 *		   (vfork() child, nocancel state, libc locks held,
 *		   inside a critical region, or signals deferred)
 *	abort	 - non-zero if a pending cancellation request should make
 *		   the caller fail immediately with EINTR instead of
 *		   issuing the system call
 * If cancellation is enabled and already pending, the thread exits
 * right here via pthread_exit(PTHREAD_CANCELED).
 * EPILOGUE undoes PROLOGUE's bookkeeping and closes its scope; the
 * two must always be paired.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				pthread_exit(PTHREAD_CANCELED);		\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}
376 
377 /*
378  * Perform the body of the action required by most of the cancelable
379  * function calls.  The return(function_call) part is to allow the
380  * compiler to make the call be executed with tail recursion, which
381  * saves a register window on sparc and slightly (not much) improves
382  * the code for x86/x64 compilations.
383  */
/*
 * Note: 'rv' must be declared by the caller with the wrapped
 * function's return type before PERFORM() is invoked.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
395 
396 /*
397  * Specialized prologue for sigsuspend() and pollsys().
398  * These system calls pass a signal mask to the kernel.
399  * The kernel replaces the thread's signal mask with the
400  * temporary mask before the thread goes to sleep.  If
401  * a signal is received, the signal handler will execute
402  * with the temporary mask, as modified by the sigaction
403  * for the particular signal.
404  *
405  * We block all signals until we reach the kernel with the
406  * temporary mask.  This eliminates race conditions with
407  * setting the signal mask while signals are being posted.
408  */
/*
 * PROLOGUE_MASK opens a scope (closed by EPILOGUE_MASK) and, when a
 * temporary signal mask is supplied, blocks all signals and records
 * the mask in self->ul_tmpmask with ul_sigsuspend set, so the kernel
 * can install it atomically.  A vfork() child skips both the mask
 * manipulation and the cancellation setup.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask = *sigmask;			\
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					pthread_exit(PTHREAD_CANCELED);	\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
454 
455 /*
456  * Cancellation prologue and epilogue functions,
457  * for cancellation points too complex to include here.
458  */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	/* same "may we act on cancellation?" test as the PROLOGUE macro */
	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		/* cannot act now; arrange for EINTR instead */
		set_cancel_eintr_flag(self);
	}
}
480 
/*
 * Undo the state set up by _cancel_prologue()
 * (the function analogue of the EPILOGUE macro).
 */
void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}
491 
492 /*
493  * Called from _thrp_join() (thr_join() is a cancellation point)
494  */
/*
 * Called from _thrp_join() (thr_join() is a cancellation point).
 * Returns 0 or an error number (EINTR when aborted by cancellation).
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	/* retry on EINTR unless the interruption is due to cancellation */
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}
508 
/* read(2) wrapper; a cancellation point */
ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

/* write(2) wrapper; a cancellation point */
ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}
526 
/*
 * STREAMS message wrappers.  All are cancellation points.
 */
int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}

/* like putmsg(), but with the MSG_XPG4 flag added for XPG4.2 callers */
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}

/* like putpmsg(), but with the MSG_XPG4 flag added for XPG4.2 callers */
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}
591 
/*
 * nanosleep(2) wrapper; a cancellation point.
 * __nanosleep() returns an error number; convert that to the
 * -1/errno convention expected of nanosleep().
 */
int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	/* a pending cancellation aborts the sleep with EINTR */
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
606 
/*
 * clock_nanosleep(): sleep on the given clock, either for a relative
 * interval or, with TIMER_ABSTIME, until an absolute time.  Unlike
 * nanosleep(), this returns an error number directly rather than
 * setting errno.  A cancellation point (via PROLOGUE in the loop).
 */
int
clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		/* absolute sleeps report no remaining time */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			/*
			 * NOTE(review): tv_sec is truncated to 32 bits
			 * before scaling to nanoseconds (here and below);
			 * confirm that requests beyond that range are
			 * intentionally unsupported.
			 */
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
682 
683 unsigned int
684 sleep(unsigned int sec)
685 {
686 	unsigned int rem = 0;
687 	timespec_t ts;
688 	timespec_t tsr;
689 
690 	ts.tv_sec = (time_t)sec;
691 	ts.tv_nsec = 0;
692 	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
693 		rem = (unsigned int)tsr.tv_sec;
694 		if (tsr.tv_nsec >= NANOSEC / 2)
695 			rem++;
696 	}
697 	return (rem);
698 }
699 
700 int
701 usleep(useconds_t usec)
702 {
703 	timespec_t ts;
704 
705 	ts.tv_sec = usec / MICROSEC;
706 	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
707 	(void) nanosleep(&ts, NULL);
708 	return (0);
709 }
710 
/* close(2) wrapper; a cancellation point */
int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}
729 
/* door_call(3C) wrapper; a cancellation point */
int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}

/*
 * fcntl(2) wrapper.  Only F_SETLKW (a blocking record-lock wait)
 * can block, so it is the only command treated as a cancellation
 * point; all other commands go straight to the system call.
 */
int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}
754 
/*
 * fdatasync() and fsync() share the __fdsync() trap; the second
 * argument selects data-only (FDSYNC) or full (FSYNC) semantics.
 * Both are cancellation points.
 */
int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FDSYNC))
}

int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FSYNC))
}

/* lockf(3C) wrapper; a cancellation point */
int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}
781 
#if !defined(_LP64)
/* large-file (64-bit offset) variant for 32-bit processes */
int
lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */
792 
/* System V message queue and msync wrappers; all cancellation points */
ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int __msync(caddr_t, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}
819 
/* openat(2) wrapper; a cancellation point */
int
openat(int fd, const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	/* fetch the optional third (mode) argument unconditionally */
	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}

/* open(2) wrapper; a cancellation point */
int
open(const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}
845 
/*
 * creat(path, mode) is defined to be equivalent to
 * open(path, O_WRONLY | O_CREAT | O_TRUNC, mode).
 */
int
creat(const char *path, mode_t mode)
{
	int oflag = O_WRONLY | O_CREAT | O_TRUNC;

	return (open(path, oflag, mode));
}
851 
#if !defined(_LP64)
/* large-file (64-bit offset) variants for 32-bit processes */
int
openat64(int fd, const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	/* fetch the optional third (mode) argument unconditionally */
	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}

int
open64(const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}

/* creat64() is open64() with fixed flags, as creat() is to open() */
int
creat64(const char *path, mode_t mode)
{
	return (open64(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}
#endif	/* !_LP64 */
885 
/* pause(2) wrapper; a cancellation point */
int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}

/* positioned read/write wrappers; all cancellation points */
ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
/* large-file variant for 32-bit processes */
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
/* large-file variant for 32-bit processes */
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
934 
/* readv(2) wrapper; a cancellation point */
ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}

/* sigpause(3C) wrapper; a cancellation point */
int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}
952 
/*
 * sigsuspend(2) wrapper; a cancellation point.
 * Uses PROLOGUE_MASK/EPILOGUE_MASK so the temporary signal mask is
 * handed to the kernel atomically (see the comment above those macros).
 */
int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}
964 
/*
 * Backing call for ppoll()-style interfaces; a cancellation point.
 * Like sigsuspend(), passes the optional temporary signal mask to
 * the kernel atomically via PROLOGUE_MASK/EPILOGUE_MASK.
 */
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}
978 
/*
 * sigtimedwait(2) wrapper; a cancellation point.
 * Returns the signal number taken, or -1 with errno set.
 */
int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
		/*
		 * A SIGCANCEL sent from the kernel or with si_code SI_LWP
		 * is the cancellation signal; act on the cancellation and
		 * report EINTR rather than returning SIGCANCEL.
		 */
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	/* copy out the siginfo only on success */
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}
1005 
/* sigwait(): sigtimedwait() with no siginfo and no timeout */
int
sigwait(sigset_t *set)
{
	return (sigtimedwait(set, NULL, NULL));
}

/* sigwaitinfo(): sigtimedwait() with no timeout */
int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (sigtimedwait(set, info, NULL));
}
1017 
/* sigqueue(3C): queue a signal with value to a process (si_code SI_QUEUE) */
int
sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}
1025 
/*
 * The _so_*() functions below are cancellation-point wrappers around
 * the socket system call traps (private libc interfaces).
 */
int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version,
    int flags)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version, flags))
}

int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}

int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}

int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}

int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}

int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}

int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}

int
_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}
1102 
/* tcdrain(3C) wrapper; a cancellation point */
int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}

/*
 * waitid(2) wrapper; a cancellation point.
 * A WNOHANG call cannot block, so it bypasses the cancellation logic
 * and goes straight to the system call.
 */
int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}
1122 
/* writev(2) wrapper; a cancellation point */
ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}
1131