xref: /illumos-gate/usr/src/lib/libc/port/threads/scalls.c (revision 9ec394dbf343c1f23c6e13c39df427f238e5a369)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <stdarg.h>
32 #include <poll.h>
33 #include <stropts.h>
34 #include <dlfcn.h>
35 #include <wait.h>
36 #include <sys/socket.h>
37 #include <sys/uio.h>
38 #include <sys/file.h>
39 #include <sys/door.h>
40 
41 /*
42  * These leading-underbar symbols exist because mistakes were made
43  * in the past that put them into non-SUNWprivate versions of
44  * the libc mapfiles.  They should be eliminated, but oh well...
45  */
46 #pragma weak _fork = fork
47 #pragma weak _read = read
48 #pragma weak _write = write
49 #pragma weak _getmsg = getmsg
50 #pragma weak _getpmsg = getpmsg
51 #pragma weak _putmsg = putmsg
52 #pragma weak _putpmsg = putpmsg
53 #pragma weak _sleep = sleep
54 #pragma weak _close = close
55 #pragma weak _creat = creat
56 #pragma weak _fcntl = fcntl
57 #pragma weak _fsync = fsync
58 #pragma weak _lockf = lockf
59 #pragma weak _msgrcv = msgrcv
60 #pragma weak _msgsnd = msgsnd
61 #pragma weak _msync = msync
62 #pragma weak _open = open
63 #pragma weak _openat = openat
64 #pragma weak _pause = pause
65 #pragma weak _readv = readv
66 #pragma weak _sigpause = sigpause
67 #pragma weak _sigsuspend = sigsuspend
68 #pragma weak _tcdrain = tcdrain
69 #pragma weak _waitid = waitid
70 #pragma weak _writev = writev
71 
72 #if !defined(_LP64)
73 #pragma weak _creat64 = creat64
74 #pragma weak _lockf64 = lockf64
75 #pragma weak _open64 = open64
76 #pragma weak _openat64 = openat64
77 #pragma weak _pread64 = pread64
78 #pragma weak _pwrite64 = pwrite64
79 #endif
80 
81 /*
82  * These are SUNWprivate, but they are being used by Sun Studio libcollector.
83  */
84 #pragma weak _fork1 = fork1
85 #pragma weak _forkall = forkall
86 
87 /*
88  * atfork_lock protects the pthread_atfork() data structures.
89  *
90  * fork_lock does double-duty.  Not only does it (and atfork_lock)
91  * serialize calls to fork() and forkall(), but it also serializes calls
92  * to thr_suspend() and thr_continue() (because fork() and forkall() also
93  * suspend and continue other threads and they want no competition).
94  *
95  * Functions called in dlopen()ed L10N objects can do anything, including
96  * call malloc() and free().  Such calls are not fork-safe when protected
97  * by an ordinary mutex that is acquired in libc's prefork processing
98  * because, with an interposed malloc library present, there would be a
99  * lock ordering violation due to the pthread_atfork() prefork function
100  * in the interposition library acquiring its malloc lock(s) before the
101  * ordinary mutex in libc being acquired by libc's prefork functions.
102  *
103  * Within libc, calls to malloc() and free() are fork-safe if the calls
104  * are made while holding no other libc locks.  This covers almost all
105  * of libc's malloc() and free() calls.  For those libc code paths, such
106  * as the above-mentioned L10N calls, that require serialization and that
107  * may call malloc() or free(), libc uses callout_lock_enter() to perform
108  * the serialization.  This works because callout_lock is not acquired as
109  * part of running the pthread_atfork() prefork handlers (to avoid the
110  * lock ordering violation described above).  Rather, it is simply
111  * reinitialized in postfork1_child() to cover the case that some
112  * now-defunct thread might have been suspended while holding it.
113  */
114 
115 void
116 fork_lock_enter(void)
117 {
118 	ASSERT(curthread->ul_critical == 0);
119 	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
120 }
121 
122 void
123 fork_lock_exit(void)
124 {
125 	ASSERT(curthread->ul_critical == 0);
126 	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
127 }
128 
129 /*
130  * Use cancel_safe_mutex_lock() to protect against being cancelled while
131  * holding callout_lock and calling outside of libc (via L10N plugins).
132  * We will honor a pending cancellation request when callout_lock_exit()
133  * is called, by calling cancel_safe_mutex_unlock().
134  */
135 void
136 callout_lock_enter(void)
137 {
138 	ASSERT(curthread->ul_critical == 0);
139 	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
140 }
141 
142 void
143 callout_lock_exit(void)
144 {
145 	ASSERT(curthread->ul_critical == 0);
146 	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
147 }
148 
/*
 * forkx(): create a child process containing a copy of only the
 * calling thread, running the pthread_atfork() handlers around the
 * __forkx() trap.  Returns the child's pid in the parent, 0 in the
 * child, or -1 with errno set (ENOTSUP for a vfork() child of a
 * multithreaded parent, EDEADLK when called from a fork handler).
 */
pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		_postfork_parent_handler();
	}

	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
255 
256 /*
257  * fork() is fork1() for both Posix threads and Solaris threads.
258  * The forkall() interface exists for applications that require
259  * the semantics of replicating all threads.
260  */
261 #pragma weak fork1 = fork
262 pid_t
263 fork(void)
264 {
265 	return (forkx(0));
266 }
267 
268 /*
269  * Much of the logic here is the same as in forkx().
270  * See the comments in forkx(), above.
271  */
/*
 * forkallx(): like forkx(), but the child replicates all of the
 * parent's threads.  The pthread_atfork() handlers are NOT run
 * (only the atfork_lock/fork_lock serialization is performed),
 * and in the child the suspended threads are resumed via
 * continue_fork(1) rather than being discarded.
 */
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	/* vfork() child: go straight to the trap; see forkx() */
	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/* cannot fork from within a fork handler */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		/* child: clear schedctl and any deferred signal */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		/* resume the replicated threads in the child, too */
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
325 
326 pid_t
327 forkall(void)
328 {
329 	return (forkallx(0));
330 }
331 
332 /*
333  * For the implementation of cancellation at cancellation points.
334  */
/*
 * PROLOGUE opens a brace scope (closed by EPILOGUE) and computes
 * 'nocancel': non-zero when cancellation must not be acted upon
 * here (vfork() child, nocancel state, libc locks held, critical
 * region, or deferred signals).  When cancellation is possible and
 * enabled, a pending cancel terminates the thread immediately via
 * pthread_exit(PTHREAD_CANCELED); otherwise a pending-but-deferred
 * cancel sets 'abort' so the caller can fail with EINTR.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				pthread_exit(PTHREAD_CANCELED);		\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}
355 
/*
 * EPILOGUE undoes PROLOGUE's state changes (stack pointer and
 * async-cancel mode) and closes the scope PROLOGUE opened.
 */
#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}
362 
363 /*
364  * Perform the body of the action required by most of the cancelable
365  * function calls.  The return(function_call) part is to allow the
366  * compiler to make the call be executed with tail recursion, which
367  * saves a register window on sparc and slightly (not much) improves
368  * the code for x86/x64 compilations.
369  */
/*
 * On 'abort' (a pending cancel that we cannot act upon directly),
 * the wrapped call is not made and the wrapper fails with EINTR.
 * The expansion requires a local 'rv' of the function's return type.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
381 
382 /*
383  * Specialized prologue for sigsuspend() and pollsys().
384  * These system calls pass a signal mask to the kernel.
385  * The kernel replaces the thread's signal mask with the
386  * temporary mask before the thread goes to sleep.  If
387  * a signal is received, the signal handler will execute
388  * with the temporary mask, as modified by the sigaction
389  * for the particular signal.
390  *
391  * We block all signals until we reach the kernel with the
392  * temporary mask.  This eliminates race conditions with
393  * setting the signal mask while signals are being posted.
394  */
/*
 * PROLOGUE_MASK additionally installs 'sigmask' (with libc's
 * reserved signals removed) as the thread's temporary mask and
 * sets ul_sigsuspend so EPILOGUE_MASK knows to restore the
 * original mask if the kernel did not.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					pthread_exit(PTHREAD_CANCELED);	\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}
424 
425 /*
426  * If a signal is taken, we return from the system call wrapper with
427  * our original signal mask restored (see code in call_user_handler()).
428  * If not (self->ul_sigsuspend is still non-zero), we must restore our
429  * original signal mask ourself.
430  */
/*
 * EPILOGUE_MASK: ul_sigsuspend still set here means no signal was
 * taken, so we must restore the original signal mask ourselves.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
441 
442 /*
443  * Cancellation prologue and epilogue functions,
444  * for cancellation points too complex to include here.
445  */
/*
 * Function form of the PROLOGUE macro, for cancellation points too
 * complex to wrap with the macro.  The computed nocancel state is
 * remembered in ul_cancel_prologue for _cancel_epilogue().
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}
467 
468 void
469 _cancel_epilogue(void)
470 {
471 	ulwp_t *self = curthread;
472 
473 	if (self->ul_cancel_prologue == 0) {
474 		self->ul_sp = 0;
475 		self->ul_cancel_async = self->ul_save_async;
476 	}
477 }
478 
479 /*
480  * Called from _thrp_join() (thr_join() is a cancellation point)
481  */
/*
 * Cancellable wait for an lwp to terminate; retries __lwp_wait()
 * on EINTR unless a cancellation has become actionable.  Returns
 * an error number (0 on success), not -1/errno.
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}
495 
/*
 * read() is a cancellation point; PERFORM() wraps the __read() trap.
 */
ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}
504 
/*
 * write() is a cancellation point; PERFORM() wraps the __write() trap.
 */
ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}
513 
/*
 * getmsg() is a cancellation point; PERFORM() wraps the __getmsg() trap.
 */
int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}
523 
/*
 * getpmsg() is a cancellation point; PERFORM() wraps the __getpmsg() trap.
 */
int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}
534 
/*
 * putmsg() is a cancellation point; PERFORM() wraps the __putmsg() trap.
 */
int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}
545 
/*
 * XPG4 variant of putmsg(): same trap, with MSG_XPG4 or'ed into
 * the flags to request XPG4 error semantics from the kernel.
 */
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}
556 
/*
 * putpmsg() is a cancellation point; PERFORM() wraps the __putpmsg() trap.
 */
int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}
567 
/*
 * XPG4 variant of putpmsg(): same trap, with MSG_XPG4 or'ed into
 * the flags to request XPG4 error semantics from the kernel.
 */
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}
578 
/*
 * nanosleep() is a cancellation point.  __nanosleep() returns an
 * error number; convert that to the -1/errno convention here.
 * An actionable-but-deferred cancel fails the call with EINTR.
 */
int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
593 
/*
 * clock_nanosleep(): sleep against the specified clock, either for
 * a relative interval or (TIMER_ABSTIME) until an absolute time.
 * Returns an error number directly (0 on success), NOT -1/errno.
 * Only CLOCK_REALTIME and CLOCK_HIGHRES are supported; the CPU-time
 * clocks yield ENOTSUP and anything else EINVAL.  The restart loops
 * below guard against early wakeups caused by the system clock
 * being reset while we slept.
 */
int
clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		/* absolute sleeps report no remaining time */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
669 
670 unsigned int
671 sleep(unsigned int sec)
672 {
673 	unsigned int rem = 0;
674 	timespec_t ts;
675 	timespec_t tsr;
676 
677 	ts.tv_sec = (time_t)sec;
678 	ts.tv_nsec = 0;
679 	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
680 		rem = (unsigned int)tsr.tv_sec;
681 		if (tsr.tv_nsec >= NANOSEC / 2)
682 			rem++;
683 	}
684 	return (rem);
685 }
686 
687 int
688 usleep(useconds_t usec)
689 {
690 	timespec_t ts;
691 
692 	ts.tv_sec = usec / MICROSEC;
693 	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
694 	(void) nanosleep(&ts, NULL);
695 	return (0);
696 }
697 
/*
 * close() is a cancellation point; notify libaio of the close
 * before performing the __close() trap.
 */
int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}
716 
/*
 * creat() is a cancellation point; PERFORM() wraps the __creat() trap.
 */
int
creat(const char *path, mode_t mode)
{
	extern int __creat(const char *, mode_t);
	int rv;

	PERFORM(__creat(path, mode))
}
725 
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) variant of creat() for 32-bit processes.
 */
int
creat64(const char *path, mode_t mode)
{
	extern int __creat64(const char *, mode_t);
	int rv;

	PERFORM(__creat64(path, mode))
}
#endif	/* !_LP64 */
736 
/*
 * door_call() is treated as a cancellation point; PERFORM() wraps
 * the __door_call() trap.
 */
int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}
745 
/*
 * fcntl() is a cancellation point only for the blocking F_SETLKW
 * command; every other command goes straight to the __fcntl() trap.
 * The optional third argument is fetched as an intptr_t so it can
 * carry either an int or a pointer.
 */
int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}
761 
/*
 * fdatasync() is a cancellation point; both fdatasync() and fsync()
 * use the __fdsync() trap, distinguished by the FDSYNC/FSYNC flag.
 */
int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FDSYNC))
}
770 
/*
 * fsync() is a cancellation point; see fdatasync() above for the
 * shared __fdsync() trap.
 */
int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FSYNC))
}
779 
/*
 * lockf() is a cancellation point; PERFORM() wraps the __lockf() trap.
 */
int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}
788 
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) variant of lockf() for 32-bit processes.
 */
int
lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */
799 
/*
 * msgrcv() is a cancellation point; PERFORM() wraps the __msgrcv() trap.
 */
ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}
808 
/*
 * msgsnd() is a cancellation point; PERFORM() wraps the __msgsnd() trap.
 */
int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}
817 
/*
 * msync() is a cancellation point; PERFORM() wraps the __msync() trap.
 */
int
msync(caddr_t addr, size_t len, int flags)
{
	extern int __msync(caddr_t, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}
826 
/*
 * open() is a cancellation point.  The optional mode argument is
 * always fetched; callers that omit it (no O_CREAT) simply pass
 * garbage that the kernel ignores.
 */
int
open(const char *path, int oflag, ...)
{
	extern int __open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}
840 
/*
 * openat() is a cancellation point; see open() above for the
 * variadic mode handling.
 */
int
openat(int fd, const char *path, int oflag, ...)
{
	extern int __openat(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}
854 
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) variant of open() for 32-bit processes;
 * a cancellation point, like open().
 */
int
open64(const char *path, int oflag, ...)
{
	extern int __open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}

/*
 * Large-file (64-bit offset) variant of openat() for 32-bit processes.
 */
int
openat64(int fd, const char *path, int oflag, ...)
{
	extern int __openat64(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}
#endif	/* !_LP64 */
884 
/*
 * pause() is a cancellation point; PERFORM() wraps the __pause() trap.
 */
int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}
893 
/*
 * pread() is a cancellation point; PERFORM() wraps the __pread() trap.
 */
ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}
902 
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) variant of pread() for 32-bit processes.
 */
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
913 
/*
 * pwrite() is a cancellation point; PERFORM() wraps the __pwrite() trap.
 */
ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}
922 
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) variant of pwrite() for 32-bit processes.
 */
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
933 
/*
 * readv() is a cancellation point; PERFORM() wraps the __readv() trap.
 */
ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}
942 
/*
 * sigpause() is a cancellation point; PERFORM() wraps the
 * __sigpause() trap.
 */
int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}
951 
/*
 * sigsuspend() is a cancellation point that passes a signal mask
 * to the kernel; PROLOGUE_MASK/EPILOGUE_MASK manage the temporary
 * mask handoff (see the comments above their definitions).
 */
int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}
963 
/*
 * _pollsys() is the cancellable poll primitive; like sigsuspend()
 * it hands an optional signal mask to the kernel, so it uses the
 * PROLOGUE_MASK/EPILOGUE_MASK protocol.
 */
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}
977 
/*
 * sigtimedwait() is a cancellation point.  If the wait is satisfied
 * by libc's internal SIGCANCEL (sent by the kernel or via SI_LWP),
 * process the cancellation with do_sigcancel() and fail with EINTR
 * instead of exposing SIGCANCEL to the application.  On success the
 * siginfo is copied out to 'infop' if the caller supplied one.
 */
int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}
1004 
1005 int
1006 sigwait(sigset_t *set)
1007 {
1008 	return (sigtimedwait(set, NULL, NULL));
1009 }
1010 
1011 int
1012 sigwaitinfo(const sigset_t *set, siginfo_t *info)
1013 {
1014 	return (sigtimedwait(set, info, NULL));
1015 }
1016 
1017 int
1018 sigqueue(pid_t pid, int signo, const union sigval value)
1019 {
1020 	extern int __sigqueue(pid_t pid, int signo,
1021 	    /* const union sigval */ void *value, int si_code, int block);
1022 	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
1023 }
1024 
/*
 * Cancellable wrapper for the accept() socket trap; the 'version'
 * argument is the libsocket interface version passed through to
 * the kernel.
 */
int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version))
}
1033 
/*
 * Cancellable wrapper for the connect() socket trap.
 */
int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}
1042 
/*
 * Cancellable wrapper for the recv() socket trap.
 */
int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}
1051 
/*
 * Cancellable wrapper for the recvfrom() socket trap.
 */
int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}
1062 
/*
 * Cancellable wrapper for the recvmsg() socket trap.
 */
int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}
1071 
/*
 * Cancellable wrapper for the send() socket trap.
 */
int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}
1080 
/*
 * Cancellable wrapper for the sendmsg() socket trap.
 */
int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}
1089 
/*
 * Cancellable wrapper for the sendto() socket trap.
 */
int
_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}
1100 
/*
 * tcdrain() is a cancellation point; PERFORM() wraps the
 * __tcdrain() trap.
 */
int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}
1109 
/*
 * waitid() is a cancellation point only when it can block; a
 * WNOHANG (non-blocking) call bypasses the cancellation protocol
 * and goes straight to the __waitid() trap.
 */
int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}
1120 
/*
 * writev() is a cancellation point; PERFORM() wraps the __writev() trap.
 */
ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}
1129