/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <wait.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/door.h>

/*
 * These leading-underbar symbols exist because mistakes were made
 * in the past that put them into non-SUNWprivate versions of
 * the libc mapfiles.  They should be eliminated, but oh well...
 */
#pragma weak _fork = fork
#pragma weak _read = read
#pragma weak _write = write
#pragma weak _getmsg = getmsg
#pragma weak _getpmsg = getpmsg
#pragma weak _putmsg = putmsg
#pragma weak _putpmsg = putpmsg
#pragma weak _sleep = sleep
#pragma weak _close = close
#pragma weak _creat = creat
#pragma weak _fcntl = fcntl
#pragma weak _fsync = fsync
#pragma weak _lockf = lockf
#pragma weak _msgrcv = msgrcv
#pragma weak _msgsnd = msgsnd
#pragma weak _msync = msync
#pragma weak _open = open
#pragma weak _openat = openat
#pragma weak _pause = pause
#pragma weak _readv = readv
#pragma weak _sigpause = sigpause
#pragma weak _sigsuspend = sigsuspend
#pragma weak _tcdrain = tcdrain
#pragma weak _waitid = waitid
#pragma weak _writev = writev

#if !defined(_LP64)
#pragma weak _creat64 = creat64
#pragma weak _lockf64 = lockf64
#pragma weak _open64 = open64
#pragma weak _openat64 = openat64
#pragma weak _pread64 = pread64
#pragma weak _pwrite64 = pwrite64
#endif

/*
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing:
 * with an interposed malloc library present, there would be a lock
 * ordering violation, because the pthread_atfork() prefork function in
 * the interposition library acquires its malloc lock(s) before libc's
 * prefork functions acquire the ordinary mutex in libc.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
 */
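
/*
 * To illustrate the ordering hazard described above (this is a sketch,
 * not code from this file; some_lmutex and the plugin callout are
 * hypothetical), consider an interposed malloc library whose prefork
 * handler takes its malloc lock.  The two paths below then acquire the
 * same pair of locks in opposite orders:
 *
 *	normal operation, in a hypothetical L10N code path:
 *		lmutex_lock(&some_lmutex);
 *		(*plugin_callout)();	// calls malloc(), which takes
 *					// the interposer's malloc lock
 *		lmutex_unlock(&some_lmutex);
 *
 *	fork() time:
 *		the interposer's prefork handler takes its malloc lock(s),
 *		then libc's prefork processing acquires some_lmutex.
 *
 * This is why such code paths serialize with callout_lock_enter()
 * instead of with an ordinary mutex acquired during prefork processing.
 */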

void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

/*
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
 */
void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}
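
/*
 * A minimal sketch of how these functions are meant to be used, under
 * the assumptions above (not code from this file; the plugin function
 * is hypothetical):
 *
 *	callout_lock_enter();
 *	(*l10n_plugin_func)(arg);	// may call malloc()/free()
 *	callout_lock_exit();		// honors any pending cancellation
 *
 * Because callout_lock is never acquired by the prefork handlers and is
 * simply reinitialized in postfork1_child(), the plugin's malloc() and
 * free() calls remain fork-safe.
 */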

pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		_postfork_parent_handler();
	}

	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

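/*
 * From the application's point of view, the atfork handler ordering
 * implemented by forkx() above looks like this (an illustrative sketch
 * only; the handler names are hypothetical):
 *
 *	pthread_atfork(prepare, parent, child);
 *	...
 *	pid = fork();
 *		// prepare() ran in the parent before __forkx()
 *		// parent() runs in the parent after __forkx()
 *		// child() runs in the new child after __forkx()
 *
 * A prepare() handler that itself calls fork() or pthread_atfork()
 * fails with EDEADLK, as noted above; otherwise the handlers may do
 * as they please, although they run with all signals deferred.
 */
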
/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = fork
pid_t
fork(void)
{
	return (forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

pid_t
forkall(void)
{
	return (forkallx(0));
}

/*
 * For the implementation of cancellation at cancellation points.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				pthread_exit(PTHREAD_CANCELED);		\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return (function_call) part allows the compiler
 * to emit the call as a tail call, which saves a register window on
 * sparc and slightly (not much) improves the code for x86/x64
 * compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);

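/*
 * For illustration, a wrapper written with PERFORM(), such as read()
 * below, behaves as if it had been written out long-hand like this
 * (a sketch of the expansion, not additional code):
 *
 *	ssize_t
 *	read(int fd, void *buf, size_t size)
 *	{
 *		ssize_t rv;
 *		PROLOGUE
 *		if (abort) {
 *			*self->ul_errnop = EINTR;
 *			return (-1);
 *		}
 *		if (nocancel)
 *			return (__read(fd, buf, size));
 *		rv = __read(fd, buf, size);
 *		EPILOGUE
 *		return (rv);
 *	}
 */
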
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					pthread_exit(PTHREAD_CANCELED);	\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}

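/*
 * For context, the classic application-level use of sigsuspend() that
 * the atomic mask replacement above exists to support (an illustrative
 * sketch; sig_received is a hypothetical flag set by a signal handler):
 *
 *	sigset_t block, orig, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	(void) sigprocmask(SIG_BLOCK, &block, &orig);
 *	waitmask = orig;
 *	sigdelset(&waitmask, SIGUSR1);
 *	while (!sig_received)
 *		(void) sigsuspend(&waitmask);	// mask swap + sleep, atomic
 *	(void) sigprocmask(SIG_SETMASK, &orig, NULL);
 *
 * If the temporary mask were installed with sigprocmask() and the thread
 * then called pause(), a signal arriving between the two calls would be
 * missed; the atomic replacement in the kernel closes that window.
 */
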
/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

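/*
 * A sketch of how such a cancellation point brackets its blocking work
 * with these functions (the blocking operation shown is hypothetical):
 *
 *	_cancel_prologue();
 *	error = __some_blocking_syscall(...);
 *	_cancel_epilogue();
 */
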
/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}

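/*
 * __xpg4_putmsg() and __xpg4_putpmsg() (below) are the XPG4.2 flavors
 * of putmsg() and putpmsg(); the added MSG_XPG4 flag asks the kernel
 * to apply XPG4.2 semantics to the call.
 */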
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

int
clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}

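/*
 * A typical caller's view of the TIMER_ABSTIME case handled above
 * (an illustrative sketch only):
 *
 *	timespec_t deadline;
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;		// sleep until now + 5 seconds
 *	while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *	    &deadline, NULL) == EINTR)
 *		continue;
 *
 * Because the deadline is absolute, restarting after EINTR requires no
 * remaining-time bookkeeping, which is also why rmtp is ignored (set to
 * NULL) in the TIMER_ABSTIME case above.
 */
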
unsigned int
sleep(unsigned int sec)
{
	unsigned int rem = 0;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

int
usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	(void) nanosleep(&ts, NULL);
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int __creat(const char *, mode_t);
	int rv;

	PERFORM(__creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int __creat64(const char *, mode_t);
	int rv;

	PERFORM(__creat64(path, mode))
}
#endif	/* !_LP64 */

int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}

int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
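	/*
	 * F_SETLKW (wait for a file lock) is the only fcntl() command
	 * that blocks indefinitely, so it is the only one treated as a
	 * cancellation point here; everything else goes straight through.
	 */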
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}

int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FDSYNC))
}

int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FSYNC))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int __msync(caddr_t, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int __open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}

int
openat(int fd, const char *path, int oflag, ...)
{
	extern int __openat(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int __open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}

int
openat64(int fd, const char *path, int oflag, ...)
{
	extern int __openat64(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}

int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
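		/*
		 * A SIGCANCEL generated by the kernel or by an
		 * lwp-directed kill (SI_LWP) is a thread cancellation
		 * notification rather than a user-queued signal:
		 * act on the cancellation and make this call appear
		 * to have been interrupted.
		 */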
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

int
sigwait(sigset_t *set)
{
	return (sigtimedwait(set, NULL, NULL));
}

int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (sigtimedwait(set, info, NULL));
}

int
sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version))
}

int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}

int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}

int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}

int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}

int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}

int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}

int
_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}

int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

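	/*
	 * A WNOHANG wait never blocks, so it is not treated
	 * as a cancellation point.
	 */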
	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}