/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * atfork_lock also does double-duty.  Not only does it protect the
 * pthread_atfork() data structures, but it also serializes I18N calls
 * to functions in dlopen()ed L10N objects.  These functions can do
 * anything, including call malloc() and free().  Such calls are not
 * fork-safe when protected by an ordinary mutex because, with an
 * interposed malloc library present, there would be a lock-ordering
 * violation: the pthread_atfork() prefork function in the interposition
 * library would acquire its malloc lock(s) before libc's prefork
 * functions could acquire the ordinary mutex in libc.
 *
 * Within libc, calls to malloc() and free() are fork-safe only if the
 * calls are made while holding no other libc locks.  This covers almost
 * all of libc's malloc() and free() calls.  For those libc code paths,
 * such as the above-mentioned I18N calls, that require serialization and
 * that may call malloc() or free(), libc uses atfork_lock_enter() to perform
 * the serialization.  This works because atfork_lock is acquired by fork()
 * before any of the pthread_atfork() prefork functions are called.
 */

void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

void
atfork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_lock(&curthread->ul_uberdata->atfork_lock);
}

void
atfork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_unlock(&curthread->ul_uberdata->atfork_lock);
}
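
/*
 * Illustrative sketch (not part of libc): a serialized libc code path
 * that must call into a dlopen()ed L10N object, where the called
 * function may in turn call malloc() or free(), uses atfork_lock_enter()
 * rather than an ordinary mutex so that an interposed malloc library
 * cannot create a lock-ordering violation across fork().  The names
 * below are hypothetical.
 *
 *	char *
 *	l10n_translate(char *(*gettext_fn)(const char *), const char *msgid)
 *	{
 *		char *msg;
 *
 *		atfork_lock_enter();
 *		msg = gettext_fn(msgid);
 *		atfork_lock_exit();
 *		return (msg);
 *	}
 *
 * Holding atfork_lock around the call is fork-safe because fork()
 * acquires atfork_lock before any pthread_atfork() prefork handler,
 * including an interposed malloc's prefork handler, is run.
 */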

#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) _private_mutex_lock(&udp->atfork_lock);
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) _private_mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_parent_handler();
	}

	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
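
/*
 * Illustrative sketch (not part of libc): how an application or library
 * typically uses pthread_atfork() so that the handlers invoked by
 * _prefork_handler(), _postfork_parent_handler() and
 * _postfork_child_handler() above leave its own lock in a sane state in
 * the child.  The names below are hypothetical.  As described above, a
 * handler that calls fork() or pthread_atfork() fails with EDEADLK, and
 * handlers run without receiving any signals.
 *
 *	static pthread_mutex_t mylib_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void
 *	mylib_prepare(void)
 *	{
 *		(void) pthread_mutex_lock(&mylib_lock);
 *	}
 *
 *	static void
 *	mylib_release(void)
 *	{
 *		(void) pthread_mutex_unlock(&mylib_lock);
 *	}
 *
 *	void
 *	mylib_init(void)
 *	{
 *		(void) pthread_atfork(mylib_prepare, mylib_release,
 *		    mylib_release);
 *	}
 */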

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
	return (_private_forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) _private_mutex_lock(&udp->atfork_lock);
	(void) _private_mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) _private_mutex_unlock(&udp->fork_lock);
	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}

/*
 * Hacks for system calls to provide cancellation
 * and improve Java garbage collection.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return (function_call) part is there to let the
 * compiler turn the call into a tail call, which saves a register
 * window on sparc and slightly (not much) improves the generated code
 * for x86/x64 compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
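
/*
 * For reference, a minimal sketch of what PERFORM() expands to in a
 * simple wrapper such as read() below, with PROLOGUE and EPILOGUE
 * written out inline:
 *
 *	ssize_t
 *	read(int fd, void *buf, size_t size)
 *	{
 *		extern ssize_t _read(int, void *, size_t);
 *		ssize_t rv;
 *
 *		{
 *			ulwp_t *self = curthread;
 *			int nocancel = (self->ul_vfork | self->ul_nocancel);
 *			if (nocancel == 0) {
 *				self->ul_save_async = self->ul_cancel_async;
 *				if (!self->ul_cancel_disabled) {
 *					self->ul_cancel_async = 1;
 *					if (self->ul_cancel_pending)
 *						_pthread_exit(PTHREAD_CANCELED);
 *				}
 *				self->ul_sp = stkptr();
 *			}
 *			if (nocancel)
 *				return (_read(fd, buf, size));
 *			rv = _read(fd, buf, size);
 *			if (nocancel == 0) {
 *				self->ul_sp = 0;
 *				self->ul_cancel_async = self->ul_save_async;
 *			}
 *		}
 *		return (rv);
 *	}
 */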

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
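
/*
 * A minimal caller-side sketch (application code, not libc) of the
 * pattern the machinery above supports: the caller keeps SIGUSR1
 * blocked while testing a flag and relies on sigsuspend() to install
 * the temporary mask and go to sleep atomically, so no wakeup can be
 * lost.  The flag and signal choice are hypothetical.
 *
 *	static volatile sig_atomic_t got_work;
 *
 *	void
 *	wait_for_work(void)
 *	{
 *		sigset_t block, old;
 *
 *		(void) sigemptyset(&block);
 *		(void) sigaddset(&block, SIGUSR1);
 *		(void) sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_work)
 *			(void) sigsuspend(&old);
 *		(void) sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */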

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
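
/*
 * A minimal caller-side sketch (application code, not libc) of the
 * TIMER_ABSTIME behavior implemented above: sleeping until absolute
 * deadlines gives drift-free periodic wakeups, and, unlike nanosleep(),
 * clock_nanosleep() returns the error number directly instead of
 * setting errno.  do_periodic_work() is a hypothetical function.
 *
 *	timespec_t next;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &next);
 *	for (;;) {
 *		next.tv_sec += 1;
 *		while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *		    &next, NULL) == EINTR)
 *			continue;
 *		do_periodic_work();
 *	}
 */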

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

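	/*
	 * Tell the asynchronous I/O subsystem that this descriptor is
	 * going away, so that any outstanding asynchronous I/O on it
	 * is dealt with before the descriptor is actually closed.
	 */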
	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
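	/*
	 * Only F_SETLKW can block indefinitely waiting for a file lock,
	 * so it is the only fcntl() command treated as a cancellation
	 * point.
	 */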
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fdatasync(int fildes)
{
	extern int _fdatasync(int);
	int rv;

	PERFORM(_fdatasync(fildes))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
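	/*
	 * A SIGCANCEL generated by the kernel or by the library (SI_LWP)
	 * is a cancellation request, not a signal the caller is waiting
	 * for; act on the cancellation and make the call appear to have
	 * been interrupted.
	 */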
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}