xref: /titanic_41/usr/src/lib/libc/port/threads/scalls.c (revision 5aefb6555731130ca4fd295960123d71f2d21fe8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #pragma ident	"%Z%%M%	%I%	%E% SMI"
29 
30 #include "lint.h"
31 #include "thr_uberdata.h"
32 #include <stdarg.h>
33 #include <poll.h>
34 #include <stropts.h>
35 #include <dlfcn.h>
36 #include <sys/uio.h>
37 
38 /*
39  * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
40  * a critical region) because the second thread to reach this point would
41  * become unstoppable and the first thread would hang waiting for the
42  * second thread to stop itself.  Therefore we don't use lmutex_lock() in
43  * fork_lock_enter(), but we do defer signals (the other form of concurrency).
44  *
45  * fork_lock_enter() does triple-duty.  Not only does it serialize
46  * calls to fork() and forkall(), but it also serializes calls to
47  * thr_suspend() (fork() and forkall() also suspend other threads),
48  * and furthermore it serializes I18N calls to functions in other
49  * dlopen()ed L10N objects that might be calling malloc()/free().
50  */
51 
/*
 * Report a deadlock to the application via thread_error().  The message
 * produced is: "deadlock condition: <who>() called from a fork handler".
 */
static void
fork_lock_error(const char *who)
{
	char buf[200];

	buf[0] = '\0';
	(void) strlcat(buf, "deadlock condition: ", sizeof (buf));
	(void) strlcat(buf, who, sizeof (buf));
	(void) strlcat(buf, "() called from a fork handler", sizeof (buf));
	thread_error(buf);
}
62 
/*
 * Acquire the fork serialization lock (see the block comment above).
 * Signals are deferred via sigoff() for as long as the lock is held;
 * the matching sigon() is in fork_lock_exit().  The lock is recursive
 * for a single owning thread.  Returns 0, or EDEADLK when re-entered
 * from a fork handler (self->ul_fork set) so the caller can fail the
 * operation instead of deadlocking.
 */
int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);	/* defer signals; re-enabled in fork_lock_exit() */
	(void) _private_mutex_lock(&udp->fork_lock);
	while (udp->fork_count) {
		if (udp->fork_owner == self) {
			/*
			 * This is like a recursive lock except that we
			 * inform the caller if we have been called from
			 * a fork handler and let it deal with that fact.
			 */
			if (self->ul_fork) {
				/*
				 * We have been called from a fork handler.
				 */
				if (who != NULL &&
				    udp->uberflags.uf_thread_error_detection)
					fork_lock_error(who);
				error = EDEADLK;
			}
			break;
		}
		ASSERT(self->ul_fork == 0);
		(void) _cond_wait(&udp->fork_cond, &udp->fork_lock);
	}
	/* take (or re-take) ownership, even on the EDEADLK path */
	udp->fork_owner = self;
	udp->fork_count++;
	(void) _private_mutex_unlock(&udp->fork_lock);
	return (error);
}
99 
/*
 * Release one level of the fork serialization lock.  When the last
 * (outermost) hold is dropped, ownership is cleared and one waiter in
 * fork_lock_enter() is woken.  Re-enables signals (sigon) to balance
 * the sigoff() performed in fork_lock_enter().
 */
void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	(void) _private_mutex_lock(&udp->fork_lock);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0) {
		udp->fork_owner = NULL;
		(void) _cond_signal(&udp->fork_cond);
	}
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}
116 
/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork = _fork1
#pragma weak _fork = _fork1
#pragma weak fork1 = _fork1
/*
 * Returns the child's pid to the parent, 0 in the child, or -1 with
 * errno set on failure (EDEADLK when called from a fork handler,
 * ENOTSUP from a vfork() child of a multithreaded parent).
 */
pid_t
_fork1(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __fork1();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigon() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __fork1().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork1() from the current thread.
	 */
	(void) _private_mutex_lock(&udp->fork_lock);
	suspend_fork();
	(void) _private_mutex_unlock(&udp->fork_lock);

	pid = __fork1();

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __fork1(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		_postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork1() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}
225 
/*
 * Much of the logic here is the same as in fork1().
 * See the comments in fork1(), above.
 *
 * Unlike fork1(), forkall() replicates every thread in the child, so
 * the child resumes the suspended threads (continue_fork(1)) instead
 * of resetting libc to a single-thread state.  Note that the atfork
 * handlers are NOT run for forkall(); they apply to fork1() only.
 */
#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/* vfork() child: bypass the fork machinery entirely */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkall();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkall();

	if (pid == 0) {
		/* child: discard stale schedctl and any deferred signal */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}
279 
/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */

/*
 * PROLOGUE opens a brace that EPILOGUE closes -- the two must always be
 * used as a matched pair.  Unless this thread is a vfork() child or has
 * cancellation structurally disabled (ul_nocancel), PROLOGUE enables
 * asynchronous cancellation for the duration of the blocking call
 * (honoring an already-pending cancellation immediately) and publishes
 * the stack pointer in ul_sp for the benefit of garbage collectors;
 * EPILOGUE undoes both.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}
304 
/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 *
 * The caller must declare a local named 'rv' of the function's return
 * type; PERFORM expands to PROLOGUE ... EPILOGUE and returns rv.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
319 
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 *
 * PROLOGUE_MASK must always be paired with EPILOGUE_MASK (below);
 * like PROLOGUE/EPILOGUE, the pair brackets a single scope.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
377 
378 /*
379  * Called from _thrp_join() (thr_join() is a cancellation point)
380  */
381 int
382 lwp_wait(thread_t tid, thread_t *found)
383 {
384 	int error;
385 
386 	PROLOGUE
387 	while ((error = __lwp_wait(tid, found)) == EINTR)
388 		;
389 	EPILOGUE
390 	return (error);
391 }
392 
/*
 * Cancellation-point wrapper for read(2); see the PERFORM macro above.
 */
ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;	/* required by PERFORM */

	PERFORM(_read(fd, buf, size))
}
401 
/*
 * Cancellation-point wrapper for write(2).
 */
ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}
410 
/*
 * Cancellation-point wrapper for getmsg(2).
 */
int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}
420 
/*
 * Cancellation-point wrapper for getpmsg(2).
 */
int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
		int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}
431 
/*
 * Cancellation-point wrapper for putmsg(2).
 */
int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
		const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}
442 
/*
 * XPG4 variant of putmsg(): identical except that MSG_XPG4 is OR-ed
 * into the flags to request XPG4 error semantics from the kernel.
 */
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
		const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}
453 
/*
 * Cancellation-point wrapper for putpmsg(2).
 */
int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
		const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}
464 
/*
 * XPG4 variant of putpmsg(); see __xpg4_putmsg() for the MSG_XPG4 flag.
 */
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
		const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}
475 
/*
 * Cancellable nanosleep().  ___nanosleep() returns an error number;
 * convert it to the usual -1/errno convention expected of nanosleep(3R).
 */
int
__nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = ___nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
490 
/*
 * Cancellable clock_nanosleep().  Unlike __nanosleep(), this returns an
 * error NUMBER (0 on success), per the clock_nanosleep() specification.
 * Only CLOCK_REALTIME and CLOCK_HIGHRES are supported; the CPU-time
 * clocks yield ENOTSUP and anything else EINVAL.  For TIMER_ABSTIME the
 * remaining-time pointer is ignored (rmtp forced to NULL), as required.
 * After the sleep, we may loop to re-issue the sleep if it ended early
 * because someone reset the system clock.
 */
int
__clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;	/* no remaining time for an absolute sleep */
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = ___nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 *
		 * NOTE(review): tv_sec is truncated through (uint32_t)
		 * before widening to hrtime_t -- presumably safe for any
		 * realistic sleep duration, but confirm for tv_sec > 2^32.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
				rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
566 
#pragma weak sleep = _sleep
/*
 * sleep(3C): suspend for sec seconds.  Returns the unslept time in
 * seconds if interrupted by a signal (rounding a remainder of half a
 * second or more up to the next second), else 0.
 */
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = ___nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}
588 
#pragma weak usleep = _usleep
/*
 * usleep(3C): suspend for usec microseconds.  Always returns 0, even
 * when the sleep is cut short by a signal (historical behavior); the
 * ___nanosleep() result is deliberately discarded.
 */
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) ___nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}
602 
/*
 * Cancellation-point wrapper for close(2).
 */
int
close(int fildes)
{
	extern int _close(int);
	int rv;

	PERFORM(_close(fildes))
}
611 
/*
 * Cancellation-point wrapper for creat(2).
 */
int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}
620 
#if !defined(_LP64)
/*
 * Large-file (64-bit offset) variant of creat(); 32-bit processes only.
 */
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */
631 
/*
 * fcntl(2) wrapper.  Only F_SETLKW (blocking record lock) is a
 * cancellation point; every other command bypasses the cancellation
 * machinery.  The optional third argument is fetched as an intptr_t
 * so that both integer and pointer arguments are carried intact.
 */
int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}
647 
/*
 * Cancellation-point wrapper for fsync(2).
 */
int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}
656 
/*
 * Cancellation-point wrapper for lockf(3C).
 */
int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}
665 
#if !defined(_LP64)
/*
 * Large-file variant of lockf(); 32-bit processes only.
 */
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */
676 
/*
 * Cancellation-point wrapper for msgrcv(2).
 */
ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}
685 
/*
 * Cancellation-point wrapper for msgsnd(2).
 */
int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}
694 
/*
 * Cancellation-point wrapper for msync(3C).
 */
int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}
703 
/*
 * Cancellation-point wrapper for open(2).  The optional mode argument
 * is always fetched; it is meaningful only when oflag includes O_CREAT.
 */
int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}
717 
#if !defined(_LP64)
/*
 * Large-file variant of open(); 32-bit processes only.
 */
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */
733 
/*
 * Cancellation-point wrapper for pause(2).
 */
int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}
742 
/*
 * Cancellation-point wrapper for pread(2).
 */
ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}
751 
#if !defined(_LP64)
/*
 * Large-file variant of pread(); 32-bit processes only.
 */
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
762 
/*
 * Cancellation-point wrapper for pwrite(2).
 */
ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}
771 
#if !defined(_LP64)
/*
 * Large-file variant of pwrite(); 32-bit processes only.
 */
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
782 
/*
 * Cancellation-point wrapper for readv(2).
 */
ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}
791 
/*
 * Cancellation-point wrapper for sigpause(3C).
 */
int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}
800 
#pragma weak sigsuspend = _sigsuspend
/*
 * sigsuspend(2): atomically install a temporary signal mask and wait
 * for a signal.  Uses PROLOGUE_MASK/EPILOGUE_MASK (see above) to block
 * all signals until the kernel has installed the temporary mask.
 */
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}
813 
/*
 * pollsys(): poll with an optional temporary signal mask (the basis of
 * ppoll()-style semantics).  Like sigsuspend(), it uses the _MASK
 * prologue/epilogue to hand the mask to the kernel race-free; sigmask
 * may be NULL, in which case no mask manipulation occurs.
 */
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
		const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}
827 
/*
 * Wait for one of the signals in 'set', with an optional timeout.
 * Returns the signal number, or -1 with errno set.  SIGCANCEL posted
 * by the kernel or by lwp_kill() is never surfaced to the caller; it
 * is routed to do_sigcancel() and the call fails with EINTR.  The
 * siginfo is collected into a local and copied out only on success,
 * so *infop is left untouched on failure.
 */
int
__sigtimedwait(const sigset_t *set, siginfo_t *infop,
	const timespec_t *timeout)
{
	extern int ___sigtimedwait(const sigset_t *, siginfo_t *,
		const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = ___sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}
850 
851 #pragma weak sigwait = _sigwait
852 int
853 _sigwait(sigset_t *set)
854 {
855 	return (__sigtimedwait(set, NULL, NULL));
856 }
857 
/*
 * Cancellation-point wrapper for tcdrain(3C).
 */
int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}
866 
/*
 * Cancellation-point wrapper for wait(2).
 */
pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}
875 
/*
 * Cancellation-point wrapper for wait3(3C).
 */
pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}
884 
/*
 * Cancellation-point wrapper for waitid(2).
 */
int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}
893 
/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
/*
 * Cancellation-point wrapper for waitpid(2).
 */
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}
907 
/*
 * Cancellation-point wrapper for writev(2).
 */
ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}
916