xref: /illumos-gate/usr/src/lib/libc/port/threads/scalls.c (revision 51ccf66eff01cf7e19106ce91bc35ef397259faf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <stdarg.h>
32 #include <poll.h>
33 #include <stropts.h>
34 #include <dlfcn.h>
35 #include <wait.h>
36 #include <sys/socket.h>
37 #include <sys/uio.h>
38 #include <sys/file.h>
39 #include <sys/door.h>
40 
41 /*
42  * atfork_lock protects the pthread_atfork() data structures.
43  *
44  * fork_lock does double-duty.  Not only does it (and atfork_lock)
45  * serialize calls to fork() and forkall(), but it also serializes calls
46  * to thr_suspend() and thr_continue() (because fork() and forkall() also
47  * suspend and continue other threads and they want no competition).
48  *
49  * Functions called in dlopen()ed L10N objects can do anything, including
50  * call malloc() and free().  Such calls are not fork-safe when protected
51  * by an ordinary mutex that is acquired in libc's prefork processing
52  * because, with an interposed malloc library present, there would be a
53  * lock ordering violation due to the pthread_atfork() prefork function
54  * in the interposition library acquiring its malloc lock(s) before the
55  * ordinary mutex in libc being acquired by libc's prefork functions.
56  *
57  * Within libc, calls to malloc() and free() are fork-safe if the calls
58  * are made while holding no other libc locks.  This covers almost all
59  * of libc's malloc() and free() calls.  For those libc code paths, such
60  * as the above-mentioned L10N calls, that require serialization and that
61  * may call malloc() or free(), libc uses callout_lock_enter() to perform
62  * the serialization.  This works because callout_lock is not acquired as
63  * part of running the pthread_atfork() prefork handlers (to avoid the
64  * lock ordering violation described above).  Rather, it is simply
65  * reinitialized in postfork1_child() to cover the case that some
66  * now-defunct thread might have been suspended while holding it.
67  */
68 
69 void
70 fork_lock_enter(void)
71 {
72 	ASSERT(curthread->ul_critical == 0);
73 	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
74 }
75 
76 void
77 fork_lock_exit(void)
78 {
79 	ASSERT(curthread->ul_critical == 0);
80 	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
81 }
82 
83 /*
84  * Use cancel_safe_mutex_lock() to protect against being cancelled while
85  * holding callout_lock and calling outside of libc (via L10N plugins).
86  * We will honor a pending cancellation request when callout_lock_exit()
87  * is called, by calling cancel_safe_mutex_unlock().
88  */
89 void
90 callout_lock_enter(void)
91 {
92 	ASSERT(curthread->ul_critical == 0);
93 	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
94 }
95 
96 void
97 callout_lock_exit(void)
98 {
99 	ASSERT(curthread->ul_critical == 0);
100 	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
101 }
102 
#pragma weak forkx = _forkx
/*
 * forkx(2) wrapper: create a single-threaded child of a (possibly
 * multithreaded) parent.  Runs the pthread_atfork() prefork handlers,
 * suspends all other threads, blocks signals across the __forkx()
 * trap, then repairs libc state in the child and runs the appropriate
 * postfork handlers in each process.  Returns the child's pid to the
 * parent, 0 to the child, or -1 with errno set on error.
 */
pid_t
_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	/* defer signals while ul_fork is being examined and set */
	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		_postfork_parent_handler();
	}

	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
210 
211 /*
212  * fork() is fork1() for both Posix threads and Solaris threads.
213  * The forkall() interface exists for applications that require
214  * the semantics of replicating all threads.
215  */
216 #pragma weak fork1 = _fork
217 #pragma weak _fork1 = _fork
218 #pragma weak fork = _fork
219 pid_t
220 _fork(void)
221 {
222 	return (_forkx(0));
223 }
224 
225 /*
226  * Much of the logic here is the same as in forkx().
227  * See the comments in forkx(), above.
228  */
#pragma weak forkallx = _forkallx
/*
 * forkallx(2) wrapper: replicate all threads in the child.
 * The structure parallels _forkx() above, except that no
 * pthread_atfork() handlers are invoked here (atfork_lock is
 * taken only to serialize with forkx()) and the child continues
 * all of its replicated threads via continue_fork(1) instead of
 * performing the single-thread postfork repair.
 */
pid_t
_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	/* vfork() child: go straight to the trap; see _forkx() */
	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/* cannot fork from within a fork handler */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		/* child: clear schedctl and any deferred signal */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
283 
284 #pragma weak forkall = _forkall
285 pid_t
286 _forkall(void)
287 {
288 	return (_forkallx(0));
289 }
290 
/*
 * For the implementation of cancellation at cancellation points.
 *
 * PROLOGUE opens a scope (closed by the matching EPILOGUE) and
 * computes 'nocancel', non-zero when any condition forbids acting
 * on cancellation here: vfork() child, nocancel mode, libc locks
 * held, critical region, or deferred signals.  When cancellation
 * is permitted, it enables async cancellation (exiting immediately
 * via _pthread_exit() if a cancel is already pending) and records
 * the stack pointer.  Otherwise a pending, enabled cancel sets the
 * EINTR flag and 'abort' so the caller can fail with EINTR.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}
/*
 * EPILOGUE undoes PROLOGUE: it clears the recorded stack pointer,
 * restores the saved async-cancellation state, and closes the
 * scope that PROLOGUE opened.
 */
#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}
321 
/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 *
 * If PROLOGUE set 'abort' (pending cancel while cancellation cannot
 * be acted on here), fail immediately with EINTR.  If 'nocancel' is
 * set, bypass the cancellation bookkeeping entirely via a tail call.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
340 
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 *
 * The caller's mask (minus the reserved signals) is staged in
 * ul_tmpmask and ul_sigsuspend is raised; EPILOGUE_MASK is
 * responsible for restoring the original mask if the kernel
 * did not.  Must be paired with EPILOGUE_MASK, which closes
 * the scope opened here.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}
383 
/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.  Also restores the async-cancellation
 * state saved by PROLOGUE_MASK and closes its scope.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
400 
401 /*
402  * Cancellation prologue and epilogue functions,
403  * for cancellation points too complex to include here.
404  */
/*
 * Out-of-line equivalent of the PROLOGUE macro for cancellation
 * points too complex to express with the macro.  The nocancel
 * decision is saved in ul_cancel_prologue for _cancel_epilogue().
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	/* non-zero when cancellation must not be acted on here */
	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}
426 
427 void
428 _cancel_epilogue(void)
429 {
430 	ulwp_t *self = curthread;
431 
432 	if (self->ul_cancel_prologue == 0) {
433 		self->ul_sp = 0;
434 		self->ul_cancel_async = self->ul_save_async;
435 	}
436 }
437 
/*
 * Called from _thrp_join() (thr_join() is a cancellation point).
 * Waits for the lwp 'tid', retrying on EINTR unless the EINTR was
 * produced by cancellation (cancel_active()).  Returns an error
 * number (0 on success), not -1/errno.
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}
454 
#pragma weak read = _read
/*
 * Cancellation-point wrapper for read(2).
 */
ssize_t
_read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}
464 
#pragma weak write = _write
/*
 * Cancellation-point wrapper for write(2).
 */
ssize_t
_write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}
474 
#pragma weak getmsg = _getmsg
/*
 * Cancellation-point wrapper for getmsg(2).
 */
int
_getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}
485 
#pragma weak getpmsg = _getpmsg
/*
 * Cancellation-point wrapper for getpmsg(2).
 */
int
_getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}
497 
#pragma weak putmsg = _putmsg
/*
 * Cancellation-point wrapper for putmsg(2).
 */
int
_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}
509 
/*
 * XPG4 variant of putmsg(); identical to _putmsg() except that the
 * MSG_XPG4 flag is ORed in to request XPG4 error semantics.
 */
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}
520 
#pragma weak putpmsg = _putpmsg
/*
 * Cancellation-point wrapper for putpmsg(2).
 */
int
_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}
532 
/*
 * XPG4 variant of putpmsg(); identical to _putpmsg() except that the
 * MSG_XPG4 flag is ORed in to request XPG4 error semantics.
 */
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}
543 
#pragma weak nanosleep = _nanosleep
/*
 * Cancellation-point wrapper for nanosleep(2).  __nanosleep()
 * returns an error number; convert it to the POSIX -1/errno
 * convention.  A pending cancel that cannot be acted on here
 * (abort) is reported as EINTR.
 */
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
559 
#pragma weak clock_nanosleep = _clock_nanosleep
/*
 * clock_nanosleep(3RT): sleep on the specified clock, either for a
 * relative interval or until an absolute time (TIMER_ABSTIME).
 * Returns an error number directly (0 on success), per POSIX.
 * Only CLOCK_REALTIME and CLOCK_HIGHRES are supported; the CPU-time
 * clocks fail with ENOTSUP and anything else with EINVAL.
 * The restart loops below guard against early wakeups caused by
 * someone resetting the system clock.
 */
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		/* absolute sleeps report no remaining time */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		/* 'start' is only needed (and only read) on this path */
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
636 
637 #pragma weak sleep = _sleep
638 unsigned int
639 _sleep(unsigned int sec)
640 {
641 	unsigned int rem = 0;
642 	timespec_t ts;
643 	timespec_t tsr;
644 
645 	ts.tv_sec = (time_t)sec;
646 	ts.tv_nsec = 0;
647 	if (_nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
648 		rem = (unsigned int)tsr.tv_sec;
649 		if (tsr.tv_nsec >= NANOSEC / 2)
650 			rem++;
651 	}
652 	return (rem);
653 }
654 
655 #pragma weak usleep = _usleep
656 int
657 _usleep(useconds_t usec)
658 {
659 	timespec_t ts;
660 
661 	ts.tv_sec = usec / MICROSEC;
662 	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
663 	(void) _nanosleep(&ts, NULL);
664 	return (0);
665 }
666 
#pragma weak close = _close
/*
 * Cancellation-point wrapper for close(2).  Notifies the aio
 * subsystem of the close first so outstanding aio on the fd can
 * be dealt with (see the comment below for why this is skipped
 * in critical regions).
 */
int
_close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}
686 
#pragma weak creat = _creat
/*
 * Cancellation-point wrapper for creat(2).
 */
int
_creat(const char *path, mode_t mode)
{
	extern int __creat(const char *, mode_t);
	int rv;

	PERFORM(__creat(path, mode))
}
696 
#if !defined(_LP64)
#pragma weak creat64 = _creat64
/*
 * Cancellation-point wrapper for creat64(2) (large-file API,
 * 32-bit processes only).
 */
int
_creat64(const char *path, mode_t mode)
{
	extern int __creat64(const char *, mode_t);
	int rv;

	PERFORM(__creat64(path, mode))
}
#endif	/* !_LP64 */
708 
#pragma weak door_call = _door_call
/*
 * Cancellation-point wrapper for door_call(3C).
 */
int
_door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}
718 
#pragma weak fcntl = _fcntl
/*
 * Wrapper for fcntl(2).  Only the blocking F_SETLKW command is a
 * cancellation point; every other command goes straight to the
 * system call.
 */
int
_fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}
735 
#pragma weak fdatasync = _fdatasync
/*
 * Cancellation-point wrapper for fdatasync(3C) (data-only sync,
 * via the __fdsync trap with FDSYNC).
 */
int
_fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FDSYNC))
}
745 
#pragma weak fsync = _fsync
/*
 * Cancellation-point wrapper for fsync(3C) (via the __fdsync trap
 * with FSYNC).
 */
int
_fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FSYNC))
}
755 
#pragma weak lockf = _lockf
/*
 * Cancellation-point wrapper for lockf(3C).
 */
int
_lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}
765 
#if !defined(_LP64)
#pragma weak lockf64 = _lockf64
/*
 * Cancellation-point wrapper for lockf64(3C) (large-file API,
 * 32-bit processes only).
 */
int
_lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */
777 
#pragma weak msgrcv = _msgrcv
/*
 * Cancellation-point wrapper for msgrcv(2).
 */
ssize_t
_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}
787 
#pragma weak msgsnd = _msgsnd
/*
 * Cancellation-point wrapper for msgsnd(2).
 */
int
_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}
797 
#pragma weak msync = _msync
/*
 * Cancellation-point wrapper for msync(3C).
 */
int
_msync(caddr_t addr, size_t len, int flags)
{
	extern int __msync(caddr_t, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}
807 
#pragma weak open = _open
/*
 * Cancellation-point wrapper for open(2).  The optional third
 * argument (mode, used with O_CREAT) is always fetched; it is
 * simply ignored by the kernel when not applicable.
 */
int
_open(const char *path, int oflag, ...)
{
	extern int __open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}
822 
#pragma weak openat = _openat
/*
 * Cancellation-point wrapper for openat(2); see _open() above for
 * the handling of the optional mode argument.
 */
int
_openat(int fd, const char *path, int oflag, ...)
{
	extern int __openat(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}
837 
#if !defined(_LP64)
#pragma weak open64 = _open64
/*
 * Cancellation-point wrapper for open64(2) (large-file API,
 * 32-bit processes only); see _open() above.
 */
int
_open64(const char *path, int oflag, ...)
{
	extern int __open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}
853 
#pragma weak openat64 = _openat64
/*
 * Cancellation-point wrapper for openat64(2) (large-file API,
 * 32-bit processes only); see _open() above.
 */
int
_openat64(int fd, const char *path, int oflag, ...)
{
	extern int __openat64(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}
#endif	/* !_LP64 */
869 
#pragma weak pause = _pause
/*
 * Cancellation-point wrapper for pause(2).
 */
int
_pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}
879 
#pragma weak pread = _pread
/*
 * Cancellation-point wrapper for pread(2).
 */
ssize_t
_pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}
889 
#if !defined(_LP64)
#pragma weak pread64 = _pread64
/*
 * Cancellation-point wrapper for pread64(2) (large-file API,
 * 32-bit processes only).
 */
ssize_t
_pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
901 
#pragma weak pwrite = _pwrite
/*
 * Cancellation-point wrapper for pwrite(2).
 */
ssize_t
_pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}
911 
#if !defined(_LP64)
#pragma weak pwrite64 = _pwrite64
/*
 * Cancellation-point wrapper for pwrite64(2) (large-file API,
 * 32-bit processes only).
 */
ssize_t
_pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
923 
#pragma weak readv = _readv
/*
 * Cancellation-point wrapper for readv(2).
 */
ssize_t
_readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}
933 
#pragma weak sigpause = _sigpause
/*
 * Cancellation-point wrapper for sigpause(3C).
 */
int
_sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}
943 
#pragma weak sigsuspend = _sigsuspend
/*
 * Cancellation-point wrapper for sigsuspend(2).  Uses the mask
 * prologue/epilogue pair so the caller's temporary signal mask is
 * installed atomically with respect to signal delivery (see the
 * PROLOGUE_MASK comment above).
 */
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}
956 
/*
 * Cancellation-point wrapper for the pollsys() system call (the
 * backend of poll(2)/ppoll-style interfaces).  Like sigsuspend(),
 * it may carry a temporary signal mask to the kernel, hence the
 * PROLOGUE_MASK/EPILOGUE_MASK pairing.
 */
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}
970 
#pragma weak sigtimedwait = _sigtimedwait
/*
 * Cancellation-point wrapper for sigtimedwait(2).  Returns the
 * signal number taken, or -1 with errno set.  A SIGCANCEL sent by
 * the kernel or via lwp-directed signalling is intercepted here:
 * do_sigcancel() processes the cancellation and the call fails
 * with EINTR instead of returning SIGCANCEL to the application.
 * Fails with ENOTSUP on an alternate link map.
 */
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (-1);
	}

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	/* copy out the siginfo only on success and only if requested */
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}
1003 
1004 #pragma weak sigwait = _sigwait
1005 int
1006 _sigwait(sigset_t *set)
1007 {
1008 	return (_sigtimedwait(set, NULL, NULL));
1009 }
1010 
1011 #pragma weak sigwaitinfo = _sigwaitinfo
1012 int
1013 _sigwaitinfo(const sigset_t *set, siginfo_t *info)
1014 {
1015 	return (_sigtimedwait(set, info, NULL));
1016 }
1017 
#pragma weak sigqueue = _sigqueue
/*
 * sigqueue(3RT): queue 'signo' with the given value to process
 * 'pid', using code SI_QUEUE and no blocking.  Not a cancellation
 * point.
 */
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}
1026 
/*
 * Cancellation-point wrapper for the accept(3SOCKET) backend trap.
 */
int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version))
}
1035 
/*
 * Cancellation-point wrapper for the connect(3SOCKET) backend trap.
 */
int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}
1044 
/*
 * Cancellation-point wrapper for the recv(3SOCKET) backend trap.
 */
int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}
1053 
/*
 * Cancellation-point wrapper for the recvfrom(3SOCKET) backend trap.
 */
int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}
1064 
/*
 * Cancellation-point wrapper for the recvmsg(3SOCKET) backend trap.
 */
int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}
1073 
/*
 * Cancellation-point wrapper for the send(3SOCKET) backend trap.
 */
int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}
1082 
/*
 * Cancellation-point wrapper for the sendmsg(3SOCKET) backend trap.
 */
int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}
1091 
/*
 * Cancellation-point wrapper for the sendto(3SOCKET) backend trap.
 */
int
_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}
1102 
#pragma weak tcdrain = _tcdrain
/*
 * Cancellation-point wrapper for tcdrain(3C).
 */
int
_tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}
1112 
#pragma weak waitid = _waitid
/*
 * Wrapper for waitid(2).  A WNOHANG call cannot block, so it
 * bypasses the cancellation machinery; otherwise this is a
 * cancellation point.
 */
int
_waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}
1124 
#pragma weak writev = _writev
/*
 * Cancellation-point wrapper for writev(2).
 */
ssize_t
_writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}
1134