1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include "lint.h" 28 #include "thr_uberdata.h" 29 #include <stdarg.h> 30 #include <poll.h> 31 #include <stropts.h> 32 #include <dlfcn.h> 33 #include <wait.h> 34 #include <sys/socket.h> 35 #include <sys/uio.h> 36 #include <sys/file.h> 37 #include <sys/door.h> 38 39 /* 40 * These leading-underbar symbols exist because mistakes were made 41 * in the past that put them into non-SUNWprivate versions of 42 * the libc mapfiles. They should be eliminated, but oh well... 
 */
#pragma weak _fork = fork
#pragma weak _read = read
#pragma weak _write = write
#pragma weak _getmsg = getmsg
#pragma weak _getpmsg = getpmsg
#pragma weak _putmsg = putmsg
#pragma weak _putpmsg = putpmsg
#pragma weak _sleep = sleep
#pragma weak _close = close
#pragma weak _creat = creat
#pragma weak _fcntl = fcntl
#pragma weak _fsync = fsync
#pragma weak _lockf = lockf
#pragma weak _msgrcv = msgrcv
#pragma weak _msgsnd = msgsnd
#pragma weak _msync = msync
#pragma weak _open = open
#pragma weak _openat = openat
#pragma weak _pause = pause
#pragma weak _readv = readv
#pragma weak _sigpause = sigpause
#pragma weak _sigsuspend = sigsuspend
#pragma weak _tcdrain = tcdrain
#pragma weak _waitid = waitid
#pragma weak _writev = writev

#if !defined(_LP64)
#pragma weak _creat64 = creat64
#pragma weak _lockf64 = lockf64
#pragma weak _open64 = open64
#pragma weak _openat64 = openat64
#pragma weak _pread64 = pread64
#pragma weak _pwrite64 = pwrite64
#endif

/*
 * These are SUNWprivate, but they are being used by Sun Studio libcollector.
 */
#pragma weak _fork1 = fork1
#pragma weak _forkall = forkall

/*
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation due to the pthread_atfork() prefork function
 * in the interposition library acquiring its malloc lock(s) before the
 * ordinary mutex in libc being acquired by libc's prefork functions.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
 */

/*
 * Acquire fork_lock: serializes fork()/forkall() against each other and
 * against thr_suspend()/thr_continue() (see the block comment above).
 * Must not be called from within a libc critical region.
 */
void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
}

/* Release fork_lock acquired by fork_lock_enter(). */
void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

/*
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
 */
void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}

/*
 * forkx(): the common implementation underlying fork() and fork1().
 * Runs pthread_atfork() handlers (unless called from a signal context),
 * suspends all other threads, blocks all signals, performs the fork
 * system call trap, then repairs libc state in the child and resumes
 * the suspended threads in the parent.  Returns the child's pid in the
 * parent, 0 in the child, or -1 with errno set on failure.
 */
pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);

	/*
	 * Posix (SUSv3) requires fork() to be async-signal-safe.
	 * This cannot be made to happen with fork handlers in place
	 * (they grab locks).  To be in nominal compliance, don't run
	 * any fork handlers if we are called within a signal context.
	 * This leaves the child process in a questionable state with
	 * respect to its locks, but at least the parent process does
	 * not become deadlocked due to the calling thread attempting
	 * to acquire a lock that it already owns.
	 */
	if (self->ul_siglink == NULL)
		_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_parent_handler();
	}

	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = fork
pid_t
fork(void)
{
	return (forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/* vfork() child: go straight to the trap, as in forkx() */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/* cannot fork from within a fork handler */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	/*
	 * Note that, unlike forkx(), no pthread_atfork() handlers are
	 * invoked here; all threads are replicated in the child, so the
	 * single-thread repair done by the handlers does not apply.
	 */
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		/* child: clear schedctl and deferred-signal state */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

pid_t
forkall(void)
{
	return (forkallx(0));
}

/*
 * For the implementation of cancellation at cancellation points.
 */
/*
 * PROLOGUE opens a scope and defines the locals `self', `nocancel' and
 * `abort' that EPILOGUE and PERFORM (below) rely on.  If cancellation
 * is currently possible for this thread, it enables asynchronous
 * cancellation for the duration of the blocking call (honoring any
 * already-pending cancel by exiting here).  If cancellation machinery
 * must be bypassed (vfork child, critical region, etc.) but a cancel
 * is pending, `abort' is set so the wrapper can fail with EINTR.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				pthread_exit(PTHREAD_CANCELED);		\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}

/* EPILOGUE closes the scope opened by PROLOGUE, restoring async state. */
#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					pthread_exit(PTHREAD_CANCELED);	\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
/*
 * Out-of-line equivalent of PROLOGUE, saving its "nocancel" decision
 * in self->ul_cancel_prologue for the matching _cancel_epilogue().
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}

/* Out-of-line equivalent of EPILOGUE, paired with _cancel_prologue(). */
void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	/* retry on EINTR unless the interruption was a cancellation */
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}

/* Cancellation-point wrapper for read(2). */
ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

/* Cancellation-point wrapper for write(2). */
ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}

/* Cancellation-point wrapper for getmsg(2). */
int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}

/* Cancellation-point wrapper for getpmsg(2). */
int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr,
	    dataptr, bandp, flagsp))
}

/* Cancellation-point wrapper for putmsg(2). */
int
putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}

/* XPG4 variant of putmsg(): identical except for the MSG_XPG4 flag. */
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

/* Cancellation-point wrapper for putpmsg(2). */
int
putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}

/* XPG4 variant of putpmsg(): identical except for the MSG_XPG4 flag. */
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

/*
 * Cancellation-point wrapper for nanosleep(2).
 * Returns 0, or -1 with errno set (EINTR if aborted by a pending cancel).
 */
int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort?
 EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

/*
 * clock_nanosleep(): sleep on the specified clock, either for a
 * relative interval or until an absolute time (TIMER_ABSTIME).
 * Unlike most functions here, returns the error number directly
 * rather than setting errno.
 */
int
clock_nanosleep(clockid_t clock_id, int flags,
    const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		/* absolute sleeps report no remaining time */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 *
		 * NOTE(review): the (uint32_t) casts below truncate
		 * tv_sec to 32 bits before the multiply — presumably
		 * deliberate to avoid overflow concerns, but worth
		 * confirming for very large requested intervals.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}

/*
 * sleep(): implemented on top of nanosleep().
 * Returns the number of unslept seconds (rounded to the nearest
 * second) if interrupted by a signal, else 0.
 */
unsigned int
sleep(unsigned int sec)
{
	unsigned int rem = 0;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

/*
 * usleep(): implemented on top of nanosleep().
 * Always returns 0; interruption by a signal is not reported.
 */
int
usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	(void) nanosleep(&ts, NULL);
	return (0);
}

/* Cancellation-point wrapper for close(2); also quiesces async I/O. */
int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}

/* Cancellation-point wrapper for creat(2). */
int
creat(const char *path, mode_t mode)
{
	extern int __creat(const char *, mode_t);
	int rv;

	PERFORM(__creat(path, mode))
}

#if !defined(_LP64)
/* Large-file (64-bit offset) variant of creat() for 32-bit processes. */
int
creat64(const char *path, mode_t mode)
{
	extern int __creat64(const char *, mode_t);
	int rv;

	PERFORM(__creat64(path, mode))
}
#endif	/* !_LP64 */

/* Cancellation-point wrapper for door_call(3C). */
int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}

/*
 * fcntl(): only the blocking F_SETLKW command is a cancellation
 * point; every other command goes straight to the system call.
 */
int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}

/* Cancellation-point wrapper for fdatasync(3C) (data-only sync). */
int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FDSYNC))
}

/* Cancellation-point wrapper for fsync(3C) (data + metadata sync). */
int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FSYNC))
}

/* Cancellation-point wrapper for lockf(3C). */
int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}

#if !defined(_LP64)
/* Large-file (64-bit offset) variant of lockf() for 32-bit processes. */
int
lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

/* Cancellation-point wrapper for msgrcv(2). */
ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

/* Cancellation-point wrapper for msgsnd(2). */
int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int,
	    const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}

/* Cancellation-point wrapper for msync(3C). */
int
msync(caddr_t addr, size_t len, int flags)
{
	extern int __msync(caddr_t, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}

/*
 * Cancellation-point wrapper for open(2).  The optional mode argument
 * is always fetched; it is simply ignored by the kernel when the oflag
 * does not call for it.
 */
int
open(const char *path, int oflag, ...)
{
	extern int __open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}

/* Cancellation-point wrapper for openat(2). */
int
openat(int fd, const char *path, int oflag, ...)
{
	extern int __openat(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}

#if !defined(_LP64)
/* Large-file variant of open() for 32-bit processes. */
int
open64(const char *path, int oflag, ...)
{
	extern int __open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}

/* Large-file variant of openat() for 32-bit processes. */
int
openat64(int fd, const char *path, int oflag, ...)
{
	extern int __openat64(int, const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}
#endif	/* !_LP64 */

/* Cancellation-point wrapper for pause(2). */
int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}

/* Cancellation-point wrapper for pread(2). */
ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
/* Large-file variant of pread() for 32-bit processes. */
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

/* Cancellation-point wrapper for pwrite(2). */
ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
/* Large-file variant of pwrite() for 32-bit processes. */
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

/* Cancellation-point wrapper for readv(2). */
ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}

/* Cancellation-point wrapper for sigpause(3C). */
int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}

/*
 * sigsuspend(): uses the mask-passing prologue/epilogue because the
 * kernel installs the temporary signal mask (see PROLOGUE_MASK above).
 */
int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

/* Like sigsuspend(): pollsys also passes a signal mask to the kernel. */
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
    const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

/*
 * Cancellation-point wrapper for sigtimedwait(2).
 * A received SIGCANCEL that originated in the kernel or from an lwp
 * is treated as a cancellation request rather than delivered to the
 * caller; in that case do_sigcancel() runs and EINTR is returned.
 */
int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	/* copy out the siginfo only on success */
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

/* sigwait(): sigtimedwait() with no siginfo and no timeout. */
int
sigwait(sigset_t *set)
{
	return (sigtimedwait(set, NULL, NULL));
}

/* sigwaitinfo(): sigtimedwait() with no timeout. */
int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (sigtimedwait(set, info, NULL));
}

/* sigqueue(): thin wrapper over the private __sigqueue trap. */
int
sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

/* Cancellation-point wrapper for accept(3SOCKET). */
int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version))
}

/* Cancellation-point wrapper for connect(3SOCKET). */
int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}

/* Cancellation-point wrapper for recv(3SOCKET). */
int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern
	    int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}

/* Cancellation-point wrapper for recvfrom(3SOCKET). */
int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}

/* Cancellation-point wrapper for recvmsg(3SOCKET). */
int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}

/* Cancellation-point wrapper for send(3SOCKET). */
int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}

/* Cancellation-point wrapper for sendmsg(3SOCKET). */
int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}

/* Cancellation-point wrapper for sendto(3SOCKET). */
int
_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}

/* Cancellation-point wrapper for tcdrain(3C). */
int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}

/*
 * waitid(): a cancellation point only when it can block; with
 * WNOHANG the call cannot block, so it bypasses the cancellation
 * machinery and goes straight to the system call.
 */
int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}

/* Cancellation-point wrapper for writev(2). */
ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}