/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself. Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty. Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

static void
fork_lock_error(const char *who)
{
        char msg[200];

        (void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
        (void) strlcat(msg, who, sizeof (msg));
        (void) strlcat(msg, "() called from a fork handler", sizeof (msg));
        thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        int error = 0;

        ASSERT(self->ul_critical == 0);
        sigoff(self);
        (void) _private_mutex_lock(&udp->fork_lock);
        if (udp->fork_count) {
                ASSERT(udp->fork_owner == self);
                /*
                 * This is a simple recursive lock except that we
                 * inform the caller if we have been called from
                 * a fork handler and let it deal with that fact.
                 */
                if (self->ul_fork) {
                        /*
                         * We have been called from a fork handler.
                         */
                        if (who != NULL &&
                            udp->uberflags.uf_thread_error_detection)
                                fork_lock_error(who);
                        error = EDEADLK;
                }
        }
        udp->fork_owner = self;
        udp->fork_count++;
        return (error);
}

void
fork_lock_exit(void)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;

        ASSERT(self->ul_critical == 0);
        ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
        if (--udp->fork_count == 0)
                udp->fork_owner = NULL;
        (void) _private_mutex_unlock(&udp->fork_lock);
        sigon(self);
}

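/*
 * Note that fork_lock_enter() acquires fork_lock and increments
 * fork_count even when it returns EDEADLK, so a caller must call
 * fork_lock_exit() on every path, including the error path.
 * A sketch of the expected calling pattern (see forkx() below):
 *
 *	if ((error = fork_lock_enter("fork")) != 0) {
 *		fork_lock_exit();
 *		errno = error;
 *		return (-1);
 *	}
 *	... do the serialized work ...
 *	fork_lock_exit();
 */
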
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
static pid_t
_private_forkx(int flags)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        pid_t pid;
        int error;

        if (self->ul_vfork) {
                /*
                 * We are a child of vfork(); omit all of the fork
                 * logic and go straight to the system call trap.
                 * A vfork() child of a multithreaded parent
                 * must never call fork().
                 */
                if (udp->uberflags.uf_mt) {
                        errno = ENOTSUP;
                        return (-1);
                }
                pid = __forkx(flags);
                if (pid == 0) {		/* child */
                        udp->pid = _private_getpid();
                        self->ul_vfork = 0;
                }
                return (pid);
        }

        if ((error = fork_lock_enter("fork")) != 0) {
                /*
                 * Cannot call fork() from a fork handler.
                 */
                fork_lock_exit();
                errno = error;
                return (-1);
        }
        self->ul_fork = 1;

        /*
         * The functions registered by pthread_atfork() are defined by
         * the application and its libraries and we must not hold any
         * internal libc locks while invoking them. The fork_lock_enter()
         * function serializes fork(), thr_suspend(), pthread_atfork() and
         * dlclose() (which destroys whatever pthread_atfork() functions
         * the library may have set up). If one of these pthread_atfork()
         * functions attempts to fork or suspend another thread or call
         * pthread_atfork() or dlclose a library, it will detect a deadlock
         * in fork_lock_enter(). Otherwise, the pthread_atfork() functions
         * are free to do anything they please (except they will not
         * receive any signals).
         */
        _prefork_handler();

        /*
         * Block all signals.
         * Just deferring them via sigon() is not enough.
         * We have to avoid taking a deferred signal in the child
         * that was actually sent to the parent before __forkx().
         */
        block_all_signals(self);

        /*
         * This suspends all threads but this one, leaving them
         * suspended outside of any critical regions in the library.
         * Thus, we are assured that no library locks are held
         * while we invoke fork() from the current thread.
         */
        suspend_fork();

        pid = __forkx(flags);

        if (pid == 0) {		/* child */
                /*
                 * Clear our schedctl pointer.
                 * Discard any deferred signal that was sent to the parent.
                 * Because we blocked all signals before __forkx(), a
                 * deferred signal cannot have been taken by the child.
                 */
                self->ul_schedctl_called = NULL;
                self->ul_schedctl = NULL;
                self->ul_cursig = 0;
                self->ul_siginfo.si_signo = 0;
                udp->pid = _private_getpid();
                /* reset the library's data structures to reflect one thread */
                postfork1_child();
                restore_signals(self);
                _postfork_child_handler();
        } else {
                /* restart all threads that were suspended for fork() */
                continue_fork(0);
                restore_signals(self);
                _postfork_parent_handler();
        }

        self->ul_fork = 0;
        fork_lock_exit();

        return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
        return (_private_forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
static pid_t
_private_forkallx(int flags)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        pid_t pid;
        int error;

        if (self->ul_vfork) {
                if (udp->uberflags.uf_mt) {
                        errno = ENOTSUP;
                        return (-1);
                }
                pid = __forkallx(flags);
                if (pid == 0) {		/* child */
                        udp->pid = _private_getpid();
                        self->ul_vfork = 0;
                }
                return (pid);
        }

        if ((error = fork_lock_enter("forkall")) != 0) {
                fork_lock_exit();
                errno = error;
                return (-1);
        }
        self->ul_fork = 1;
        block_all_signals(self);
        suspend_fork();

        pid = __forkallx(flags);

        if (pid == 0) {
                self->ul_schedctl_called = NULL;
                self->ul_schedctl = NULL;
                self->ul_cursig = 0;
                self->ul_siginfo.si_signo = 0;
                udp->pid = _private_getpid();
                continue_fork(1);
        } else {
                continue_fork(0);
        }
        restore_signals(self);
        self->ul_fork = 0;
        fork_lock_exit();

        return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
        return (_private_forkallx(0));
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define PROLOGUE \
{ \
        ulwp_t *self = curthread; \
        int nocancel = (self->ul_vfork | self->ul_nocancel); \
        if (nocancel == 0) { \
                self->ul_save_async = self->ul_cancel_async; \
                if (!self->ul_cancel_disabled) { \
                        self->ul_cancel_async = 1; \
                        if (self->ul_cancel_pending) \
                                _pthread_exit(PTHREAD_CANCELED); \
                } \
                self->ul_sp = stkptr(); \
        }

#define EPILOGUE \
        if (nocancel == 0) { \
                self->ul_sp = 0; \
                self->ul_cancel_async = self->ul_save_async; \
        } \
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls. The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define PERFORM(function_call) \
        PROLOGUE \
        if (nocancel) \
                return (function_call); \
        rv = function_call; \
        EPILOGUE \
        return (rv);

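/*
 * For illustration, a wrapper such as read() below, written as
 *
 *	ssize_t rv;
 *	PERFORM(_read(fd, buf, size))
 *
 * expands roughly to:
 *
 *	{
 *		ulwp_t *self = curthread;
 *		int nocancel = (self->ul_vfork | self->ul_nocancel);
 *		if (nocancel == 0) {
 *			(enable async cancellation, honor a pending
 *			cancellation, record the stack pointer)
 *		}
 *		if (nocancel)
 *			return (_read(fd, buf, size));
 *		rv = _read(fd, buf, size);
 *		if (nocancel == 0) {
 *			(clear ul_sp, restore ul_cancel_async)
 *		}
 *	}
 *	return (rv);
 *
 * PROLOGUE opens the block that EPILOGUE closes, which is why each
 * wrapper declares rv outside of the macros.
 */
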
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep. If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask. This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define PROLOGUE_MASK(sigmask) \
{ \
        ulwp_t *self = curthread; \
        int nocancel = (self->ul_vfork | self->ul_nocancel); \
        if (!self->ul_vfork) { \
                if (sigmask) { \
                        block_all_signals(self); \
                        self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
                        self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
                        delete_reserved_signals(&self->ul_tmpmask); \
                        self->ul_sigsuspend = 1; \
                } \
                if (nocancel == 0) { \
                        self->ul_save_async = self->ul_cancel_async; \
                        if (!self->ul_cancel_disabled) { \
                                self->ul_cancel_async = 1; \
                                if (self->ul_cancel_pending) { \
                                        if (self->ul_sigsuspend) { \
                                                self->ul_sigsuspend = 0; \
                                                restore_signals(self); \
                                        } \
                                        _pthread_exit(PTHREAD_CANCELED); \
                                } \
                        } \
                        self->ul_sp = stkptr(); \
                } \
        }

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define EPILOGUE_MASK \
        if (nocancel == 0) { \
                self->ul_sp = 0; \
                self->ul_cancel_async = self->ul_save_async; \
        } \
        if (self->ul_sigsuspend) { \
                self->ul_sigsuspend = 0; \
                restore_signals(self); \
        } \
}

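/*
 * PROLOGUE_MASK and EPILOGUE_MASK bracket the two wrappers that pass
 * a signal mask to the kernel, sigsuspend() and pollsys() below, in
 * the same way that PROLOGUE and EPILOGUE bracket the other wrappers;
 * a sketch of the pattern:
 *
 *	int rv;
 *
 *	PROLOGUE_MASK(set)
 *	rv = __sigsuspend(set);
 *	EPILOGUE_MASK
 *	return (rv);
 *
 * As with PROLOGUE/EPILOGUE, the first macro opens a block that the
 * second one closes, and the caller supplies the rv variable.
 */
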
/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
        ulwp_t *self = curthread;

        self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
        if (self->ul_cancel_prologue == 0) {
                self->ul_save_async = self->ul_cancel_async;
                if (!self->ul_cancel_disabled) {
                        self->ul_cancel_async = 1;
                        if (self->ul_cancel_pending)
                                _pthread_exit(PTHREAD_CANCELED);
                }
                self->ul_sp = stkptr();
        }
}

void
_cancel_epilogue(void)
{
        ulwp_t *self = curthread;

        if (self->ul_cancel_prologue == 0) {
                self->ul_sp = 0;
                self->ul_cancel_async = self->ul_save_async;
        }
}

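/*
 * A complex cancellation point elsewhere in libc is expected to
 * bracket its blocking operation with these functions; an
 * illustrative sketch (not a specific caller):
 *
 *	_cancel_prologue();
 *	rv = some_blocking_operation(arg);
 *	_cancel_epilogue();
 *
 * The function form does the same work as PROLOGUE/EPILOGUE but
 * records the prologue decision in ul_cancel_prologue rather than
 * in a local variable.
 */
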
/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
        int error;

        PROLOGUE
        while ((error = __lwp_wait(tid, found)) == EINTR)
                ;
        EPILOGUE
        return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
        extern ssize_t _read(int, void *, size_t);
        ssize_t rv;

        PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
        extern ssize_t _write(int, const void *, size_t);
        ssize_t rv;

        PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *flagsp)
{
        extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
        int rv;

        PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *bandp, int *flagsp)
{
        extern int _getpmsg(int, struct strbuf *, struct strbuf *,
            int *, int *);
        int rv;

        PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
        extern int _putmsg(int, const struct strbuf *,
            const struct strbuf *, int);
        int rv;

        PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
        extern int _putmsg(int, const struct strbuf *,
            const struct strbuf *, int);
        int rv;

        PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
        extern int _putpmsg(int, const struct strbuf *,
            const struct strbuf *, int, int);
        int rv;

        PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
        extern int _putpmsg(int, const struct strbuf *,
            const struct strbuf *, int, int);
        int rv;

        PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
        int error;

        PROLOGUE
        error = __nanosleep(rqtp, rmtp);
        EPILOGUE
        if (error) {
                errno = error;
                return (-1);
        }
        return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
    const timespec_t *rqtp, timespec_t *rmtp)
{
        timespec_t reltime;
        hrtime_t start;
        hrtime_t rqlapse;
        hrtime_t lapse;
        int error;

        switch (clock_id) {
        case CLOCK_VIRTUAL:
        case CLOCK_PROCESS_CPUTIME_ID:
        case CLOCK_THREAD_CPUTIME_ID:
                return (ENOTSUP);
        case CLOCK_REALTIME:
        case CLOCK_HIGHRES:
                break;
        default:
                return (EINVAL);
        }
        if (flags & TIMER_ABSTIME) {
                abstime_to_reltime(clock_id, rqtp, &reltime);
                rmtp = NULL;
        } else {
                reltime = *rqtp;
                if (clock_id == CLOCK_HIGHRES)
                        start = gethrtime();
        }
restart:
        PROLOGUE
        error = __nanosleep(&reltime, rmtp);
        EPILOGUE
        if (error == 0 && clock_id == CLOCK_HIGHRES) {
                /*
                 * Don't return yet if we didn't really get a timeout.
                 * This can happen if we return because someone resets
                 * the system clock.
                 */
                if (flags & TIMER_ABSTIME) {
                        if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
                            rqtp->tv_nsec > gethrtime()) {
                                abstime_to_reltime(clock_id, rqtp, &reltime);
                                goto restart;
                        }
                } else {
                        rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
                            rqtp->tv_nsec;
                        lapse = gethrtime() - start;
                        if (rqlapse > lapse) {
                                hrt2ts(rqlapse - lapse, &reltime);
                                goto restart;
                        }
                }
        }
        if (error == 0 && clock_id == CLOCK_REALTIME &&
            (flags & TIMER_ABSTIME)) {
                /*
                 * Don't return yet just because someone reset the
                 * system clock. Recompute the new relative time
                 * and reissue the nanosleep() call if necessary.
                 *
                 * Resetting the system clock causes all sorts of
                 * problems and the SUSV3 standards body should
                 * have made the behavior of clock_nanosleep() be
                 * implementation-defined in such a case rather than
                 * being specific about honoring the new system time.
                 * Standards bodies are filled with fools and idiots.
                 */
                abstime_to_reltime(clock_id, rqtp, &reltime);
                if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
                        goto restart;
        }
        return (error);
}

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
        unsigned int rem = 0;
        int error;
        timespec_t ts;
        timespec_t tsr;

        ts.tv_sec = (time_t)sec;
        ts.tv_nsec = 0;
        PROLOGUE
        error = __nanosleep(&ts, &tsr);
        EPILOGUE
        if (error == EINTR) {
                rem = (unsigned int)tsr.tv_sec;
                if (tsr.tv_nsec >= NANOSEC / 2)
                        rem++;
        }
        return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
        timespec_t ts;

        ts.tv_sec = usec / MICROSEC;
        ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
        PROLOGUE
        (void) __nanosleep(&ts, NULL);
        EPILOGUE
        return (0);
}

int
close(int fildes)
{
        extern void _aio_close(int);
        extern int _close(int);
        int rv;

        _aio_close(fildes);
        PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
        extern int _creat(const char *, mode_t);
        int rv;

        PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
        extern int _creat64(const char *, mode_t);
        int rv;

        PERFORM(_creat64(path, mode))
}
#endif /* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
        extern int _fcntl(int, int, ...);
        intptr_t arg;
        int rv;
        va_list ap;

        va_start(ap, cmd);
        arg = va_arg(ap, intptr_t);
        va_end(ap);
        if (cmd != F_SETLKW)
                return (_fcntl(fildes, cmd, arg));
        PERFORM(_fcntl(fildes, cmd, arg))
}

int
fsync(int fildes)
{
        extern int _fsync(int);
        int rv;

        PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
        extern int _lockf(int, int, off_t);
        int rv;

        PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
        extern int _lockf64(int, int, off64_t);
        int rv;

        PERFORM(_lockf64(fildes, function, size))
}
#endif /* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
        extern ssize_t _msgrcv(int, void *, size_t, long, int);
        ssize_t rv;

        PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
        extern int _msgsnd(int, const void *, size_t, int);
        int rv;

        PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
        extern int _msync(caddr_t, size_t, int);
        int rv;

        PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
        extern int _open(const char *, int, ...);
        mode_t mode;
        int rv;
        va_list ap;

        va_start(ap, oflag);
        mode = va_arg(ap, mode_t);
        va_end(ap);
        PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
        extern int _open64(const char *, int, ...);
        mode_t mode;
        int rv;
        va_list ap;

        va_start(ap, oflag);
        mode = va_arg(ap, mode_t);
        va_end(ap);
        PERFORM(_open64(path, oflag, mode))
}
#endif /* !_LP64 */

int
pause(void)
{
        extern int _pause(void);
        int rv;

        PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
        extern ssize_t _pread(int, void *, size_t, off_t);
        ssize_t rv;

        PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
        extern ssize_t _pread64(int, void *, size_t, off64_t);
        ssize_t rv;

        PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif /* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
        extern ssize_t _pwrite(int, const void *, size_t, off_t);
        ssize_t rv;

        PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
        extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
        ssize_t rv;

        PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif /* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
        extern ssize_t _readv(int, const struct iovec *, int);
        ssize_t rv;

        PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
        extern int _sigpause(int);
        int rv;

        PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
        extern int __sigsuspend(const sigset_t *);
        int rv;

        PROLOGUE_MASK(set)
        rv = __sigsuspend(set);
        EPILOGUE_MASK
        return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
    const sigset_t *sigmask)
{
        extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
            const sigset_t *);
        int rv;

        PROLOGUE_MASK(sigmask)
        rv = __pollsys(fds, nfd, timeout, sigmask);
        EPILOGUE_MASK
        return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
        extern int __sigtimedwait(const sigset_t *, siginfo_t *,
            const timespec_t *);
        siginfo_t info;
        int sig;

        PROLOGUE
        sig = __sigtimedwait(set, &info, timeout);
        if (sig == SIGCANCEL &&
            (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
                do_sigcancel();
                errno = EINTR;
                sig = -1;
        }
        EPILOGUE
        if (sig != -1 && infop)
                (void) _private_memcpy(infop, &info, sizeof (*infop));
        return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
        return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
        return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
        extern int __sigqueue(pid_t pid, int signo,
            /* const union sigval */ void *value, int si_code, int block);
        return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
tcdrain(int fildes)
{
        extern int _tcdrain(int);
        int rv;

        PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
        extern pid_t _wait(int *);
        pid_t rv;

        PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
        extern pid_t _wait3(int *, int, struct rusage *);
        pid_t rv;

        PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
        extern int _waitid(idtype_t, id_t, siginfo_t *, int);
        int rv;

        PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
        extern pid_t _waitpid(pid_t, int *, int);
        pid_t rv;

        PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
        extern ssize_t _writev(int, const struct iovec *, int);
        ssize_t rv;

        PERFORM(_writev(fildes, iov, iovcnt))
}