/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	if (udp->fork_count) {
		ASSERT(udp->fork_owner == self);
		/*
		 * This is a simple recursive lock except that we
		 * inform the caller if we have been called from
		 * a fork handler and let it deal with that fact.
		 */
		if (self->ul_fork) {
			/*
			 * We have been called from a fork handler.
			 */
			if (who != NULL &&
			    udp->uberflags.uf_thread_error_detection)
				fork_lock_error(who);
			error = EDEADLK;
		}
	}
	udp->fork_owner = self;
	udp->fork_count++;
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0)
		udp->fork_owner = NULL;
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}
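/*
 * Illustrative sketch (not part of libc): the recursive-lock check above
 * is what an application trips if one of its pthread_atfork() handlers
 * calls fork() again.  The handler name below is hypothetical; only the
 * EDEADLK behavior comes from fork_lock_enter() above.
 *
 *	static void
 *	prepare_handler(void)
 *	{
 *		// forkx() calls fork_lock_enter("fork") while ul_fork is
 *		// already set, so this nested fork() fails with EDEADLK
 *		// (and, with thread error detection enabled, reports
 *		// "deadlock condition: fork() called from a fork handler").
 *		if (fork() == -1 && errno == EDEADLK)
 *			(void) write(2, "fork from atfork handler\n", 25);
 *	}
 *
 *	// registered somewhere in the application:
 *	(void) pthread_atfork(prepare_handler, NULL, NULL);
 */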
/*
 * Note: Instead of making this function static, we reduce it to local
 * scope in the mapfile.  That allows the linker to prevent it from
 * appearing in the .SUNW_dynsymsort section.
 */
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigon() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
	return (_private_forkx(0));
}
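/*
 * Illustrative sketch (not part of libc): the usual way an application
 * cooperates with the forkx() sequence above is to register pthread_atfork()
 * handlers that take the application's own locks in the prepare handler and
 * release them in both post-fork handlers, so the child's copy of each lock
 * is left in a known state.  The lock and handler names are hypothetical.
 *
 *	static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prepare(void) { (void) pthread_mutex_lock(&app_lock); }
 *	static void parent(void) { (void) pthread_mutex_unlock(&app_lock); }
 *	static void child(void) { (void) pthread_mutex_unlock(&app_lock); }
 *
 *	// registered once, e.g. from main() or a library init function:
 *	(void) pthread_atfork(prepare, parent, child);
 *
 * These handlers run from _prefork_handler() and the two
 * _postfork_*_handler() calls with no internal libc locks held,
 * but with signals deferred, as described in forkx() above.
 */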
/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE	\
{	\
	ulwp_t *self = curthread;	\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (nocancel == 0) {	\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {	\
			self->ul_cancel_async = 1;	\
			if (self->ul_cancel_pending)	\
				_pthread_exit(PTHREAD_CANCELED);	\
		}	\
		self->ul_sp = stkptr();	\
	}

#define	EPILOGUE	\
	if (nocancel == 0) {	\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}	\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)	\
	PROLOGUE	\
	if (nocancel)	\
		return (function_call);	\
	rv = function_call;	\
	EPILOGUE	\
	return (rv);
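/*
 * Illustrative expansion (not part of libc): a typical wrapper such as
 *
 *	ssize_t
 *	read(int fd, void *buf, size_t size)
 *	{
 *		extern ssize_t _read(int, void *, size_t);
 *		ssize_t rv;
 *
 *		PERFORM(_read(fd, buf, size))
 *	}
 *
 * expands to roughly:
 *
 *	{
 *		ulwp_t *self = curthread;
 *		int nocancel = (self->ul_vfork | self->ul_nocancel);
 *		if (nocancel == 0) {
 *			... enable async cancellation, honor a pending
 *			... cancellation, record the stack pointer
 *		}
 *		if (nocancel)
 *			return (_read(fd, buf, size));	(tail call)
 *		rv = _read(fd, buf, size);
 *		... restore the previous cancellation state
 *		return (rv);
 *	}
 */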
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)	\
{	\
	ulwp_t *self = curthread;	\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (!self->ul_vfork) {	\
		if (sigmask) {	\
			block_all_signals(self);	\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;	\
		}	\
		if (nocancel == 0) {	\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {	\
				self->ul_cancel_async = 1;	\
				if (self->ul_cancel_pending) {	\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}	\
					_pthread_exit(PTHREAD_CANCELED);\
				}	\
			}	\
			self->ul_sp = stkptr();	\
		}	\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK	\
	if (nocancel == 0) {	\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}	\
	if (self->ul_sigsuspend) {	\
		self->ul_sigsuspend = 0;	\
		restore_signals(self);	\
	}	\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
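/*
 * Illustrative sketch (not part of libc): clock_nanosleep() below, unlike
 * nanosleep(), returns an error number directly rather than setting errno,
 * and with TIMER_ABSTIME it re-issues the sleep until the absolute deadline
 * has really passed, even if the system clock is reset in the meantime.
 * The variable names here are hypothetical.
 *
 *	timespec_t deadline;
 *	int err;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	err = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *	    &deadline, NULL);
 *	if (err != 0)
 *		... err is the error number, e.g. EINVAL or ENOTSUP
 */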
#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
    const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
    const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}
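/*
 * Illustrative sketch (not part of libc): because the wrappers in this file
 * run under PERFORM()/PROLOGUE, a thread blocked in one of them acts as a
 * cancellation point.  The names below are hypothetical.
 *
 *	void *
 *	reader(void *arg)
 *	{
 *		char buf[512];
 *		// If pthread_cancel() is posted while the thread is blocked
 *		// in read(), the thread exits with PTHREAD_CANCELED instead
 *		// of returning from the call.
 *		(void) read(*(int *)arg, buf, sizeof (buf));
 *		return (NULL);
 *	}
 *
 *	(void) pthread_create(&tid, NULL, reader, &fd);
 *	(void) pthread_cancel(tid);
 *	(void) pthread_join(tid, &result);
 */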