/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation due to the pthread_atfork() prefork function
 * in the interposition library acquiring its malloc lock(s) before the
 * ordinary mutex in libc being acquired by libc's prefork functions.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
 */
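/*
 * A minimal sketch (for illustration only; l10n_callout and arg are
 * hypothetical) of the callout_lock pattern described above, using
 * callout_lock_enter() and callout_lock_exit() defined below.  The
 * callout may call malloc() and free(); no other libc lock is held,
 * and postfork1_child() simply reinitializes callout_lock instead of
 * the prefork handlers acquiring it:
 *
 *	callout_lock_enter();
 *	(*l10n_callout)(arg);
 *	callout_lock_exit();
 */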
void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}

#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) _private_mutex_lock(&udp->atfork_lock);
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) _private_mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_parent_handler();
	}

	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
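/*
 * Application-level sketch (not part of libc) of the pthread_atfork()
 * protocol driven by _prefork_handler(), _postfork_parent_handler()
 * and _postfork_child_handler() above.  The prepare handler runs in
 * the parent before __forkx(); the parent and child handlers run
 * after it, in the parent and the child respectively:
 *
 *	static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prepare(void) { (void) pthread_mutex_lock(&app_lock); }
 *	static void parent(void) { (void) pthread_mutex_unlock(&app_lock); }
 *	static void child(void) { (void) pthread_mutex_unlock(&app_lock); }
 *
 *	(void) pthread_atfork(prepare, parent, child);
 *
 * A handler that itself calls fork() or pthread_atfork() fails with
 * EDEADLK, as described in the comment above.
 */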
/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
	return (_private_forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) _private_mutex_lock(&udp->atfork_lock);
	(void) _private_mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) _private_mutex_unlock(&udp->fork_lock);
	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}

/*
 * Hacks for system calls to provide cancellation
 * and improve Java garbage collection.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}
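/*
 * Application-level sketch (not part of libc) of what the two macros
 * above provide: a thread blocked in a cancelable system call such
 * as read() can be terminated by pthread_cancel().  Assuming tid is
 * blocked in read(), another thread may do
 *
 *	(void) pthread_cancel(tid);
 *	(void) pthread_join(tid, &status);
 *
 * after which status == PTHREAD_CANCELED; the canceled thread exits
 * via _pthread_exit(PTHREAD_CANCELED) from within the wrapper rather
 * than returning from read().
 */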
/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return (function_call) part allows the
 * compiler to turn the call into a tail call, which saves a
 * register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
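/*
 * For illustration, PERFORM(_read(fd, buf, size)) in the read()
 * wrapper below expands to:
 *
 *	PROLOGUE
 *	if (nocancel)
 *		return (_read(fd, buf, size));
 *	rv = _read(fd, buf, size);
 *	EPILOGUE
 *	return (rv);
 *
 * In the nocancel case PROLOGUE saved no cancellation state, so
 * nothing needs to be restored and the call can be a direct tail call.
 */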
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}
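/*
 * Hypothetical sketch (for illustration only) of how a cancellation
 * point too complex for the PERFORM() macro uses the two functions
 * above:
 *
 *	int
 *	complex_cancellation_point(...)
 *	{
 *		int rv;
 *
 *		_cancel_prologue();
 *		rv = ...;	possibly several blocking system calls
 *		_cancel_epilogue();
 *		return (rv);
 *	}
 */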
/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */
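/*
 * In the fcntl() wrapper below, F_SETLKW (wait for a file lock) is
 * the only command that can block indefinitely, so it is the only
 * command treated as a cancellation point; every other command goes
 * straight to _fcntl().  For example (application-level sketch),
 * a thread blocked in
 *
 *	(void) fcntl(fd, F_SETLKW, &fl);
 *
 * is cancelable, while fcntl(fd, F_GETFL, 0) is not.
 */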
int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fdatasync(int fildes)
{
	extern int _fdatasync(int);
	int rv;

	PERFORM(_fdatasync(fildes))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}
#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}
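/*
 * Application-level sketch (not part of libc) of the typical use of
 * the sigwait() family wrapped above: one thread accepts asynchronous
 * signals synchronously while those signals remain blocked in every
 * thread:
 *
 *	sigset_t set;
 *	int sig;
 *
 *	(void) sigemptyset(&set);
 *	(void) sigaddset(&set, SIGUSR1);
 *	(void) pthread_sigmask(SIG_BLOCK, &set, NULL);
 *	for (;;) {
 *		if ((sig = sigwait(&set)) == -1)
 *			continue;
 *		... dispatch on sig ...
 *	}
 *
 * The SIGCANCEL filtering in _sigtimedwait() above is what turns a
 * pthread_cancel() directed at such a thread into a cancellation
 * rather than reporting SIGCANCEL as an ordinary signal.
 */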
int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics are desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}