/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	while (udp->fork_count) {
		if (udp->fork_owner == self) {
			/*
			 * This is like a recursive lock except that we
			 * inform the caller if we have been called from
			 * a fork handler and let it deal with that fact.
			 */
			if (self->ul_fork) {
				/*
				 * We have been called from a fork handler.
				 */
				if (who != NULL &&
				    udp->uberflags.uf_thread_error_detection)
					fork_lock_error(who);
				error = EDEADLK;
			}
			break;
		}
		ASSERT(self->ul_fork == 0);
		(void) _cond_wait(&udp->fork_cond, &udp->fork_lock);
	}
	udp->fork_owner = self;
	udp->fork_count++;
	(void) _private_mutex_unlock(&udp->fork_lock);
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	(void) _private_mutex_lock(&udp->fork_lock);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0) {
		udp->fork_owner = NULL;
		(void) _cond_signal(&udp->fork_cond);
	}
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}
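
/*
 * Illustrative sketch only (not part of the implementation): internal
 * callers bracket the serialized operation with the pair above, and
 * must call fork_lock_exit() even when fork_lock_enter() reports
 * EDEADLK, because the lock is still acquired recursively in that case:
 *
 *	if ((error = fork_lock_enter("some_caller")) != 0) {
 *		fork_lock_exit();
 *		return (error);
 *	}
 *	... fork(), forkall() and thr_suspend() are now excluded ...
 *	fork_lock_exit();
 *
 * The "some_caller" string is a hypothetical caller name used only
 * for the deadlock diagnostics (see fork_lock_error() above).
 */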

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork = _fork1
#pragma weak _fork = _fork1
#pragma weak fork1 = _fork1
pid_t
_fork1(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __fork1();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigon() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __fork1().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork1() from the current thread.
	 */
	(void) _private_mutex_lock(&udp->fork_lock);
	suspend_fork();
	(void) _private_mutex_unlock(&udp->fork_lock);

	pid = __fork1();

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __fork1(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork1() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}
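
/*
 * Illustrative sketch only (application code, not part of libc): the
 * deadlock detection described above means that a pthread_atfork()
 * handler that itself forks fails cleanly instead of hanging:
 *
 *	static void
 *	prepare(void)
 *	{
 *		if (fork() == -1 && errno == EDEADLK)
 *			... report the recursive fork() attempt ...
 *	}
 *
 *	(void) pthread_atfork(prepare, NULL, NULL);
 *	(void) fork();
 *
 * The outer fork() runs prepare() with ul_fork set, so the inner
 * fork() returns -1 with errno set to EDEADLK.
 */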

/*
 * Much of the logic here is the same as in fork1().
 * See the comments in fork1(), above.
 */
#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkall();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkall();

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE	\
{	\
	ulwp_t *self = curthread;	\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (nocancel == 0) {	\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {	\
			self->ul_cancel_async = 1;	\
			if (self->ul_cancel_pending)	\
				_pthread_exit(PTHREAD_CANCELED);	\
		}	\
		self->ul_sp = stkptr();	\
	}

#define	EPILOGUE	\
	if (nocancel == 0) {	\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}	\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)	\
	PROLOGUE	\
	if (nocancel)	\
		return (function_call);	\
	rv = function_call;	\
	EPILOGUE	\
	return (rv);
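
/*
 * For illustration only: after macro expansion, a cancelable wrapper
 * later in this file, such as read(), is approximately
 *
 *	ulwp_t *self = curthread;
 *	int nocancel = (self->ul_vfork | self->ul_nocancel);
 *	if (nocancel == 0) {
 *		self->ul_save_async = self->ul_cancel_async;
 *		if (!self->ul_cancel_disabled) {
 *			self->ul_cancel_async = 1;
 *			if (self->ul_cancel_pending)
 *				_pthread_exit(PTHREAD_CANCELED);
 *		}
 *		self->ul_sp = stkptr();
 *	}
 *	if (nocancel)
 *		return (_read(fd, buf, size));
 *	rv = _read(fd, buf, size);
 *	if (nocancel == 0) {
 *		self->ul_sp = 0;
 *		self->ul_cancel_async = self->ul_save_async;
 *	}
 *	return (rv);
 */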

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)	\
{	\
	ulwp_t *self = curthread;	\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (!self->ul_vfork) {	\
		if (sigmask) {	\
			block_all_signals(self);	\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0];	\
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1];	\
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;	\
		}	\
		if (nocancel == 0) {	\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {	\
				self->ul_cancel_async = 1;	\
				if (self->ul_cancel_pending) {	\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;	\
						restore_signals(self);	\
					}	\
					_pthread_exit(PTHREAD_CANCELED);	\
				}	\
			}	\
			self->ul_sp = stkptr();	\
		}	\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK	\
	if (nocancel == 0) {	\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}	\
	if (self->ul_sigsuspend) {	\
		self->ul_sigsuspend = 0;	\
		restore_signals(self);	\
	}	\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}
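
/*
 * Illustrative sketch only: a cancellation point implemented elsewhere
 * in libc brackets its blocking operation with the two functions above,
 * which is equivalent to the PROLOGUE/EPILOGUE macros but usable from
 * code that cannot expand the macros in place:
 *
 *	_cancel_prologue();
 *	rv = ... some blocking operation ...;
 *	_cancel_epilogue();
 */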

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
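
/*
 * Illustrative application-level usage (not part of libc): the
 * clock-reset handling above exists so that an absolute-time sleep
 * such as the following does not return before its deadline merely
 * because the system clock was stepped:
 *
 *	timespec_t deadline;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *	    &deadline, NULL) == EINTR)
 *		continue;
 */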

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */
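
/*
 * In fcntl() below, only the F_SETLKW command goes through PERFORM();
 * it is the one command that can block indefinitely waiting for a
 * conflicting file lock, and the case in which fcntl() is specified
 * to act as a cancellation point.  All other commands are passed
 * straight through to _fcntl().
 */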

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}
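
/*
 * sigtimedwait(), and through it sigwait() and sigwaitinfo() below,
 * intercepts SIGCANCEL rather than returning it to the caller.
 * SIGCANCEL is the signal libc reserves to implement pthread_cancel(),
 * so when it is received here the wrapper acts on the cancellation
 * via do_sigcancel() and makes the call appear to have been
 * interrupted (EINTR).
 */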

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}