/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	if (udp->fork_count) {
		ASSERT(udp->fork_owner == self);
		/*
		 * This is a simple recursive lock except that we
		 * inform the caller if we have been called from
		 * a fork handler and let it deal with that fact.
		 */
		if (self->ul_fork) {
			/*
			 * We have been called from a fork handler.
			 */
			if (who != NULL &&
			    udp->uberflags.uf_thread_error_detection)
				fork_lock_error(who);
			error = EDEADLK;
		}
	}
	udp->fork_owner = self;
	udp->fork_count++;
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0)
		udp->fork_owner = NULL;
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}
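
/*
 * Illustrative sketch (application code, not part of libc; names are
 * hypothetical): a pthread_atfork() handler that itself calls fork()
 * runs with curthread->ul_fork set, so fork_lock_enter() above reports
 * the recursion and the nested fork() fails:
 *
 *	static void
 *	prepare_handler(void)
 *	{
 *		(void) fork();		<- returns -1 with errno == EDEADLK
 *	}
 *
 *	(void) pthread_atfork(prepare_handler, NULL, NULL);
 *	(void) fork();			<- runs prepare_handler() first
 *
 * With thread error detection enabled, fork_lock_error() also prints
 * a "deadlock condition" diagnostic for this case.
 */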

/*
 * Note: Instead of making this function static, we reduce it to local
 * scope in the mapfile.  That allows the linker to prevent it from
 * appearing in the .SUNW_dynsymsort section.
 */
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigon() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
	return (_private_forkx(0));
}
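
/*
 * Illustrative sketch (application code, not part of libc): the
 * canonical use of pthread_atfork() that the comments in forkx()
 * above describe.  The prepare handler acquires an application lock
 * before __forkx() and the parent/child handlers release it afterwards,
 * so the child never inherits the lock held by some other thread:
 *
 *	static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prepare(void) { (void) pthread_mutex_lock(&app_lock); }
 *	static void parent(void) { (void) pthread_mutex_unlock(&app_lock); }
 *	static void child(void) { (void) pthread_mutex_unlock(&app_lock); }
 *
 *	(void) pthread_atfork(prepare, parent, child);
 *
 * The handlers run outside of all internal libc locks, so they may
 * call malloc()/free() and the like; they just cannot fork, suspend
 * threads, call pthread_atfork(), or dlclose() a library.
 */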

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE						\
{								\
	ulwp_t *self = curthread;				\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (nocancel == 0) {					\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {		\
			self->ul_cancel_async = 1;		\
			if (self->ul_cancel_pending)		\
				_pthread_exit(PTHREAD_CANCELED);\
		}						\
		self->ul_sp = stkptr();				\
	}

#define	EPILOGUE						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)					\
	PROLOGUE						\
	if (nocancel)						\
		return (function_call);				\
	rv = function_call;					\
	EPILOGUE						\
	return (rv);
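
/*
 * For reference, a sketch of what a wrapper such as read() below
 * becomes once PROLOGUE/PERFORM/EPILOGUE are expanded (the extra
 * braces contributed by the macros are flattened for readability):
 *
 *	ssize_t
 *	read(int fd, void *buf, size_t size)
 *	{
 *		ulwp_t *self = curthread;
 *		int nocancel = (self->ul_vfork | self->ul_nocancel);
 *		ssize_t rv;
 *
 *		if (nocancel == 0) {
 *			self->ul_save_async = self->ul_cancel_async;
 *			if (!self->ul_cancel_disabled) {
 *				self->ul_cancel_async = 1;
 *				if (self->ul_cancel_pending)
 *					_pthread_exit(PTHREAD_CANCELED);
 *			}
 *			self->ul_sp = stkptr();
 *		}
 *		if (nocancel)
 *			return (_read(fd, buf, size));
 *		rv = _read(fd, buf, size);
 *		self->ul_sp = 0;
 *		self->ul_cancel_async = self->ul_save_async;
 *		return (rv);
 *	}
 */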

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)					\
{								\
	ulwp_t *self = curthread;				\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (!self->ul_vfork) {					\
		if (sigmask) {					\
			block_all_signals(self);		\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask); \
			self->ul_sigsuspend = 1;		\
		}						\
		if (nocancel == 0) {				\
			self->ul_save_async = self->ul_cancel_async; \
			if (!self->ul_cancel_disabled) {	\
				self->ul_cancel_async = 1;	\
				if (self->ul_cancel_pending) {	\
					if (self->ul_sigsuspend) { \
						self->ul_sigsuspend = 0;\
						restore_signals(self); \
					}			\
					_pthread_exit(PTHREAD_CANCELED);\
				}				\
			}					\
			self->ul_sp = stkptr();			\
		}						\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define	EPILOGUE_MASK						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
	if (self->ul_sigsuspend) {				\
		self->ul_sigsuspend = 0;			\
		restore_signals(self);				\
	}							\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}
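
/*
 * Illustrative sketch (application code, not part of libc): the
 * wrappers above turn blocking system calls into cancellation
 * points.  A thread blocked in read() can be woken by
 * pthread_cancel() and terminates with PTHREAD_CANCELED:
 *
 *	static void *
 *	reader(void *arg)
 *	{
 *		char buf[512];
 *
 *		for (;;)
 *			(void) read(*(int *)arg, buf, sizeof (buf));
 *	}
 *
 *	pthread_t tid;
 *	void *status;
 *	int fd;
 *
 *	(void) pthread_create(&tid, NULL, reader, &fd);
 *	(void) pthread_cancel(tid);
 *	(void) pthread_join(tid, &status);	<- status == PTHREAD_CANCELED
 */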

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
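
/*
 * Illustrative sketch (application code, not part of libc): sleeping
 * until an absolute deadline with the wrapper above.  Because
 * TIMER_ABSTIME names a point on the clock rather than a duration,
 * an early return (EINTR) can simply reissue the same request; note
 * that clock_nanosleep() returns the error number directly rather
 * than setting errno:
 *
 *	timespec_t deadline;
 *	int err;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;		<- five seconds from now
 *	do {
 *		err = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *		    &deadline, NULL);
 *	} while (err == EINTR);
 */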

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}
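
/*
 * Illustrative sketch (application code, not part of libc): of the
 * fcntl() commands, only F_SETLKW can block, so only it is routed
 * through PERFORM() above and acts as a cancellation point; all
 * other commands call straight through to _fcntl():
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;				<- whole file
 *	(void) fcntl(fd, F_SETLKW, &fl);	<- may block; cancellation point
 */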

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}
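
/*
 * Illustrative sketch (application code, not part of libc): a common
 * consumer of the sigwait()/sigtimedwait() wrappers above is a
 * dedicated signal-handling thread.  The signals of interest are
 * blocked in every thread and received synchronously here; the
 * wrapper quietly filters out libc's internal SIGCANCEL so the
 * application never sees it.  handle_usr1() is a hypothetical
 * application function:
 *
 *	sigset_t set;
 *	int sig;
 *
 *	(void) sigemptyset(&set);
 *	(void) sigaddset(&set, SIGUSR1);
 *	(void) sigaddset(&set, SIGTERM);
 *	(void) pthread_sigmask(SIG_BLOCK, &set, NULL);
 *	for (;;) {
 *		sig = sigwait(&set);	<- one-argument form defined above
 *		if (sig == SIGTERM)
 *			break;
 *		if (sig == SIGUSR1)
 *			handle_usr1();
 *	}
 */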

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}