/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	while (udp->fork_count) {
		if (udp->fork_owner == self) {
			/*
			 * This is like a recursive lock except that we
			 * inform the caller if we have been called from
			 * a fork handler and let it deal with that fact.
			 */
			if (self->ul_fork) {
				/*
				 * We have been called from a fork handler.
				 */
				if (who != NULL &&
				    udp->uberflags.uf_thread_error_detection)
					fork_lock_error(who);
				error = EDEADLK;
			}
			break;
		}
		ASSERT(self->ul_fork == 0);
		(void) _cond_wait(&udp->fork_cond, &udp->fork_lock);
	}
	udp->fork_owner = self;
	udp->fork_count++;
	(void) _private_mutex_unlock(&udp->fork_lock);
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	(void) _private_mutex_lock(&udp->fork_lock);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0) {
		udp->fork_owner = NULL;
		(void) _cond_signal(&udp->fork_cond);
	}
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}
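
/*
 * Illustrative sketch (not part of the original source): internal
 * callers such as thr_suspend() and dlclose() bracket their critical
 * work with the pair above.  Note that fork_lock_exit() must be called
 * even when fork_lock_enter() returns EDEADLK, exactly as forkx() does
 * below:
 *
 *	int error;
 *
 *	if ((error = fork_lock_enter("thr_suspend")) != 0) {
 *		fork_lock_exit();
 *		return (error);		-- called from a fork handler
 *	}
 *	... serialized work: suspend threads, unload L10N objects ...
 *	fork_lock_exit();
 */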
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
static pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigon() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork() from the current thread.
	 */
	(void) _private_mutex_lock(&udp->fork_lock);
	suspend_fork();
	(void) _private_mutex_unlock(&udp->fork_lock);

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
	return (_private_forkx(0));
}
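
/*
 * Illustrative sketch (application code, not part of libc): a library
 * keeps its own lock consistent across fork() by registering atfork
 * handlers.  They run from _prefork_handler() and the _postfork_*
 * handlers invoked above, with no libc-internal locks held:
 *
 *	static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prepare(void) { (void) pthread_mutex_lock(&lib_lock); }
 *	static void parent(void) { (void) pthread_mutex_unlock(&lib_lock); }
 *	static void child(void) { (void) pthread_mutex_unlock(&lib_lock); }
 *	...
 *	(void) pthread_atfork(prepare, parent, child);
 */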
/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
static pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE						\
{								\
	ulwp_t *self = curthread;				\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (nocancel == 0) {					\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {		\
			self->ul_cancel_async = 1;		\
			if (self->ul_cancel_pending)		\
				_pthread_exit(PTHREAD_CANCELED);\
		}						\
		self->ul_sp = stkptr();				\
	}

#define	EPILOGUE						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)					\
	PROLOGUE						\
	if (nocancel)						\
		return (function_call);				\
	rv = function_call;					\
	EPILOGUE						\
	return (rv);
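
/*
 * For illustration (not part of the original source), a wrapper such
 * as read() below expands, after PERFORM/PROLOGUE/EPILOGUE, to:
 *
 *	ulwp_t *self = curthread;
 *	int nocancel = (self->ul_vfork | self->ul_nocancel);
 *	if (nocancel == 0) {
 *		self->ul_save_async = self->ul_cancel_async;
 *		if (!self->ul_cancel_disabled) {
 *			self->ul_cancel_async = 1;
 *			if (self->ul_cancel_pending)
 *				_pthread_exit(PTHREAD_CANCELED);
 *		}
 *		self->ul_sp = stkptr();
 *	}
 *	if (nocancel)
 *		return (_read(fd, buf, size));
 *	rv = _read(fd, buf, size);
 *	if (nocancel == 0) {
 *		self->ul_sp = 0;
 *		self->ul_cancel_async = self->ul_save_async;
 *	}
 *	return (rv);
 */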
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)					\
{								\
	ulwp_t *self = curthread;				\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (!self->ul_vfork) {					\
		if (sigmask) {					\
			block_all_signals(self);		\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask); \
			self->ul_sigsuspend = 1;		\
		}						\
		if (nocancel == 0) {				\
			self->ul_save_async = self->ul_cancel_async; \
			if (!self->ul_cancel_disabled) {	\
				self->ul_cancel_async = 1;	\
				if (self->ul_cancel_pending) {	\
					if (self->ul_sigsuspend) { \
						self->ul_sigsuspend = 0;\
						restore_signals(self); \
					}			\
					_pthread_exit(PTHREAD_CANCELED);\
				}				\
			}					\
			self->ul_sp = stkptr();			\
		}						\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
	if (self->ul_sigsuspend) {				\
		self->ul_sigsuspend = 0;			\
		restore_signals(self);				\
	}							\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}
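
/*
 * Illustrative sketch (assumed usage, not from this file): a complex
 * cancellation point elsewhere in libc brackets its blocking region
 * with the function pair above rather than the macros:
 *
 *	_cancel_prologue();
 *	... blocking operation that is a cancellation point ...
 *	_cancel_epilogue();
 */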
/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
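
/*
 * Illustrative sketch (application code): an absolute-time sleep that
 * avoids the relative-time drift of plain nanosleep().  Note that
 * clock_nanosleep() returns the error number directly instead of
 * setting errno:
 *
 *	timespec_t ts;
 *	int error;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;		-- wake up five seconds from now
 *	do {
 *		error = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *		    &ts, NULL);
 *	} while (error == EINTR);
 */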
#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}
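
/*
 * Illustrative sketch (application code): since close() above is a
 * cancellation point, a thread that must not leak a descriptor can
 * defer cancellation around the call:
 *
 *	int oldstate;
 *
 *	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	(void) close(fd);
 *	(void) pthread_setcancelstate(oldstate, NULL);
 */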
int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}
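
/*
 * Illustrative sketch (application code): the classic race-free wait
 * for a signal, which depends on the atomic mask handoff implemented
 * by PROLOGUE_MASK above.  got_sigusr1 is a hypothetical flag set by
 * the SIGUSR1 handler:
 *
 *	sigset_t blockmask, waitmask;
 *
 *	(void) sigemptyset(&blockmask);
 *	(void) sigaddset(&blockmask, SIGUSR1);
 *	(void) sigprocmask(SIG_BLOCK, &blockmask, &waitmask);
 *	while (!got_sigusr1)
 *		(void) sigsuspend(&waitmask);
 *	(void) sigprocmask(SIG_SETMASK, &waitmask, NULL);
 */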
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}
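
/*
 * Illustrative sketch (application code, using the Solaris sigwait()
 * defined above, which returns the signal number): a dedicated thread
 * that handles asynchronous signals synchronously.  The signals must
 * be blocked in every thread before this thread starts, and
 * reload_config() is a hypothetical application function:
 *
 *	static void *
 *	signal_thread(void *arg)
 *	{
 *		sigset_t set;
 *
 *		(void) sigemptyset(&set);
 *		(void) sigaddset(&set, SIGHUP);
 *		for (;;) {
 *			if (sigwait(&set) == SIGHUP)
 *				reload_config();
 *		}
 *		return (NULL);
 *	}
 */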