/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <wait.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/door.h>

/*
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation: the pthread_atfork() prefork function in the
 * interposition library would acquire its malloc lock(s) before libc's
 * prefork functions acquire the ordinary mutex in libc.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
 */

void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) _private_mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

/*
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
 */
void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}
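/*
 * An illustrative sketch of the pattern a libc code path is expected to
 * follow when calling out to a dlopen()ed L10N object that may call
 * malloc() or free().  The callout function name below is hypothetical;
 * the point is that callout_lock_enter() and callout_lock_exit() provide
 * the serialization instead of an ordinary lmutex_lock()-acquired lock:
 *
 *	callout_lock_enter();
 *	(*hypothetical_l10n_callout)(arg);	(may call malloc()/free())
 *	callout_lock_exit();
 */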
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) _private_mutex_lock(&udp->atfork_lock);
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) _private_mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_parent_handler();
	}

	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
	return (_private_forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) _private_mutex_lock(&udp->atfork_lock);
	(void) _private_mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) _private_mutex_unlock(&udp->fork_lock);
	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}

/*
 * For the implementation of cancellation at cancellation points.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return (function_call) part allows the compiler
 * to turn the call into a tail call, which saves a register window on
 * sparc and slightly (not much) improves the code for x86/x64
 * compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
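/*
 * An illustrative, simplified view of the macros above: the brace opened
 * by PROLOGUE (and PROLOGUE_MASK) is closed by EPILOGUE (and
 * EPILOGUE_MASK), so the macros must always be used as a pair.  For a
 * simple wrapper such as _read() below, PERFORM(__read(fd, buf, size))
 * expands roughly to:
 *
 *	{
 *		ulwp_t *self = curthread;
 *		int nocancel = ...;	(in a critical region, holding locks?)
 *		int abort = 0;
 *		... enable async cancellation; exit if a cancel is pending ...
 *		if (abort) {
 *			*self->ul_errnop = EINTR;
 *			return (-1);
 *		}
 *		if (nocancel)
 *			return (__read(fd, buf, size));
 *		rv = __read(fd, buf, size);
 *		... restore the previous cancellation state ...
 *	}
 *	return (rv);
 */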
/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}

#pragma weak read = _read
ssize_t
_read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

#pragma weak write = _write
ssize_t
_write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}

#pragma weak getmsg = _getmsg
int
_getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}

#pragma weak getpmsg = _getpmsg
int
_getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

#pragma weak putmsg = _putmsg
int
_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

#pragma weak putpmsg = _putpmsg
int
_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
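/*
 * An illustrative example of the CLOCK_HIGHRES restart above, with
 * made-up numbers: if a relative sleep of 5 seconds is requested and
 * __nanosleep() returns success after only about 2 seconds because
 * someone reset the system clock, then rqlapse is 5 * NANOSEC while
 * lapse, measured with gethrtime() (which is unaffected by clock
 * resets), is about 2 * NANOSEC.  The code converts the remaining
 * ~3 seconds back to a timespec with hrt2ts() and reissues the sleep.
 */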
631 */ 632 abstime_to_reltime(clock_id, rqtp, &reltime); 633 if (reltime.tv_sec != 0 || reltime.tv_nsec != 0) 634 goto restart; 635 } 636 return (error); 637 } 638 639 #pragma weak sleep = _sleep 640 unsigned int 641 _sleep(unsigned int sec) 642 { 643 unsigned int rem = 0; 644 timespec_t ts; 645 timespec_t tsr; 646 647 ts.tv_sec = (time_t)sec; 648 ts.tv_nsec = 0; 649 if (_nanosleep(&ts, &tsr) == -1 && errno == EINTR) { 650 rem = (unsigned int)tsr.tv_sec; 651 if (tsr.tv_nsec >= NANOSEC / 2) 652 rem++; 653 } 654 return (rem); 655 } 656 657 #pragma weak usleep = _usleep 658 int 659 _usleep(useconds_t usec) 660 { 661 timespec_t ts; 662 663 ts.tv_sec = usec / MICROSEC; 664 ts.tv_nsec = (long)(usec % MICROSEC) * 1000; 665 (void) _nanosleep(&ts, NULL); 666 return (0); 667 } 668 669 #pragma weak close = _close 670 int 671 _close(int fildes) 672 { 673 extern void _aio_close(int); 674 extern int __close(int); 675 int rv; 676 677 _aio_close(fildes); 678 PERFORM(__close(fildes)) 679 } 680 681 #pragma weak creat = _creat 682 int 683 _creat(const char *path, mode_t mode) 684 { 685 extern int __creat(const char *, mode_t); 686 int rv; 687 688 PERFORM(__creat(path, mode)) 689 } 690 691 #if !defined(_LP64) 692 #pragma weak creat64 = _creat64 693 int 694 _creat64(const char *path, mode_t mode) 695 { 696 extern int __creat64(const char *, mode_t); 697 int rv; 698 699 PERFORM(__creat64(path, mode)) 700 } 701 #endif /* !_LP64 */ 702 703 #pragma weak door_call = _door_call 704 int 705 _door_call(int d, door_arg_t *params) 706 { 707 extern int __door_call(int, door_arg_t *); 708 int rv; 709 710 PERFORM(__door_call(d, params)) 711 } 712 713 #pragma weak fcntl = _fcntl 714 int 715 _fcntl(int fildes, int cmd, ...) 716 { 717 extern int __fcntl(int, int, ...); 718 intptr_t arg; 719 int rv; 720 va_list ap; 721 722 va_start(ap, cmd); 723 arg = va_arg(ap, intptr_t); 724 va_end(ap); 725 if (cmd != F_SETLKW) 726 return (__fcntl(fildes, cmd, arg)); 727 PERFORM(__fcntl(fildes, cmd, arg)) 728 } 729 730 #pragma weak fdatasync = _fdatasync 731 int 732 _fdatasync(int fildes) 733 { 734 extern int __fdsync(int, int); 735 int rv; 736 737 PERFORM(__fdsync(fildes, FDSYNC)) 738 } 739 740 #pragma weak fsync = _fsync 741 int 742 _fsync(int fildes) 743 { 744 extern int __fdsync(int, int); 745 int rv; 746 747 PERFORM(__fdsync(fildes, FSYNC)) 748 } 749 750 #pragma weak lockf = _lockf 751 int 752 _lockf(int fildes, int function, off_t size) 753 { 754 extern int __lockf(int, int, off_t); 755 int rv; 756 757 PERFORM(__lockf(fildes, function, size)) 758 } 759 760 #if !defined(_LP64) 761 #pragma weak lockf64 = _lockf64 762 int 763 _lockf64(int fildes, int function, off64_t size) 764 { 765 extern int __lockf64(int, int, off64_t); 766 int rv; 767 768 PERFORM(__lockf64(fildes, function, size)) 769 } 770 #endif /* !_LP64 */ 771 772 #pragma weak msgrcv = _msgrcv 773 ssize_t 774 _msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg) 775 { 776 extern ssize_t __msgrcv(int, void *, size_t, long, int); 777 ssize_t rv; 778 779 PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg)) 780 } 781 782 #pragma weak msgsnd = _msgsnd 783 int 784 _msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg) 785 { 786 extern int __msgsnd(int, const void *, size_t, int); 787 int rv; 788 789 PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg)) 790 } 791 792 #pragma weak msync = _msync 793 int 794 _msync(caddr_t addr, size_t len, int flags) 795 { 796 extern int __msync(caddr_t, size_t, int); 797 int rv; 798 799 PERFORM(__msync(addr, len, flags)) 800 } 801 802 
#pragma weak open = _open 803 int 804 _open(const char *path, int oflag, ...) 805 { 806 extern int __open(const char *, int, ...); 807 mode_t mode; 808 int rv; 809 va_list ap; 810 811 va_start(ap, oflag); 812 mode = va_arg(ap, mode_t); 813 va_end(ap); 814 PERFORM(__open(path, oflag, mode)) 815 } 816 817 #pragma weak openat = _openat 818 int 819 _openat(int fd, const char *path, int oflag, ...) 820 { 821 extern int __openat(int, const char *, int, ...); 822 mode_t mode; 823 int rv; 824 va_list ap; 825 826 va_start(ap, oflag); 827 mode = va_arg(ap, mode_t); 828 va_end(ap); 829 PERFORM(__openat(fd, path, oflag, mode)) 830 } 831 832 #if !defined(_LP64) 833 #pragma weak open64 = _open64 834 int 835 _open64(const char *path, int oflag, ...) 836 { 837 extern int __open64(const char *, int, ...); 838 mode_t mode; 839 int rv; 840 va_list ap; 841 842 va_start(ap, oflag); 843 mode = va_arg(ap, mode_t); 844 va_end(ap); 845 PERFORM(__open64(path, oflag, mode)) 846 } 847 848 #pragma weak openat64 = _openat64 849 int 850 _openat64(int fd, const char *path, int oflag, ...) 851 { 852 extern int __openat64(int, const char *, int, ...); 853 mode_t mode; 854 int rv; 855 va_list ap; 856 857 va_start(ap, oflag); 858 mode = va_arg(ap, mode_t); 859 va_end(ap); 860 PERFORM(__openat64(fd, path, oflag, mode)) 861 } 862 #endif /* !_LP64 */ 863 864 #pragma weak pause = _pause 865 int 866 _pause(void) 867 { 868 extern int __pause(void); 869 int rv; 870 871 PERFORM(__pause()) 872 } 873 874 #pragma weak pread = _pread 875 ssize_t 876 _pread(int fildes, void *buf, size_t nbyte, off_t offset) 877 { 878 extern ssize_t __pread(int, void *, size_t, off_t); 879 ssize_t rv; 880 881 PERFORM(__pread(fildes, buf, nbyte, offset)) 882 } 883 884 #if !defined(_LP64) 885 #pragma weak pread64 = _pread64 886 ssize_t 887 _pread64(int fildes, void *buf, size_t nbyte, off64_t offset) 888 { 889 extern ssize_t __pread64(int, void *, size_t, off64_t); 890 ssize_t rv; 891 892 PERFORM(__pread64(fildes, buf, nbyte, offset)) 893 } 894 #endif /* !_LP64 */ 895 896 #pragma weak pwrite = _pwrite 897 ssize_t 898 _pwrite(int fildes, const void *buf, size_t nbyte, off_t offset) 899 { 900 extern ssize_t __pwrite(int, const void *, size_t, off_t); 901 ssize_t rv; 902 903 PERFORM(__pwrite(fildes, buf, nbyte, offset)) 904 } 905 906 #if !defined(_LP64) 907 #pragma weak pwrite64 = _pwrite64 908 ssize_t 909 _pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset) 910 { 911 extern ssize_t __pwrite64(int, const void *, size_t, off64_t); 912 ssize_t rv; 913 914 PERFORM(__pwrite64(fildes, buf, nbyte, offset)) 915 } 916 #endif /* !_LP64 */ 917 918 #pragma weak readv = _readv 919 ssize_t 920 _readv(int fildes, const struct iovec *iov, int iovcnt) 921 { 922 extern ssize_t __readv(int, const struct iovec *, int); 923 ssize_t rv; 924 925 PERFORM(__readv(fildes, iov, iovcnt)) 926 } 927 928 #pragma weak sigpause = _sigpause 929 int 930 _sigpause(int sig) 931 { 932 extern int __sigpause(int); 933 int rv; 934 935 PERFORM(__sigpause(sig)) 936 } 937 938 #pragma weak sigsuspend = _sigsuspend 939 int 940 _sigsuspend(const sigset_t *set) 941 { 942 extern int __sigsuspend(const sigset_t *); 943 int rv; 944 945 PROLOGUE_MASK(set) 946 rv = __sigsuspend(set); 947 EPILOGUE_MASK 948 return (rv); 949 } 950 951 int 952 _pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout, 953 const sigset_t *sigmask) 954 { 955 extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *, 956 const sigset_t *); 957 int rv; 958 959 PROLOGUE_MASK(sigmask) 960 rv = 
__pollsys(fds, nfd, timeout, sigmask); 961 EPILOGUE_MASK 962 return (rv); 963 } 964 965 #pragma weak sigtimedwait = _sigtimedwait 966 int 967 _sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout) 968 { 969 extern int __sigtimedwait(const sigset_t *, siginfo_t *, 970 const timespec_t *); 971 siginfo_t info; 972 int sig; 973 974 PROLOGUE 975 if (abort) { 976 *self->ul_errnop = EINTR; 977 sig = -1; 978 } else { 979 sig = __sigtimedwait(set, &info, timeout); 980 if (sig == SIGCANCEL && 981 (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) { 982 do_sigcancel(); 983 *self->ul_errnop = EINTR; 984 sig = -1; 985 } 986 } 987 EPILOGUE 988 if (sig != -1 && infop) 989 (void) _private_memcpy(infop, &info, sizeof (*infop)); 990 return (sig); 991 } 992 993 #pragma weak sigwait = _sigwait 994 int 995 _sigwait(sigset_t *set) 996 { 997 return (_sigtimedwait(set, NULL, NULL)); 998 } 999 1000 #pragma weak sigwaitinfo = _sigwaitinfo 1001 int 1002 _sigwaitinfo(const sigset_t *set, siginfo_t *info) 1003 { 1004 return (_sigtimedwait(set, info, NULL)); 1005 } 1006 1007 #pragma weak sigqueue = _sigqueue 1008 int 1009 _sigqueue(pid_t pid, int signo, const union sigval value) 1010 { 1011 extern int __sigqueue(pid_t pid, int signo, 1012 /* const union sigval */ void *value, int si_code, int block); 1013 return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0)); 1014 } 1015 1016 int 1017 _so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version) 1018 { 1019 extern int __so_accept(int, struct sockaddr *, uint_t *, int); 1020 int rv; 1021 1022 PERFORM(__so_accept(sock, addr, addrlen, version)) 1023 } 1024 1025 int 1026 _so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version) 1027 { 1028 extern int __so_connect(int, struct sockaddr *, uint_t, int); 1029 int rv; 1030 1031 PERFORM(__so_connect(sock, addr, addrlen, version)) 1032 } 1033 1034 int 1035 _so_recv(int sock, void *buf, size_t len, int flags) 1036 { 1037 extern int __so_recv(int, void *, size_t, int); 1038 int rv; 1039 1040 PERFORM(__so_recv(sock, buf, len, flags)) 1041 } 1042 1043 int 1044 _so_recvfrom(int sock, void *buf, size_t len, int flags, 1045 struct sockaddr *addr, int *addrlen) 1046 { 1047 extern int __so_recvfrom(int, void *, size_t, int, 1048 struct sockaddr *, int *); 1049 int rv; 1050 1051 PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen)) 1052 } 1053 1054 int 1055 _so_recvmsg(int sock, struct msghdr *msg, int flags) 1056 { 1057 extern int __so_recvmsg(int, struct msghdr *, int); 1058 int rv; 1059 1060 PERFORM(__so_recvmsg(sock, msg, flags)) 1061 } 1062 1063 int 1064 _so_send(int sock, const void *buf, size_t len, int flags) 1065 { 1066 extern int __so_send(int, const void *, size_t, int); 1067 int rv; 1068 1069 PERFORM(__so_send(sock, buf, len, flags)) 1070 } 1071 1072 int 1073 _so_sendmsg(int sock, const struct msghdr *msg, int flags) 1074 { 1075 extern int __so_sendmsg(int, const struct msghdr *, int); 1076 int rv; 1077 1078 PERFORM(__so_sendmsg(sock, msg, flags)) 1079 } 1080 1081 int 1082 _so_sendto(int sock, const void *buf, size_t len, int flags, 1083 const struct sockaddr *addr, int *addrlen) 1084 { 1085 extern int __so_sendto(int, const void *, size_t, int, 1086 const struct sockaddr *, int *); 1087 int rv; 1088 1089 PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen)) 1090 } 1091 1092 #pragma weak tcdrain = _tcdrain 1093 int 1094 _tcdrain(int fildes) 1095 { 1096 extern int __tcdrain(int); 1097 int rv; 1098 1099 PERFORM(__tcdrain(fildes)) 1100 } 1101 1102 #pragma weak 
waitid = _waitid 1103 int 1104 _waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options) 1105 { 1106 extern int __waitid(idtype_t, id_t, siginfo_t *, int); 1107 int rv; 1108 1109 if (options & WNOHANG) 1110 return (__waitid(idtype, id, infop, options)); 1111 PERFORM(__waitid(idtype, id, infop, options)) 1112 } 1113 1114 #pragma weak writev = _writev 1115 ssize_t 1116 _writev(int fildes, const struct iovec *iov, int iovcnt) 1117 { 1118 extern ssize_t __writev(int, const struct iovec *, int); 1119 ssize_t rv; 1120 1121 PERFORM(__writev(fildes, iov, iovcnt)) 1122 } 1123