1 /* 2 * Copyright (c) 2005, David Xu <davidxu@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #include "namespace.h" 30 #include <sys/param.h> 31 #include <sys/types.h> 32 #include <sys/signalvar.h> 33 #include <signal.h> 34 #include <errno.h> 35 #include <stdlib.h> 36 #include <string.h> 37 #include <pthread.h> 38 #include "un-namespace.h" 39 #include "libc_private.h" 40 41 #include "thr_private.h" 42 43 /* #define DEBUG_SIGNAL */ 44 #ifdef DEBUG_SIGNAL 45 #define DBG_MSG stdout_debug 46 #else 47 #define DBG_MSG(x...) 48 #endif 49 50 struct usigaction { 51 struct sigaction sigact; 52 struct urwlock lock; 53 }; 54 55 static struct usigaction _thr_sigact[_SIG_MAXSIG]; 56 57 static void thr_sighandler(int, siginfo_t *, void *); 58 static void handle_signal(struct sigaction *, int, siginfo_t *, ucontext_t *); 59 static void check_deferred_signal(struct pthread *); 60 static void check_suspend(struct pthread *); 61 static void check_cancel(struct pthread *curthread, ucontext_t *ucp); 62 63 int ___pause(void); 64 int _raise(int); 65 int __sigtimedwait(const sigset_t *set, siginfo_t *info, 66 const struct timespec * timeout); 67 int _sigtimedwait(const sigset_t *set, siginfo_t *info, 68 const struct timespec * timeout); 69 int __sigwaitinfo(const sigset_t *set, siginfo_t *info); 70 int _sigwaitinfo(const sigset_t *set, siginfo_t *info); 71 int ___sigwait(const sigset_t *set, int *sig); 72 int _sigwait(const sigset_t *set, int *sig); 73 int __sigsuspend(const sigset_t *sigmask); 74 int _sigaction(int, const struct sigaction *, struct sigaction *); 75 int _setcontext(const ucontext_t *); 76 int _swapcontext(ucontext_t *, const ucontext_t *); 77 78 static const sigset_t _thr_deferset={{ 79 0xffffffff & ~(_SIG_BIT(SIGBUS)|_SIG_BIT(SIGILL)|_SIG_BIT(SIGFPE)| 80 _SIG_BIT(SIGSEGV)|_SIG_BIT(SIGTRAP)|_SIG_BIT(SIGSYS)), 81 0xffffffff, 82 0xffffffff, 83 0xffffffff}}; 84 85 static const sigset_t _thr_maskset={{ 86 0xffffffff, 87 0xffffffff, 88 0xffffffff, 89 0xffffffff}}; 90 91 void 92 _thr_signal_block(struct pthread *curthread) 93 { 94 95 if (curthread->sigblock > 0) { 96 curthread->sigblock++; 97 return; 98 } 99 __sys_sigprocmask(SIG_BLOCK, &_thr_maskset, &curthread->sigmask); 100 curthread->sigblock++; 101 } 102 103 void 104 _thr_signal_unblock(struct pthread *curthread) 105 { 106 if (--curthread->sigblock == 0) 107 __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); 108 } 109 110 int 111 _thr_send_sig(struct pthread *thread, int sig) 112 { 113 return thr_kill(thread->tid, sig); 114 } 115 116 static inline void 117 remove_thr_signals(sigset_t *set) 118 { 119 if (SIGISMEMBER(*set, SIGCANCEL)) 120 SIGDELSET(*set, SIGCANCEL); 121 } 122 123 static const sigset_t * 124 thr_remove_thr_signals(const sigset_t *set, sigset_t *newset) 125 { 126 *newset = *set; 127 remove_thr_signals(newset); 128 return (newset); 129 } 130 131 static void 132 sigcancel_handler(int sig __unused, 133 siginfo_t *info __unused, ucontext_t *ucp) 134 { 135 struct pthread *curthread = _get_curthread(); 136 int err; 137 138 if (THR_IN_CRITICAL(curthread)) 139 return; 140 err = errno; 141 check_suspend(curthread); 142 check_cancel(curthread, ucp); 143 errno = err; 144 } 145 146 typedef void (*ohandler)(int sig, int code, 147 struct sigcontext *scp, char *addr, __sighandler_t *catcher); 148 149 /* 150 * The signal handler wrapper is entered with all signal masked. 151 */ 152 static void 153 thr_sighandler(int sig, siginfo_t *info, void *_ucp) 154 { 155 struct pthread *curthread = _get_curthread(); 156 ucontext_t *ucp = _ucp; 157 struct sigaction act; 158 int err; 159 160 err = errno; 161 _thr_rwl_rdlock(&_thr_sigact[sig-1].lock); 162 act = _thr_sigact[sig-1].sigact; 163 _thr_rwl_unlock(&_thr_sigact[sig-1].lock); 164 errno = err; 165 166 /* 167 * if a thread is in critical region, for example it holds low level locks, 168 * try to defer the signal processing, however if the signal is synchronous 169 * signal, it means a bad thing has happened, this is a programming error, 170 * resuming fault point can not help anything (normally causes deadloop), 171 * so here we let user code handle it immediately. 172 */ 173 if (THR_IN_CRITICAL(curthread) && SIGISMEMBER(_thr_deferset, sig)) { 174 memcpy(&curthread->deferred_sigact, &act, sizeof(struct sigaction)); 175 memcpy(&curthread->deferred_siginfo, info, sizeof(siginfo_t)); 176 curthread->deferred_sigmask = ucp->uc_sigmask; 177 /* mask all signals, we will restore it later. */ 178 ucp->uc_sigmask = _thr_deferset; 179 return; 180 } 181 182 handle_signal(&act, sig, info, ucp); 183 } 184 185 static void 186 handle_signal(struct sigaction *actp, int sig, siginfo_t *info, ucontext_t *ucp) 187 { 188 struct pthread *curthread = _get_curthread(); 189 ucontext_t uc2; 190 __siginfohandler_t *sigfunc; 191 int cancel_point; 192 int cancel_async; 193 int cancel_enable; 194 int in_sigsuspend; 195 int err; 196 197 /* add previous level mask */ 198 SIGSETOR(actp->sa_mask, ucp->uc_sigmask); 199 200 /* add this signal's mask */ 201 if (!(actp->sa_flags & SA_NODEFER)) 202 SIGADDSET(actp->sa_mask, sig); 203 204 in_sigsuspend = curthread->in_sigsuspend; 205 curthread->in_sigsuspend = 0; 206 207 /* 208 * if thread is in deferred cancellation mode, disable cancellation 209 * in signal handler. 210 * if user signal handler calls a cancellation point function, e.g, 211 * it calls write() to write data to file, because write() is a 212 * cancellation point, the thread is immediately cancelled if 213 * cancellation is pending, to avoid this problem while thread is in 214 * deferring mode, cancellation is temporarily disabled. 215 */ 216 cancel_point = curthread->cancel_point; 217 cancel_async = curthread->cancel_async; 218 cancel_enable = curthread->cancel_enable; 219 curthread->cancel_point = 0; 220 if (!cancel_async) 221 curthread->cancel_enable = 0; 222 223 /* restore correct mask before calling user handler */ 224 __sys_sigprocmask(SIG_SETMASK, &actp->sa_mask, NULL); 225 226 sigfunc = actp->sa_sigaction; 227 228 /* 229 * We have already reset cancellation point flags, so if user's code 230 * longjmp()s out of its signal handler, wish its jmpbuf was set 231 * outside of a cancellation point, in most cases, this would be 232 * true. however, ther is no way to save cancel_enable in jmpbuf, 233 * so after setjmps() returns once more, the user code may need to 234 * re-set cancel_enable flag by calling pthread_setcancelstate(). 235 */ 236 if ((actp->sa_flags & SA_SIGINFO) != 0) 237 (*(sigfunc))(sig, info, ucp); 238 else { 239 ((ohandler)(*sigfunc))( 240 sig, info->si_code, (struct sigcontext *)ucp, 241 info->si_addr, (__sighandler_t *)sigfunc); 242 } 243 err = errno; 244 245 curthread->in_sigsuspend = in_sigsuspend; 246 curthread->cancel_point = cancel_point; 247 curthread->cancel_enable = cancel_enable; 248 249 memcpy(&uc2, ucp, sizeof(uc2)); 250 SIGDELSET(uc2.uc_sigmask, SIGCANCEL); 251 252 /* reschedule cancellation */ 253 check_cancel(curthread, &uc2); 254 errno = err; 255 __sys_sigreturn(&uc2); 256 } 257 258 void 259 _thr_ast(struct pthread *curthread) 260 { 261 262 if (!THR_IN_CRITICAL(curthread)) { 263 check_deferred_signal(curthread); 264 check_suspend(curthread); 265 check_cancel(curthread, NULL); 266 } 267 } 268 269 /* reschedule cancellation */ 270 static void 271 check_cancel(struct pthread *curthread, ucontext_t *ucp) 272 { 273 274 if (__predict_true(!curthread->cancel_pending || 275 !curthread->cancel_enable || curthread->no_cancel)) 276 return; 277 278 /* 279 * Otherwise, we are in defer mode, and we are at 280 * cancel point, tell kernel to not block the current 281 * thread on next cancelable system call. 282 * 283 * There are three cases we should call thr_wake() to 284 * turn on TDP_WAKEUP or send SIGCANCEL in kernel: 285 * 1) we are going to call a cancelable system call, 286 * non-zero cancel_point means we are already in 287 * cancelable state, next system call is cancelable. 288 * 2) because _thr_ast() may be called by 289 * THR_CRITICAL_LEAVE() which is used by rtld rwlock 290 * and any libthr internal locks, when rtld rwlock 291 * is used, it is mostly caused my an unresolved PLT. 292 * those routines may clear the TDP_WAKEUP flag by 293 * invoking some system calls, in those cases, we 294 * also should reenable the flag. 295 * 3) thread is in sigsuspend(), and the syscall insists 296 * on getting a signal before it agrees to return. 297 */ 298 if (curthread->cancel_point) { 299 if (curthread->in_sigsuspend && ucp) { 300 SIGADDSET(ucp->uc_sigmask, SIGCANCEL); 301 curthread->unblock_sigcancel = 1; 302 _thr_send_sig(curthread, SIGCANCEL); 303 } else 304 thr_wake(curthread->tid); 305 } else if (curthread->cancel_async) { 306 /* 307 * asynchronous cancellation mode, act upon 308 * immediately. 309 */ 310 _pthread_exit_mask(PTHREAD_CANCELED, 311 ucp? &ucp->uc_sigmask : NULL); 312 } 313 } 314 315 static void 316 check_deferred_signal(struct pthread *curthread) 317 { 318 ucontext_t *uc; 319 struct sigaction act; 320 siginfo_t info; 321 322 if (__predict_true(curthread->deferred_siginfo.si_signo == 0)) 323 return; 324 325 #if defined(__amd64__) || defined(__i386__) 326 uc = alloca(__getcontextx_size()); 327 __fillcontextx((char *)uc); 328 #else 329 ucontext_t ucv; 330 uc = &ucv; 331 getcontext(uc); 332 #endif 333 if (curthread->deferred_siginfo.si_signo != 0) { 334 act = curthread->deferred_sigact; 335 uc->uc_sigmask = curthread->deferred_sigmask; 336 memcpy(&info, &curthread->deferred_siginfo, sizeof(siginfo_t)); 337 /* remove signal */ 338 curthread->deferred_siginfo.si_signo = 0; 339 if (act.sa_flags & SA_RESETHAND) { 340 struct sigaction tact; 341 342 tact = act; 343 tact.sa_handler = SIG_DFL; 344 _sigaction(info.si_signo, &tact, NULL); 345 } 346 handle_signal(&act, info.si_signo, &info, uc); 347 } 348 } 349 350 static void 351 check_suspend(struct pthread *curthread) 352 { 353 uint32_t cycle; 354 355 if (__predict_true((curthread->flags & 356 (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED)) 357 != THR_FLAGS_NEED_SUSPEND)) 358 return; 359 if (curthread == _single_thread) 360 return; 361 if (curthread->force_exit) 362 return; 363 364 /* 365 * Blocks SIGCANCEL which other threads must send. 366 */ 367 _thr_signal_block(curthread); 368 369 /* 370 * Increase critical_count, here we don't use THR_LOCK/UNLOCK 371 * because we are leaf code, we don't want to recursively call 372 * ourself. 373 */ 374 curthread->critical_count++; 375 THR_UMUTEX_LOCK(curthread, &(curthread)->lock); 376 while ((curthread->flags & (THR_FLAGS_NEED_SUSPEND | 377 THR_FLAGS_SUSPENDED)) == THR_FLAGS_NEED_SUSPEND) { 378 curthread->cycle++; 379 cycle = curthread->cycle; 380 381 /* Wake the thread suspending us. */ 382 _thr_umtx_wake(&curthread->cycle, INT_MAX, 0); 383 384 /* 385 * if we are from pthread_exit, we don't want to 386 * suspend, just go and die. 387 */ 388 if (curthread->state == PS_DEAD) 389 break; 390 curthread->flags |= THR_FLAGS_SUSPENDED; 391 THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock); 392 _thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0); 393 THR_UMUTEX_LOCK(curthread, &(curthread)->lock); 394 curthread->flags &= ~THR_FLAGS_SUSPENDED; 395 } 396 THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock); 397 curthread->critical_count--; 398 399 _thr_signal_unblock(curthread); 400 } 401 402 void 403 _thr_signal_init(void) 404 { 405 struct sigaction act; 406 407 /* Install SIGCANCEL handler. */ 408 SIGFILLSET(act.sa_mask); 409 act.sa_flags = SA_SIGINFO; 410 act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler; 411 __sys_sigaction(SIGCANCEL, &act, NULL); 412 413 /* Unblock SIGCANCEL */ 414 SIGEMPTYSET(act.sa_mask); 415 SIGADDSET(act.sa_mask, SIGCANCEL); 416 __sys_sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL); 417 } 418 419 void 420 _thr_sigact_unload(struct dl_phdr_info *phdr_info) 421 { 422 #if 0 423 struct pthread *curthread = _get_curthread(); 424 struct urwlock *rwlp; 425 struct sigaction *actp; 426 struct sigaction kact; 427 void (*handler)(int); 428 int sig; 429 430 _thr_signal_block(curthread); 431 for (sig = 1; sig <= _SIG_MAXSIG; sig++) { 432 actp = &_thr_sigact[sig-1].sigact; 433 retry: 434 handler = actp->sa_handler; 435 if (handler != SIG_DFL && handler != SIG_IGN && 436 __elf_phdr_match_addr(phdr_info, handler)) { 437 rwlp = &_thr_sigact[sig-1].lock; 438 _thr_rwl_wrlock(rwlp); 439 if (handler != actp->sa_handler) { 440 _thr_rwl_unlock(rwlp); 441 goto retry; 442 } 443 actp->sa_handler = SIG_DFL; 444 actp->sa_flags = SA_SIGINFO; 445 SIGEMPTYSET(actp->sa_mask); 446 if (__sys_sigaction(sig, NULL, &kact) == 0 && 447 kact.sa_handler != SIG_DFL && 448 kact.sa_handler != SIG_IGN) 449 __sys_sigaction(sig, actp, NULL); 450 _thr_rwl_unlock(rwlp); 451 } 452 } 453 _thr_signal_unblock(curthread); 454 #endif 455 } 456 457 void 458 _thr_signal_prefork(void) 459 { 460 int i; 461 462 for (i = 1; i <= _SIG_MAXSIG; ++i) 463 _thr_rwl_rdlock(&_thr_sigact[i-1].lock); 464 } 465 466 void 467 _thr_signal_postfork(void) 468 { 469 int i; 470 471 for (i = 1; i <= _SIG_MAXSIG; ++i) 472 _thr_rwl_unlock(&_thr_sigact[i-1].lock); 473 } 474 475 void 476 _thr_signal_postfork_child(void) 477 { 478 int i; 479 480 for (i = 1; i <= _SIG_MAXSIG; ++i) 481 bzero(&_thr_sigact[i-1].lock, sizeof(struct urwlock)); 482 } 483 484 void 485 _thr_signal_deinit(void) 486 { 487 } 488 489 __weak_reference(___pause, pause); 490 491 int 492 ___pause(void) 493 { 494 sigset_t oset; 495 496 if (_sigprocmask(SIG_BLOCK, NULL, &oset) == -1) 497 return (-1); 498 return (__sigsuspend(&oset)); 499 } 500 501 __weak_reference(_raise, raise); 502 503 int 504 _raise(int sig) 505 { 506 return _thr_send_sig(_get_curthread(), sig); 507 } 508 509 __weak_reference(_sigaction, sigaction); 510 511 int 512 _sigaction(int sig, const struct sigaction * act, struct sigaction * oact) 513 { 514 struct sigaction newact, oldact, oldact2; 515 sigset_t oldset; 516 int ret = 0, err = 0; 517 518 if (!_SIG_VALID(sig) || sig == SIGCANCEL) { 519 errno = EINVAL; 520 return (-1); 521 } 522 523 if (act) 524 newact = *act; 525 526 __sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset); 527 _thr_rwl_wrlock(&_thr_sigact[sig-1].lock); 528 529 if (act != NULL) { 530 oldact2 = _thr_sigact[sig-1].sigact; 531 532 /* 533 * if a new sig handler is SIG_DFL or SIG_IGN, 534 * don't remove old handler from _thr_sigact[], 535 * so deferred signals still can use the handlers, 536 * multiple threads invoking sigaction itself is 537 * a race condition, so it is not a problem. 538 */ 539 if (newact.sa_handler != SIG_DFL && 540 newact.sa_handler != SIG_IGN) { 541 _thr_sigact[sig-1].sigact = newact; 542 remove_thr_signals( 543 &_thr_sigact[sig-1].sigact.sa_mask); 544 newact.sa_flags &= ~SA_NODEFER; 545 newact.sa_flags |= SA_SIGINFO; 546 newact.sa_sigaction = thr_sighandler; 547 newact.sa_mask = _thr_maskset; /* mask all signals */ 548 } 549 if ((ret = __sys_sigaction(sig, &newact, &oldact))) { 550 err = errno; 551 _thr_sigact[sig-1].sigact = oldact2; 552 } 553 } else if (oact != NULL) { 554 ret = __sys_sigaction(sig, NULL, &oldact); 555 err = errno; 556 } 557 558 if (oldact.sa_handler != SIG_DFL && 559 oldact.sa_handler != SIG_IGN) { 560 if (act != NULL) 561 oldact = oldact2; 562 else if (oact != NULL) 563 oldact = _thr_sigact[sig-1].sigact; 564 } 565 566 _thr_rwl_unlock(&_thr_sigact[sig-1].lock); 567 __sys_sigprocmask(SIG_SETMASK, &oldset, NULL); 568 569 if (ret == 0) { 570 if (oact != NULL) 571 *oact = oldact; 572 } else { 573 errno = err; 574 } 575 return (ret); 576 } 577 578 __weak_reference(_sigprocmask, sigprocmask); 579 580 int 581 _sigprocmask(int how, const sigset_t *set, sigset_t *oset) 582 { 583 const sigset_t *p = set; 584 sigset_t newset; 585 586 if (how != SIG_UNBLOCK) { 587 if (set != NULL) { 588 newset = *set; 589 SIGDELSET(newset, SIGCANCEL); 590 p = &newset; 591 } 592 } 593 return (__sys_sigprocmask(how, p, oset)); 594 } 595 596 __weak_reference(_pthread_sigmask, pthread_sigmask); 597 598 int 599 _pthread_sigmask(int how, const sigset_t *set, sigset_t *oset) 600 { 601 if (_sigprocmask(how, set, oset)) 602 return (errno); 603 return (0); 604 } 605 606 __weak_reference(__sigsuspend, sigsuspend); 607 608 int 609 _sigsuspend(const sigset_t * set) 610 { 611 sigset_t newset; 612 613 return (__sys_sigsuspend(thr_remove_thr_signals(set, &newset))); 614 } 615 616 int 617 __sigsuspend(const sigset_t * set) 618 { 619 struct pthread *curthread; 620 sigset_t newset; 621 int ret, old; 622 623 curthread = _get_curthread(); 624 625 old = curthread->in_sigsuspend; 626 curthread->in_sigsuspend = 1; 627 _thr_cancel_enter(curthread); 628 ret = __sys_sigsuspend(thr_remove_thr_signals(set, &newset)); 629 _thr_cancel_leave(curthread, 1); 630 curthread->in_sigsuspend = old; 631 if (curthread->unblock_sigcancel) { 632 curthread->unblock_sigcancel = 0; 633 SIGEMPTYSET(newset); 634 SIGADDSET(newset, SIGCANCEL); 635 __sys_sigprocmask(SIG_UNBLOCK, &newset, NULL); 636 } 637 638 return (ret); 639 } 640 641 __weak_reference(___sigwait, sigwait); 642 __weak_reference(__sigtimedwait, sigtimedwait); 643 __weak_reference(__sigwaitinfo, sigwaitinfo); 644 645 int 646 _sigtimedwait(const sigset_t *set, siginfo_t *info, 647 const struct timespec * timeout) 648 { 649 sigset_t newset; 650 651 return (__sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info, 652 timeout)); 653 } 654 655 /* 656 * Cancellation behavior: 657 * Thread may be canceled at start, if thread got signal, 658 * it is not canceled. 659 */ 660 int 661 __sigtimedwait(const sigset_t *set, siginfo_t *info, 662 const struct timespec * timeout) 663 { 664 struct pthread *curthread = _get_curthread(); 665 sigset_t newset; 666 int ret; 667 668 _thr_cancel_enter(curthread); 669 ret = __sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info, 670 timeout); 671 _thr_cancel_leave(curthread, (ret == -1)); 672 return (ret); 673 } 674 675 int 676 _sigwaitinfo(const sigset_t *set, siginfo_t *info) 677 { 678 sigset_t newset; 679 680 return (__sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info)); 681 } 682 683 /* 684 * Cancellation behavior: 685 * Thread may be canceled at start, if thread got signal, 686 * it is not canceled. 687 */ 688 int 689 __sigwaitinfo(const sigset_t *set, siginfo_t *info) 690 { 691 struct pthread *curthread = _get_curthread(); 692 sigset_t newset; 693 int ret; 694 695 _thr_cancel_enter(curthread); 696 ret = __sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info); 697 _thr_cancel_leave(curthread, ret == -1); 698 return (ret); 699 } 700 701 int 702 _sigwait(const sigset_t *set, int *sig) 703 { 704 sigset_t newset; 705 706 return (__sys_sigwait(thr_remove_thr_signals(set, &newset), sig)); 707 } 708 709 /* 710 * Cancellation behavior: 711 * Thread may be canceled at start, if thread got signal, 712 * it is not canceled. 713 */ 714 int 715 ___sigwait(const sigset_t *set, int *sig) 716 { 717 struct pthread *curthread = _get_curthread(); 718 sigset_t newset; 719 int ret; 720 721 do { 722 _thr_cancel_enter(curthread); 723 ret = __sys_sigwait(thr_remove_thr_signals(set, &newset), sig); 724 _thr_cancel_leave(curthread, (ret != 0)); 725 } while (ret == EINTR); 726 return (ret); 727 } 728 729 __weak_reference(_setcontext, setcontext); 730 int 731 _setcontext(const ucontext_t *ucp) 732 { 733 ucontext_t uc; 734 735 (void) memcpy(&uc, ucp, sizeof(uc)); 736 remove_thr_signals(&uc.uc_sigmask); 737 return __sys_setcontext(&uc); 738 } 739 740 __weak_reference(__sys_swapcontext, swapcontext); 741