/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "namespace.h"
#include <sys/param.h>
#include <sys/auxv.h>
#include <sys/elf.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <signal.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "un-namespace.h"

#include "libc_private.h"
#include "thr_private.h"

/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
#define	DBG_MSG		stdout_debug
#else
#define	DBG_MSG(x...)
#endif

struct usigaction {
	struct sigaction sigact;
	struct urwlock lock;
};

static struct usigaction _thr_sigact[_SIG_MAXSIG];

static inline struct usigaction *
__libc_sigaction_slot(int signo)
{

	return (&_thr_sigact[signo - 1]);
}

static void thr_sighandler(int, siginfo_t *, void *);
static void handle_signal(struct sigaction *, int, siginfo_t *, ucontext_t *);
static void check_deferred_signal(struct pthread *);
static void check_suspend(struct pthread *);
static void check_cancel(struct pthread *curthread, ucontext_t *ucp);

int	_sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec * timeout);
int	_sigwaitinfo(const sigset_t *set, siginfo_t *info);
int	_sigwait(const sigset_t *set, int *sig);
int	_setcontext(const ucontext_t *);
int	_swapcontext(ucontext_t *, const ucontext_t *);

static const sigset_t _thr_deferset={{
	0xffffffff & ~(_SIG_BIT(SIGBUS)|_SIG_BIT(SIGILL)|_SIG_BIT(SIGFPE)|
	_SIG_BIT(SIGSEGV)|_SIG_BIT(SIGTRAP)|_SIG_BIT(SIGSYS)),
	0xffffffff,
	0xffffffff,
	0xffffffff}};

static const sigset_t _thr_maskset={{
	0xffffffff,
	0xffffffff,
	0xffffffff,
	0xffffffff}};

static void
thr_signal_block_slow(struct pthread *curthread)
{
	if (curthread->sigblock > 0) {
		curthread->sigblock++;
		return;
	}
	__sys_sigprocmask(SIG_BLOCK, &_thr_maskset, &curthread->sigmask);
	curthread->sigblock++;
}

static void
thr_signal_unblock_slow(struct pthread *curthread)
{
	if (--curthread->sigblock == 0)
		__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
}

static void
thr_signal_block_fast(struct pthread *curthread)
{
	atomic_add_32(&curthread->fsigblock, SIGFASTBLOCK_INC);
}

static void
thr_signal_unblock_fast(struct pthread *curthread)
{
	uint32_t oldval;

	oldval = atomic_fetchadd_32(&curthread->fsigblock, -SIGFASTBLOCK_INC);
	if (oldval == (SIGFASTBLOCK_PEND | SIGFASTBLOCK_INC))
		__sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
}

static bool fast_sigblock;

void
_thr_signal_block(struct pthread *curthread)
{
	if (fast_sigblock)
		thr_signal_block_fast(curthread);
	else
		thr_signal_block_slow(curthread);
}

void
_thr_signal_unblock(struct pthread *curthread)
{
	if (fast_sigblock)
		thr_signal_unblock_fast(curthread);
	else
		thr_signal_unblock_slow(curthread);
}

void
_thr_signal_block_check_fast(void)
{
	int bsdflags, error;

	error = elf_aux_info(AT_BSDFLAGS, &bsdflags, sizeof(bsdflags));
	if (error != 0)
		return;
	fast_sigblock = (bsdflags & ELF_BSDF_SIGFASTBLK) != 0;
}

void
_thr_signal_block_setup(struct pthread *curthread)
{
	if (!fast_sigblock)
		return;
	__sys_sigfastblock(SIGFASTBLOCK_SETPTR, &curthread->fsigblock);
}

int
_thr_send_sig(struct pthread *thread, int sig)
{
	return thr_kill(thread->tid, sig);
}

static inline void
remove_thr_signals(sigset_t *set)
{
	if (SIGISMEMBER(*set, SIGCANCEL))
		SIGDELSET(*set, SIGCANCEL);
}

static const sigset_t *
thr_remove_thr_signals(const sigset_t *set, sigset_t *newset)
{
	*newset = *set;
	remove_thr_signals(newset);
	return (newset);
}
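
/*
 * SIGCANCEL is sent to a thread to make it act on a pending suspension
 * or deferred cancellation request.  The handler only runs the
 * corresponding checks: it does nothing while the thread is in a
 * critical region, and it preserves errno so the interrupted code is
 * not disturbed.
 */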
static void
sigcancel_handler(int sig __unused,
	siginfo_t *info __unused, ucontext_t *ucp)
{
	struct pthread *curthread = _get_curthread();
	int err;

	if (THR_IN_CRITICAL(curthread))
		return;
	err = errno;
	check_suspend(curthread);
	check_cancel(curthread, ucp);
	errno = err;
}

typedef void (*ohandler)(int sig, int code, struct sigcontext *scp,
	char *addr, __sighandler_t *catcher);

/*
 * The signal handler wrapper is entered with all signals masked.
 */
static void
thr_sighandler(int sig, siginfo_t *info, void *_ucp)
{
	struct pthread *curthread;
	ucontext_t *ucp;
	struct sigaction act;
	struct usigaction *usa;
	int err;

	err = errno;
	curthread = _get_curthread();
	ucp = _ucp;
	usa = __libc_sigaction_slot(sig);
	_thr_rwl_rdlock(&usa->lock);
	act = usa->sigact;
	_thr_rwl_unlock(&usa->lock);
	errno = err;
	curthread->deferred_run = 0;

	/*
	 * If the thread is in a critical region, for example it holds
	 * low-level locks, try to defer the signal processing.  However,
	 * if the signal is synchronous, something bad has happened: this
	 * is a programming error, and resuming at the fault point cannot
	 * help (it normally just faults again), so let the user code
	 * handle it immediately.
	 */
	if (THR_IN_CRITICAL(curthread) && SIGISMEMBER(_thr_deferset, sig)) {
		memcpy(&curthread->deferred_sigact, &act, sizeof(struct sigaction));
		memcpy(&curthread->deferred_siginfo, info, sizeof(siginfo_t));
		curthread->deferred_sigmask = ucp->uc_sigmask;
		/* mask all signals, we will restore it later. */
		ucp->uc_sigmask = _thr_deferset;
		return;
	}

	handle_signal(&act, sig, info, ucp);
}
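
/*
 * Deliver a signal to the user's handler: build the handler's signal
 * mask from sa_mask and the interrupted context, temporarily disable
 * deferred cancellation so cancellation points inside the handler do
 * not fire, call the handler, and finally return through sigreturn(2)
 * so the saved context is restored with SIGCANCEL removed from its
 * mask.
 */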
static void
handle_signal(struct sigaction *actp, int sig, siginfo_t *info, ucontext_t *ucp)
{
	struct pthread *curthread = _get_curthread();
	ucontext_t uc2;
	__siginfohandler_t *sigfunc;
	int cancel_point;
	int cancel_async;
	int cancel_enable;
	int in_sigsuspend;
	int err;

	/* add previous level mask */
	SIGSETOR(actp->sa_mask, ucp->uc_sigmask);

	/* add this signal's mask */
	if (!(actp->sa_flags & SA_NODEFER))
		SIGADDSET(actp->sa_mask, sig);

	in_sigsuspend = curthread->in_sigsuspend;
	curthread->in_sigsuspend = 0;

	/*
	 * If the thread is in deferred cancellation mode, disable
	 * cancellation inside the signal handler.  Otherwise, if the user's
	 * handler calls a cancellation-point function, e.g. write(), while
	 * a cancellation is pending, the thread would be canceled
	 * immediately.  To avoid this while the thread is in deferred mode,
	 * cancellation is temporarily disabled.
	 */
	cancel_point = curthread->cancel_point;
	cancel_async = curthread->cancel_async;
	cancel_enable = curthread->cancel_enable;
	curthread->cancel_point = 0;
	if (!cancel_async)
		curthread->cancel_enable = 0;

	/* restore correct mask before calling user handler */
	__sys_sigprocmask(SIG_SETMASK, &actp->sa_mask, NULL);

	sigfunc = actp->sa_sigaction;

	/*
	 * We have already reset the cancellation-point flags, so if the
	 * user's code longjmp()s out of its signal handler, we hope its
	 * jmpbuf was set outside of a cancellation point; in most cases
	 * this is true.  However, there is no way to save cancel_enable in
	 * the jmpbuf, so after setjmp() returns a second time the user code
	 * may need to reset the cancel_enable flag by calling
	 * pthread_setcancelstate().
	 */
	if ((actp->sa_flags & SA_SIGINFO) != 0) {
		sigfunc(sig, info, ucp);
	} else {
		((ohandler)sigfunc)(sig, info->si_code,
		    (struct sigcontext *)ucp, info->si_addr,
		    (__sighandler_t *)sigfunc);
	}
	err = errno;

	curthread->in_sigsuspend = in_sigsuspend;
	curthread->cancel_point = cancel_point;
	curthread->cancel_enable = cancel_enable;

	memcpy(&uc2, ucp, sizeof(uc2));
	SIGDELSET(uc2.uc_sigmask, SIGCANCEL);

	/* reschedule cancellation */
	check_cancel(curthread, &uc2);
	errno = err;
	syscall(SYS_sigreturn, &uc2);
}
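
/*
 * _thr_ast() is called at safe points, for example from
 * THR_CRITICAL_LEAVE(), to run the signal, suspension, and cancellation
 * processing that had to be deferred while the thread was in a critical
 * region.
 */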
void
_thr_ast(struct pthread *curthread)
{

	if (!THR_IN_CRITICAL(curthread)) {
		check_deferred_signal(curthread);
		check_suspend(curthread);
		check_cancel(curthread, NULL);
	}
}

/* reschedule cancellation */
static void
check_cancel(struct pthread *curthread, ucontext_t *ucp)
{

	if (__predict_true(!curthread->cancel_pending ||
	    !curthread->cancel_enable || curthread->no_cancel))
		return;

	/*
	 * Otherwise we are in deferred mode, and we are at a cancellation
	 * point: tell the kernel not to block the current thread on the
	 * next cancelable system call.
	 *
	 * There are three cases in which we should call thr_wake() to
	 * turn on TDP_WAKEUP or send SIGCANCEL in the kernel:
	 * 1) we are going to call a cancelable system call; a non-zero
	 *    cancel_point means we are already in a cancelable state and
	 *    the next system call is cancelable.
	 * 2) _thr_ast() may be called by THR_CRITICAL_LEAVE(), which is
	 *    used by the rtld rwlock and libthr's internal locks; when the
	 *    rtld rwlock is used, it is mostly because of an unresolved
	 *    PLT.  Those routines may clear the TDP_WAKEUP flag by
	 *    invoking some system calls; in those cases we should
	 *    re-enable the flag.
	 * 3) the thread is in sigsuspend(), and the syscall insists on
	 *    getting a signal before it agrees to return.
	 */
	if (curthread->cancel_point) {
		if (curthread->in_sigsuspend && ucp) {
			SIGADDSET(ucp->uc_sigmask, SIGCANCEL);
			curthread->unblock_sigcancel = 1;
			_thr_send_sig(curthread, SIGCANCEL);
		} else
			thr_wake(curthread->tid);
	} else if (curthread->cancel_async) {
		/*
		 * Asynchronous cancellation mode: act on it
		 * immediately.
		 */
		_pthread_exit_mask(PTHREAD_CANCELED,
		    ucp ? &ucp->uc_sigmask : NULL);
	}
}

static void
check_deferred_signal(struct pthread *curthread)
{
	ucontext_t *uc;
	struct sigaction act;
	siginfo_t info;
	int uc_len;

	if (__predict_true(curthread->deferred_siginfo.si_signo == 0 ||
	    curthread->deferred_run))
		return;

	curthread->deferred_run = 1;
	uc_len = __getcontextx_size();
	uc = alloca(uc_len);
	getcontext(uc);
	if (curthread->deferred_siginfo.si_signo == 0) {
		curthread->deferred_run = 0;
		return;
	}
	__fillcontextx2((char *)uc);
	act = curthread->deferred_sigact;
	uc->uc_sigmask = curthread->deferred_sigmask;
	memcpy(&info, &curthread->deferred_siginfo, sizeof(siginfo_t));
	/* remove signal */
	curthread->deferred_siginfo.si_signo = 0;
	handle_signal(&act, info.si_signo, &info, uc);
}

static void
check_suspend(struct pthread *curthread)
{
	uint32_t cycle;

	if (__predict_true((curthread->flags &
	    (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED))
	    != THR_FLAGS_NEED_SUSPEND))
		return;
	if (curthread == _single_thread)
		return;
	if (curthread->force_exit)
		return;

	/*
	 * Block SIGCANCEL, which other threads must send.
	 */
	_thr_signal_block(curthread);

	/*
	 * Increase critical_count; we don't use THR_LOCK/UNLOCK here
	 * because we are leaf code and don't want to recursively call
	 * ourselves.
	 */
	curthread->critical_count++;
	THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
	while ((curthread->flags & THR_FLAGS_NEED_SUSPEND) != 0) {
		curthread->cycle++;
		cycle = curthread->cycle;

		/* Wake the thread suspending us. */
		_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);

		/*
		 * If we come from pthread_exit(), we don't want to
		 * suspend; just go and die.
		 */
		if (curthread->state == PS_DEAD)
			break;
		curthread->flags |= THR_FLAGS_SUSPENDED;
		THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
		_thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0);
		THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
	}
	THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
	curthread->critical_count--;

	_thr_signal_unblock(curthread);
}
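
/*
 * Called during libthr initialization.  If the library was pulled in by
 * dlopen() after signal handlers had already been installed, every
 * existing user handler is re-registered so that it is wrapped by
 * thr_sighandler(); then the SIGCANCEL handler is installed and
 * SIGCANCEL is unblocked.
 */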
void
_thr_signal_init(int dlopened)
{
	struct sigaction act, nact, oact;
	struct usigaction *usa;
	sigset_t oldset;
	int sig, error;

	if (dlopened) {
		__sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
		for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
			if (sig == SIGCANCEL)
				continue;
			error = __sys_sigaction(sig, NULL, &oact);
			if (error == -1 || oact.sa_handler == SIG_DFL ||
			    oact.sa_handler == SIG_IGN)
				continue;
			usa = __libc_sigaction_slot(sig);
			usa->sigact = oact;
			nact = oact;
			remove_thr_signals(&usa->sigact.sa_mask);
			nact.sa_flags &= ~SA_NODEFER;
			nact.sa_flags |= SA_SIGINFO;
			nact.sa_sigaction = thr_sighandler;
			nact.sa_mask = _thr_maskset;
			(void)__sys_sigaction(sig, &nact, NULL);
		}
		__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
	}

	/* Install SIGCANCEL handler. */
	SIGFILLSET(act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler;
	__sys_sigaction(SIGCANCEL, &act, NULL);

	/* Unblock SIGCANCEL */
	SIGEMPTYSET(act.sa_mask);
	SIGADDSET(act.sa_mask, SIGCANCEL);
	__sys_sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL);
}

void
_thr_sigact_unload(struct dl_phdr_info *phdr_info __unused)
{
#if 0
	struct pthread *curthread = _get_curthread();
	struct urwlock *rwlp;
	struct sigaction *actp;
	struct usigaction *usa;
	struct sigaction kact;
	void (*handler)(int);
	int sig;

	_thr_signal_block(curthread);
	for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
		usa = __libc_sigaction_slot(sig);
		actp = &usa->sigact;
retry:
		handler = actp->sa_handler;
		if (handler != SIG_DFL && handler != SIG_IGN &&
		    __elf_phdr_match_addr(phdr_info, handler)) {
			rwlp = &usa->lock;
			_thr_rwl_wrlock(rwlp);
			if (handler != actp->sa_handler) {
				_thr_rwl_unlock(rwlp);
				goto retry;
			}
			actp->sa_handler = SIG_DFL;
			actp->sa_flags = SA_SIGINFO;
			SIGEMPTYSET(actp->sa_mask);
			if (__sys_sigaction(sig, NULL, &kact) == 0 &&
			    kact.sa_handler != SIG_DFL &&
			    kact.sa_handler != SIG_IGN)
				__sys_sigaction(sig, actp, NULL);
			_thr_rwl_unlock(rwlp);
		}
	}
	_thr_signal_unblock(curthread);
#endif
}

void
_thr_signal_prefork(void)
{
	int i;

	for (i = 1; i <= _SIG_MAXSIG; ++i)
		_thr_rwl_rdlock(&__libc_sigaction_slot(i)->lock);
}

void
_thr_signal_postfork(void)
{
	int i;

	for (i = 1; i <= _SIG_MAXSIG; ++i)
		_thr_rwl_unlock(&__libc_sigaction_slot(i)->lock);
}

void
_thr_signal_postfork_child(void)
{
	int i;

	for (i = 1; i <= _SIG_MAXSIG; ++i) {
		bzero(&__libc_sigaction_slot(i)->lock,
		    sizeof(struct urwlock));
	}
}

void
_thr_signal_deinit(void)
{
}
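
/*
 * sigaction(2) for threaded processes ends up here.  The user's handler
 * is remembered in _thr_sigact[] and thr_sighandler() is installed in
 * the kernel in its place, so every signal is first seen by libthr.
 * The per-signal rwlock and the temporary "mask everything" signal mask
 * keep the update consistent with concurrent signal delivery and
 * fork().
 */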
int
__thr_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
{
	struct sigaction newact, oldact, oldact2;
	sigset_t oldset;
	struct usigaction *usa;
	int ret, err;

	if (!_SIG_VALID(sig) || sig == SIGCANCEL) {
		errno = EINVAL;
		return (-1);
	}

	ret = 0;
	err = 0;
	usa = __libc_sigaction_slot(sig);

	__sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
	_thr_rwl_wrlock(&usa->lock);

	if (act != NULL) {
		oldact2 = usa->sigact;
		newact = *act;

		/*
		 * If the new signal handler is SIG_DFL or SIG_IGN, don't
		 * remove the old handler from _thr_sigact[], so that
		 * deferred signals can still use the handlers.  Multiple
		 * threads invoking sigaction() concurrently is itself a
		 * race condition, so this is not a problem.
		 */
		if (newact.sa_handler != SIG_DFL &&
		    newact.sa_handler != SIG_IGN) {
			usa->sigact = newact;
			remove_thr_signals(&usa->sigact.sa_mask);
			newact.sa_flags &= ~SA_NODEFER;
			newact.sa_flags |= SA_SIGINFO;
			newact.sa_sigaction = thr_sighandler;
			newact.sa_mask = _thr_maskset; /* mask all signals */
		}
		ret = __sys_sigaction(sig, &newact, &oldact);
		if (ret == -1) {
			err = errno;
			usa->sigact = oldact2;
		}
	} else if (oact != NULL) {
		ret = __sys_sigaction(sig, NULL, &oldact);
		err = errno;
	}

	if (oldact.sa_handler != SIG_DFL && oldact.sa_handler != SIG_IGN) {
		if (act != NULL)
			oldact = oldact2;
		else if (oact != NULL)
			oldact = usa->sigact;
	}

	_thr_rwl_unlock(&usa->lock);
	__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);

	if (ret == 0) {
		if (oact != NULL)
			*oact = oldact;
	} else {
		errno = err;
	}
	return (ret);
}

int
__thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
	const sigset_t *p = set;
	sigset_t newset;

	if (how != SIG_UNBLOCK) {
		if (set != NULL) {
			newset = *set;
			SIGDELSET(newset, SIGCANCEL);
			p = &newset;
		}
	}
	return (__sys_sigprocmask(how, p, oset));
}

__weak_reference(_thr_sigmask, pthread_sigmask);
__weak_reference(_thr_sigmask, _pthread_sigmask);

int
_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{

	if (__thr_sigprocmask(how, set, oset))
		return (errno);
	return (0);
}

int
_sigsuspend(const sigset_t * set)
{
	sigset_t newset;

	return (__sys_sigsuspend(thr_remove_thr_signals(set, &newset)));
}

int
__thr_sigsuspend(const sigset_t * set)
{
	struct pthread *curthread;
	sigset_t newset;
	int ret, old;

	curthread = _get_curthread();

	old = curthread->in_sigsuspend;
	curthread->in_sigsuspend = 1;
	_thr_cancel_enter(curthread);
	ret = __sys_sigsuspend(thr_remove_thr_signals(set, &newset));
	_thr_cancel_leave(curthread, 1);
	curthread->in_sigsuspend = old;
	if (curthread->unblock_sigcancel) {
		curthread->unblock_sigcancel = 0;
		SIGEMPTYSET(newset);
		SIGADDSET(newset, SIGCANCEL);
		__sys_sigprocmask(SIG_UNBLOCK, &newset, NULL);
	}

	return (ret);
}

int
_sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec * timeout)
{
	sigset_t newset;

	return (__sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
	    timeout));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread gets a
 *   signal, it is not canceled.
 */
int
__thr_sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec * timeout)
{
	struct pthread *curthread = _get_curthread();
	sigset_t newset;
	int ret;

	_thr_cancel_enter(curthread);
	ret = __sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
	    timeout);
	_thr_cancel_leave(curthread, (ret == -1));
	return (ret);
}

int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	sigset_t newset;

	return (__sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread gets a
 *   signal, it is not canceled.
 */
int
__thr_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	struct pthread *curthread = _get_curthread();
	sigset_t newset;
	int ret;

	_thr_cancel_enter(curthread);
	ret = __sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info);
	_thr_cancel_leave(curthread, ret == -1);
	return (ret);
}

int
_sigwait(const sigset_t *set, int *sig)
{
	sigset_t newset;

	return (__sys_sigwait(thr_remove_thr_signals(set, &newset), sig));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread gets a
 *   signal, it is not canceled.
 */
int
__thr_sigwait(const sigset_t *set, int *sig)
{
	struct pthread *curthread = _get_curthread();
	sigset_t newset;
	int ret;

	do {
		_thr_cancel_enter(curthread);
		ret = __sys_sigwait(thr_remove_thr_signals(set, &newset), sig);
		_thr_cancel_leave(curthread, (ret != 0));
	} while (ret == EINTR);
	return (ret);
}

int
__thr_setcontext(const ucontext_t *ucp)
{
	ucontext_t uc;

	if (ucp == NULL) {
		errno = EINVAL;
		return (-1);
	}
	if (!SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL))
		return (__sys_setcontext(ucp));
	(void) memcpy(&uc, ucp, sizeof(uc));
	SIGDELSET(uc.uc_sigmask, SIGCANCEL);
	return (__sys_setcontext(&uc));
}

int
__thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
{
	ucontext_t uc;

	if (oucp == NULL || ucp == NULL) {
		errno = EINVAL;
		return (-1);
	}
	if (SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL)) {
		(void) memcpy(&uc, ucp, sizeof(uc));
		SIGDELSET(uc.uc_sigmask, SIGCANCEL);
		ucp = &uc;
	}
	return (__sys_swapcontext(oucp, ucp));
}