/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <sys/param.h>
#include <sys/auxv.h>
#include <sys/elf.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <signal.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "un-namespace.h"
#include "libc_private.h"

#include "thr_private.h"

/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
#define DBG_MSG		stdout_debug
#else
#define DBG_MSG(x...)
#endif

struct usigaction {
	struct sigaction sigact;
	struct urwlock	lock;
};

static struct usigaction _thr_sigact[_SIG_MAXSIG];

static inline struct usigaction *
__libc_sigaction_slot(int signo)
{

	return (&_thr_sigact[signo - 1]);
}

static void thr_sighandler(int, siginfo_t *, void *);
static void handle_signal(struct sigaction *, int, siginfo_t *, ucontext_t *);
static void check_deferred_signal(struct pthread *);
static void check_suspend(struct pthread *);
static void check_cancel(struct pthread *curthread, ucontext_t *ucp);

int	_sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec * timeout);
int	_sigwaitinfo(const sigset_t *set, siginfo_t *info);
int	_sigwait(const sigset_t *set, int *sig);
int	_setcontext(const ucontext_t *);
int	_swapcontext(ucontext_t *, const ucontext_t *);

/*
 * Signals that may be deferred while a thread is in a critical region;
 * synchronous (fault) signals are excluded and always handled immediately.
 */
static const sigset_t _thr_deferset = {{
	0xffffffff & ~(_SIG_BIT(SIGBUS)|_SIG_BIT(SIGILL)|_SIG_BIT(SIGFPE)|
	_SIG_BIT(SIGSEGV)|_SIG_BIT(SIGTRAP)|_SIG_BIT(SIGSYS)),
	0xffffffff,
	0xffffffff,
	0xffffffff}};

/* Mask containing every signal. */
static const sigset_t _thr_maskset = {{
	0xffffffff,
	0xffffffff,
	0xffffffff,
	0xffffffff}};

static void
thr_signal_block_slow(struct pthread *curthread)
{
	if (curthread->sigblock > 0) {
		curthread->sigblock++;
		return;
	}
	__sys_sigprocmask(SIG_BLOCK, &_thr_maskset, &curthread->sigmask);
	curthread->sigblock++;
}

static void
thr_signal_unblock_slow(struct pthread *curthread)
{
	if (--curthread->sigblock == 0)
		__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
}

static void
thr_signal_block_fast(struct pthread *curthread)
{
	atomic_add_32(&curthread->fsigblock, SIGFASTBLOCK_INC);
}

static void
thr_signal_unblock_fast(struct pthread *curthread)
{
	uint32_t oldval;

	oldval = atomic_fetchadd_32(&curthread->fsigblock, -SIGFASTBLOCK_INC);
	if (oldval == (SIGFASTBLOCK_PEND | SIGFASTBLOCK_INC))
		__sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
}

static bool fast_sigblock;

void
_thr_signal_block(struct pthread *curthread)
{
	if (fast_sigblock)
		thr_signal_block_fast(curthread);
	else
		thr_signal_block_slow(curthread);
}

void
_thr_signal_unblock(struct pthread *curthread)
{
	if (fast_sigblock)
		thr_signal_unblock_fast(curthread);
	else
		thr_signal_unblock_slow(curthread);
}

void
_thr_signal_block_check_fast(void)
{
	int bsdflags, error;

	error = elf_aux_info(AT_BSDFLAGS, &bsdflags, sizeof(bsdflags));
	if (error != 0)
		return;
	fast_sigblock = (bsdflags & ELF_BSDF_SIGFASTBLK) != 0;
}

void
_thr_signal_block_setup(struct pthread *curthread)
{
	if (!fast_sigblock)
		return;
	__sys_sigfastblock(SIGFASTBLOCK_SETPTR, &curthread->fsigblock);
}

int
_thr_send_sig(struct pthread *thread, int sig)
{
	return (thr_kill(thread->tid, sig));
}

static inline void
remove_thr_signals(sigset_t *set)
{
	if (SIGISMEMBER(*set, SIGCANCEL))
		SIGDELSET(*set, SIGCANCEL);
}

static const sigset_t *
thr_remove_thr_signals(const sigset_t *set, sigset_t *newset)
{
	*newset = *set;
	remove_thr_signals(newset);
	return (newset);
}

static void
sigcancel_handler(int sig __unused,
	siginfo_t *info __unused, ucontext_t *ucp)
{
	struct pthread *curthread = _get_curthread();
	int err;

	if (THR_IN_CRITICAL(curthread))
		return;
	err = errno;
	check_suspend(curthread);
	check_cancel(curthread, ucp);
	errno = err;
}

typedef void (*ohandler)(int sig, int code, struct sigcontext *scp,
    char *addr, __sighandler_t *catcher);

/*
 * The signal handler wrapper is entered with all signals masked.
 */
static void
thr_sighandler(int sig, siginfo_t *info, void *_ucp)
{
	struct pthread *curthread;
	ucontext_t *ucp;
	struct sigaction act;
	struct usigaction *usa;
	int err;

	err = errno;
	curthread = _get_curthread();
	ucp = _ucp;
	usa = __libc_sigaction_slot(sig);
	_thr_rwl_rdlock(&usa->lock);
	act = usa->sigact;
	_thr_rwl_unlock(&usa->lock);
	errno = err;
	curthread->deferred_run = 0;

	/*
	 * If the thread is in a critical region, e.g. it holds low-level
	 * locks, try to defer the signal processing.  However, if the
	 * signal is synchronous, something has gone badly wrong: this is
	 * a programming error, and resuming at the fault point cannot
	 * help (it normally just faults again in a loop), so let user
	 * code handle it immediately.
	 */
	if (THR_IN_CRITICAL(curthread) && SIGISMEMBER(_thr_deferset, sig)) {
		memcpy(&curthread->deferred_sigact, &act,
		    sizeof(struct sigaction));
		memcpy(&curthread->deferred_siginfo, info, sizeof(siginfo_t));
		curthread->deferred_sigmask = ucp->uc_sigmask;
		/* Mask all signals; we will restore the mask later. */
		ucp->uc_sigmask = _thr_deferset;
		return;
	}

	handle_signal(&act, sig, info, ucp);
}

static void
handle_signal(struct sigaction *actp, int sig, siginfo_t *info, ucontext_t *ucp)
{
	struct pthread *curthread = _get_curthread();
	ucontext_t uc2;
	__siginfohandler_t *sigfunc;
	int cancel_point;
	int cancel_async;
	int cancel_enable;
	int in_sigsuspend;
	int err;

	/* Add the previous level's mask. */
	SIGSETOR(actp->sa_mask, ucp->uc_sigmask);

	/* Add this signal's mask. */
	if (!(actp->sa_flags & SA_NODEFER))
		SIGADDSET(actp->sa_mask, sig);

	in_sigsuspend = curthread->in_sigsuspend;
	curthread->in_sigsuspend = 0;

	/*
	 * If the thread is in deferred cancellation mode, disable
	 * cancellation while the signal handler runs.  If the user's
	 * signal handler calls a cancellation point, e.g. write(), the
	 * thread would be canceled immediately if a cancel request is
	 * pending.  To avoid this while the thread is in deferred mode,
	 * cancellation is temporarily disabled.
	 */
	cancel_point = curthread->cancel_point;
	cancel_async = curthread->cancel_async;
	cancel_enable = curthread->cancel_enable;
	curthread->cancel_point = 0;
	if (!cancel_async)
		curthread->cancel_enable = 0;

	/* Restore the correct mask before calling the user handler. */
	__sys_sigprocmask(SIG_SETMASK, &actp->sa_mask, NULL);

	sigfunc = actp->sa_sigaction;

	/*
	 * The cancellation point flag has already been cleared, so if the
	 * user's code longjmp()s out of its signal handler, the jmpbuf
	 * should ideally have been set outside of a cancellation point;
	 * in most cases this is true.  However, there is no way to save
	 * cancel_enable in the jmpbuf, so after setjmp() returns a second
	 * time the user code may need to re-enable cancellation by
	 * calling pthread_setcancelstate().
	 */
	if ((actp->sa_flags & SA_SIGINFO) != 0) {
		sigfunc(sig, info, ucp);
	} else {
		((ohandler)sigfunc)(sig, info->si_code,
		    (struct sigcontext *)ucp, info->si_addr,
		    (__sighandler_t *)sigfunc);
	}
	err = errno;

	curthread->in_sigsuspend = in_sigsuspend;
	curthread->cancel_point = cancel_point;
	curthread->cancel_enable = cancel_enable;

	memcpy(&uc2, ucp, sizeof(uc2));
	SIGDELSET(uc2.uc_sigmask, SIGCANCEL);

	/* Reschedule cancellation. */
	check_cancel(curthread, &uc2);
	errno = err;
	syscall(SYS_sigreturn, &uc2);
}

void
_thr_ast(struct pthread *curthread)
{

	if (!THR_IN_CRITICAL(curthread)) {
		check_deferred_signal(curthread);
		check_suspend(curthread);
		check_cancel(curthread, NULL);
	}
}

/* Reschedule cancellation. */
static void
check_cancel(struct pthread *curthread, ucontext_t *ucp)
{

	if (__predict_true(!curthread->cancel_pending ||
	    !curthread->cancel_enable || curthread->no_cancel))
		return;

	/*
	 * Otherwise we are in deferred mode and at a cancellation point;
	 * tell the kernel not to block the current thread on the next
	 * cancelable system call.
	 *
	 * There are three cases in which thr_wake() should be called to
	 * turn on TDP_WAKEUP, or SIGCANCEL sent in the kernel:
	 * 1) We are about to call a cancelable system call; a non-zero
	 *    cancel_point means we are already in a cancelable state and
	 *    the next system call is cancelable.
	 * 2) _thr_ast() may be called by THR_CRITICAL_LEAVE(), which is
	 *    used by the rtld rwlock and libthr's internal locks; when
	 *    the rtld rwlock is taken, it is mostly because of an
	 *    unresolved PLT entry.  Those routines may clear the
	 *    TDP_WAKEUP flag by invoking system calls, in which case we
	 *    should also re-enable the flag.
	 * 3) The thread is in sigsuspend(), and the syscall insists on
	 *    getting a signal before it agrees to return.
	 */
	if (curthread->cancel_point) {
		if (curthread->in_sigsuspend && ucp) {
			SIGADDSET(ucp->uc_sigmask, SIGCANCEL);
			curthread->unblock_sigcancel = 1;
			_thr_send_sig(curthread, SIGCANCEL);
		} else
			thr_wake(curthread->tid);
	} else if (curthread->cancel_async) {
		/*
		 * Asynchronous cancellation mode: act on it
		 * immediately.
		 */
		_pthread_exit_mask(PTHREAD_CANCELED,
		    ucp ? &ucp->uc_sigmask : NULL);
	}
}

static void
check_deferred_signal(struct pthread *curthread)
{
	ucontext_t *uc;
	struct sigaction act;
	siginfo_t info;
	int uc_len;

	if (__predict_true(curthread->deferred_siginfo.si_signo == 0 ||
	    curthread->deferred_run))
		return;

	curthread->deferred_run = 1;
	uc_len = __getcontextx_size();
	uc = alloca(uc_len);
	getcontext(uc);
	if (curthread->deferred_siginfo.si_signo == 0) {
		curthread->deferred_run = 0;
		return;
	}
	__fillcontextx2((char *)uc);
	act = curthread->deferred_sigact;
	uc->uc_sigmask = curthread->deferred_sigmask;
	memcpy(&info, &curthread->deferred_siginfo, sizeof(siginfo_t));
	/* Remove the deferred signal. */
	curthread->deferred_siginfo.si_signo = 0;
	handle_signal(&act, info.si_signo, &info, uc);
}

static void
check_suspend(struct pthread *curthread)
{
	uint32_t cycle;

	if (__predict_true((curthread->flags &
	    (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED))
	    != THR_FLAGS_NEED_SUSPEND))
		return;
	if (curthread == _single_thread)
		return;
	if (curthread->force_exit)
		return;

	/*
	 * Block SIGCANCEL, which other threads must send to suspend us.
	 */
	_thr_signal_block(curthread);

	/*
	 * Increase critical_count.  THR_LOCK/UNLOCK is not used here
	 * because this is leaf code and must not call back into itself
	 * recursively.
	 */
	curthread->critical_count++;
	THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
	while ((curthread->flags & THR_FLAGS_NEED_SUSPEND) != 0) {
		curthread->cycle++;
		cycle = curthread->cycle;

		/* Wake the thread suspending us. */
		_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);

		/*
		 * If we got here from pthread_exit(), do not suspend;
		 * just go on and die.
		 */
		if (curthread->state == PS_DEAD)
			break;
		curthread->flags |= THR_FLAGS_SUSPENDED;
		THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
		_thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0);
		THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
	}
	THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
	curthread->critical_count--;

	_thr_signal_unblock(curthread);
}

void
_thr_signal_init(int dlopened)
{
	struct sigaction act, nact, oact;
	struct usigaction *usa;
	sigset_t oldset;
	int sig, error;

	if (dlopened) {
		__sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
		for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
			if (sig == SIGCANCEL)
				continue;
			error = __sys_sigaction(sig, NULL, &oact);
			if (error == -1 || oact.sa_handler == SIG_DFL ||
			    oact.sa_handler == SIG_IGN)
				continue;
			usa = __libc_sigaction_slot(sig);
			usa->sigact = oact;
			nact = oact;
			remove_thr_signals(&usa->sigact.sa_mask);
			nact.sa_flags &= ~SA_NODEFER;
			nact.sa_flags |= SA_SIGINFO;
			nact.sa_sigaction = thr_sighandler;
			nact.sa_mask = _thr_maskset;
			(void)__sys_sigaction(sig, &nact, NULL);
		}
		__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
	}

	/* Install the SIGCANCEL handler. */
	SIGFILLSET(act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler;
	__sys_sigaction(SIGCANCEL, &act, NULL);

	/* Unblock SIGCANCEL. */
	SIGEMPTYSET(act.sa_mask);
	SIGADDSET(act.sa_mask, SIGCANCEL);
	__sys_sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL);
}

void
_thr_sigact_unload(struct dl_phdr_info *phdr_info __unused)
{
#if 0
	struct pthread *curthread = _get_curthread();
	struct urwlock *rwlp;
	struct sigaction *actp;
	struct usigaction *usa;
	struct sigaction kact;
	void (*handler)(int);
	int sig;

	_thr_signal_block(curthread);
	for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
		usa = __libc_sigaction_slot(sig);
		actp = &usa->sigact;
retry:
		handler = actp->sa_handler;
		if (handler != SIG_DFL && handler != SIG_IGN &&
		    __elf_phdr_match_addr(phdr_info, handler)) {
			rwlp = &usa->lock;
			_thr_rwl_wrlock(rwlp);
			if (handler != actp->sa_handler) {
				_thr_rwl_unlock(rwlp);
				goto retry;
			}
			actp->sa_handler = SIG_DFL;
			actp->sa_flags = SA_SIGINFO;
			SIGEMPTYSET(actp->sa_mask);
			if (__sys_sigaction(sig, NULL, &kact) == 0 &&
			    kact.sa_handler != SIG_DFL &&
			    kact.sa_handler != SIG_IGN)
				__sys_sigaction(sig, actp, NULL);
			_thr_rwl_unlock(rwlp);
		}
	}
	_thr_signal_unblock(curthread);
#endif
}

void
_thr_signal_prefork(void)
{
	int i;

	for (i = 1; i <= _SIG_MAXSIG; ++i)
		_thr_rwl_rdlock(&__libc_sigaction_slot(i)->lock);
}

void
_thr_signal_postfork(void)
{
	int i;

	for (i = 1; i <= _SIG_MAXSIG; ++i)
		_thr_rwl_unlock(&__libc_sigaction_slot(i)->lock);
}

void
_thr_signal_postfork_child(void)
{
	int i;

	for (i = 1; i <= _SIG_MAXSIG; ++i) {
		bzero(&__libc_sigaction_slot(i)->lock,
		    sizeof(struct urwlock));
	}
}

void
_thr_signal_deinit(void)
{
}

int
__thr_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
{
	struct sigaction newact, oldact, oldact2;
	sigset_t oldset;
	struct usigaction *usa;
	int ret, err;

	if (!_SIG_VALID(sig) || sig == SIGCANCEL) {
		errno = EINVAL;
		return (-1);
	}

	ret = 0;
	err = 0;
	usa = __libc_sigaction_slot(sig);

	__sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
	_thr_rwl_wrlock(&usa->lock);

	if (act != NULL) {
		oldact2 = usa->sigact;
		newact = *act;

		/*
		 * If the new handler is SIG_DFL or SIG_IGN, do not remove
		 * the old handler from _thr_sigact[], so deferred signals
		 * can still use it.  Multiple threads invoking sigaction()
		 * concurrently is a race in the application itself, so it
		 * is not a problem here.
		 */
		if (newact.sa_handler != SIG_DFL &&
		    newact.sa_handler != SIG_IGN) {
			usa->sigact = newact;
			remove_thr_signals(&usa->sigact.sa_mask);
			newact.sa_flags &= ~SA_NODEFER;
			newact.sa_flags |= SA_SIGINFO;
			newact.sa_sigaction = thr_sighandler;
			newact.sa_mask = _thr_maskset; /* mask all signals */
		}
		ret = __sys_sigaction(sig, &newact, &oldact);
		if (ret == -1) {
			err = errno;
			usa->sigact = oldact2;
		}
	} else if (oact != NULL) {
		ret = __sys_sigaction(sig, NULL, &oldact);
		err = errno;
	}

	if (oldact.sa_handler != SIG_DFL && oldact.sa_handler != SIG_IGN) {
		if (act != NULL)
			oldact = oldact2;
		else if (oact != NULL)
			oldact = usa->sigact;
	}

	_thr_rwl_unlock(&usa->lock);
	__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);

	if (ret == 0) {
		if (oact != NULL)
			*oact = oldact;
	} else {
		errno = err;
	}
	return (ret);
}

int
__thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
	const sigset_t *p = set;
	sigset_t newset;

	if (how != SIG_UNBLOCK) {
		if (set != NULL) {
			newset = *set;
			SIGDELSET(newset, SIGCANCEL);
			p = &newset;
		}
	}
	return (__sys_sigprocmask(how, p, oset));
}

__weak_reference(_thr_sigmask, pthread_sigmask);
__weak_reference(_thr_sigmask, _pthread_sigmask);

int
_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{

	if (__thr_sigprocmask(how, set, oset))
		return (errno);
	return (0);
}

int
_sigsuspend(const sigset_t * set)
{
	sigset_t newset;

	return (__sys_sigsuspend(thr_remove_thr_signals(set, &newset)));
}

int
__thr_sigsuspend(const sigset_t * set)
{
	struct pthread *curthread;
	sigset_t newset;
	int ret, old;

	curthread = _get_curthread();

	old = curthread->in_sigsuspend;
	curthread->in_sigsuspend = 1;
	_thr_cancel_enter(curthread);
	ret = __sys_sigsuspend(thr_remove_thr_signals(set, &newset));
	_thr_cancel_leave(curthread, 1);
	curthread->in_sigsuspend = old;
	if (curthread->unblock_sigcancel) {
		curthread->unblock_sigcancel = 0;
		SIGEMPTYSET(newset);
		SIGADDSET(newset, SIGCANCEL);
		__sys_sigprocmask(SIG_UNBLOCK, &newset, NULL);
	}

	return (ret);
}

int
_sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec * timeout)
{
	sigset_t newset;

	return (__sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
	    timeout));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread received a
 *   signal, it is not canceled.
 */
int
__thr_sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec * timeout)
{
	struct pthread *curthread = _get_curthread();
	sigset_t newset;
	int ret;

	_thr_cancel_enter(curthread);
	ret = __sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
	    timeout);
	_thr_cancel_leave(curthread, (ret == -1));
	return (ret);
}

int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	sigset_t newset;

	return (__sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread received a
 *   signal, it is not canceled.
 */
746 */ 747 int 748 __thr_sigwaitinfo(const sigset_t *set, siginfo_t *info) 749 { 750 struct pthread *curthread = _get_curthread(); 751 sigset_t newset; 752 int ret; 753 754 _thr_cancel_enter(curthread); 755 ret = __sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info); 756 _thr_cancel_leave(curthread, ret == -1); 757 return (ret); 758 } 759 760 int 761 _sigwait(const sigset_t *set, int *sig) 762 { 763 sigset_t newset; 764 765 return (__sys_sigwait(thr_remove_thr_signals(set, &newset), sig)); 766 } 767 768 /* 769 * Cancellation behavior: 770 * Thread may be canceled at start, if thread got signal, 771 * it is not canceled. 772 */ 773 int 774 __thr_sigwait(const sigset_t *set, int *sig) 775 { 776 struct pthread *curthread = _get_curthread(); 777 sigset_t newset; 778 int ret; 779 780 do { 781 _thr_cancel_enter(curthread); 782 ret = __sys_sigwait(thr_remove_thr_signals(set, &newset), sig); 783 _thr_cancel_leave(curthread, (ret != 0)); 784 } while (ret == EINTR); 785 return (ret); 786 } 787 788 int 789 __thr_setcontext(const ucontext_t *ucp) 790 { 791 ucontext_t uc; 792 793 if (ucp == NULL) { 794 errno = EINVAL; 795 return (-1); 796 } 797 if (!SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL)) 798 return (__sys_setcontext(ucp)); 799 (void) memcpy(&uc, ucp, sizeof(uc)); 800 SIGDELSET(uc.uc_sigmask, SIGCANCEL); 801 return (__sys_setcontext(&uc)); 802 } 803 804 int 805 __thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp) 806 { 807 ucontext_t uc; 808 809 if (oucp == NULL || ucp == NULL) { 810 errno = EINVAL; 811 return (-1); 812 } 813 if (SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL)) { 814 (void) memcpy(&uc, ucp, sizeof(uc)); 815 SIGDELSET(uc.uc_sigmask, SIGCANCEL); 816 ucp = &uc; 817 } 818 return (__sys_swapcontext(oucp, ucp)); 819 } 820