/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "namespace.h"
#include <sys/param.h>
#include <sys/auxv.h>
#include <sys/elf.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <signal.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "un-namespace.h"
#include "libc_private.h"
#include "thr_private.h"

/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
#define DBG_MSG		stdout_debug
#else
#define DBG_MSG(x...)
#endif
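
/*
 * libthr interposes its own wrapper (thr_sighandler) on every user
 * sigaction; the action the application actually installed is kept in
 * this table, one slot per signal, protected by a userland rwlock so
 * the wrapper can read the slot while __thr_sigaction() updates it.
 */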
struct usigaction {
        struct sigaction sigact;
        struct urwlock lock;
};

static struct usigaction _thr_sigact[_SIG_MAXSIG];

static inline struct usigaction *
__libc_sigaction_slot(int signo)
{

        return (&_thr_sigact[signo - 1]);
}

static void thr_sighandler(int, siginfo_t *, void *);
static void handle_signal(struct sigaction *, int, siginfo_t *, ucontext_t *);
static void check_deferred_signal(struct pthread *);
static void check_suspend(struct pthread *);
static void check_cancel(struct pthread *curthread, ucontext_t *ucp);

int _sigtimedwait(const sigset_t *set, siginfo_t *info,
    const struct timespec *timeout);
int _sigwaitinfo(const sigset_t *set, siginfo_t *info);
int _sigwait(const sigset_t *set, int *sig);
int _setcontext(const ucontext_t *);
int _swapcontext(ucontext_t *, const ucontext_t *);
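
/*
 * Signals in this set may be deferred while a thread is in a critical
 * region.  The synchronous signals (SIGBUS, SIGILL, SIGFPE, SIGSEGV,
 * SIGTRAP, SIGSYS) are excluded because they report a fault at the
 * current instruction and cannot meaningfully be postponed.
 */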
static const sigset_t _thr_deferset={{
        0xffffffff & ~(_SIG_BIT(SIGBUS)|_SIG_BIT(SIGILL)|_SIG_BIT(SIGFPE)|
        _SIG_BIT(SIGSEGV)|_SIG_BIT(SIGTRAP)|_SIG_BIT(SIGSYS)),
        0xffffffff,
        0xffffffff,
        0xffffffff}};

static const sigset_t _thr_maskset={{
        0xffffffff,
        0xffffffff,
        0xffffffff,
        0xffffffff}};

static void
thr_signal_block_slow(struct pthread *curthread)
{
        if (curthread->sigblock > 0) {
                curthread->sigblock++;
                return;
        }
        __sys_sigprocmask(SIG_BLOCK, &_thr_maskset, &curthread->sigmask);
        curthread->sigblock++;
}

static void
thr_signal_unblock_slow(struct pthread *curthread)
{
        if (--curthread->sigblock == 0)
                __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
}
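
/*
 * Fast-path signal blocking uses sigfastblock(2): the kernel is given a
 * pointer to the per-thread fsigblock word and defers signal delivery
 * while the count stored there is non-zero, so the common case is a
 * single atomic add/subtract with no system call.  If the kernel noted
 * a pending signal (SIGFASTBLOCK_PEND), the final unblock must report
 * it via SIGFASTBLOCK_UNBLOCK.
 */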
static void
thr_signal_block_fast(struct pthread *curthread)
{
        atomic_add_32(&curthread->fsigblock, SIGFASTBLOCK_INC);
}

static void
thr_signal_unblock_fast(struct pthread *curthread)
{
        uint32_t oldval;

        oldval = atomic_fetchadd_32(&curthread->fsigblock, -SIGFASTBLOCK_INC);
        if (oldval == (SIGFASTBLOCK_PEND | SIGFASTBLOCK_INC))
                __sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
}

static bool fast_sigblock;

void
_thr_signal_block(struct pthread *curthread)
{
        if (fast_sigblock)
                thr_signal_block_fast(curthread);
        else
                thr_signal_block_slow(curthread);
}

void
_thr_signal_unblock(struct pthread *curthread)
{
        if (fast_sigblock)
                thr_signal_unblock_fast(curthread);
        else
                thr_signal_unblock_slow(curthread);
}

void
_thr_signal_block_check_fast(void)
{
        int bsdflags, error;

        error = elf_aux_info(AT_BSDFLAGS, &bsdflags, sizeof(bsdflags));
        if (error != 0)
                return;
        fast_sigblock = (bsdflags & ELF_BSDF_SIGFASTBLK) != 0;
}

void
_thr_signal_block_setup(struct pthread *curthread)
{
        if (!fast_sigblock)
                return;
        __sys_sigfastblock(SIGFASTBLOCK_SETPTR, &curthread->fsigblock);
}

int
_thr_send_sig(struct pthread *thread, int sig)
{
        return thr_kill(thread->tid, sig);
}

static inline void
remove_thr_signals(sigset_t *set)
{
        if (SIGISMEMBER(*set, SIGCANCEL))
                SIGDELSET(*set, SIGCANCEL);
}

static const sigset_t *
thr_remove_thr_signals(const sigset_t *set, sigset_t *newset)
{
        *newset = *set;
        remove_thr_signals(newset);
        return (newset);
}
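
/*
 * Handler for SIGCANCEL, the signal libthr reserves for its own use.
 * Other threads send it both to request suspension and to interrupt a
 * blocked system call when canceling this thread.
 */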
static void
sigcancel_handler(int sig __unused, siginfo_t *info __unused, ucontext_t *ucp)
{
        struct pthread *curthread = _get_curthread();
        int err;

        if (THR_IN_CRITICAL(curthread))
                return;
        err = errno;
        check_suspend(curthread);
        check_cancel(curthread, ucp);
        errno = err;
}

typedef void (*ohandler)(int sig, int code, struct sigcontext *scp,
    char *addr, __sighandler_t *catcher);

/*
 * The signal handler wrapper is entered with all signals masked.
 */
static void
thr_sighandler(int sig, siginfo_t *info, void *_ucp)
{
        struct pthread *curthread;
        ucontext_t *ucp;
        struct sigaction act;
        struct usigaction *usa;
        int err;

        err = errno;
        curthread = _get_curthread();
        ucp = _ucp;
        usa = __libc_sigaction_slot(sig);
        _thr_rwl_rdlock(&usa->lock);
        act = usa->sigact;
        _thr_rwl_unlock(&usa->lock);
        errno = err;
        curthread->deferred_run = 0;

        /*
         * If the thread is in a critical region, for example it holds
         * low-level locks, try to defer the signal processing.  However,
         * if the signal is a synchronous signal, something has already
         * gone wrong: this is a programming error, and resuming at the
         * fault point cannot help (it normally just faults again in a
         * loop), so let the user code handle it immediately.
         */
        if (THR_IN_CRITICAL(curthread) && SIGISMEMBER(_thr_deferset, sig)) {
                memcpy(&curthread->deferred_sigact, &act, sizeof(struct sigaction));
                memcpy(&curthread->deferred_siginfo, info, sizeof(siginfo_t));
                curthread->deferred_sigmask = ucp->uc_sigmask;
                /* Mask all signals; we will restore the mask later. */
                ucp->uc_sigmask = _thr_deferset;
                return;
        }

        handle_signal(&act, sig, info, ucp);
}

static void
handle_signal(struct sigaction *actp, int sig, siginfo_t *info, ucontext_t *ucp)
{
        struct pthread *curthread = _get_curthread();
        __siginfohandler_t *sigfunc;
        int cancel_point;
        int cancel_async;
        int cancel_enable;
        int in_sigsuspend;
        int err;

        /* add previous level mask */
        SIGSETOR(actp->sa_mask, ucp->uc_sigmask);

        /* add this signal's mask */
        if (!(actp->sa_flags & SA_NODEFER))
                SIGADDSET(actp->sa_mask, sig);

        in_sigsuspend = curthread->in_sigsuspend;
        curthread->in_sigsuspend = 0;

        /*
         * If the thread is in deferred cancellation mode, disable
         * cancellation inside the signal handler.  If the user's handler
         * calls a cancellation point function, e.g. write(), the thread
         * would be canceled immediately while a cancel request is
         * pending.  To avoid that while the thread is in deferred mode,
         * cancellation is temporarily disabled.
         */
        cancel_point = curthread->cancel_point;
        cancel_async = curthread->cancel_async;
        cancel_enable = curthread->cancel_enable;
        curthread->cancel_point = 0;
        if (!cancel_async)
                curthread->cancel_enable = 0;

        /* restore correct mask before calling user handler */
        __sys_sigprocmask(SIG_SETMASK, &actp->sa_mask, NULL);

        sigfunc = actp->sa_sigaction;

        /*
         * We have already reset the cancellation point flags, so if the
         * user's code longjmp()s out of its signal handler, we hope its
         * jmpbuf was set outside of a cancellation point; in most cases
         * this is true.  However, there is no way to save cancel_enable
         * in the jmpbuf, so after setjmp() returns a second time the user
         * code may need to re-set the flag by calling
         * pthread_setcancelstate().
         */
        if ((actp->sa_flags & SA_SIGINFO) != 0) {
                sigfunc(sig, info, ucp);
        } else {
                ((ohandler)sigfunc)(sig, info->si_code,
                    (struct sigcontext *)ucp, info->si_addr,
                    (__sighandler_t *)sigfunc);
        }
        err = errno;

        curthread->in_sigsuspend = in_sigsuspend;
        curthread->cancel_point = cancel_point;
        curthread->cancel_enable = cancel_enable;

        SIGDELSET(ucp->uc_sigmask, SIGCANCEL);

        /* reschedule cancellation */
        check_cancel(curthread, ucp);
        errno = err;
}
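
/*
 * Run the pending "asynchronous" work for a thread: deferred signals,
 * suspension requests, and cancellation.  Called from places where the
 * thread is known to be at a safe point, e.g. when leaving a critical
 * region via THR_CRITICAL_LEAVE().
 */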
void
_thr_ast(struct pthread *curthread)
{

        if (!THR_IN_CRITICAL(curthread)) {
                check_deferred_signal(curthread);
                check_suspend(curthread);
                check_cancel(curthread, NULL);
        }
}

/* reschedule cancellation */
static void
check_cancel(struct pthread *curthread, ucontext_t *ucp)
{

        if (__predict_true(!curthread->cancel_pending ||
            !curthread->cancel_enable || curthread->no_cancel))
                return;

        /*
         * Otherwise, we are in deferred mode and at a cancellation
         * point, so tell the kernel not to block the current thread on
         * the next cancelable system call.
         *
         * There are three cases in which we should call thr_wake() to
         * turn on TDP_WAKEUP or send SIGCANCEL in the kernel:
         * 1) we are going to call a cancelable system call; a non-zero
         *    cancel_point means we are already in a cancelable state,
         *    and the next system call is cancelable.
         * 2) _thr_ast() may be called by THR_CRITICAL_LEAVE(), which is
         *    used by the rtld rwlock and libthr internal locks; the rtld
         *    rwlock is mostly taken for an unresolved PLT entry.  Those
         *    routines may clear the TDP_WAKEUP flag by invoking some
         *    system calls, in which case we should re-enable the flag.
         * 3) the thread is in sigsuspend(), and the syscall insists on
         *    getting a signal before it agrees to return.
         */
        if (curthread->cancel_point) {
                if (curthread->in_sigsuspend) {
                        if (ucp != NULL) {
                                SIGADDSET(ucp->uc_sigmask, SIGCANCEL);
                                curthread->unblock_sigcancel = 1;
                        }
                        _thr_send_sig(curthread, SIGCANCEL);
                } else
                        thr_wake(curthread->tid);
        } else if (curthread->cancel_async) {
                /*
                 * Asynchronous cancellation mode: act upon the request
                 * immediately.
                 */
                _pthread_exit_mask(PTHREAD_CANCELED, ucp != NULL ?
                    &ucp->uc_sigmask : NULL);
        }
}
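
/*
 * Replay a signal whose delivery was deferred by thr_sighandler().  A
 * full ucontext is rebuilt with getcontext()/__fillcontextx2() (the
 * latter fills in the remaining machine-dependent state), the saved
 * handler is run with the saved siginfo, and sigreturn(2) then restores
 * the original context and signal mask.
 */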
static void
check_deferred_signal(struct pthread *curthread)
{
        ucontext_t *uc;
        struct sigaction act;
        siginfo_t info;
        int uc_len;

        if (__predict_true(curthread->deferred_siginfo.si_signo == 0 ||
            curthread->deferred_run))
                return;

        curthread->deferred_run = 1;
        uc_len = __getcontextx_size();
        uc = alloca(uc_len);
        getcontext(uc);
        if (curthread->deferred_siginfo.si_signo == 0) {
                curthread->deferred_run = 0;
                return;
        }
        __fillcontextx2((char *)uc);
        act = curthread->deferred_sigact;
        uc->uc_sigmask = curthread->deferred_sigmask;
        memcpy(&info, &curthread->deferred_siginfo, sizeof(siginfo_t));
        /* remove signal */
        curthread->deferred_siginfo.si_signo = 0;
        handle_signal(&act, info.si_signo, &info, uc);
        syscall(SYS_sigreturn, uc);
}
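
/*
 * Park the thread if another thread has asked it to suspend (e.g. via
 * pthread_suspend_np()); the cycle counter and the umtx wait/wake pair
 * form the handshake with the suspending thread.
 */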
static void
check_suspend(struct pthread *curthread)
{
        uint32_t cycle;

        if (__predict_true((curthread->flags & (THR_FLAGS_NEED_SUSPEND |
            THR_FLAGS_SUSPENDED)) != THR_FLAGS_NEED_SUSPEND))
                return;
        if (curthread == _single_thread)
                return;
        if (curthread->force_exit)
                return;

        /*
         * Block SIGCANCEL, which other threads must send.
         */
        _thr_signal_block(curthread);

        /*
         * Increase critical_count; we do not use THR_LOCK/UNLOCK here
         * because this is leaf code and we do not want to call
         * ourselves recursively.
         */
        curthread->critical_count++;
        THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
        while ((curthread->flags & THR_FLAGS_NEED_SUSPEND) != 0) {
                curthread->cycle++;
                cycle = curthread->cycle;

                /* Wake the thread suspending us. */
                _thr_umtx_wake(&curthread->cycle, INT_MAX, 0);

                /*
                 * If we come from pthread_exit, we do not want to
                 * suspend; just go and die.
                 */
                if (curthread->state == PS_DEAD)
                        break;
                curthread->flags |= THR_FLAGS_SUSPENDED;
                THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
                _thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0);
                THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
        }
        THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
        curthread->critical_count--;

        _thr_signal_unblock(curthread);
}
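
/*
 * Install the SIGCANCEL handler.  If libthr is being loaded via
 * dlopen() into an already running, previously single-threaded process,
 * also re-register every handler the application had installed so that
 * it is now reached through the thr_sighandler() wrapper.
 */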
void
_thr_signal_init(int dlopened)
{
        struct sigaction act, nact, oact;
        struct usigaction *usa;
        sigset_t oldset;
        int sig, error;

        if (dlopened) {
                __sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
                for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
                        if (sig == SIGCANCEL)
                                continue;
                        error = __sys_sigaction(sig, NULL, &oact);
                        if (error == -1 || oact.sa_handler == SIG_DFL ||
                            oact.sa_handler == SIG_IGN)
                                continue;
                        usa = __libc_sigaction_slot(sig);
                        usa->sigact = oact;
                        nact = oact;
                        remove_thr_signals(&usa->sigact.sa_mask);
                        nact.sa_flags &= ~SA_NODEFER;
                        nact.sa_flags |= SA_SIGINFO;
                        nact.sa_sigaction = thr_sighandler;
                        nact.sa_mask = _thr_maskset;
                        (void)__sys_sigaction(sig, &nact, NULL);
                }
                __sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
        }

        /* Install SIGCANCEL handler. */
        SIGFILLSET(act.sa_mask);
        act.sa_flags = SA_SIGINFO;
        act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler;
        __sys_sigaction(SIGCANCEL, &act, NULL);

        /* Unblock SIGCANCEL */
        SIGEMPTYSET(act.sa_mask);
        SIGADDSET(act.sa_mask, SIGCANCEL);
        __sys_sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL);
}
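
/*
 * The body below is disabled: it would reset any handler pointing into
 * a DSO being unloaded back to SIG_DFL, so the process does not later
 * jump into unmapped text.
 */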
void
_thr_sigact_unload(struct dl_phdr_info *phdr_info __unused)
{
#if 0
        struct pthread *curthread = _get_curthread();
        struct urwlock *rwlp;
        struct sigaction *actp;
        struct usigaction *usa;
        struct sigaction kact;
        void (*handler)(int);
        int sig;

        _thr_signal_block(curthread);
        for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
                usa = __libc_sigaction_slot(sig);
                actp = &usa->sigact;
retry:
                handler = actp->sa_handler;
                if (handler != SIG_DFL && handler != SIG_IGN &&
                    __elf_phdr_match_addr(phdr_info, handler)) {
                        rwlp = &usa->lock;
                        _thr_rwl_wrlock(rwlp);
                        if (handler != actp->sa_handler) {
                                _thr_rwl_unlock(rwlp);
                                goto retry;
                        }
                        actp->sa_handler = SIG_DFL;
                        actp->sa_flags = SA_SIGINFO;
                        SIGEMPTYSET(actp->sa_mask);
                        if (__sys_sigaction(sig, NULL, &kact) == 0 &&
                            kact.sa_handler != SIG_DFL &&
                            kact.sa_handler != SIG_IGN)
                                __sys_sigaction(sig, actp, NULL);
                        _thr_rwl_unlock(rwlp);
                }
        }
        _thr_signal_unblock(curthread);
#endif
}
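
/*
 * Fork handlers: the parent read-locks every sigaction slot around
 * fork() so no slot is caught in a half-updated state, and the child
 * simply reinitializes the locks since it starts with a single thread.
 */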
void
_thr_signal_prefork(void)
{
        int i;

        for (i = 1; i <= _SIG_MAXSIG; ++i)
                _thr_rwl_rdlock(&__libc_sigaction_slot(i)->lock);
}

void
_thr_signal_postfork(void)
{
        int i;

        for (i = 1; i <= _SIG_MAXSIG; ++i)
                _thr_rwl_unlock(&__libc_sigaction_slot(i)->lock);
}

void
_thr_signal_postfork_child(void)
{
        int i;

        for (i = 1; i <= _SIG_MAXSIG; ++i) {
                bzero(&__libc_sigaction_slot(i)->lock,
                    sizeof(struct urwlock));
        }
}

void
_thr_signal_deinit(void)
{
}
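
/*
 * Interposed sigaction(): record the application's action in the
 * per-signal slot and install thr_sighandler() as the real kernel
 * handler, so libthr can defer signals and keep SIGCANCEL out of user
 * masks.
 */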
int
__thr_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
{
        struct sigaction newact, oldact, oldact2;
        sigset_t oldset;
        struct usigaction *usa;
        int ret, err;

        if (!_SIG_VALID(sig) || sig == SIGCANCEL) {
                errno = EINVAL;
                return (-1);
        }

        ret = 0;
        err = 0;
        usa = __libc_sigaction_slot(sig);

        __sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
        _thr_rwl_wrlock(&usa->lock);

        if (act != NULL) {
                oldact2 = usa->sigact;
                newact = *act;

                /*
                 * If the new handler is SIG_DFL or SIG_IGN, do not remove
                 * the old handler from the _thr_sigact[] slot, so deferred
                 * signals can still use it.  Multiple threads invoking
                 * sigaction() concurrently is a race condition anyway, so
                 * this is not a problem.
                 */
                if (newact.sa_handler != SIG_DFL &&
                    newact.sa_handler != SIG_IGN) {
                        usa->sigact = newact;
                        remove_thr_signals(&usa->sigact.sa_mask);
                        newact.sa_flags &= ~SA_NODEFER;
                        newact.sa_flags |= SA_SIGINFO;
                        newact.sa_sigaction = thr_sighandler;
                        newact.sa_mask = _thr_maskset; /* mask all signals */
                }
                ret = __sys_sigaction(sig, &newact, &oldact);
                if (ret == -1) {
                        err = errno;
                        usa->sigact = oldact2;
                }
        } else if (oact != NULL) {
                ret = __sys_sigaction(sig, NULL, &oldact);
                err = errno;
        }

        if (oldact.sa_handler != SIG_DFL && oldact.sa_handler != SIG_IGN) {
                if (act != NULL)
                        oldact = oldact2;
                else if (oact != NULL)
                        oldact = usa->sigact;
        }

        _thr_rwl_unlock(&usa->lock);
        __sys_sigprocmask(SIG_SETMASK, &oldset, NULL);

        if (ret == 0) {
                if (oact != NULL)
                        *oact = oldact;
        } else {
                errno = err;
        }
        return (ret);
}
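
/*
 * Interposed sigprocmask(): never let the application block SIGCANCEL.
 * SIG_UNBLOCK is left untouched since unblocking it is harmless.
 */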
int
__thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
        const sigset_t *p = set;
        sigset_t newset;

        if (how != SIG_UNBLOCK) {
                if (set != NULL) {
                        newset = *set;
                        SIGDELSET(newset, SIGCANCEL);
                        p = &newset;
                }
        }
        return (__sys_sigprocmask(how, p, oset));
}
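
/*
 * pthread_sigmask() reports failure by returning the error number
 * rather than by setting errno, hence the translation below.
 */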
__weak_reference(_thr_sigmask, pthread_sigmask);
__weak_reference(_thr_sigmask, _pthread_sigmask);

int
_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{

        if (__thr_sigprocmask(how, set, oset))
                return (errno);
        return (0);
}

int
_sigsuspend(const sigset_t *set)
{
        sigset_t newset;

        return (__sys_sigsuspend(thr_remove_thr_signals(set, &newset)));
}

int
__thr_sigsuspend(const sigset_t *set)
{
        struct pthread *curthread;
        sigset_t newset;
        int ret, old;

        curthread = _get_curthread();

        old = curthread->in_sigsuspend;
        curthread->in_sigsuspend = 1;
        _thr_cancel_enter(curthread);
        ret = __sys_sigsuspend(thr_remove_thr_signals(set, &newset));
        _thr_cancel_leave(curthread, 1);
        curthread->in_sigsuspend = old;
        if (curthread->unblock_sigcancel) {
                curthread->unblock_sigcancel = 0;
                SIGEMPTYSET(newset);
                SIGADDSET(newset, SIGCANCEL);
                __sys_sigprocmask(SIG_UNBLOCK, &newset, NULL);
        }

        return (ret);
}
int
_sigtimedwait(const sigset_t *set, siginfo_t *info,
    const struct timespec *timeout)
{
        sigset_t newset;

        return (__sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
            timeout));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread got a signal,
 *   it is not canceled.
 */
int
__thr_sigtimedwait(const sigset_t *set, siginfo_t *info,
    const struct timespec *timeout)
{
        struct pthread *curthread = _get_curthread();
        sigset_t newset;
        int ret;

        _thr_cancel_enter(curthread);
        ret = __sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
            timeout);
        _thr_cancel_leave(curthread, (ret == -1));
        return (ret);
}

int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
        sigset_t newset;

        return (__sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread got a signal,
 *   it is not canceled.
 */
int
__thr_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
        struct pthread *curthread = _get_curthread();
        sigset_t newset;
        int ret;

        _thr_cancel_enter(curthread);
        ret = __sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info);
        _thr_cancel_leave(curthread, ret == -1);
        return (ret);
}

int
_sigwait(const sigset_t *set, int *sig)
{
        sigset_t newset;

        return (__sys_sigwait(thr_remove_thr_signals(set, &newset), sig));
}

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start; if the thread got a signal,
 *   it is not canceled.
 */
int
__thr_sigwait(const sigset_t *set, int *sig)
{
        struct pthread *curthread = _get_curthread();
        sigset_t newset;
        int ret;

        do {
                _thr_cancel_enter(curthread);
                ret = __sys_sigwait(thr_remove_thr_signals(set, &newset), sig);
                _thr_cancel_leave(curthread, (ret != 0));
        } while (ret == EINTR);
        return (ret);
}
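
/*
 * Interposed setcontext()/swapcontext(): make sure a restored context
 * never leaves SIGCANCEL blocked, otherwise the thread could no longer
 * be canceled or suspended.
 */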
int
__thr_setcontext(const ucontext_t *ucp)
{
        ucontext_t uc;

        if (ucp == NULL) {
                errno = EINVAL;
                return (-1);
        }
        if (!SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL))
                return (__sys_setcontext(ucp));
        (void) memcpy(&uc, ucp, sizeof(uc));
        SIGDELSET(uc.uc_sigmask, SIGCANCEL);
        return (__sys_setcontext(&uc));
}

int
__thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
{
        ucontext_t uc;

        if (oucp == NULL || ucp == NULL) {
                errno = EINVAL;
                return (-1);
        }
        if (SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL)) {
                (void) memcpy(&uc, ucp, sizeof(uc));
                SIGDELSET(uc.uc_sigmask, SIGCANCEL);
                ucp = &uc;
        }
        return (__sys_swapcontext(oucp, ucp));
}