/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include <sys/feature_tests.h>
/*
 * setcontext() really can return, if UC_CPU is not specified.
 * Make the compiler shut up about it.
 */
#if defined(__NORETURN)
#undef  __NORETURN
#endif
#define __NORETURN
#include "thr_uberdata.h"
#include "asyncio.h"
#include <signal.h>
#include <siginfo.h>
#include <sys/systm.h>

/* maskable signals */
const sigset_t maskset = {MASKSET0, MASKSET1, MASKSET2, MASKSET3};

/*
 * Return true if the valid signal bits in both sets are the same.
 */
int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
        /*
         * We only test valid signal bits, not rubbish following MAXSIG
         * (for speed).  Algorithm:
         * if (s1 & fillset) == (s2 & fillset) then (s1 ^ s2) & fillset == 0
         */
        /* see lib/libc/inc/thr_uberdata.h for why this must be true */
#if (MAXSIG > (2 * 32) && MAXSIG <= (3 * 32))
        return (!((s1->__sigbits[0] ^ s2->__sigbits[0]) |
            (s1->__sigbits[1] ^ s2->__sigbits[1]) |
            ((s1->__sigbits[2] ^ s2->__sigbits[2]) & FILLSET2)));
#else
#error "fix me: MAXSIG out of bounds"
#endif
}

/*
 * Common code for calling the user-specified signal handler.
 */
void
call_user_handler(int sig, siginfo_t *sip, ucontext_t *ucp)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        struct sigaction uact;
        volatile struct sigaction *sap;

        /*
         * If we are taking a signal while parked or about to be parked
         * on __lwp_park() then remove ourself from the sleep queue so
         * that we can grab locks.  The code in mutex_lock_queue() and
         * cond_wait_common() will detect this and deal with it when
         * __lwp_park() returns.
         */
        unsleep_self();
        set_parking_flag(self, 0);

        if (__td_event_report(self, TD_CATCHSIG, udp)) {
                self->ul_td_evbuf.eventnum = TD_CATCHSIG;
                self->ul_td_evbuf.eventdata = (void *)(intptr_t)sig;
                tdb_event(TD_CATCHSIG, udp);
        }

        /*
         * Get a self-consistent set of flags, handler, and mask
         * while holding the sig's sig_lock for the least possible time.
         * We must acquire the sig's sig_lock because some thread running
         * in sigaction() might be establishing a new signal handler.
         * The code in sigaction() acquires the writer lock; here
         * we acquire the readers lock to enhance concurrency in the
         * face of heavy signal traffic, such as that generated by Java.
         *
         * Locking exceptions:
         * No locking for a child of vfork().
         * If the signal is SIGPROF with an si_code of PROF_SIG,
         * then we assume that this signal was generated by
         * setitimer(ITIMER_REALPROF) set up by the dbx collector.
         * If the signal is SIGEMT with an si_code of EMT_CPCOVF,
         * then we assume that the signal was generated by
         * a hardware performance counter overflow.
         * In these cases, assume that we need no locking.  It is the
         * monitoring program's responsibility to ensure correctness.
         */
        sap = &udp->siguaction[sig].sig_uaction;
        if (self->ul_vfork ||
            (sip != NULL &&
            ((sig == SIGPROF && sip->si_code == PROF_SIG) ||
            (sig == SIGEMT && sip->si_code == EMT_CPCOVF)))) {
                /* we wish this assignment could be atomic */
                (void) memcpy(&uact, (void *)sap, sizeof (uact));
        } else {
                rwlock_t *rwlp = &udp->siguaction[sig].sig_lock;
                lrw_rdlock(rwlp);
                (void) memcpy(&uact, (void *)sap, sizeof (uact));
                if ((sig == SIGCANCEL || sig == SIGAIOCANCEL) &&
                    (sap->sa_flags & SA_RESETHAND))
                        sap->sa_sigaction = SIG_DFL;
                lrw_unlock(rwlp);
        }

        /*
         * Set the proper signal mask and call the user's signal handler.
         * (We overrode the user-requested signal mask with maskset
         * so we currently have all blockable signals blocked.)
         *
         * We would like to ASSERT() that the signal is not a member of the
         * signal mask at the previous level (ucp->uc_sigmask) or the specified
         * signal mask for sigsuspend() or pollsys() (self->ul_tmpmask) but
         * /proc can override this via PCSSIG, so we don't bother.
         *
         * We would also like to ASSERT() that the signal mask at the previous
         * level equals self->ul_sigmask (maskset for sigsuspend() / pollsys()),
         * but /proc can change the thread's signal mask via PCSHOLD, so we
         * don't bother with that either.
         */
        ASSERT(ucp->uc_flags & UC_SIGMASK);
        if (self->ul_sigsuspend) {
                ucp->uc_sigmask = self->ul_sigmask;
                self->ul_sigsuspend = 0;
                /* the sigsuspend() or pollsys() signal mask */
                sigorset(&uact.sa_mask, &self->ul_tmpmask);
        } else {
                /* the signal mask at the previous level */
                sigorset(&uact.sa_mask, &ucp->uc_sigmask);
        }
        if (!(uact.sa_flags & SA_NODEFER))      /* add current signal */
                (void) sigaddset(&uact.sa_mask, sig);
        self->ul_sigmask = uact.sa_mask;
        self->ul_siglink = ucp;
        (void) __lwp_sigmask(SIG_SETMASK, &uact.sa_mask);

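        /*
         * Informal summary of the mask computed and installed above:
         * the handler runs with its sa_mask, plus the signal mask of
         * the interrupted context (or the sigsuspend()/pollsys() mask),
         * plus the signal itself unless SA_NODEFER was specified.
         */
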
        /*
         * If this thread has been sent SIGCANCEL from the kernel
         * or from pthread_cancel(), it is being asked to exit.
         * The kernel may send SIGCANCEL without a siginfo struct.
         * If the SIGCANCEL is process-directed (from kill() or
         * sigqueue()), treat it as an ordinary signal.
         */
        if (sig == SIGCANCEL) {
                if (sip == NULL || SI_FROMKERNEL(sip) ||
                    sip->si_code == SI_LWP) {
                        do_sigcancel();
                        goto out;
                }
                /* SIGCANCEL is ignored by default */
                if (uact.sa_sigaction == SIG_DFL ||
                    uact.sa_sigaction == SIG_IGN)
                        goto out;
        }

        /*
         * If this thread has been sent SIGAIOCANCEL (SIGLWP) and
         * we are an aio worker thread, cancel the aio request.
         */
        if (sig == SIGAIOCANCEL) {
                aio_worker_t *aiowp = pthread_getspecific(_aio_key);

                if (sip != NULL && sip->si_code == SI_LWP && aiowp != NULL)
                        siglongjmp(aiowp->work_jmp_buf, 1);
                /* SIGLWP is ignored by default */
                if (uact.sa_sigaction == SIG_DFL ||
                    uact.sa_sigaction == SIG_IGN)
                        goto out;
        }

        if (!(uact.sa_flags & SA_SIGINFO))
                sip = NULL;
        __sighndlr(sig, sip, ucp, uact.sa_sigaction);

#if defined(sparc) || defined(__sparc)
        /*
         * If this is a floating point exception and the queue
         * is non-empty, pop the top entry from the queue.  This
         * is to maintain expected behavior.
         */
        if (sig == SIGFPE && ucp->uc_mcontext.fpregs.fpu_qcnt) {
                fpregset_t *fp = &ucp->uc_mcontext.fpregs;

                if (--fp->fpu_qcnt > 0) {
                        unsigned char i;
                        struct _fq *fqp;

                        fqp = fp->fpu_q;
                        for (i = 0; i < fp->fpu_qcnt; i++)
                                fqp[i] = fqp[i+1];
                }
        }
#endif  /* sparc */

out:
        (void) setcontext(ucp);
        thr_panic("call_user_handler(): setcontext() returned");
}

/*
 * take_deferred_signal() is called when ul_critical and ul_sigdefer become
 * zero and a deferred signal has been recorded on the current thread.
 * We are out of the critical region and are ready to take a signal.
 * The kernel has all signals blocked on this lwp, but our value of
 * ul_sigmask is the correct signal mask for the previous context.
 *
 * We call __sigresend() to atomically restore the signal mask and
 * cause the signal to be sent again with the remembered siginfo.
 * We will not return successfully from __sigresend() until the
 * application's signal handler has been run via sigacthandler().
 */
void
take_deferred_signal(int sig)
{
        extern int __sigresend(int, siginfo_t *, sigset_t *);
        ulwp_t *self = curthread;
        siguaction_t *suap = &self->ul_uberdata->siguaction[sig];
        siginfo_t *sip;
        int error;

        ASSERT((self->ul_critical | self->ul_sigdefer | self->ul_cursig) == 0);

        /*
         * If the signal handler was established with SA_RESETHAND,
         * the kernel has reset the handler to SIG_DFL, so we have
         * to reestablish the handler now so that it will be entered
         * again when we call __sigresend(), below.
         *
         * Logically, we should acquire and release the signal's
         * sig_lock around this operation to protect the integrity
         * of the signal action while we copy it, as is done below
         * in sigaction().  However, we may be on a user-level
         * sleep queue at this point and lrw_wrlock(&suap->sig_lock)
         * might attempt to sleep on a different sleep queue and
         * that would corrupt the entire sleep queue mechanism.
         *
         * If we are on a sleep queue we will remove ourself from
         * it in call_user_handler(), called from sigacthandler(),
         * before entering the application's signal handler.
         * In the meantime, we must not acquire any locks.
         */
        if (suap->sig_uaction.sa_flags & SA_RESETHAND) {
                struct sigaction tact = suap->sig_uaction;
                tact.sa_flags &= ~SA_NODEFER;
                tact.sa_sigaction = self->ul_uberdata->sigacthandler;
                tact.sa_mask = maskset;
                (void) __sigaction(sig, &tact, NULL);
        }

        if (self->ul_siginfo.si_signo == 0)
                sip = NULL;
        else
                sip = &self->ul_siginfo;

        /* EAGAIN can happen only for a pending SIGSTOP signal */
        while ((error = __sigresend(sig, sip, &self->ul_sigmask)) == EAGAIN)
                continue;
        if (error)
                thr_panic("take_deferred_signal(): __sigresend() failed");
}

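/*
 * Descriptive summary of the deferral protocol (no new behavior):
 * sigacthandler(), below, records a signal that arrives while ul_critical
 * or ul_sigdefer is nonzero in ul_cursig/ul_siginfo and returns with all
 * signals blocked; when the thread finally leaves the critical region it
 * calls take_deferred_signal(), above, which uses __sigresend() to have
 * the kernel redeliver the signal along with the remembered siginfo.
 */
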
void
sigacthandler(int sig, siginfo_t *sip, void *uvp)
{
        ucontext_t *ucp = uvp;
        ulwp_t *self = curthread;

        /*
         * Do this in case we took a signal while in a cancelable system call.
         * It does no harm if we were not in such a system call.
         */
        self->ul_sp = 0;
        if (sig != SIGCANCEL)
                self->ul_cancel_async = self->ul_save_async;

        /*
         * If this thread has performed a longjmp() from a signal handler
         * back to main level some time in the past, it has left the kernel
         * thinking that it is still in the signal context.  We repair this
         * possible damage by setting ucp->uc_link to NULL if we know that
         * we are actually executing at main level (self->ul_siglink == NULL).
         * See the code for setjmp()/longjmp() for more details.
         */
        if (self->ul_siglink == NULL)
                ucp->uc_link = NULL;

        /*
         * If we are not in a critical region and are
         * not deferring signals, take the signal now.
         */
        if ((self->ul_critical + self->ul_sigdefer) == 0) {
                call_user_handler(sig, sip, ucp);
                /*
                 * On the surface, the following call seems redundant
                 * because call_user_handler() cannot return.  However,
                 * we don't want to return from here because the compiler
                 * might recycle our frame.  We want to keep it on the
                 * stack to assist debuggers such as pstack in identifying
                 * signal frames.  The call to thr_panic() serves to prevent
                 * tail-call optimisation here.
                 */
                thr_panic("sigacthandler(): call_user_handler() returned");
        }

        /*
         * We are in a critical region or we are deferring signals.  When
         * we emerge from the region we will call take_deferred_signal().
         */
        ASSERT(self->ul_cursig == 0);
        self->ul_cursig = (char)sig;
        if (sip != NULL)
                (void) memcpy(&self->ul_siginfo,
                    sip, sizeof (siginfo_t));
        else
                self->ul_siginfo.si_signo = 0;

        /*
         * Make sure that if we return to a call to __lwp_park()
         * or ___lwp_cond_wait() that it returns right away
         * (giving us a spurious wakeup but not a deadlock).
         */
        set_parking_flag(self, 0);

        /*
         * Return to the previous context with all signals blocked.
         * We will restore the signal mask in take_deferred_signal().
         * Note that we are calling the system call trap here, not
         * the setcontext() wrapper.  We don't want to change the
         * thread's ul_sigmask by this operation.
         */
        ucp->uc_sigmask = maskset;
        (void) __setcontext(ucp);
        thr_panic("sigacthandler(): __setcontext() returned");
}

#pragma weak _sigaction = sigaction
int
sigaction(int sig, const struct sigaction *nact, struct sigaction *oact)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        struct sigaction oaction;
        struct sigaction tact;
        struct sigaction *tactp = NULL;
        int rv;

        if (sig <= 0 || sig >= NSIG) {
                errno = EINVAL;
                return (-1);
        }

        if (!self->ul_vfork)
                lrw_wrlock(&udp->siguaction[sig].sig_lock);

        oaction = udp->siguaction[sig].sig_uaction;

        if (nact != NULL) {
                tact = *nact;   /* make a copy so we can modify it */
                tactp = &tact;
                delete_reserved_signals(&tact.sa_mask);

#if !defined(_LP64)
                tact.sa_resv[0] = tact.sa_resv[1] = 0;  /* cleanliness */
#endif
                /*
                 * To be compatible with the behavior of SunOS 4.x:
                 * If the new signal handler is SIG_IGN or SIG_DFL, do
                 * not change the signal's entry in the siguaction array.
                 * This allows a child of vfork(2) to set signal handlers
                 * to SIG_IGN or SIG_DFL without affecting the parent.
                 *
                 * This also covers a race condition with some thread
                 * setting the signal action to SIG_DFL or SIG_IGN
                 * when the thread has also received and deferred
                 * that signal.  When the thread takes the deferred
                 * signal, even though it has set the action to SIG_DFL
                 * or SIG_IGN, it will execute the old signal handler
                 * anyway.  This is an inherent signaling race condition
                 * and is not a bug.
                 *
                 * A child of vfork() is not allowed to change signal
                 * handlers to anything other than SIG_DFL or SIG_IGN.
                 */
                if (self->ul_vfork) {
                        if (tact.sa_sigaction != SIG_IGN)
                                tact.sa_sigaction = SIG_DFL;
                } else if (sig == SIGCANCEL || sig == SIGAIOCANCEL) {
                        /*
                         * Always catch these signals.
                         * We need SIGCANCEL for pthread_cancel() to work.
                         * We need SIGAIOCANCEL for aio_cancel() to work.
                         */
                        udp->siguaction[sig].sig_uaction = tact;
                        if (tact.sa_sigaction == SIG_DFL ||
                            tact.sa_sigaction == SIG_IGN)
                                tact.sa_flags = SA_SIGINFO;
                        else {
                                tact.sa_flags |= SA_SIGINFO;
                                tact.sa_flags &=
                                    ~(SA_NODEFER | SA_RESETHAND | SA_RESTART);
                        }
                        tact.sa_sigaction = udp->sigacthandler;
                        tact.sa_mask = maskset;
                } else if (tact.sa_sigaction != SIG_DFL &&
                    tact.sa_sigaction != SIG_IGN) {
                        udp->siguaction[sig].sig_uaction = tact;
                        tact.sa_flags &= ~SA_NODEFER;
                        tact.sa_sigaction = udp->sigacthandler;
                        tact.sa_mask = maskset;
                }
        }

        if ((rv = __sigaction(sig, tactp, oact)) != 0)
                udp->siguaction[sig].sig_uaction = oaction;
        else if (oact != NULL &&
            oact->sa_sigaction != SIG_DFL &&
            oact->sa_sigaction != SIG_IGN)
                *oact = oaction;

        /*
         * We detect setting the disposition of SIGIO just to set the
         * _sigio_enabled flag for the asynchronous i/o (aio) code.
         */
        if (sig == SIGIO && rv == 0 && tactp != NULL) {
                _sigio_enabled =
                    (tactp->sa_handler != SIG_DFL &&
                    tactp->sa_handler != SIG_IGN);
        }

        if (!self->ul_vfork)
                lrw_unlock(&udp->siguaction[sig].sig_lock);
        return (rv);
}

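/*
 * Illustrative (hypothetical) application code showing what the
 * interposition above accomplishes.  The application believes it has
 * installed my_handler() directly, but the kernel actually dispatches
 * to udp->sigacthandler (sigacthandler()), which eventually calls
 * my_handler() via call_user_handler().  my_handler and SIGUSR1 are
 * example names only:
 *
 *      static void
 *      my_handler(int sig, siginfo_t *sip, void *ucp)
 *      {
 *              ...
 *      }
 *
 *      struct sigaction sa;
 *      (void) memset(&sa, 0, sizeof (sa));
 *      sa.sa_sigaction = my_handler;
 *      sa.sa_flags = SA_SIGINFO;
 *      (void) sigemptyset(&sa.sa_mask);
 *      (void) sigaction(SIGUSR1, &sa, NULL);
 */
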
/*
 * This is a private interface for the Linux brand support.
 */
void
setsigacthandler(void (*nsigacthandler)(int, siginfo_t *, void *),
    void (**osigacthandler)(int, siginfo_t *, void *))
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;

        if (osigacthandler != NULL)
                *osigacthandler = udp->sigacthandler;

        udp->sigacthandler = nsigacthandler;
}

/*
 * Tell the kernel to block all signals.
 * Use the schedctl interface, or failing that, use __lwp_sigmask().
 * This action can be rescinded only by making a system call that
 * sets the signal mask:
 *      __lwp_sigmask(), __sigprocmask(), __setcontext(),
 *      __sigsuspend() or __pollsys().
 * In particular, this action cannot be reversed by assigning
 * scp->sc_sigblock = 0.  That would be a way to lose signals.
 * See the definition of restore_signals(self).
 */
void
block_all_signals(ulwp_t *self)
{
        volatile sc_shared_t *scp;

        enter_critical(self);
        if ((scp = self->ul_schedctl) != NULL ||
            (scp = setup_schedctl()) != NULL)
                scp->sc_sigblock = 1;
        else
                (void) __lwp_sigmask(SIG_SETMASK, &maskset);
        exit_critical(self);
}

/*
 * setcontext() has code that forcibly restores the curthread
 * pointer in a context passed to the setcontext(2) syscall.
 *
 * Certain processes may need to disable this feature, so these routines
 * provide the mechanism to do so.
 *
 * (As an example, branded 32-bit x86 processes may use %gs for their own
 * purposes, so they need to be able to specify a %gs value to be restored
 * on return from a signal handler via the passed ucontext_t.)
 */
static int setcontext_enforcement = 1;

void
set_setcontext_enforcement(int on)
{
        setcontext_enforcement = on;
}

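/*
 * Illustrative (hypothetical) use of the escape hatch above: a branded
 * process that manages %gs for its own purposes might call
 *
 *      set_setcontext_enforcement(0);
 *
 * early in its initialization so that setcontext(), below, does not
 * overwrite the segment-register (curthread) slot in the caller's
 * ucontext_t.
 */
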
#pragma weak _setcontext = setcontext
int
setcontext(const ucontext_t *ucp)
{
        ulwp_t *self = curthread;
        int ret;
        ucontext_t uc;

        /*
         * Returning from the main context (uc_link == NULL) causes
         * the thread to exit.  See setcontext(2) and makecontext(3C).
         */
        if (ucp == NULL)
                thr_exit(NULL);
        (void) memcpy(&uc, ucp, sizeof (uc));

        /*
         * Restore previous signal mask and context link.
         */
        if (uc.uc_flags & UC_SIGMASK) {
                block_all_signals(self);
                delete_reserved_signals(&uc.uc_sigmask);
                self->ul_sigmask = uc.uc_sigmask;
                if (self->ul_cursig) {
                        /*
                         * We have a deferred signal present.
                         * The signal mask will be set when the
                         * signal is taken in take_deferred_signal().
                         */
                        ASSERT(self->ul_critical + self->ul_sigdefer != 0);
                        uc.uc_flags &= ~UC_SIGMASK;
                }
        }
        self->ul_siglink = uc.uc_link;

        /*
         * We don't know where this context structure has been.
         * Preserve the curthread pointer, at least.
         *
         * Allow this feature to be disabled if a particular process
         * requests it.
         */
        if (setcontext_enforcement) {
#if defined(__sparc)
                uc.uc_mcontext.gregs[REG_G7] = (greg_t)self;
#elif defined(__amd64)
                uc.uc_mcontext.gregs[REG_FS] = (greg_t)0; /* null for fsbase */
#elif defined(__i386)
                uc.uc_mcontext.gregs[GS] = (greg_t)LWPGS_SEL;
#else
#error "none of __sparc, __amd64, __i386 defined"
#endif
        }

        /*
         * Make sure that if we return to a call to __lwp_park()
         * or ___lwp_cond_wait() that it returns right away
         * (giving us a spurious wakeup but not a deadlock).
         */
        set_parking_flag(self, 0);
        self->ul_sp = 0;
        ret = __setcontext(&uc);

        /*
         * It is OK for setcontext() to return if the user has not specified
         * UC_CPU.
         */
        if (uc.uc_flags & UC_CPU)
                thr_panic("setcontext(): __setcontext() returned");
        return (ret);
}

#pragma weak _thr_sigsetmask = thr_sigsetmask
int
thr_sigsetmask(int how, const sigset_t *set, sigset_t *oset)
{
        ulwp_t *self = curthread;
        sigset_t saveset;

        if (set == NULL) {
                enter_critical(self);
                if (oset != NULL)
                        *oset = self->ul_sigmask;
                exit_critical(self);
        } else {
                switch (how) {
                case SIG_BLOCK:
                case SIG_UNBLOCK:
                case SIG_SETMASK:
                        break;
                default:
                        return (EINVAL);
                }

                /*
                 * The assignments to self->ul_sigmask must be protected from
                 * signals.  The nuances of this code are subtle.  Be careful.
                 */
                block_all_signals(self);
                if (oset != NULL)
                        saveset = self->ul_sigmask;
                switch (how) {
                case SIG_BLOCK:
                        self->ul_sigmask.__sigbits[0] |= set->__sigbits[0];
                        self->ul_sigmask.__sigbits[1] |= set->__sigbits[1];
                        self->ul_sigmask.__sigbits[2] |= set->__sigbits[2];
                        self->ul_sigmask.__sigbits[3] |= set->__sigbits[3];
                        break;
                case SIG_UNBLOCK:
                        self->ul_sigmask.__sigbits[0] &= ~set->__sigbits[0];
                        self->ul_sigmask.__sigbits[1] &= ~set->__sigbits[1];
                        self->ul_sigmask.__sigbits[2] &= ~set->__sigbits[2];
                        self->ul_sigmask.__sigbits[3] &= ~set->__sigbits[3];
                        break;
                case SIG_SETMASK:
                        self->ul_sigmask.__sigbits[0] = set->__sigbits[0];
                        self->ul_sigmask.__sigbits[1] = set->__sigbits[1];
                        self->ul_sigmask.__sigbits[2] = set->__sigbits[2];
                        self->ul_sigmask.__sigbits[3] = set->__sigbits[3];
                        break;
                }
                delete_reserved_signals(&self->ul_sigmask);
                if (oset != NULL)
                        *oset = saveset;
                restore_signals(self);
        }

        return (0);
}

#pragma weak _pthread_sigmask = pthread_sigmask
int
pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
        return (thr_sigsetmask(how, set, oset));
}

#pragma weak _sigprocmask = sigprocmask
int
sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
        int error;

        /*
         * Guard against children of vfork().
         */
        if (curthread->ul_vfork)
                return (__sigprocmask(how, set, oset));

        if ((error = thr_sigsetmask(how, set, oset)) != 0) {
                errno = error;
                return (-1);
        }

        return (0);
}

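/*
 * Illustrative (hypothetical) application usage of the interfaces above:
 * block SIGUSR1 around a critical region in one thread, then restore the
 * previous mask.  SIGUSR1 is an example only; as the code above shows,
 * reserved signals are removed from the resulting thread mask by
 * delete_reserved_signals():
 *
 *      sigset_t newmask, oldmask;
 *      (void) sigemptyset(&newmask);
 *      (void) sigaddset(&newmask, SIGUSR1);
 *      (void) pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
 *      ... critical region ...
 *      (void) pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 */
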
/*
 * Called at library initialization to set up signal handling.
 * All we really do is initialize the sig_lock rwlocks.
 * All signal handlers are either SIG_DFL or SIG_IGN on exec().
 * However, if any signal handlers were established on alternate
 * link maps before the primary link map has been initialized,
 * then inform the kernel of the new sigacthandler.
 */
void
signal_init()
{
        uberdata_t *udp = curthread->ul_uberdata;
        struct sigaction *sap;
        struct sigaction act;
        rwlock_t *rwlp;
        int sig;

        for (sig = 0; sig < NSIG; sig++) {
                rwlp = &udp->siguaction[sig].sig_lock;
                rwlp->rwlock_magic = RWL_MAGIC;
                rwlp->mutex.mutex_flag = LOCK_INITED;
                rwlp->mutex.mutex_magic = MUTEX_MAGIC;
                sap = &udp->siguaction[sig].sig_uaction;
                if (sap->sa_sigaction != SIG_DFL &&
                    sap->sa_sigaction != SIG_IGN &&
                    __sigaction(sig, NULL, &act) == 0 &&
                    act.sa_sigaction != SIG_DFL &&
                    act.sa_sigaction != SIG_IGN) {
                        act = *sap;
                        act.sa_flags &= ~SA_NODEFER;
                        act.sa_sigaction = udp->sigacthandler;
                        act.sa_mask = maskset;
                        (void) __sigaction(sig, &act, NULL);
                }
        }
}

/*
 * Common code for cancelling self in _sigcancel() and pthread_cancel().
 * First record the fact that a cancellation is pending.
 * Then, if cancellation is disabled or if we are holding unprotected
 * libc locks, just return to defer the cancellation.
 * Then, if we are at a cancellation point (ul_cancelable) just
 * return and let _canceloff() do the exit.
 * Else exit immediately if async mode is in effect.
 */
void
do_sigcancel(void)
{
        ulwp_t *self = curthread;

        ASSERT(self->ul_critical == 0);
        ASSERT(self->ul_sigdefer == 0);
        self->ul_cancel_pending = 1;
        if (self->ul_cancel_async &&
            !self->ul_cancel_disabled &&
            self->ul_libc_locks == 0 &&
            !self->ul_cancelable)
                pthread_exit(PTHREAD_CANCELED);
        set_cancel_pending_flag(self, 0);
}

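/*
 * Descriptive note on the normal cancellation path through this file:
 * pthread_cancel() causes SIGCANCEL to be sent to the target thread;
 * the kernel enters sigacthandler(), which calls call_user_handler(),
 * which recognizes the thread-directed SIGCANCEL and calls
 * do_sigcancel(), above, to honor or defer the cancellation.
 */
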
/*
 * Set up the SIGCANCEL handler for thread cancellation,
 * needed only when we have more than one thread,
 * or the SIGAIOCANCEL handler for aio cancellation,
 * called when aio is initialized, in __uaio_init().
 */
void
setup_cancelsig(int sig)
{
        uberdata_t *udp = curthread->ul_uberdata;
        rwlock_t *rwlp = &udp->siguaction[sig].sig_lock;
        struct sigaction act;

        ASSERT(sig == SIGCANCEL || sig == SIGAIOCANCEL);
        lrw_rdlock(rwlp);
        act = udp->siguaction[sig].sig_uaction;
        lrw_unlock(rwlp);
        if (act.sa_sigaction == SIG_DFL ||
            act.sa_sigaction == SIG_IGN)
                act.sa_flags = SA_SIGINFO;
        else {
                act.sa_flags |= SA_SIGINFO;
                act.sa_flags &= ~(SA_NODEFER | SA_RESETHAND | SA_RESTART);
        }
        act.sa_sigaction = udp->sigacthandler;
        act.sa_mask = maskset;
        (void) __sigaction(sig, &act, NULL);
}