/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <sys/bitmap.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/* MUST be contiguous */
k_sigset_t nullsmask = {0, 0};

k_sigset_t fillset = {FILLSET0, FILLSET1};

k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
			|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
			(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
			|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
			|sigmask(SIGJVM2))};

k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
			|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
			|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
			|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
			|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
			0};

static	int	isjobstop(int);
static	void	post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variables for counting the number of user thread stop requests
 * posted.  They may be inaccurate in special situations, such as when a
 * virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop requests
 * are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);

/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||			/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig)));	/* and signal not blocked */
}

/*
 * Return true if this thread is going to eat this signal soon.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (t->t_state == TS_SLEEP && (t->t_flag & T_WAKEABLE)) {
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL) {
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			aston(t);	/* make it do issig promptly */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}
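/*
 * Illustrative sketch (not part of the original file): how a caller that
 * does not already hold p->p_lock posts a signal using the wrappers above.
 * The target proc_t "p" and thread "t" are assumed to be held valid by the
 * caller.  psignal() and tsignal() take and drop p->p_lock internally;
 * sigtoproc() below must instead be called with p->p_lock already held.
 *
 *	psignal(p, SIGHUP);		process-directed delivery
 *	tsignal(t, SIGHUP);		thread-directed delivery
 */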
/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (sig <= 0 || sig >= NSIG)
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
		 */
		p->p_flag |= SSCONT;
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 */
		int su = 0;

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig)) {
				thread_unlock(tt);
				break;
			}
			if (sig == SIGKILL && SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}
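/*
 * Example (illustrative note, not original text): sigtoproc(p, NULL, sig)
 * posts sig to the process as a whole, and the loop above hands it to the
 * first lwp that eat_signal() says will field it promptly;
 * sigtoproc(p, t, sig) posts the same signal directly to thread t,
 * bypassing that search.  SIGKILL is always converted to a process-
 * directed signal, as the t = NULL assignment above shows.
 */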
static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (u.u_signal[sig-1] == SIG_DFL && sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 * stop the process if a stop has been requested or if a traced signal
 * is pending.
 *
 * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 * a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}


static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 * "Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When single-stepping
	 * a process, we don't want an intervening signal or TP_PAUSE request
	 * to suspend the current thread.  Otherwise, the controlling process
	 * will hang because we will be stopped with TS_PSTART set in
	 * t_schedflag.  We will trigger any remaining signals when we
	 * re-enter the kernel on the single step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (!lwp->lwp_nostop_r && ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag & TP_HOLDLWP))) ||
	    (!lwp->lwp_nostop && (p->p_stopsig | (t->t_proc_flag &
	    (TP_PRSTOP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    u.u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}

static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;
	int sigcld_found = 0;
	int nostop_break = 0;

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang because we
		 * will be stopped with TS_PSTART set in t_schedflag.  We
		 * will trigger any remaining signals when we re-enter the
		 * kernel on the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the rest of signals
		 * or requests.  Honor other stop requests or signals later.
		 * Go back to top of loop here to check if an exit or hold
		 * event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to top of loop to check if an exit
		 * or hold event has occurred while stopped.
		 * We explicitly allow this form of stopping of one
		 * lwp in a process by another lwp in the same process,
		 * even if lwp->lwp_nostop is set, because otherwise a
		 * process can become deadlocked on a fork1().
		 * Allow this only if lwp_nostop_r is not set,
		 * to avoid a recursive call to prstop().
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop_r) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * Do not want to go back to loop here since this is a special
		 * stop that means: make incremental progress before the next
		 * stop.  The danger is that returning to top of loop would
		 * most likely drop the thread right back here to stop soon
		 * after it was continued, violating the incremental progress
		 * request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig)) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}

		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
		 */
		for (;;) {
			k_sigset_t tsig;

			tsig = t->t_sig;
			if ((sig = fsig(&tsig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 0;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
	 * Don't do it if we are returning SIGCLD and the signal
	 * handler will be reset by psig(); this enables reliable
	 * delivery of SIGCLD even when using the old, broken
	 * signal() interface for setting the signal handler.
	 */
	if (sigcld_found &&
	    (sig != SIGCLD || !sigismember(&u.u_sigresethand, SIGCLD)))
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since the p_lock was dropped off
	 * above, then this promoted signal might have been handled
	 * already when we were on the way back from sleep queue, so
	 * just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number.  Also see comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}
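/*
 * Illustrative sketch (assumed caller, not part of this file): the usual
 * pattern on the way back to user level is to check cheaply first and
 * only then do the locked work, along the lines of:
 *
 *	if (issig(JUSTLOOKING) && issig(FORREAL))
 *		psig();
 *
 * issig(JUSTLOOKING) is answered from the pending masks without locks;
 * issig(FORREAL) takes p->p_lock and promotes the signal to lwp_cursig.
 */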
/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
 */
int
jobstopped(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) == NULL)
		return (0);

	do {
		thread_lock(t);
		/* ignore current, zombie and suspended lwps in the test */
		if (!(t == curthread || t->t_state == TS_ZOMB ||
		    SUSPENDED(t)) &&
		    (t->t_state != TS_STOPPED ||
		    t->t_whystop != PR_JOBCONTROL)) {
			thread_unlock(t);
			return (0);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (1);
}

/*
 * Put ourselves (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
	kthread_t	*t = curthread;
	proc_t		*p = ttoproc(t);
	klwp_t		*lwp = ttolwp(t);
	kthread_t	*tx;
	lwpent_t	*lep;
	int		procstop;
	int		flags = TS_ALLSTART;
	hrtime_t	stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop_r flag and increments lwp_nostop.
	 */
	if (lwp->lwp_nostop_r ||
	    (lwp->lwp_nostop &&
	    (why != PR_SUSPENDED || what != SUSPEND_NORMAL)))
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and the t->t_dtrace_stop flag is
		 * set, then no debugger is present and we also do a
		 * synchronous stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}
				/*
				 * force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race
				 * condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock.  Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped
		 * state.  If we are stopping for a SUSPEND_PAUSE, we may be
		 * the last lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);			/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/* Interface for resetting user thread stop count. */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_timedwait(&utstop_cv, &thread_stop_lock,
		    ticks + lbolt);
	mutex_exit(&thread_stop_lock);
}
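/*
 * Illustrative sketch (assumed caller, e.g. checkpoint/resume code; not
 * part of this file): the utstop interfaces above form a simple barrier.
 * A requester counts one stop request per target lwp and then waits:
 *
 *	utstop_init();
 *	for each target lwp:
 *		set TP_CHKPT on the lwp and call add_one_utstop();
 *	utstop_timedwait(hz);	-- "hz" is an assumed timeout value
 *
 * Each lwp that honors the request calls del_one_utstop() from
 * stop(PR_CHECKPOINT, 0); the last one to do so broadcasts utstop_cv.
 */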
/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = u.u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * save siginfo pointer here, in case the
		 * signal's reset bit is on
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			if (sqp) {
				bcopy(&sqp->sq_info, &lwp->lwp_siginfo,
				    sizeof (k_siginfo_t));
				sip = &lwp->lwp_siginfo;
			} else if (sig == SIGPROF &&
			    t->t_rprof != NULL &&
			    t->t_rprof->rp_anystate &&
			    lwp->lwp_siginfo.si_signo == SIGPROF) {
				sip = &lwp->lwp_siginfo;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &u.u_sigmask[sig-1]);
		if (!sigismember(&u.u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&u.u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_start(sig);
#endif
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_finish(code);
#endif
		}
	}
	if (ext)
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);

	exit(code, sig);
}

/*
 * Find next unheld signal in ssp for thread t.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}

void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	u.u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		sigdelset(&p->p_ignore, sig);
		u.u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&u.u_sigresethand, sig);
			else
				sigdelset(&u.u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&u.u_signodefer, sig);
		else
			sigdelset(&u.u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&u.u_sigrestart, sig);
		else
			sigdelset(&u.u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&u.u_sigonstack, sig);
		else
			sigdelset(&u.u_sigonstack, sig);

	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);

	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if (p->p_flag & SNOWAIT || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB)
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}
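/*
 * Illustrative sketch (assumed caller, not part of this file): a
 * sigaction()-style request reduces to a setsigact() call made under
 * p->p_lock, e.g. installing a restartable handler for SIGINT:
 *
 *	mutex_enter(&p->p_lock);
 *	setsigact(SIGINT, handler, mask, SA_RESTART);
 *	mutex_exit(&p->p_lock);
 *
 * "handler" and "mask" are placeholders supplied by the caller.
 */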
/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wake up a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    (PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN))
			freeproc(cp);
		else {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}

/*
 * Common code called from sigcld() and issig_forreal().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
	 * then just mark the child process so that its SIGCLD will
	 * be posted later, when the first SIGCLD is taken off the
	 * queue or when the parent is ready to receive it, if ever.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN ||
	    sigismember(&pp->p_sig, SIGCLD))
		cp->p_pidflag |= CLDPEND;
	else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}
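/*
 * Example (restating the logic above, not original text): when a child
 * job-stops, the last lwp to stop calls sigcld(cp, sqp) with p_wcode set
 * to CLD_STOPPED; post_sigcld() then either queues the siginfo to the
 * parent or sets CLDPEND so that sigcld_repost() below can deliver it
 * later, keeping at most one SIGCLD in flight per parent at a time.
 */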
/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	sigqueue_t *sqp;

	/*
	 * Don't bother if SIGCLD is not now being caught.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN)
		return;

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * Count the number of sigqueue structures sent by sigaddqa().
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}
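/*
 * Illustrative sketch (assumed caller, not part of this file): kill(2)-
 * style system calls funnel into sigsendset() with a procset describing
 * the targets and a sigsend_t describing the signal:
 *
 *	sigsend_t v;
 *	bzero(&v, sizeof (v));
 *	v.sig = SIGTERM;
 *	v.checkperm = 1;
 *	v.sicode = SI_USER;
 *	error = sigsendset(&set, &v);	-- "set" is built by the caller
 *
 * dotoprocs() applies sigsendproc() to each matching process; EPERM is
 * returned only if no target passed the permission checks.
 */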
/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}

/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}

/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid a situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig().  So we create a
	 * separate stash for SIGKILL's sigqueue_t.  Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else		/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}

/*
 * The function sigaddqa() is called with sigqueue already allocated.
 * If signal is ignored, discard but guarantee KILL and generation semantics.
 * It is called from sigqueue() and other places.
 */
void
sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	int sig = sigqp->sq_info.si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	if (sig_discardable(p, sig))
		siginfofree(sigqp);
	else
		sigaddqins(p, t, sigqp);

	sigtoproc(p, t, sig);
}

/*
 * Allocate the sigqueue_t structure and call sigaddqins().
 */
void
sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
{
	sigqueue_t *sqp;
	int sig = infop->si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	/*
	 * If the signal will be discarded by sigtoproc() or
	 * if the process isn't requesting siginfo and it isn't
	 * blocking the signal (it *could* change its mind while
	 * the signal is pending) then don't bother creating one.
	 */
	if (!sig_discardable(p, sig) &&
	    (sigismember(&p->p_siginfo, sig) ||
	    (curproc->p_ct_process != p->p_ct_process) ||
	    (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
	    ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
		bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
		sqp->sq_func = NULL;
		sqp->sq_next = NULL;
		sigaddqins(p, t, sqp);
	}
	sigtoproc(p, t, sig);
}
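/*
 * Illustrative sketch (assumed caller, mirroring the DTrace "raise"
 * handling in issig_forreal() above): kernel code that wants a signal
 * queued with siginfo builds a k_siginfo_t on the stack and lets
 * sigaddq() copy it:
 *
 *	k_siginfo_t info;
 *	bzero(&info, sizeof (info));
 *	info.si_signo = SIGUSR1;
 *	info.si_code = SI_QUEUE;
 *	mutex_enter(&p->p_lock);
 *	sigaddq(p, NULL, &info, KM_NOSLEEP);
 *	mutex_exit(&p->p_lock);
 */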
/*
 * Handle stop-on-fault processing for the debugger.  Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
 */
int
stop_on_fault(uint_t fault, k_siginfo_t *sip)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	ASSERT(prismember(&p->p_fltmask, fault));

	/*
	 * Record current fault and siginfo structure so debugger can
	 * find it.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_curflt = (uchar_t)fault;
	lwp->lwp_siginfo = *sip;

	stop(PR_FAULTED, fault);

	fault = lwp->lwp_curflt;
	lwp->lwp_curflt = 0;
	mutex_exit(&p->p_lock);
	return (fault);
}

void
sigorset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] |= s2->__sigbits[0];
	s1->__sigbits[1] |= s2->__sigbits[1];
}

void
sigandset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= s2->__sigbits[0];
	s1->__sigbits[1] &= s2->__sigbits[1];
}

void
sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= ~(s2->__sigbits[0]);
	s1->__sigbits[1] &= ~(s2->__sigbits[1]);
}

/*
 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
 * if there are any signals the thread might take on return from the kernel.
 * If k_sigset_t's were a single word, we would do:
 *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
 */
int
sigcheck(proc_t *p, kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	/*
	 * If signals are blocked via the schedctl interface
	 * then we only check for the unmaskable signals.
	 */
	if (tdp != NULL && tdp->sc_sigblock)
		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
		    CANTMASK0);

	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
	    ~t->t_hold.__sigbits[0]) |
	    (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
	    ~t->t_hold.__sigbits[1]) & FILLSET1));
}

/* ONC_PLUS EXTRACT START */
void
sigintr(k_sigset_t *smask, int intable)
{
	proc_t *p;
	int owned;
	k_sigset_t lmask;	/* local copy of cantmask */
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
	 * and SIGTERM (preserving the existing masks).
	 * This function supports the 'intr' nfs and ufs mount option.
	 */

	/*
	 * don't do kernel threads
	 */
	if (lwp == NULL)
		return;

	/*
	 * get access to signal mask
	 */
	p = ttoproc(curthread);
	owned = mutex_owned(&p->p_lock);	/* this is filthy */
	if (!owned)
		mutex_enter(&p->p_lock);

	/*
	 * remember the current mask
	 */
	schedctl_finish_sigblock(curthread);
	*smask = curthread->t_hold;

	/*
	 * mask out all signals
	 */
	sigfillset(&curthread->t_hold);

	/*
	 * Unmask the non-maskable signals (e.g., KILL), as long as
	 * they aren't already masked (which could happen at exit).
	 * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
	 * second sets the current hold mask to (~0 & ~lmask), which reduces
	 * to (~cantmask | curhold).
	 */
	lmask = cantmask;
	sigdiffset(&lmask, smask);
	sigdiffset(&curthread->t_hold, &lmask);
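	/*
	 * Worked example (illustrative, not from the original code): if
	 * the caller's old mask held only SIGUSR1, then lmask is just
	 * cantmask (SIGUSR1 is maskable), and t_hold ends up as
	 * fillset & ~cantmask -- every maskable signal held, including
	 * SIGUSR1, while the unmaskable signals (e.g. SIGKILL, SIGSTOP)
	 * remain unheld exactly as they were before the call.
	 */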
	/*
	 * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
	 * Re-enable INT if it was originally enabled and the NFS mount
	 * option nointr is not set.
	 */
	if (!sigismember(smask, SIGHUP))
		sigdelset(&curthread->t_hold, SIGHUP);
	if (!sigismember(smask, SIGINT) && intable)
		sigdelset(&curthread->t_hold, SIGINT);
	if (!sigismember(smask, SIGQUIT))
		sigdelset(&curthread->t_hold, SIGQUIT);
	if (!sigismember(smask, SIGTERM))
		sigdelset(&curthread->t_hold, SIGTERM);

	/*
	 * release access to signal mask
	 */
	if (!owned)
		mutex_exit(&p->p_lock);

	/*
	 * Indicate that this lwp is not to be stopped.
	 */
	lwp->lwp_nostop++;
}
/* ONC_PLUS EXTRACT END */

void
sigunintr(k_sigset_t *smask)
{
	proc_t *p;
	int owned;
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Reset previous mask (see sigintr() above).
	 */
	if (lwp != NULL) {
		lwp->lwp_nostop--;	/* restore lwp stoppability */
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		curthread->t_hold = *smask;
		/* so unmasked signals will be seen */
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

void
sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
{
	proc_t *p;
	int owned;

	/*
	 * Save current signal mask in oldmask, then
	 * set it to newmask.
	 */
	if (ttolwp(curthread) != NULL) {
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(curthread);
		if (oldmask != NULL)
			*oldmask = curthread->t_hold;
		curthread->t_hold = *newmask;
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

/*
 * Return true if the signal number is in range
 * and the signal code specifies signal queueing.
 */
int
sigwillqueue(int sig, int code)
{
	if (sig >= 0 && sig < NSIG) {
		switch (code) {
		case SI_QUEUE:
		case SI_TIMER:
		case SI_ASYNCIO:
		case SI_MESGQ:
			return (1);
		}
	}
	return (0);
}

#ifndef	UCHAR_MAX
#define	UCHAR_MAX	255
#endif

/*
 * The entire pool (with maxcount entries) is pre-allocated at
 * the first sigqueue/signotify call.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
	size_t i;
	sigqueue_t *sq, *next;
	sigqhdr_t *sqh;

	i = (maxcount * size) + sizeof (sigqhdr_t);
	ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
	sqh = kmem_alloc(i, KM_SLEEP);
	sqh->sqb_count = (uchar_t)maxcount;
	sqh->sqb_maxcount = (uchar_t)maxcount;
	sqh->sqb_size = (ushort_t)i;
	sqh->sqb_pexited = 0;
	sqh->sqb_sent = 0;
	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
	for (i = maxcount - 1; i != 0; i--) {
		next = (sigqueue_t *)((uintptr_t)sq + size);
		sq->sq_next = next;
		sq = next;
	}
	sq->sq_next = NULL;
	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
	return (sqh);
}

static void sigqrel(sigqueue_t *);
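/*
 * Layout sketch (illustrative, not from the original source):
 * sigqhdralloc() carves a single kmem buffer into a header followed
 * by maxcount fixed-size entries, threaded onto sqb_free:
 *
 *	[ sigqhdr_t | entry 0 | entry 1 | ... | entry maxcount-1 ]
 *
 * A typical call -- the pool-size constant shown here is our
 * assumption, chosen by the caller -- might look like:
 *
 *	p->p_sigqhdr = sigqhdralloc(sizeof (sigqueue_t), _SIGQUEUE_MAX);
 */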
/*
 * allocate a sigqueue/signotify structure from the per process
 * pre-allocated pool.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
	sigqueue_t *sq = NULL;

	ASSERT(MUTEX_HELD(&curproc->p_lock));

	if (sqh != NULL) {
		mutex_enter(&sqh->sqb_lock);
		if (sqh->sqb_count > 0) {
			sqh->sqb_count--;
			sq = sqh->sqb_free;
			sqh->sqb_free = sq->sq_next;
			mutex_exit(&sqh->sqb_lock);
			bzero(&sq->sq_info, sizeof (k_siginfo_t));
			sq->sq_backptr = sqh;
			sq->sq_func = sigqrel;
			sq->sq_next = NULL;
			sq->sq_external = 0;
		} else {
			mutex_exit(&sqh->sqb_lock);
		}
	}
	return (sq);
}

/*
 * Return a sigqueue structure back to the pre-allocated pool.
 */
static void
sigqrel(sigqueue_t *sq)
{
	sigqhdr_t *sqh;

	/* make sure that p_lock of the affected process is held */

	sqh = (sigqhdr_t *)sq->sq_backptr;
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
		mutex_exit(&sqh->sqb_lock);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_count++;
		sqh->sqb_sent--;
		sq->sq_next = sqh->sqb_free;
		sq->sq_backptr = NULL;
		sqh->sqb_free = sq;
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up the pre-allocated sigqueue headers of sigqueue pool
 * and signotify pool, if possible.
 * Called only by the owning process during exec() and exit().
 */
void
sigqfree(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
		sigqhdrfree(p->p_sigqhdr);
		p->p_sigqhdr = NULL;
	}
	if (p->p_signhdr != NULL) {	/* signotify pool */
		sigqhdrfree(p->p_signhdr);
		p->p_signhdr = NULL;
	}
}

/*
 * Free up the pre-allocated header and sigq pool if possible.
 */
void
sigqhdrfree(sigqhdr_t *sqh)
{
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_sent == 0) {
		mutex_exit(&sqh->sqb_lock);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_pexited = 1;
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up a single sigqueue structure.
 * No other code should free a sigqueue directly.
 */
void
siginfofree(sigqueue_t *sqp)
{
	if (sqp != NULL) {
		if (sqp->sq_func != NULL)
			(sqp->sq_func)(sqp);
		else
			kmem_free(sqp, sizeof (sigqueue_t));
	}
}
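/*
 * Explanatory note (not in the original source): sq_func acts as a
 * per-entry destructor.  Pool entries handed out by sigqalloc() carry
 * sq_func = sigqrel and are returned to their pool; entries allocated
 * directly by sigaddq() leave sq_func NULL and are simply kmem_free'd.
 * This is why siginfofree() is the only correct way to dispose of a
 * sigqueue_t.
 */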
/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp.  Called from trap().
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
	proc_t *p = ttoproc(curthread);
	int sig = ip->si_signo;
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	ASSERT(sig > 0 && sig < NSIG);

	if (curthread->t_dtrace_on)
		dtrace_safe_synchronous_signal();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
	/*
	 * Avoid a possible infinite loop if the lwp is holding the
	 * signal generated by a trap of a restartable instruction or
	 * if the signal so generated is being ignored by the process.
	 */
	if (restartable &&
	    (sigismember(&curthread->t_hold, sig) ||
	    p->p_user.u_signal[sig-1] == SIG_IGN)) {
		sigdelset(&curthread->t_hold, sig);
		p->p_user.u_signal[sig-1] = SIG_DFL;
		sigdelset(&p->p_ignore, sig);
	}
	bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
	sigaddqa(p, curthread, sqp);
	mutex_exit(&p->p_lock);
}

#ifdef _SYSCALL32_IMPL

/*
 * It's tricky to transmit a sigval between 32-bit and 64-bit
 * processes, since in the 64-bit world, a pointer and an integer
 * are different sizes.  Since we're constrained by the standards
 * world not to change the types, and it's unclear how useful it is
 * to send pointers between address spaces this way, we preserve
 * the 'int' interpretation for 32-bit processes interoperating
 * with 64-bit processes.  The full semantics (pointers or integers)
 * are available for N-bit processes interoperating with N-bit
 * processes.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int32_t)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}
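/*
 * Caveat (explanatory note, not in the original source): when a
 * 64-bit kernel siginfo is narrowed for a 32-bit consumer, the
 * pointer-valued fields above (si_addr, si_pc, si_faddr) are simply
 * truncated to their low 32 bits by the (caddr32_t)(uintptr_t)
 * casts, and a queued sival travels only as sival_int, per the
 * comment preceding siginfo_kto32().  The reverse conversion below
 * widens those fields back without recovering any lost bits.
 */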
void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (void *)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (void *)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}

#endif	/* _SYSCALL32_IMPL */