/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <sys/bitmap.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/* MUST be contiguous */
k_sigset_t nullsmask = {0, 0};

k_sigset_t fillset = {FILLSET0, FILLSET1};

k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
			|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
			(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
			|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
			|sigmask(SIGJVM2))};

k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
			|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
			|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
			|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
			|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
			0};
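/*
 * Example (illustrative only, not part of the original file): these
 * set constants reduce per-signal policy checks to a single set
 * membership test.  For instance, "does this signal stop the process
 * by default?" is simply
 *
 *	if (sigismember(&stopdefault, sig))
 *		...
 *
 * which is exactly how the code below uses them.
 */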
static	int	isjobstop(int);
static	void	post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variables for counting the number of user thread stop
 * requests posted.  They may be inaccurate in some special situations,
 * such as when a virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop
 * requests are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);

/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||			/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig)));	/* and signal not blocked */
}

/*
 * Return true if this thread is going to eat this signal soon.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (t->t_state == TS_SLEEP && (t->t_flag & T_WAKEABLE)) {
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL) {
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			if ((t != curthread) && (t->t_cpu != CPU))
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}
/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (sig <= 0 || sig >= NSIG)
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
		 */
		p->p_flag |= SSCONT;
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 */
		int su = 0;

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig)) {
				thread_unlock(tt);
				break;
			}
			if (sig == SIGKILL && SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}
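/*
 * Illustrative sketch (not part of the original file): psignal() and
 * tsignal() above are thin wrappers over sigtoproc().  A hypothetical
 * caller that already holds p->p_lock would post a process-wide
 * signal and an lwp-directed one as
 *
 *	sigtoproc(p, NULL, SIGTERM);		process-directed
 *	sigtoproc(p, t, SIGUSR1);		lwp-directed
 *
 * noting that SIGKILL is always converted to a process-wide signal.
 */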
static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (u.u_signal[sig-1] == SIG_DFL && sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 * stop the process if a stop has been requested or if a traced signal
 * is pending.
 *
 * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 * a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}
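/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): a sleep primitive typically asks the cheap, lock-free
 * JUSTLOOKING question first, and only then confirms with FORREAL,
 * which takes and drops p->p_lock internally:
 *
 *	if (issig(JUSTLOOKING)) {
 *		if (issig(FORREAL))
 *			... abandon the wait and return EINTR ...
 *	}
 */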
static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 *	"Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When
	 * single-stepping a process, we don't want an intervening signal
	 * or TP_PAUSE request to suspend the current thread.  Otherwise,
	 * the controlling process will hang because we will be stopped
	 * with TS_PSTART set in t_schedflag.  We will trigger any
	 * remaining signals when we re-enter the kernel on the single
	 * step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (lwp->lwp_nostop == 0 &&
	    (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag &
	    (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    u.u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}
static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;
	int sigcld_found = 0;
	int nostop_break = 0;

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang because we
		 * will be stopped with TS_PSTART set in t_schedflag.  We
		 * will trigger any remaining signals when we re-enter the
		 * kernel on the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the rest of signals
		 * or requests.  Honor other stop requests or signals later.
		 * Go back to top of loop here to check if an exit or hold
		 * event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to top of loop to check if an exit
		 * or hold event has occurred while stopped.
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * Do not want to go back to loop here since this is a special
		 * stop that means: make incremental progress before the next
		 * stop.  The danger is that returning to top of loop would
		 * most likely drop the thread right back here to stop soon
		 * after it was continued, violating the incremental progress
		 * request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig)) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
		 */
		for (;;) {
			k_sigset_t tsig;

			tsig = t->t_sig;
			if ((sig = fsig(&tsig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 0;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
	 * Don't do it if we are returning SIGCLD and the signal
	 * handler will be reset by psig(); this enables reliable
	 * delivery of SIGCLD even when using the old, broken
	 * signal() interface for setting the signal handler.
	 */
	if (sigcld_found &&
	    (sig != SIGCLD || !sigismember(&u.u_sigresethand, SIGCLD)))
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since the p_lock was dropped off
	 * above, then this promoted signal might have been handled
	 * already when we were on the way back from sleep queue, so
	 * just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number.  Also see comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}
/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
 */
int
jobstopped(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) == NULL)
		return (0);

	do {
		thread_lock(t);
		/* ignore current, zombie and suspended lwps in the test */
		if (!(t == curthread || t->t_state == TS_ZOMB ||
		    SUSPENDED(t)) &&
		    (t->t_state != TS_STOPPED ||
		    t->t_whystop != PR_JOBCONTROL)) {
			thread_unlock(t);
			return (0);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (1);
}

/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
	kthread_t	*t = curthread;
	proc_t		*p = ttoproc(t);
	klwp_t		*lwp = ttolwp(t);
	kthread_t	*tx;
	lwpent_t	*lep;
	int		procstop;
	int		flags = TS_ALLSTART;
	hrtime_t	stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop flag.
	 */
	if (lwp->lwp_nostop)
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
		 * then no debugger is present and we also do synchronous
		 * stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}
				/*
				 * force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race
				 * condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock.  Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped
		 * state.  If we are stopping for a SUSPEND_PAUSE, we may be
		 * the last lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);			/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/* Interface for resetting user thread stop count. */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request. */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped. */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_timedwait(&utstop_cv, &thread_stop_lock,
		    ticks + lbolt);
	mutex_exit(&thread_stop_lock);
}
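/*
 * Illustrative usage sketch (hypothetical caller, e.g. a checkpoint
 * driver, not part of the original file): the counters above form a
 * simple barrier.  One stop request is registered per lwp, the
 * TP_CHKPT requests are posted, and the caller waits:
 *
 *	utstop_init();
 *	... for each target lwp: add_one_utstop(); ...
 *	... post TP_CHKPT stop requests ...
 *	utstop_timedwait(hz);		wait up to about one second
 *
 * Each lwp calls del_one_utstop() from stop() on the PR_CHECKPOINT
 * path, and the last one through broadcasts utstop_cv.
 */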
/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = u.u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * Save the siginfo pointer here, in case the
		 * signal's reset bit is on.
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			if (sqp) {
				bcopy(&sqp->sq_info, &lwp->lwp_siginfo,
				    sizeof (k_siginfo_t));
				sip = &lwp->lwp_siginfo;
			} else if (sig == SIGPROF &&
			    t->t_rprof != NULL &&
			    t->t_rprof->rp_anystate &&
			    lwp->lwp_siginfo.si_signo == SIGPROF) {
				sip = &lwp->lwp_siginfo;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &u.u_sigmask[sig-1]);
		if (!sigismember(&u.u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&u.u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_start(sig);
#endif
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_finish(code);
#endif
		}
	}
	if (ext)
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);

	exit(code, sig);
}

/*
 * Find next unheld signal in ssp for thread t.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}
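/*
 * Example (illustrative only, not part of the original file): with
 * SIGKILL, SIGPROF and SIGINT all pending and unheld, fsig() reports
 * SIGKILL; once that has been dequeued it reports SIGPROF, and only
 * then SIGINT, demonstrating the promotion order described above.
 */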
void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	u.u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		sigdelset(&p->p_ignore, sig);
		u.u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&u.u_sigresethand, sig);
			else
				sigdelset(&u.u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&u.u_signodefer, sig);
		else
			sigdelset(&u.u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&u.u_sigrestart, sig);
		else
			sigdelset(&u.u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&u.u_sigonstack, sig);
		else
			sigdelset(&u.u_sigonstack, sig);

	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);

	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if (p->p_flag & SNOWAIT || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB)
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}
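/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): a sigaction(2) implementation would funnel the user's
 * disposition, mask and flags into setsigact() roughly as
 *
 *	mutex_enter(&p->p_lock);
 *	setsigact(sig, handler, kmask, sa_flags);
 *	mutex_exit(&p->p_lock);
 *
 * with p->p_lock held, as setsigact() asserts, and with the user's
 * sigset_t already converted to a kernel k_sigset_t.
 */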
/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    (PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN))
			freeproc(cp);
		else {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}

/*
 * Common code called from sigcld() and issig_forreal().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
	 * then just mark the child process so that its SIGCLD will
	 * be posted later, when the first SIGCLD is taken off the
	 * queue or when the parent is ready to receive it, if ever.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN ||
	    sigismember(&pp->p_sig, SIGCLD))
		cp->p_pidflag |= CLDPEND;
	else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}
/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	sigqueue_t *sqp;

	/*
	 * Don't bother if SIGCLD is not now being caught.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN)
		return;

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * Count the number of sigqueue structures sent by sigaddqa().
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}
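/*
 * Illustrative sketch (hypothetical caller, cf. kill(2)/sigsend(2),
 * not part of the original file): a system call would fill in a
 * sigsend_t and a procset_t and let sigsendset()/sigsendproc() handle
 * permission checks and queueing:
 *
 *	procset_t set;
 *	sigsend_t v;
 *
 *	... fill in set (e.g. via setprocset()) ...
 *	bzero(&v, sizeof (v));
 *	v.sig = sig;
 *	v.checkperm = 1;
 *	v.sicode = SI_USER;
 *	error = sigsendset(&set, &v);
 */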
/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}

/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}
/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid a situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig().  So we create a
	 * separate stash for SIGKILL's sigqueue_t.  Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else		/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}

/*
 * The function sigaddqa() is called with sigqueue already allocated.
 * If signal is ignored, discard but guarantee KILL and generation semantics.
 * It is called from sigqueue() and other places.
 */
void
sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	int sig = sigqp->sq_info.si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	if (sig_discardable(p, sig))
		siginfofree(sigqp);
	else
		sigaddqins(p, t, sigqp);

	sigtoproc(p, t, sig);
}

/*
 * Allocate the sigqueue_t structure and call sigaddqins().
 */
void
sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
{
	sigqueue_t *sqp;
	int sig = infop->si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	/*
	 * If the signal will be discarded by sigtoproc() or
	 * if the process isn't requesting siginfo and it isn't
	 * blocking the signal (it *could* change its mind while
	 * the signal is pending) then don't bother creating one.
	 */
	if (!sig_discardable(p, sig) &&
	    (sigismember(&p->p_siginfo, sig) ||
	    (curproc->p_ct_process != p->p_ct_process) ||
	    (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
	    ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
		bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
		sqp->sq_func = NULL;
		sqp->sq_next = NULL;
		sigaddqins(p, t, sqp);
	}
	sigtoproc(p, t, sig);
}
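/*
 * Illustrative comparison (not part of the original file): callers
 * that already hold a preallocated sigqueue_t use sigaddqa(), while
 * callers with only a k_siginfo_t use sigaddq(), which allocates on
 * their behalf:
 *
 *	sigaddqa(p, NULL, sqp);			(preallocated entry)
 *	sigaddq(p, NULL, &info, KM_NOSLEEP);	(allocates internally)
 *
 * Both end by calling sigtoproc() to make the signal pending.
 */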

/*
 * Handle stop-on-fault processing for the debugger.  Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
 */
int
stop_on_fault(uint_t fault, k_siginfo_t *sip)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	ASSERT(prismember(&p->p_fltmask, fault));

	/*
	 * Record current fault and siginfo structure so debugger can
	 * find it.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_curflt = (uchar_t)fault;
	lwp->lwp_siginfo = *sip;

	stop(PR_FAULTED, fault);

	fault = lwp->lwp_curflt;
	lwp->lwp_curflt = 0;
	mutex_exit(&p->p_lock);
	return (fault);
}

void
sigorset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] |= s2->__sigbits[0];
	s1->__sigbits[1] |= s2->__sigbits[1];
}

void
sigandset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= s2->__sigbits[0];
	s1->__sigbits[1] &= s2->__sigbits[1];
}

void
sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= ~(s2->__sigbits[0]);
	s1->__sigbits[1] &= ~(s2->__sigbits[1]);
}

/*
 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
 * if there are any signals the thread might take on return from the kernel.
 * If k_sigset_t's were a single word, we would do:
 *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
 */
int
sigcheck(proc_t *p, kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	/*
	 * If signals are blocked via the schedctl interface
	 * then we only check for the unmaskable signals.
	 */
	if (tdp != NULL && tdp->sc_sigblock)
		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
		    CANTMASK0);

	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
	    ~t->t_hold.__sigbits[0]) |
	    (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
	    ~t->t_hold.__sigbits[1]) & FILLSET1));
}

/* ONC_PLUS EXTRACT START */
void
sigintr(k_sigset_t *smask, int intable)
{
	proc_t *p;
	int owned;
	k_sigset_t lmask;		/* local copy of cantmask */
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
	 * and SIGTERM, preserving the existing masks.
	 * This function supports the -intr nfs and ufs mount option.
	 */

	/*
	 * don't do kernel threads
	 */
	if (lwp == NULL)
		return;

	/*
	 * get access to signal mask
	 */
	p = ttoproc(curthread);
	owned = mutex_owned(&p->p_lock);	/* this is filthy */
	if (!owned)
		mutex_enter(&p->p_lock);

	/*
	 * remember the current mask
	 */
	schedctl_finish_sigblock(curthread);
	*smask = curthread->t_hold;

	/*
	 * mask out all signals
	 */
	sigfillset(&curthread->t_hold);

	/*
	 * Unmask the non-maskable signals (e.g., KILL), as long as
	 * they aren't already masked (which could happen at exit).
	 * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
	 * second sets the current hold mask to (~0 & ~lmask), which reduces
	 * to (~cantmask | curhold).
	 */
	lmask = cantmask;
	sigdiffset(&lmask, smask);
	sigdiffset(&curthread->t_hold, &lmask);

	/*
	 * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
	 * Re-enable INT if it was originally enabled and the NFS mount
	 * option nointr is not set.
	 */
	if (!sigismember(smask, SIGHUP))
		sigdelset(&curthread->t_hold, SIGHUP);
	if (!sigismember(smask, SIGINT) && intable)
		sigdelset(&curthread->t_hold, SIGINT);
	if (!sigismember(smask, SIGQUIT))
		sigdelset(&curthread->t_hold, SIGQUIT);
	if (!sigismember(smask, SIGTERM))
		sigdelset(&curthread->t_hold, SIGTERM);

	/*
	 * release access to signal mask
	 */
	if (!owned)
		mutex_exit(&p->p_lock);

	/*
	 * Indicate that this lwp is not to be stopped.
	 */
	lwp->lwp_nostop++;
}
/* ONC_PLUS EXTRACT END */
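
/*
 * Illustrative sketch (not part of the original source): the intended
 * pairing of sigintr() and sigunintr() around an interruptible wait,
 * as used by interruptible (-intr) NFS/UFS operations.  The example_
 * name and the wait itself are hypothetical; the sigunintr()
 * declaration is assumed to come from the included headers.
 */
static void
example_interruptible_wait(int intr_allowed)
{
	k_sigset_t smask;

	sigintr(&smask, intr_allowed);	/* block all but HUP/INT/QUIT/TERM */
	/* ... potentially long-running, signal-interruptible wait ... */
	sigunintr(&smask);		/* restore the caller's mask */
}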

void
sigunintr(k_sigset_t *smask)
{
	proc_t *p;
	int owned;
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Restore the previous mask (see sigintr() above).
	 */
	if (lwp != NULL) {
		lwp->lwp_nostop--;	/* restore lwp stoppability */
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		curthread->t_hold = *smask;
		/* so unmasked signals will be seen */
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

void
sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
{
	proc_t *p;
	int owned;

	/*
	 * Save current signal mask in oldmask, then
	 * set it to newmask.
	 */
	if (ttolwp(curthread) != NULL) {
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(curthread);
		if (oldmask != NULL)
			*oldmask = curthread->t_hold;
		curthread->t_hold = *newmask;
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

/*
 * Return true if the signal number is in range
 * and the signal code specifies signal queueing.
 */
int
sigwillqueue(int sig, int code)
{
	if (sig >= 0 && sig < NSIG) {
		switch (code) {
		case SI_QUEUE:
		case SI_TIMER:
		case SI_ASYNCIO:
		case SI_MESGQ:
			return (1);
		}
	}
	return (0);
}

#ifndef	UCHAR_MAX
#define	UCHAR_MAX	255
#endif

/*
 * The entire pool (with maxcount entries) is pre-allocated at
 * the first sigqueue/signotify call.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
	size_t i;
	sigqueue_t *sq, *next;
	sigqhdr_t *sqh;

	i = (maxcount * size) + sizeof (sigqhdr_t);
	ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
	sqh = kmem_alloc(i, KM_SLEEP);
	sqh->sqb_count = (uchar_t)maxcount;
	sqh->sqb_maxcount = (uchar_t)maxcount;
	sqh->sqb_size = (ushort_t)i;
	sqh->sqb_pexited = 0;
	sqh->sqb_sent = 0;
	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
	for (i = maxcount - 1; i != 0; i--) {
		next = (sigqueue_t *)((uintptr_t)sq + size);
		sq->sq_next = next;
		sq = next;
	}
	sq->sq_next = NULL;
	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
	return (sqh);
}

static void sigqrel(sigqueue_t *);
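
/*
 * Illustrative sketch (not part of the original source): the layout
 * built by sigqhdralloc() above, expressed as arithmetic.  A single
 * allocation holds the sigqhdr_t followed by maxcount entries of
 * 'size' bytes each, chained through sq_next into sqb_free; entry i
 * therefore starts at the offset computed below.  The example_ name
 * is hypothetical.
 */
static size_t
example_pool_entry_offset(size_t size, uint_t i)
{
	return (sizeof (sigqhdr_t) + (size_t)i * size);
}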

/*
 * Allocate a sigqueue/signotify structure from the per-process
 * pre-allocated pool.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
	sigqueue_t *sq = NULL;

	ASSERT(MUTEX_HELD(&curproc->p_lock));

	if (sqh != NULL) {
		mutex_enter(&sqh->sqb_lock);
		if (sqh->sqb_count > 0) {
			sqh->sqb_count--;
			sq = sqh->sqb_free;
			sqh->sqb_free = sq->sq_next;
			mutex_exit(&sqh->sqb_lock);
			bzero(&sq->sq_info, sizeof (k_siginfo_t));
			sq->sq_backptr = sqh;
			sq->sq_func = sigqrel;
			sq->sq_next = NULL;
			sq->sq_external = 0;
		} else {
			mutex_exit(&sqh->sqb_lock);
		}
	}
	return (sq);
}

/*
 * Return a sigqueue structure back to the pre-allocated pool.
 */
static void
sigqrel(sigqueue_t *sq)
{
	sigqhdr_t *sqh;

	/* make sure that p_lock of the affected process is held */

	sqh = (sigqhdr_t *)sq->sq_backptr;
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
		mutex_exit(&sqh->sqb_lock);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_count++;
		sqh->sqb_sent--;
		sq->sq_next = sqh->sqb_free;
		sq->sq_backptr = NULL;
		sqh->sqb_free = sq;
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up the pre-allocated sigqueue headers of sigqueue pool
 * and signotify pool, if possible.
 * Called only by the owning process during exec() and exit().
 */
void
sigqfree(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
		sigqhdrfree(p->p_sigqhdr);
		p->p_sigqhdr = NULL;
	}
	if (p->p_signhdr != NULL) {	/* signotify pool */
		sigqhdrfree(p->p_signhdr);
		p->p_signhdr = NULL;
	}
}

/*
 * Free up the pre-allocated header and sigq pool if possible.
 */
void
sigqhdrfree(sigqhdr_t *sqh)
{
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_sent == 0) {
		mutex_exit(&sqh->sqb_lock);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_pexited = 1;
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up a single sigqueue structure.
 * No other code should free a sigqueue directly.
 */
void
siginfofree(sigqueue_t *sqp)
{
	if (sqp != NULL) {
		if (sqp->sq_func != NULL)
			(sqp->sq_func)(sqp);
		else
			kmem_free(sqp, sizeof (sigqueue_t));
	}
}
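
/*
 * Illustrative sketch (not part of the original source): the teardown
 * protocol shared by sigqrel() and sigqhdrfree() above.  The header is
 * destroyed by whichever side finishes last: sigqhdrfree() frees it
 * immediately only if no entries are outstanding (sqb_sent == 0),
 * otherwise it sets sqb_pexited and the final sigqrel() (sqb_sent == 1)
 * performs the free.  The example_ name is hypothetical.
 */
static int
example_last_reference(int pexited, int sent_before_release)
{
	/* nonzero iff this release must destroy the pool header */
	return (pexited && sent_before_release == 1);
}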

/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp.  Called from trap().
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
	proc_t *p = ttoproc(curthread);
	int sig = ip->si_signo;
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	ASSERT(sig > 0 && sig < NSIG);

	if (curthread->t_dtrace_on)
		dtrace_safe_synchronous_signal();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
	/*
	 * Avoid a possible infinite loop if the lwp is holding the
	 * signal generated by a trap of a restartable instruction or
	 * if the signal so generated is being ignored by the process.
	 */
	if (restartable &&
	    (sigismember(&curthread->t_hold, sig) ||
	    p->p_user.u_signal[sig-1] == SIG_IGN)) {
		sigdelset(&curthread->t_hold, sig);
		p->p_user.u_signal[sig-1] = SIG_DFL;
		sigdelset(&p->p_ignore, sig);
	}
	bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
	sigaddqa(p, curthread, sqp);
	mutex_exit(&p->p_lock);
}

#ifdef _SYSCALL32_IMPL

/*
 * It's tricky to transmit a sigval between 32-bit and 64-bit
 * processes, since in the 64-bit world, a pointer and an integer
 * are different sizes.  Since we're constrained by the standards
 * world not to change the types, and it's unclear how useful it is
 * to send pointers between address spaces this way, we preserve
 * the 'int' interpretation for 32-bit processes interoperating
 * with 64-bit processes.  The full semantics (pointers or integers)
 * are available for N-bit processes interoperating with N-bit
 * processes.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int32_t)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}
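
/*
 * Illustrative sketch (not part of the original source): the hazard
 * behind preserving only the 'int' interpretation of a sigval above.
 * The (caddr32_t)(uintptr_t) narrowing used for si_addr/si_pc/si_faddr
 * is lossy whenever the 64-bit pointer does not fit in 32 bits; the
 * predicate below (the example_ name is hypothetical) tests whether a
 * given pointer value would survive the round trip.
 */
static int
example_ptr_fits_in32(const void *ptr)
{
	uintptr_t v = (uintptr_t)ptr;

	return (v == (uintptr_t)(caddr32_t)v);
}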

void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (void *)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (void *)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}

#endif	/* _SYSCALL32_IMPL */