/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/* MUST be contiguous */
k_sigset_t nullsmask = {0, 0};

k_sigset_t fillset = {FILLSET0, FILLSET1};

k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
			|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
			(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
			|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
			|sigmask(SIGJVM2))};

k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
			|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
			|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
			|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
			|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
			0};

static	int	isjobstop(int);
static	void	post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variables for counting the number of user thread stop requests
 * posted.  They may not be accurate in some special situations, such as
 * when a virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop requests
 * are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);

/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||			/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig)));	/* and signal not blocked */
}

/*
 * Return true if this thread is going to eat this signal soon.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (t->t_state == TS_SLEEP && (t->t_flag & T_WAKEABLE)) {
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL) {
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			aston(t);	/* make it do issig promptly */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}

/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (sig <= 0 || sig >= NSIG)
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
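		 * For example, if a SIGTSTP with default disposition
		 * arrives later (and the process group is not orphaned),
		 * the stopdefault branch below clears SSCONT again
		 * before the stop is processed.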
		 */
		p->p_flag |= SSCONT;
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 */
		int su = 0;

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig)) {
				thread_unlock(tt);
				break;
			}
			if (sig == SIGKILL && SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}

static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (u.u_signal[sig-1] == SIG_DFL && sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 * stop the process if a stop has been requested or if a traced signal
 * is pending.
 *
 * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 * a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}


static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 *	"Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When
	 * single-stepping a process, we don't want an intervening signal
	 * or TP_PAUSE request to suspend the current thread.  Otherwise,
	 * the controlling process will hang because we will be stopped
	 * with TS_PSTART set in t_schedflag.  We will trigger any
	 * remaining signals when we re-enter the kernel on the
	 * single step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (lwp->lwp_nostop == 0 &&
	    (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag &
	    (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    u.u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}

static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;
	int sigcld_found = 0;
	int nostop_break = 0;

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang because we
		 * will be stopped with TS_PSTART set in t_schedflag.  We
		 * will trigger any remaining signals when we re-enter the
		 * kernel on the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the rest of signals
		 * or requests.  Honor other stop requests or signals later.
		 * Go back to top of loop here to check if an exit or hold
		 * event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to top of loop to check if an exit
		 * or hold event has occurred while stopped.
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * Do not want to go back to loop here since this is a special
		 * stop that means: make incremental progress before the next
		 * stop.  The danger is that returning to top of loop would
		 * most likely drop the thread right back here to stop soon
		 * after it was continued, violating the incremental progress
		 * request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig)) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}

		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
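		 * Because the lwp queue is checked first, a signal
		 * directed to this lwp is promoted ahead of any
		 * process-wide signal, regardless of signal number.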
		 */
		for (;;) {
			k_sigset_t tsig;

			tsig = t->t_sig;
			if ((sig = fsig(&tsig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 0;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
	 * Don't do it if we are returning SIGCLD and the signal
	 * handler will be reset by psig(); this enables reliable
	 * delivery of SIGCLD even when using the old, broken
	 * signal() interface for setting the signal handler.
	 */
	if (sigcld_found &&
	    (sig != SIGCLD || !sigismember(&u.u_sigresethand, SIGCLD)))
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since the p_lock was dropped off
	 * above, then this promoted signal might have been handled
	 * already when we were on the way back from sleep queue, so
	 * just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number.  Also see comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}

/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
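 *
 * A hypothetical caller (sketch only; actual callers appear below and
 * elsewhere in the kernel) would hold p_lock across the check:
 *
 *	mutex_enter(&p->p_lock);
 *	if (jobstopped(p))
 *		... all lwps are stopped showing PR_JOBCONTROL ...
 *	mutex_exit(&p->p_lock);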
 */
int
jobstopped(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) == NULL)
		return (0);

	do {
		thread_lock(t);
		/* ignore current, zombie and suspended lwps in the test */
		if (!(t == curthread || t->t_state == TS_ZOMB ||
		    SUSPENDED(t)) &&
		    (t->t_state != TS_STOPPED ||
		    t->t_whystop != PR_JOBCONTROL)) {
			thread_unlock(t);
			return (0);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (1);
}

/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
	kthread_t	*t = curthread;
	proc_t		*p = ttoproc(t);
	klwp_t		*lwp = ttolwp(t);
	kthread_t	*tx;
	lwpent_t	*lep;
	int		procstop;
	int		flags = TS_ALLSTART;
	hrtime_t	stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop flag.
	 */
	if (lwp->lwp_nostop)
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
		 * then no debugger is present and we also do synchronous stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}
				/*
				 * force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
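				/*
				 * If all three start bits are still set,
				 * the lwp is neither ISTOPPED (TS_PSTART
				 * clear) nor SUSPENDED (TS_CSTART or
				 * TS_UNPAUSE clear); it is stopped only
				 * for jobcontrol, so the process is not
				 * yet wholly stopped for /proc purposes.
				 */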
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race
				 * condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock.  Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped
		 * state.  If we are stopping for a SUSPEND_PAUSE, we may be
		 * the last lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);			/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/* Interface for resetting the user thread stop count. */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request. */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped. */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_timedwait(&utstop_cv, &thread_stop_lock,
		    ticks + lbolt);
	mutex_exit(&thread_stop_lock);
}

/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = u.u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * Save the siginfo pointer here, in case the signal's
		 * reset bit is on.
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			if (sqp) {
				bcopy(&sqp->sq_info, &lwp->lwp_siginfo,
				    sizeof (k_siginfo_t));
				sip = &lwp->lwp_siginfo;
			} else if (sig == SIGPROF &&
			    t->t_rprof != NULL &&
			    t->t_rprof->rp_anystate &&
			    lwp->lwp_siginfo.si_signo == SIGPROF) {
				sip = &lwp->lwp_siginfo;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &u.u_sigmask[sig-1]);
		if (!sigismember(&u.u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&u.u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_start(sig);
#endif
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_finish(code);
#endif
		}
	}
	if (ext)
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);

	exit(code, sig);
}

/*
 * Find next unheld signal in ssp for thread t.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
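	 * For example, with SIGHUP (1) and SIGUSR1 (16) both pending
	 * and unheld, SIGHUP is returned first because lowbit() scans
	 * each word from the least significant bit upward.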
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}

void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	u.u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		sigdelset(&p->p_ignore, sig);
		u.u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&u.u_sigresethand, sig);
			else
				sigdelset(&u.u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&u.u_signodefer, sig);
		else
			sigdelset(&u.u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&u.u_sigrestart, sig);
		else
			sigdelset(&u.u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&u.u_sigonstack, sig);
		else
			sigdelset(&u.u_sigonstack, sig);

	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);

	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if (p->p_flag & SNOWAIT || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB)
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}

/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
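 *
 * For example, after execve() a previously caught SIGWINCH reverts to
 * SIG_DFL and, because SIGWINCH is in ignoredefault, it is added to
 * p_ignore below and any pending instance is discarded.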
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to the newstate list of the parent.
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    (PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN))
			freeproc(cp);
		else {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}

/*
 * Common code called from sigcld() and issig_forreal().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
	 * then just mark the child process so that its SIGCLD will
	 * be posted later, when the first SIGCLD is taken off the
	 * queue or when the parent is ready to receive it, if ever.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN ||
	    sigismember(&pp->p_sig, SIGCLD))
		cp->p_pidflag |= CLDPEND;
	else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}

/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	sigqueue_t *sqp;

	/*
	 * Don't bother if SIGCLD is not now being caught.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN)
		return;

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * Count the number of sigqueue_t structures sent by sigaddqa().
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}

/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
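 *
 * Typical use, as in issig_forreal() above (taken from this file):
 *
 *	sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);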
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}

/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}

/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
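 *
 * For example, a SI_QUEUE siginfo (from sigqueue()) is appended to the
 * queue when the process has requested siginfo for that signal, while
 * a SI_USER siginfo (from kill()) is queued to depth one: a second
 * instance of the same signal number is freed rather than appended.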
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid the situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig().  So we create a
	 * separate stash for SIGKILL's sigqueue_t.  Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else		/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}

/*
 * The function sigaddqa() is called with sigqueue already allocated.
 * If the signal is ignored, the siginfo is discarded, but SIGKILL and
 * signal-generation semantics are still guaranteed.
 * It is called from sigqueue() and other places.
 */
void
sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	int sig = sigqp->sq_info.si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	if (sig_discardable(p, sig))
		siginfofree(sigqp);
	else
		sigaddqins(p, t, sigqp);

	sigtoproc(p, t, sig);
}

/*
 * Allocate the sigqueue_t structure and call sigaddqins().
 */
void
sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
{
	sigqueue_t *sqp;
	int sig = infop->si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	/*
	 * If the signal will be discarded by sigtoproc() or
	 * if the process isn't requesting siginfo and it isn't
	 * blocking the signal (it *could* change its mind while
	 * the signal is pending) then don't bother creating one.
	 */
	if (!sig_discardable(p, sig) &&
	    (sigismember(&p->p_siginfo, sig) ||
	    (curproc->p_ct_process != p->p_ct_process) ||
	    (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
	    ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
		bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
		sqp->sq_func = NULL;
		sqp->sq_next = NULL;
		sigaddqins(p, t, sqp);
	}
	sigtoproc(p, t, sig);
}

/*
 * Handle stop-on-fault processing for the debugger.  Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
/*
 * Handle stop-on-fault processing for the debugger.  Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
 */
int
stop_on_fault(uint_t fault, k_siginfo_t *sip)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	ASSERT(prismember(&p->p_fltmask, fault));

	/*
	 * Record current fault and siginfo structure so debugger can
	 * find it.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_curflt = (uchar_t)fault;
	lwp->lwp_siginfo = *sip;

	stop(PR_FAULTED, fault);

	fault = lwp->lwp_curflt;
	lwp->lwp_curflt = 0;
	mutex_exit(&p->p_lock);
	return (fault);
}

void
sigorset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] |= s2->__sigbits[0];
	s1->__sigbits[1] |= s2->__sigbits[1];
}

void
sigandset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= s2->__sigbits[0];
	s1->__sigbits[1] &= s2->__sigbits[1];
}

void
sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= ~(s2->__sigbits[0]);
	s1->__sigbits[1] &= ~(s2->__sigbits[1]);
}

/*
 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
 * if there are any signals the thread might take on return from the kernel.
 * If k_sigset_t's were a single word, we would do:
 *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
 */
int
sigcheck(proc_t *p, kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	/*
	 * If signals are blocked via the schedctl interface
	 * then we only check for the unmaskable signals.
	 */
	if (tdp != NULL && tdp->sc_sigblock)
		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
		    CANTMASK0);

	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
	    ~t->t_hold.__sigbits[0]) |
	    (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
	    ~t->t_hold.__sigbits[1]) & FILLSET1));
}

/* ONC_PLUS EXTRACT START */
void
sigintr(k_sigset_t *smask, int intable)
{
	proc_t *p;
	int owned;
	k_sigset_t lmask;	/* local copy of cantmask */
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
	 * and SIGTERM, preserving the existing mask.
	 * This function supports the 'intr' nfs and ufs mount option.
	 */

	/*
	 * don't do kernel threads
	 */
	if (lwp == NULL)
		return;

	/*
	 * get access to signal mask
	 */
	p = ttoproc(curthread);
	owned = mutex_owned(&p->p_lock);	/* this is filthy */
	if (!owned)
		mutex_enter(&p->p_lock);

	/*
	 * remember the current mask
	 */
	schedctl_finish_sigblock(curthread);
	*smask = curthread->t_hold;

	/*
	 * mask out all signals
	 */
	sigfillset(&curthread->t_hold);

	/*
	 * Unmask the non-maskable signals (e.g., KILL), as long as
	 * they aren't already masked (which could happen at exit).
	 * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
	 * second sets the current hold mask to (~0 & ~lmask), which reduces
	 * to (~cantmask | curhold).
	 */
	lmask = cantmask;
	sigdiffset(&lmask, smask);
	sigdiffset(&curthread->t_hold, &lmask);

	/*
	 * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
	 * Re-enable INT if it was originally enabled and the NFS mount
	 * option nointr is not set.
	 */
	if (!sigismember(smask, SIGHUP))
		sigdelset(&curthread->t_hold, SIGHUP);
	if (!sigismember(smask, SIGINT) && intable)
		sigdelset(&curthread->t_hold, SIGINT);
	if (!sigismember(smask, SIGQUIT))
		sigdelset(&curthread->t_hold, SIGQUIT);
	if (!sigismember(smask, SIGTERM))
		sigdelset(&curthread->t_hold, SIGTERM);

	/*
	 * release access to signal mask
	 */
	if (!owned)
		mutex_exit(&p->p_lock);

	/*
	 * Indicate that this lwp is not to be stopped.
	 */
	lwp->lwp_nostop++;
}
/* ONC_PLUS EXTRACT END */
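/*
 * Illustrative sketch, not part of this file: the bracket a hypothetical
 * interruptible-mount wait loop (e.g. NFS with 'intr' set) would put
 * around a sleep, so only HUP/INT/QUIT/TERM can interrupt it.  The caller
 * must hold mp; cv_wait_sig() returns 0 when the sleep is interrupted by
 * a signal.  sigunintr() is defined just below.
 */
#ifdef SIG_EXAMPLES	/* hypothetical guard; examples are never compiled */
static int
example_interruptible_wait(kcondvar_t *cv, kmutex_t *mp, int intable)
{
	k_sigset_t smask;
	int interrupted;

	sigintr(&smask, intable);	/* hold all but HUP/INT/QUIT/TERM */
	interrupted = (cv_wait_sig(cv, mp) == 0);
	sigunintr(&smask);		/* restore the caller's mask */

	return (interrupted ? EINTR : 0);
}
#endif	/* SIG_EXAMPLES */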
void
sigunintr(k_sigset_t *smask)
{
	proc_t *p;
	int owned;
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Reset the previous mask (see sigintr() above).
	 */
	if (lwp != NULL) {
		lwp->lwp_nostop--;	/* restore lwp stoppability */
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		curthread->t_hold = *smask;
		/* so unmasked signals will be seen */
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

void
sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
{
	proc_t *p;
	int owned;

	/*
	 * Save the current signal mask in oldmask, then
	 * set it to newmask.
	 */
	if (ttolwp(curthread) != NULL) {
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(curthread);
		if (oldmask != NULL)
			*oldmask = curthread->t_hold;
		curthread->t_hold = *newmask;
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

/*
 * Return true if the signal number is in range
 * and the signal code specifies signal queueing.
 */
int
sigwillqueue(int sig, int code)
{
	if (sig >= 0 && sig < NSIG) {
		switch (code) {
		case SI_QUEUE:
		case SI_TIMER:
		case SI_ASYNCIO:
		case SI_MESGQ:
			return (1);
		}
	}
	return (0);
}

#ifndef UCHAR_MAX
#define	UCHAR_MAX	255
#endif

/*
 * The entire pool (with maxcount entries) is pre-allocated at
 * the first sigqueue/signotify call.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
	size_t i;
	sigqueue_t *sq, *next;
	sigqhdr_t *sqh;

	i = (maxcount * size) + sizeof (sigqhdr_t);
	ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
	sqh = kmem_alloc(i, KM_SLEEP);
	sqh->sqb_count = (uchar_t)maxcount;
	sqh->sqb_maxcount = (uchar_t)maxcount;
	sqh->sqb_size = (ushort_t)i;
	sqh->sqb_pexited = 0;
	sqh->sqb_sent = 0;
	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
	for (i = maxcount - 1; i != 0; i--) {
		next = (sigqueue_t *)((uintptr_t)sq + size);
		sq->sq_next = next;
		sq = next;
	}
	sq->sq_next = NULL;
	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
	return (sqh);
}

static void sigqrel(sigqueue_t *);
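/*
 * Illustrative sketch, not part of this file: how a per-process pool is
 * typically established on first use (the real callers live in the
 * sigqueue/signotify system call paths; the depth shown is hypothetical).
 * For maxcount entries of size bytes each, sigqhdralloc() allocates one
 * block of (maxcount * size) + sizeof (sigqhdr_t) bytes and threads the
 * entries onto sqb_free.
 */
#ifdef SIG_EXAMPLES	/* hypothetical guard; examples are never compiled */
static void
example_pool_setup(proc_t *p)
{
	if (p->p_sigqhdr == NULL)
		p->p_sigqhdr = sigqhdralloc(sizeof (sigqueue_t), 32);
}
#endif	/* SIG_EXAMPLES */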
/*
 * Allocate a sigqueue/signotify structure from the per-process
 * pre-allocated pool.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
	sigqueue_t *sq = NULL;

	ASSERT(MUTEX_HELD(&curproc->p_lock));

	if (sqh != NULL) {
		mutex_enter(&sqh->sqb_lock);
		if (sqh->sqb_count > 0) {
			sqh->sqb_count--;
			sq = sqh->sqb_free;
			sqh->sqb_free = sq->sq_next;
			mutex_exit(&sqh->sqb_lock);
			bzero(&sq->sq_info, sizeof (k_siginfo_t));
			sq->sq_backptr = sqh;
			sq->sq_func = sigqrel;
			sq->sq_next = NULL;
			sq->sq_external = 0;
		} else {
			mutex_exit(&sqh->sqb_lock);
		}
	}
	return (sq);
}

/*
 * Return a sigqueue structure to the pre-allocated pool.
 */
static void
sigqrel(sigqueue_t *sq)
{
	sigqhdr_t *sqh;

	/* make sure that p_lock of the affected process is held */

	sqh = (sigqhdr_t *)sq->sq_backptr;
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
		mutex_exit(&sqh->sqb_lock);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_count++;
		sqh->sqb_sent--;
		sq->sq_next = sqh->sqb_free;
		sq->sq_backptr = NULL;
		sqh->sqb_free = sq;
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up the pre-allocated sigqueue headers of the sigqueue pool
 * and the signotify pool, if possible.
 * Called only by the owning process during exec() and exit().
 */
void
sigqfree(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
		sigqhdrfree(p->p_sigqhdr);
		p->p_sigqhdr = NULL;
	}
	if (p->p_signhdr != NULL) {	/* signotify pool */
		sigqhdrfree(p->p_signhdr);
		p->p_signhdr = NULL;
	}
}

/*
 * Free up the pre-allocated header and sigq pool if possible.
 */
void
sigqhdrfree(sigqhdr_t *sqh)
{
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_sent == 0) {
		mutex_exit(&sqh->sqb_lock);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_pexited = 1;
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up a single sigqueue structure.
 * No other code should free a sigqueue directly.
 */
void
siginfofree(sigqueue_t *sqp)
{
	if (sqp != NULL) {
		if (sqp->sq_func != NULL)
			(sqp->sq_func)(sqp);
		else
			kmem_free(sqp, sizeof (sigqueue_t));
	}
}
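/*
 * Illustrative sketch, not part of this file: queueing a pooled entry to
 * the current process.  sigqalloc() sets sq_func to sigqrel(), so the
 * eventual siginfofree() returns the entry to the pool rather than to
 * kmem.  Real callers (the sigqueue() system call path) also maintain
 * sqb_sent and handle cross-process locking, both elided here.
 */
#ifdef SIG_EXAMPLES	/* hypothetical guard; examples are never compiled */
static int
example_queue_to_self(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	sigqueue_t *sqp;

	mutex_enter(&p->p_lock);
	if ((sqp = sigqalloc(p->p_sigqhdr)) == NULL) {
		mutex_exit(&p->p_lock);
		return (EAGAIN);	/* pool empty or not yet set up */
	}
	sqp->sq_info = *ip;
	sigaddqa(p, NULL, sqp);		/* the queue now owns sqp */
	mutex_exit(&p->p_lock);
	return (0);
}
#endif	/* SIG_EXAMPLES */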
/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp.  Called from trap().
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
	proc_t *p = ttoproc(curthread);
	int sig = ip->si_signo;
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	ASSERT(sig > 0 && sig < NSIG);

	if (curthread->t_dtrace_on)
		dtrace_safe_synchronous_signal();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
	/*
	 * Avoid a possible infinite loop if the lwp is holding the
	 * signal generated by a trap of a restartable instruction or
	 * if the signal so generated is being ignored by the process.
	 */
	if (restartable &&
	    (sigismember(&curthread->t_hold, sig) ||
	    p->p_user.u_signal[sig-1] == SIG_IGN)) {
		sigdelset(&curthread->t_hold, sig);
		p->p_user.u_signal[sig-1] = SIG_DFL;
		sigdelset(&p->p_ignore, sig);
	}
	bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
	sigaddqa(p, curthread, sqp);
	mutex_exit(&p->p_lock);
}

#ifdef _SYSCALL32_IMPL

/*
 * It's tricky to transmit a sigval between 32-bit and 64-bit
 * processes, since in the 64-bit world, a pointer and an integer
 * are different sizes.  Since we're constrained by the standards
 * world not to change the types, and it's unclear how useful it is
 * to send pointers between address spaces this way, we preserve
 * the 'int' interpretation for 32-bit processes interoperating
 * with 64-bit processes.  The full semantics (pointers or integers)
 * are available for N-bit processes interoperating with N-bit
 * processes.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int32_t)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}
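/*
 * Illustrative sketch, not part of this file: a round trip through the
 * conversion routines for a user-generated, queueable siginfo.  Only the
 * 'int' view of the sigval survives the 64->32 narrowing, per the note
 * above.  siginfo_32tok() is defined just below; this hypothetical
 * example is never compiled.
 */
#ifdef SIG_EXAMPLES
static void
example_sigval_round_trip(void)
{
	k_siginfo_t k, k2;
	siginfo32_t s32;

	bzero(&k, sizeof (k));
	k.si_signo = SIGUSR1;
	k.si_code = SI_QUEUE;		/* user-generated and queueable */
	k.si_value.sival_int = 42;

	siginfo_kto32(&k, &s32);
	siginfo_32tok(&s32, &k2);
	ASSERT(k2.si_value.sival_int == 42);
}
#endif	/* SIG_EXAMPLES */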
void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (void *)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (void *)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}

#endif	/* _SYSCALL32_IMPL */