/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <sys/bitmap.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/cyclic.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/* MUST be contiguous */
k_sigset_t nullsmask = {0, 0};

k_sigset_t fillset = {FILLSET0, FILLSET1};

k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
			|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
			(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
			|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
			|sigmask(SIGJVM2))};

k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
			|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
			|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
			|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
			|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
			0};

static	int	isjobstop(int);
static	void	post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variables for counting the number of user thread stop
 * requests posted.  They may not be accurate in special situations,
 * such as when a virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop
 * requests are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);
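/*
 * A k_sigset_t is a pair of 32-bit words; signal n occupies bit
 * ((n - 1) & 31) of word ((n - 1) >> 5).  For illustration only (the
 * code below uses the sigismember()/sigmask() macros), a membership
 * test such as sigismember(&stopdefault, SIGTSTP) reduces to:
 *
 *	(stopdefault.__sigbits[(SIGTSTP - 1) >> 5] &
 *	    (1U << ((SIGTSTP - 1) & 31))) != 0
 */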
/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 *	the signal is not being accepted via sigwait()
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||		/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig) &&	/* and signal not blocked */
	    !sigismember(&t->t_sigwait, sig)));	/* and not being accepted */
}

/*
 * Return true if this thread is going to eat this signal soon.
 * Note that, if the signal is SIGKILL, we force stopped threads to be
 * set running (to make SIGKILL be a sure kill), but only if the process
 * is not currently locked by /proc (the P_PR_LOCK flag).  Code in /proc
 * relies on the fact that a process will not change shape while P_PR_LOCK
 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
 * ensure that the process is not locked by /proc, but prbarrier() drops
 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
		    !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			aston(t);	/* make it do issig promptly */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}
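/*
 * A note on locking, for illustration: psignal() and tsignal() above
 * are convenience wrappers for callers that do not yet hold p->p_lock;
 * callers that already hold it (e.g. sigaddqa() below) call sigtoproc()
 * directly.
 */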
/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/* System processes don't get signals */
	if (sig <= 0 || sig >= NSIG || (p->p_flag & SSYS))
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
		 */
		p->p_flag |= SSCONT;
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 * For SIGKILL make all of the lwps see the signal;
		 * This is needed to guarantee a sure kill for processes
		 * with a mix of realtime and non-realtime threads.
		 */
		int su = 0;

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig) && sig != SIGKILL) {
				thread_unlock(tt);
				break;
			}
			if (SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
		    !(p->p_proc_flag & P_PR_LOCK)) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}
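/*
 * For illustration: an undirected, ignored signal sent to a
 * single-threaded process that is neither traced nor blocking it
 * (e.g. SIGWINCH with SIG_DFL disposition) is dropped right at
 * generation by the sig_discardable() test in sigtoproc() above and
 * is never queued.
 */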
static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
	    sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 * stop the process if a stop has been requested or if a traced signal
 * is pending.
 *
 * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 * a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}
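/*
 * For illustration (a sketch, not the exact caller): cv_wait_sig() and
 * friends use the lockless JUSTLOOKING form after placing the thread
 * on a sleep queue, roughly
 *	if (ISSIG(curthread, JUSTLOOKING))
 *		setrun(curthread);
 * and make the authoritative FORREAL check after waking up.
 */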
static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 * "Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When single-stepping
	 * a process, we don't want an intervening signal or TP_PAUSE request
	 * to suspend the current thread.  Otherwise, the controlling process
	 * will hang because we will be stopped with TS_PSTART set in
	 * t_schedflag.  We will trigger any remaining signals when we
	 * re-enter the kernel on the single step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (lwp->lwp_nostop == 0 &&
	    (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag &
	    (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    sigismember(&t->t_sigwait, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    PTOU(p)->u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}

static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;
	int sigcld_found = 0;
	int nostop_break = 0;

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			t->t_sig_check = 1;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang because we
		 * will be stopped with TS_PSTART set in t_schedflag.  We
		 * will trigger any remaining signals when we re-enter the
		 * kernel on the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the rest of signals
		 * or requests.  Honor other stop requests or signals later.
		 * Go back to top of loop here to check if an exit or hold
		 * event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to top of loop to check if an exit
		 * or hold event has occurred while stopped.
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * Do not want to go back to loop here since this is a special
		 * stop that means: make incremental progress before the next
		 * stop.  The danger is that returning to top of loop would
		 * most likely drop the thread right back here to stop soon
		 * after it was continued, violating the incremental progress
		 * request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 *
		 * If the current thread is accepting the signal
		 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
		 * we allow the signal to be accepted, even if it is
		 * being ignored, and without causing a job control stop.
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (sigismember(&t->t_sigwait, sig) ||
			    (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig))) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}

		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
		 */
		for (;;) {
			if ((sig = fsig(&t->t_sig, t)) != 0) {
				toproc = 0;
				if (tracing(p, sig) ||
				    sigismember(&t->t_sigwait, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    sigismember(&t->t_sigwait, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued from the process's signal queue,
	 * search for other pending SIGCLD's from the list of children.
	 */
	if (sigcld_found)
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since the p_lock was dropped off
	 * above, then this promoted signal might have been handled
	 * already when we were on the way back from sleep queue, so
	 * just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number.  Also see comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}
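/*
 * For illustration: the canonical consumer of issig()/psig() is the
 * return-to-userland path which (as the comment at psig() below also
 * notes) does, in essence:
 *	if (ISSIG(curthread, FORREAL))
 *		psig();
 */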
/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
 */
int
jobstopped(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) == NULL)
		return (0);

	do {
		thread_lock(t);
		/* ignore current, zombie and suspended lwps in the test */
		if (!(t == curthread || t->t_state == TS_ZOMB ||
		    SUSPENDED(t)) &&
		    (t->t_state != TS_STOPPED ||
		    t->t_whystop != PR_JOBCONTROL)) {
			thread_unlock(t);
			return (0);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (1);
}

/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
	kthread_t	*t = curthread;
	proc_t		*p = ttoproc(t);
	klwp_t		*lwp = ttolwp(t);
	kthread_t	*tx;
	lwpent_t	*lep;
	int		procstop;
	int		flags = TS_ALLSTART;
	hrtime_t	stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop flag.
	 */
	if (lwp->lwp_nostop)
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
		 * then no debugger is present and we also do synchronous stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}

				/* Move waiting thread to run queue */
				if (ISWAITING(tx))
					setrun_locked(tx);

				/*
				 * force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock.  Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped
		 * state.  If we are stopping for a SUSPEND_PAUSE, we may be
		 * the last lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);			/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/* Interface for resetting user thread stop count. */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request. */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped. */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks,
		    TR_CLOCK_TICK);
	mutex_exit(&thread_stop_lock);
}
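/*
 * For illustration, the expected utstop protocol (a sketch; the caller
 * is the checkpoint code that posts TP_CHKPT requests):
 *	utstop_init();
 *	... add_one_utstop() once per stop request posted ...
 *	utstop_timedwait(wait_ticks);
 * Each thread entering stop(PR_CHECKPOINT, ...) above calls
 * del_one_utstop(), and the last one to stop wakes the waiter.
 */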
/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;
	uint32_t auditing = AU_AUDITING();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = PTOU(curproc)->u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * Save the siginfo pointer here, in case the
		 * signal's reset bit is on.
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			sip = &lwp->lwp_siginfo;
			if (sqp) {
				bcopy(&sqp->sq_info, sip, sizeof (*sip));
				/*
				 * If we were interrupted out of a system call
				 * due to pthread_cancel(), inform libc.
				 */
				if (sig == SIGCANCEL &&
				    sip->si_code == SI_LWP &&
				    t->t_sysnum != 0)
					schedctl_cancel_eintr();
			} else if (sig == SIGPROF &&
			    sip->si_signo == SIGPROF &&
			    t->t_rprof != NULL && t->t_rprof->rp_anystate) {
				/* EMPTY */;
			} else {
				bzero(sip, sizeof (*sip));
				sip->si_signo = sig;
				sip->si_code = SI_NOINFO;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
		if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		proc_is_exiting(p);
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
			if (auditing)		/* audit core dump */
				audit_core_start(sig);
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
			if (auditing)		/* audit core dump */
				audit_core_finish(code);
		}
	}

	/*
	 * Generate a contract event once if the process is killed
	 * by a signal.
	 */
	if (ext) {
		proc_is_exiting(p);
		if (exitlwps(0) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);
	}

	exit(code, sig);
}
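/*
 * Example of the promotion order implemented by fsig() below: with
 * SIGHUP, SIGUSR1 and SIGPROF all pending and unheld, fsig() returns
 * SIGPROF first (it has explicit priority, as does SIGKILL), then
 * SIGHUP and SIGUSR1 in ascending signal-number order.
 */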
/*
 * Find next unheld signal in ssp for thread t.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}

void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	PTOU(curproc)->u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		sigdelset(&p->p_ignore, sig);
		PTOU(curproc)->u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&PTOU(curproc)->u_sigresethand, sig);
			else
				sigdelset(&PTOU(curproc)->u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&PTOU(curproc)->u_signodefer, sig);
		else
			sigdelset(&PTOU(curproc)->u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&PTOU(curproc)->u_sigrestart, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&PTOU(curproc)->u_sigonstack, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigonstack, sig);
	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);
	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB &&
				    !(cp->p_pidflag & CLDWAITPID))
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}

/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
			if (!(cp->p_pidflag & CLDWAITPID))
				freeproc(cp);
		} else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}
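/*
 * For illustration: the SIG_IGN/SNOWAIT handling in setsigact() and
 * sigcld() above is what makes a userland sequence such as
 *	(void) sigset(SIGCLD, SIG_IGN);
 * cause exited children to be reaped automatically (freeproc())
 * instead of lingering as zombies awaiting a wait().
 */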
/*
 * Common code called from sigcld() and from
 * waitid() and issig_forreal() via sigcld_repost().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, then just mark the child process
	 * so that its SIGCLD will be posted later, when the first
	 * SIGCLD is taken off the queue or when the parent is ready
	 * to receive it or accept it, if ever.
	 */
	if (sigismember(&pp->p_sig, SIGCLD)) {
		cp->p_pidflag |= CLDPEND;
	} else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}

/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	sigqueue_t *sqp;

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * Count the number of sigqueues sent by sigaddqa().
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}

/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}
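/*
 * A note on queueing depth, for illustration: in sigaddqins() below,
 * siginfo whose si_code is queueable (SI_QUEUE, SI_TIMER, SI_ASYNCIO,
 * SI_MESGQ; see sigwillqueue()) is appended to the queue so that
 * multiple instances of the same signal accumulate, while any other
 * siginfo is kept to a depth of one per signal number.
 */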
/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}

/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid the situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig().  So we create a
	 * separate stash for SIGKILL's sigqueue_t.  Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else		/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}

/*
 * The function sigaddqa() is called with sigqueue already allocated.
 * If signal is ignored, discard but guarantee KILL and generation semantics.
 * It is called from sigqueue() and other places.
 */
void
sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	int sig = sigqp->sq_info.si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	if (sig_discardable(p, sig))
		siginfofree(sigqp);
	else
		sigaddqins(p, t, sigqp);

	sigtoproc(p, t, sig);
}

/*
 * Allocate the sigqueue_t structure and call sigaddqins().
 */
void
sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
{
	sigqueue_t *sqp;
	int sig = infop->si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	/*
	 * If the signal will be discarded by sigtoproc() or
	 * if the process isn't requesting siginfo and it isn't
	 * blocking the signal (it *could* change its mind while
	 * the signal is pending) then don't bother creating one.
2122 	 */
2123 	if (!sig_discardable(p, sig) &&
2124 	    (sigismember(&p->p_siginfo, sig) ||
2125 	    (curproc->p_ct_process != p->p_ct_process) ||
2126 	    (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2127 	    ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2128 		bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2129 		sqp->sq_func = NULL;
2130 		sqp->sq_next = NULL;
2131 		sigaddqins(p, t, sqp);
2132 	}
2133 	sigtoproc(p, t, sig);
2134 }
2135
2136 /*
2137  * Handle stop-on-fault processing for the debugger.  Returns 0
2138  * if the fault is cleared during the stop, nonzero if it isn't.
2139  */
2140 int
2141 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2142 {
2143 	proc_t *p = ttoproc(curthread);
2144 	klwp_t *lwp = ttolwp(curthread);
2145
2146 	ASSERT(prismember(&p->p_fltmask, fault));
2147
2148 	/*
2149 	 * Record the current fault and siginfo structure so the
2150 	 * debugger can find them.
2151 	 */
2152 	mutex_enter(&p->p_lock);
2153 	lwp->lwp_curflt = (uchar_t)fault;
2154 	lwp->lwp_siginfo = *sip;
2155
2156 	stop(PR_FAULTED, fault);
2157
2158 	fault = lwp->lwp_curflt;
2159 	lwp->lwp_curflt = 0;
2160 	mutex_exit(&p->p_lock);
2161 	return (fault);
2162 }
2163
2164 void
2165 sigorset(k_sigset_t *s1, k_sigset_t *s2)
2166 {
2167 	s1->__sigbits[0] |= s2->__sigbits[0];
2168 	s1->__sigbits[1] |= s2->__sigbits[1];
2169 }
2170
2171 void
2172 sigandset(k_sigset_t *s1, k_sigset_t *s2)
2173 {
2174 	s1->__sigbits[0] &= s2->__sigbits[0];
2175 	s1->__sigbits[1] &= s2->__sigbits[1];
2176 }
2177
2178 void
2179 sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
2180 {
2181 	s1->__sigbits[0] &= ~(s2->__sigbits[0]);
2182 	s1->__sigbits[1] &= ~(s2->__sigbits[1]);
2183 }
2184
2185 /*
2186  * Return non-zero if curthread->t_sig_check should be set to 1, that is,
2187  * if there are any signals the thread might take on return from the kernel.
2188  * If k_sigset_t's were a single word, we would do:
2189  *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
2190  */
2191 int
2192 sigcheck(proc_t *p, kthread_t *t)
2193 {
2194 	sc_shared_t *tdp = t->t_schedctl;
2195
2196 	/*
2197 	 * If signals are blocked via the schedctl interface
2198 	 * then we only check for the unmaskable signals.
2199 	 */
2200 	if (tdp != NULL && tdp->sc_sigblock)
2201 		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2202 		    CANTMASK0);
2203
2204 	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2205 	    ~t->t_hold.__sigbits[0]) |
2206 	    (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
2207 	    ~t->t_hold.__sigbits[1]) & FILLSET1));
2208 }
2209
2210 /* ONC_PLUS EXTRACT START */
2211 void
2212 sigintr(k_sigset_t *smask, int intable)
2213 {
2214 	proc_t *p;
2215 	int owned;
2216 	k_sigset_t lmask;	/* local copy of cantmask */
2217 	klwp_t *lwp = ttolwp(curthread);
2218
2219 	/*
2220 	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
2221 	 * and SIGTERM, preserving the existing masks.
2222 	 * This function supports the -intr nfs and ufs mount option.
2223 	 */
2224
2225 	/*
2226 	 * don't do kernel threads
2227 	 */
2228 	if (lwp == NULL)
2229 		return;
2230
2231 	/*
2232 	 * get access to signal mask
2233 	 */
2234 	p = ttoproc(curthread);
2235 	owned = mutex_owned(&p->p_lock);	/* this is filthy */
2236 	if (!owned)
2237 		mutex_enter(&p->p_lock);
2238
2239 	/*
2240 	 * remember the current mask
2241 	 */
2242 	schedctl_finish_sigblock(curthread);
2243 	*smask = curthread->t_hold;
2244
2245 	/*
2246 	 * mask out all signals
2247 	 */
2248 	sigfillset(&curthread->t_hold);
2249
2250 	/*
2251 	 * Unmask the non-maskable signals (e.g., KILL), as long as
2252 	 * they aren't already masked (which could happen at exit).
2253 	 * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
2254 	 * second sets the current hold mask to (~0 & ~lmask), which reduces
2255 	 * to (~cantmask | curhold).
2256 	 */
2257 	lmask = cantmask;
2258 	sigdiffset(&lmask, smask);
2259 	sigdiffset(&curthread->t_hold, &lmask);
2260
2261 	/*
2262 	 * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
2263 	 * Re-enable INT if it was originally enabled and the NFS mount
2264 	 * option nointr is not set.
2265 	 */
2266 	if (!sigismember(smask, SIGHUP))
2267 		sigdelset(&curthread->t_hold, SIGHUP);
2268 	if (!sigismember(smask, SIGINT) && intable)
2269 		sigdelset(&curthread->t_hold, SIGINT);
2270 	if (!sigismember(smask, SIGQUIT))
2271 		sigdelset(&curthread->t_hold, SIGQUIT);
2272 	if (!sigismember(smask, SIGTERM))
2273 		sigdelset(&curthread->t_hold, SIGTERM);
2274
2275 	/*
2276 	 * release access to signal mask
2277 	 */
2278 	if (!owned)
2279 		mutex_exit(&p->p_lock);
2280
2281 	/*
2282 	 * Indicate that this lwp is not to be stopped.
2283 	 */
2284 	lwp->lwp_nostop++;
2285
2286 }
2287 /* ONC_PLUS EXTRACT END */
2288
2289 void
2290 sigunintr(k_sigset_t *smask)
2291 {
2292 	proc_t *p;
2293 	int owned;
2294 	klwp_t *lwp = ttolwp(curthread);
2295
2296 	/*
2297 	 * Reset the previous mask (see sigintr() above).
2298 	 */
2299 	if (lwp != NULL) {
2300 		lwp->lwp_nostop--;	/* restore lwp stoppability */
2301 		p = ttoproc(curthread);
2302 		owned = mutex_owned(&p->p_lock);	/* this is filthy */
2303 		if (!owned)
2304 			mutex_enter(&p->p_lock);
2305 		curthread->t_hold = *smask;
2306 		/* so unmasked signals will be seen */
2307 		curthread->t_sig_check = 1;
2308 		if (!owned)
2309 			mutex_exit(&p->p_lock);
2310 	}
2311 }
2312
2313 void
2314 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
2315 {
2316 	proc_t *p;
2317 	int owned;
2318 	/*
2319 	 * Save the current signal mask in oldmask, then
2320 	 * set it to newmask.
2321 	 */
2322 	if (ttolwp(curthread) != NULL) {
2323 		p = ttoproc(curthread);
2324 		owned = mutex_owned(&p->p_lock);	/* this is filthy */
2325 		if (!owned)
2326 			mutex_enter(&p->p_lock);
2327 		schedctl_finish_sigblock(curthread);
2328 		if (oldmask != NULL)
2329 			*oldmask = curthread->t_hold;
2330 		curthread->t_hold = *newmask;
2331 		curthread->t_sig_check = 1;
2332 		if (!owned)
2333 			mutex_exit(&p->p_lock);
2334 	}
2335 }
2336
2337 /*
2338  * Return true if the signal number is in range
2339  * and the signal code specifies signal queueing.
2340  */
2341 int
2342 sigwillqueue(int sig, int code)
2343 {
2344 	if (sig >= 0 && sig < NSIG) {
2345 		switch (code) {
2346 		case SI_QUEUE:
2347 		case SI_TIMER:
2348 		case SI_ASYNCIO:
2349 		case SI_MESGQ:
2350 			return (1);
2351 		}
2352 	}
2353 	return (0);
2354 }
2355
2356 #ifndef	UCHAR_MAX
2357 #define	UCHAR_MAX	255
2358 #endif
2359
2360 /*
2361  * The entire pool (with maxcount entries) is pre-allocated at
2362  * the first sigqueue/signotify call.
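 * The pool is a single contiguous allocation: a sigqhdr_t immediately
 * followed by maxcount slots of the given size, all threaded onto the
 * sqb_free list below.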
2363  */
2364 sigqhdr_t *
2365 sigqhdralloc(size_t size, uint_t maxcount)
2366 {
2367 	size_t i;
2368 	sigqueue_t *sq, *next;
2369 	sigqhdr_t *sqh;
2370
2371 	i = (maxcount * size) + sizeof (sigqhdr_t);
2372 	ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
2373 	sqh = kmem_alloc(i, KM_SLEEP);
2374 	sqh->sqb_count = (uchar_t)maxcount;
2375 	sqh->sqb_maxcount = (uchar_t)maxcount;
2376 	sqh->sqb_size = (ushort_t)i;
2377 	sqh->sqb_pexited = 0;
2378 	sqh->sqb_sent = 0;
2379 	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
2380 	for (i = maxcount - 1; i != 0; i--) {
2381 		next = (sigqueue_t *)((uintptr_t)sq + size);
2382 		sq->sq_next = next;
2383 		sq = next;
2384 	}
2385 	sq->sq_next = NULL;
2386 	cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
2387 	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
2388 	return (sqh);
2389 }
2390
2391 static void sigqrel(sigqueue_t *);
2392
2393 /*
2394  * Allocate a sigqueue/signotify structure from the per-process
2395  * pre-allocated pool.
2396  */
2397 sigqueue_t *
2398 sigqalloc(sigqhdr_t *sqh)
2399 {
2400 	sigqueue_t *sq = NULL;
2401
2402 	ASSERT(MUTEX_HELD(&curproc->p_lock));
2403
2404 	if (sqh != NULL) {
2405 		mutex_enter(&sqh->sqb_lock);
2406 		if (sqh->sqb_count > 0) {
2407 			sqh->sqb_count--;
2408 			sq = sqh->sqb_free;
2409 			sqh->sqb_free = sq->sq_next;
2410 			mutex_exit(&sqh->sqb_lock);
2411 			bzero(&sq->sq_info, sizeof (k_siginfo_t));
2412 			sq->sq_backptr = sqh;
2413 			sq->sq_func = sigqrel;
2414 			sq->sq_next = NULL;
2415 			sq->sq_external = 0;
2416 		} else {
2417 			mutex_exit(&sqh->sqb_lock);
2418 		}
2419 	}
2420 	return (sq);
2421 }
2422
2423 /*
2424  * Return a sigqueue structure back to the pre-allocated pool.
2425  */
2426 static void
2427 sigqrel(sigqueue_t *sq)
2428 {
2429 	sigqhdr_t *sqh;
2430
2431 	/* make sure that p_lock of the affected process is held */
2432
2433 	sqh = (sigqhdr_t *)sq->sq_backptr;
2434 	mutex_enter(&sqh->sqb_lock);
2435 	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
2436 		mutex_exit(&sqh->sqb_lock);
2437 		cv_destroy(&sqh->sqb_cv);
2438 		mutex_destroy(&sqh->sqb_lock);
2439 		kmem_free(sqh, sqh->sqb_size);
2440 	} else {
2441 		sqh->sqb_count++;
2442 		sqh->sqb_sent--;
2443 		sq->sq_next = sqh->sqb_free;
2444 		sq->sq_backptr = NULL;
2445 		sqh->sqb_free = sq;
2446 		cv_signal(&sqh->sqb_cv);
2447 		mutex_exit(&sqh->sqb_lock);
2448 	}
2449 }
2450
2451 /*
2452  * Free up the pre-allocated sigqueue headers of the sigqueue pool
2453  * and the signotify pool, if possible.
2454  * Called only by the owning process during exec() and exit().
2455  */
2456 void
2457 sigqfree(proc_t *p)
2458 {
2459 	ASSERT(MUTEX_HELD(&p->p_lock));
2460
2461 	if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
2462 		sigqhdrfree(p->p_sigqhdr);
2463 		p->p_sigqhdr = NULL;
2464 	}
2465 	if (p->p_signhdr != NULL) {	/* signotify pool */
2466 		sigqhdrfree(p->p_signhdr);
2467 		p->p_signhdr = NULL;
2468 	}
2469 }
2470
2471 /*
2472  * Free up the pre-allocated header and sigq pool if possible.
2473  */
2474 void
2475 sigqhdrfree(sigqhdr_t *sqh)
2476 {
2477 	mutex_enter(&sqh->sqb_lock);
2478 	if (sqh->sqb_sent == 0) {
2479 		mutex_exit(&sqh->sqb_lock);
2480 		cv_destroy(&sqh->sqb_cv);
2481 		mutex_destroy(&sqh->sqb_lock);
2482 		kmem_free(sqh, sqh->sqb_size);
2483 	} else {
2484 		sqh->sqb_pexited = 1;
2485 		mutex_exit(&sqh->sqb_lock);
2486 	}
2487 }
2488
2489 /*
2490  * Free up a single sigqueue structure.
2491  * No other code should free a sigqueue directly.
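 * If the entry has an sq_func destructor (sigqrel() for pool entries),
 * that is invoked; otherwise the entry was allocated with kmem_alloc()
 * and is kmem_free()d here.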
2492 */ 2493 void 2494 siginfofree(sigqueue_t *sqp) 2495 { 2496 if (sqp != NULL) { 2497 if (sqp->sq_func != NULL) 2498 (sqp->sq_func)(sqp); 2499 else 2500 kmem_free(sqp, sizeof (sigqueue_t)); 2501 } 2502 } 2503 2504 /* 2505 * Generate a synchronous signal caused by a hardware 2506 * condition encountered by an lwp. Called from trap(). 2507 */ 2508 void 2509 trapsig(k_siginfo_t *ip, int restartable) 2510 { 2511 proc_t *p = ttoproc(curthread); 2512 int sig = ip->si_signo; 2513 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 2514 2515 ASSERT(sig > 0 && sig < NSIG); 2516 2517 if (curthread->t_dtrace_on) 2518 dtrace_safe_synchronous_signal(); 2519 2520 mutex_enter(&p->p_lock); 2521 schedctl_finish_sigblock(curthread); 2522 /* 2523 * Avoid a possible infinite loop if the lwp is holding the 2524 * signal generated by a trap of a restartable instruction or 2525 * if the signal so generated is being ignored by the process. 2526 */ 2527 if (restartable && 2528 (sigismember(&curthread->t_hold, sig) || 2529 p->p_user.u_signal[sig-1] == SIG_IGN)) { 2530 sigdelset(&curthread->t_hold, sig); 2531 p->p_user.u_signal[sig-1] = SIG_DFL; 2532 sigdelset(&p->p_ignore, sig); 2533 } 2534 bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t)); 2535 sigaddqa(p, curthread, sqp); 2536 mutex_exit(&p->p_lock); 2537 } 2538 2539 /* 2540 * Dispatch the real time profiling signal in the traditional way, 2541 * honoring all of the /proc tracing mechanism built into issig(). 2542 */ 2543 static void 2544 realsigprof_slow(int sysnum, int nsysarg, int error) 2545 { 2546 kthread_t *t = curthread; 2547 proc_t *p = ttoproc(t); 2548 klwp_t *lwp = ttolwp(t); 2549 k_siginfo_t *sip = &lwp->lwp_siginfo; 2550 void (*func)(); 2551 2552 mutex_enter(&p->p_lock); 2553 func = PTOU(p)->u_signal[SIGPROF - 1]; 2554 if (p->p_rprof_cyclic == CYCLIC_NONE || 2555 func == SIG_DFL || func == SIG_IGN) { 2556 bzero(t->t_rprof, sizeof (*t->t_rprof)); 2557 mutex_exit(&p->p_lock); 2558 return; 2559 } 2560 if (sigismember(&t->t_hold, SIGPROF)) { 2561 mutex_exit(&p->p_lock); 2562 return; 2563 } 2564 sip->si_signo = SIGPROF; 2565 sip->si_code = PROF_SIG; 2566 sip->si_errno = error; 2567 hrt2ts(gethrtime(), &sip->si_tstamp); 2568 sip->si_syscall = sysnum; 2569 sip->si_nsysarg = nsysarg; 2570 sip->si_fault = lwp->lwp_lastfault; 2571 sip->si_faddr = lwp->lwp_lastfaddr; 2572 lwp->lwp_lastfault = 0; 2573 lwp->lwp_lastfaddr = NULL; 2574 sigtoproc(p, t, SIGPROF); 2575 mutex_exit(&p->p_lock); 2576 ASSERT(lwp->lwp_cursig == 0); 2577 if (issig(FORREAL)) 2578 psig(); 2579 sip->si_signo = 0; 2580 bzero(t->t_rprof, sizeof (*t->t_rprof)); 2581 } 2582 2583 /* 2584 * We are not tracing the SIGPROF signal, or doing any other unnatural 2585 * acts, like watchpoints, so dispatch the real time profiling signal 2586 * directly, bypassing all of the overhead built into issig(). 2587 */ 2588 static void 2589 realsigprof_fast(int sysnum, int nsysarg, int error) 2590 { 2591 kthread_t *t = curthread; 2592 proc_t *p = ttoproc(t); 2593 klwp_t *lwp = ttolwp(t); 2594 k_siginfo_t *sip = &lwp->lwp_siginfo; 2595 void (*func)(); 2596 int rc; 2597 int code; 2598 2599 /* 2600 * We don't need to acquire p->p_lock here; 2601 * we are manipulating thread-private data. 
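	 * (in particular the scratch siginfo in lwp_siginfo and the
	 * rprof state in t_rprof, both of which belong to this lwp alone)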
2602 	 */
2603 	func = PTOU(p)->u_signal[SIGPROF - 1];
2604 	if (p->p_rprof_cyclic == CYCLIC_NONE ||
2605 	    func == SIG_DFL || func == SIG_IGN) {
2606 		bzero(t->t_rprof, sizeof (*t->t_rprof));
2607 		return;
2608 	}
2609 	if (lwp->lwp_cursig != 0 ||
2610 	    lwp->lwp_curinfo != NULL ||
2611 	    sigismember(&t->t_hold, SIGPROF)) {
2612 		return;
2613 	}
2614 	sip->si_signo = SIGPROF;
2615 	sip->si_code = PROF_SIG;
2616 	sip->si_errno = error;
2617 	hrt2ts(gethrtime(), &sip->si_tstamp);
2618 	sip->si_syscall = sysnum;
2619 	sip->si_nsysarg = nsysarg;
2620 	sip->si_fault = lwp->lwp_lastfault;
2621 	sip->si_faddr = lwp->lwp_lastfaddr;
2622 	lwp->lwp_lastfault = 0;
2623 	lwp->lwp_lastfaddr = NULL;
2624 	if (t->t_flag & T_TOMASK)
2625 		t->t_flag &= ~T_TOMASK;
2626 	else
2627 		lwp->lwp_sigoldmask = t->t_hold;
2628 	sigorset(&t->t_hold, &PTOU(p)->u_sigmask[SIGPROF - 1]);
2629 	if (!sigismember(&PTOU(p)->u_signodefer, SIGPROF))
2630 		sigaddset(&t->t_hold, SIGPROF);
2631 	lwp->lwp_extsig = 0;
2632 	lwp->lwp_ru.nsignals++;
2633 	if (p->p_model == DATAMODEL_NATIVE)
2634 		rc = sendsig(SIGPROF, sip, func);
2635 #ifdef _SYSCALL32_IMPL
2636 	else
2637 		rc = sendsig32(SIGPROF, sip, func);
2638 #endif	/* _SYSCALL32_IMPL */
2639 	sip->si_signo = 0;
2640 	bzero(t->t_rprof, sizeof (*t->t_rprof));
2641 	if (rc == 0) {
2642 		/*
2643 		 * sendsig() failed; we must dump core with a SIGSEGV.
2644 		 * See psig().  This code is copied from there.
2645 		 */
2646 		lwp->lwp_cursig = SIGSEGV;
2647 		code = CLD_KILLED;
2648 		proc_is_exiting(p);
2649 		if (exitlwps(1) != 0) {
2650 			mutex_enter(&p->p_lock);
2651 			lwp_exit();
2652 		}
2653 		if (audit_active == C2AUDIT_LOADED)
2654 			audit_core_start(SIGSEGV);
2655 		if (core(SIGSEGV, 0) == 0)
2656 			code = CLD_DUMPED;
2657 		if (audit_active == C2AUDIT_LOADED)
2658 			audit_core_finish(code);
2659 		exit(code, SIGSEGV);
2660 	}
2661 }
2662
2663 /*
2664  * Arrange for the real time profiling signal to be dispatched.
2665  */
2666 void
2667 realsigprof(int sysnum, int nsysarg, int error)
2668 {
2669 	kthread_t *t = curthread;
2670 	proc_t *p = ttoproc(t);
2671
2672 	if (t->t_rprof->rp_anystate == 0)
2673 		return;
2674
2675 	schedctl_finish_sigblock(t);
2676
2677 	/* test for any activity that requires p->p_lock */
2678 	if (tracing(p, SIGPROF) || pr_watch_active(p) ||
2679 	    sigismember(&PTOU(p)->u_sigresethand, SIGPROF)) {
2680 		/* do it the classic slow way */
2681 		realsigprof_slow(sysnum, nsysarg, error);
2682 	} else {
2683 		/* do it the cheating-a-little fast way */
2684 		realsigprof_fast(sysnum, nsysarg, error);
2685 	}
2686 }
2687
2688 #ifdef _SYSCALL32_IMPL
2689
2690 /*
2691  * It's tricky to transmit a sigval between 32-bit and 64-bit
2692  * processes, since in the 64-bit world a pointer and an integer
2693  * are different sizes.  Since we're constrained by the standards
2694  * world not to change the types, and it's unclear how useful it is
2695  * to send pointers between address spaces this way, we preserve
2696  * the 'int' interpretation for 32-bit processes interoperating
2697  * with 64-bit processes.  The full semantics (pointers or integers)
2698  * are available for N-bit processes interoperating with N-bit
2699  * processes.
2700  */
2701 void
2702 siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
2703 {
2704 	bzero(dest, sizeof (*dest));
2705
2706 	/*
2707 	 * The absolute minimum content is si_signo and si_code.
2708 	 */
2709 	dest->si_signo = src->si_signo;
2710 	if ((dest->si_code = src->si_code) == SI_NOINFO)
2711 		return;
2712
2713 	/*
2714 	 * A siginfo generated by user level is structured
2715 	 * differently from one generated by the kernel.
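 * A user-level siginfo carries only the sender's identity and an
 * optional sigval; a kernel-generated one adds the signal-specific
 * fields selected by the switch on si_signo below.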
2716 */ 2717 if (SI_FROMUSER(src)) { 2718 dest->si_pid = src->si_pid; 2719 dest->si_ctid = src->si_ctid; 2720 dest->si_zoneid = src->si_zoneid; 2721 dest->si_uid = src->si_uid; 2722 if (SI_CANQUEUE(src->si_code)) 2723 dest->si_value.sival_int = 2724 (int32_t)src->si_value.sival_int; 2725 return; 2726 } 2727 2728 dest->si_errno = src->si_errno; 2729 2730 switch (src->si_signo) { 2731 default: 2732 dest->si_pid = src->si_pid; 2733 dest->si_ctid = src->si_ctid; 2734 dest->si_zoneid = src->si_zoneid; 2735 dest->si_uid = src->si_uid; 2736 dest->si_value.sival_int = (int32_t)src->si_value.sival_int; 2737 break; 2738 case SIGCLD: 2739 dest->si_pid = src->si_pid; 2740 dest->si_ctid = src->si_ctid; 2741 dest->si_zoneid = src->si_zoneid; 2742 dest->si_status = src->si_status; 2743 dest->si_stime = src->si_stime; 2744 dest->si_utime = src->si_utime; 2745 break; 2746 case SIGSEGV: 2747 case SIGBUS: 2748 case SIGILL: 2749 case SIGTRAP: 2750 case SIGFPE: 2751 case SIGEMT: 2752 dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr; 2753 dest->si_trapno = src->si_trapno; 2754 dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc; 2755 break; 2756 case SIGPOLL: 2757 case SIGXFSZ: 2758 dest->si_fd = src->si_fd; 2759 dest->si_band = src->si_band; 2760 break; 2761 case SIGPROF: 2762 dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr; 2763 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec; 2764 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec; 2765 dest->si_syscall = src->si_syscall; 2766 dest->si_nsysarg = src->si_nsysarg; 2767 dest->si_fault = src->si_fault; 2768 break; 2769 } 2770 } 2771 2772 void 2773 siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest) 2774 { 2775 bzero(dest, sizeof (*dest)); 2776 2777 /* 2778 * The absolute minimum content is si_signo and si_code. 2779 */ 2780 dest->si_signo = src->si_signo; 2781 if ((dest->si_code = src->si_code) == SI_NOINFO) 2782 return; 2783 2784 /* 2785 * A siginfo generated by user level is structured 2786 * differently from one generated by the kernel. 
2787 */ 2788 if (SI_FROMUSER(src)) { 2789 dest->si_pid = src->si_pid; 2790 dest->si_ctid = src->si_ctid; 2791 dest->si_zoneid = src->si_zoneid; 2792 dest->si_uid = src->si_uid; 2793 if (SI_CANQUEUE(src->si_code)) 2794 dest->si_value.sival_int = 2795 (int)src->si_value.sival_int; 2796 return; 2797 } 2798 2799 dest->si_errno = src->si_errno; 2800 2801 switch (src->si_signo) { 2802 default: 2803 dest->si_pid = src->si_pid; 2804 dest->si_ctid = src->si_ctid; 2805 dest->si_zoneid = src->si_zoneid; 2806 dest->si_uid = src->si_uid; 2807 dest->si_value.sival_int = (int)src->si_value.sival_int; 2808 break; 2809 case SIGCLD: 2810 dest->si_pid = src->si_pid; 2811 dest->si_ctid = src->si_ctid; 2812 dest->si_zoneid = src->si_zoneid; 2813 dest->si_status = src->si_status; 2814 dest->si_stime = src->si_stime; 2815 dest->si_utime = src->si_utime; 2816 break; 2817 case SIGSEGV: 2818 case SIGBUS: 2819 case SIGILL: 2820 case SIGTRAP: 2821 case SIGFPE: 2822 case SIGEMT: 2823 dest->si_addr = (void *)(uintptr_t)src->si_addr; 2824 dest->si_trapno = src->si_trapno; 2825 dest->si_pc = (void *)(uintptr_t)src->si_pc; 2826 break; 2827 case SIGPOLL: 2828 case SIGXFSZ: 2829 dest->si_fd = src->si_fd; 2830 dest->si_band = src->si_band; 2831 break; 2832 case SIGPROF: 2833 dest->si_faddr = (void *)(uintptr_t)src->si_faddr; 2834 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec; 2835 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec; 2836 dest->si_syscall = src->si_syscall; 2837 dest->si_nsysarg = src->si_nsysarg; 2838 dest->si_fault = src->si_fault; 2839 break; 2840 } 2841 } 2842 2843 #endif /* _SYSCALL32_IMPL */ 2844
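
/*
 * Usage sketch (illustrative only; the target process `p' and the
 * value `token' below are assumed, not taken from this file): an
 * in-kernel sender wanting queued-signal semantics fills in a
 * k_siginfo_t with a queueable si_code and lets sigaddq() allocate
 * and queue the entry, much as sigsendproc() does above:
 *
 *	k_siginfo_t info;
 *
 *	bzero(&info, sizeof (info));
 *	info.si_signo = SIGUSR1;
 *	info.si_code = SI_QUEUE;
 *	info.si_value.sival_int = token;
 *	mutex_enter(&p->p_lock);
 *	sigaddq(p, NULL, &info, KM_NOSLEEP);	/* may lose info if	*/
 *	mutex_exit(&p->p_lock);			/* memory is short	*/
 */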