1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 28 /* All Rights Reserved */ 29 30 #include <sys/param.h> 31 #include <sys/types.h> 32 #include <sys/bitmap.h> 33 #include <sys/sysmacros.h> 34 #include <sys/systm.h> 35 #include <sys/cred.h> 36 #include <sys/user.h> 37 #include <sys/errno.h> 38 #include <sys/proc.h> 39 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */ 40 #include <sys/signal.h> 41 #include <sys/siginfo.h> 42 #include <sys/fault.h> 43 #include <sys/ucontext.h> 44 #include <sys/procfs.h> 45 #include <sys/wait.h> 46 #include <sys/class.h> 47 #include <sys/mman.h> 48 #include <sys/procset.h> 49 #include <sys/kmem.h> 50 #include <sys/cpuvar.h> 51 #include <sys/prsystm.h> 52 #include <sys/debug.h> 53 #include <vm/as.h> 54 #include <sys/bitmap.h> 55 #include <c2/audit.h> 56 #include <sys/core.h> 57 #include <sys/schedctl.h> 58 #include <sys/contract/process_impl.h> 59 #include <sys/cyclic.h> 60 #include <sys/dtrace.h> 61 #include <sys/sdt.h> 62 63 /* MUST be contiguous */ 64 k_sigset_t nullsmask = {0, 0}; 65 66 k_sigset_t fillset = {FILLSET0, FILLSET1}; 67 68 k_sigset_t cantmask = {CANTMASK0, CANTMASK1}; 69 70 k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0}; 71 72 k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR) 73 |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)), 74 (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE) 75 |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1) 76 |sigmask(SIGJVM2))}; 77 78 k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP) 79 |sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0}; 80 81 k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP) 82 |sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE) 83 |sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS) 84 |sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0}; 85 86 k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)), 87 0}; 88 89 static int isjobstop(int); 90 static void post_sigcld(proc_t *, sigqueue_t *); 91 92 /* 93 * Internal variables for counting number of user thread stop requests posted. 94 * They may not be accurate at some special situation such as that a virtually 95 * stopped thread starts to run. 96 */ 97 static int num_utstop; 98 /* 99 * Internal variables for broadcasting an event when all thread stop requests 100 * are processed. 101 */ 102 static kcondvar_t utstop_cv; 103 104 static kmutex_t thread_stop_lock; 105 void del_one_utstop(void); 106 107 /* 108 * Send the specified signal to the specified process. 109 */ 110 void 111 psignal(proc_t *p, int sig) 112 { 113 mutex_enter(&p->p_lock); 114 sigtoproc(p, NULL, sig); 115 mutex_exit(&p->p_lock); 116 } 117 118 /* 119 * Send the specified signal to the specified thread. 120 */ 121 void 122 tsignal(kthread_t *t, int sig) 123 { 124 proc_t *p = ttoproc(t); 125 126 mutex_enter(&p->p_lock); 127 sigtoproc(p, t, sig); 128 mutex_exit(&p->p_lock); 129 } 130 131 int 132 signal_is_blocked(kthread_t *t, int sig) 133 { 134 return (sigismember(&t->t_hold, sig) || 135 (schedctl_sigblock(t) && !sigismember(&cantmask, sig))); 136 } 137 138 /* 139 * Return true if the signal can safely be discarded on generation. 140 * That is, if there is no need for the signal on the receiving end. 141 * The answer is true if the process is a zombie or 142 * if all of these conditions are true: 143 * the signal is being ignored 144 * the process is single-threaded 145 * the signal is not being traced by /proc 146 * the signal is not blocked by the process 147 * the signal is not being accepted via sigwait() 148 */ 149 static int 150 sig_discardable(proc_t *p, int sig) 151 { 152 kthread_t *t = p->p_tlist; 153 154 return (t == NULL || /* if zombie or ... */ 155 (sigismember(&p->p_ignore, sig) && /* signal is ignored */ 156 t->t_forw == t && /* and single-threaded */ 157 !tracing(p, sig) && /* and no /proc tracing */ 158 !signal_is_blocked(t, sig) && /* and signal not blocked */ 159 !sigismember(&t->t_sigwait, sig))); /* and not being accepted */ 160 } 161 162 /* 163 * Return true if this thread is going to eat this signal soon. 164 * Note that, if the signal is SIGKILL, we force stopped threads to be 165 * set running (to make SIGKILL be a sure kill), but only if the process 166 * is not currently locked by /proc (the P_PR_LOCK flag). Code in /proc 167 * relies on the fact that a process will not change shape while P_PR_LOCK 168 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set). 169 * We wish that we could simply call prbarrier() below, in sigtoproc(), to 170 * ensure that the process is not locked by /proc, but prbarrier() drops 171 * and reacquires p->p_lock and dropping p->p_lock here would be damaging. 172 */ 173 int 174 eat_signal(kthread_t *t, int sig) 175 { 176 int rval = 0; 177 ASSERT(THREAD_LOCK_HELD(t)); 178 179 /* 180 * Do not do anything if the target thread has the signal blocked. 181 */ 182 if (!signal_is_blocked(t, sig)) { 183 t->t_sig_check = 1; /* have thread do an issig */ 184 if (ISWAKEABLE(t) || ISWAITING(t)) { 185 setrun_locked(t); 186 rval = 1; 187 } else if (t->t_state == TS_STOPPED && sig == SIGKILL && 188 !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) { 189 ttoproc(t)->p_stopsig = 0; 190 t->t_dtrace_stop = 0; 191 t->t_schedflag |= TS_XSTART | TS_PSTART; 192 setrun_locked(t); 193 } else if (t != curthread && t->t_state == TS_ONPROC) { 194 aston(t); /* make it do issig promptly */ 195 if (t->t_cpu != CPU) 196 poke_cpu(t->t_cpu->cpu_id); 197 rval = 1; 198 } else if (t->t_state == TS_RUN) { 199 rval = 1; 200 } 201 } 202 203 return (rval); 204 } 205 206 /* 207 * Post a signal. 208 * If a non-null thread pointer is passed, then post the signal 209 * to the thread/lwp, otherwise post the signal to the process. 210 */ 211 void 212 sigtoproc(proc_t *p, kthread_t *t, int sig) 213 { 214 kthread_t *tt; 215 int ext = !(curproc->p_flag & SSYS) && 216 (curproc->p_ct_process != p->p_ct_process); 217 218 ASSERT(MUTEX_HELD(&p->p_lock)); 219 220 if (sig <= 0 || sig >= NSIG) 221 return; 222 223 /* 224 * Regardless of origin or directedness, 225 * SIGKILL kills all lwps in the process immediately 226 * and jobcontrol signals affect all lwps in the process. 227 */ 228 if (sig == SIGKILL) { 229 p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0); 230 t = NULL; 231 } else if (sig == SIGCONT) { 232 /* 233 * The SSCONT flag will remain set until a stopping 234 * signal comes in (below). This is harmless. 235 */ 236 p->p_flag |= SSCONT; 237 sigdelq(p, NULL, SIGSTOP); 238 sigdelq(p, NULL, SIGTSTP); 239 sigdelq(p, NULL, SIGTTOU); 240 sigdelq(p, NULL, SIGTTIN); 241 sigdiffset(&p->p_sig, &stopdefault); 242 sigdiffset(&p->p_extsig, &stopdefault); 243 p->p_stopsig = 0; 244 if ((tt = p->p_tlist) != NULL) { 245 do { 246 sigdelq(p, tt, SIGSTOP); 247 sigdelq(p, tt, SIGTSTP); 248 sigdelq(p, tt, SIGTTOU); 249 sigdelq(p, tt, SIGTTIN); 250 sigdiffset(&tt->t_sig, &stopdefault); 251 sigdiffset(&tt->t_extsig, &stopdefault); 252 } while ((tt = tt->t_forw) != p->p_tlist); 253 } 254 if ((tt = p->p_tlist) != NULL) { 255 do { 256 thread_lock(tt); 257 if (tt->t_state == TS_STOPPED && 258 tt->t_whystop == PR_JOBCONTROL) { 259 tt->t_schedflag |= TS_XSTART; 260 setrun_locked(tt); 261 } 262 thread_unlock(tt); 263 } while ((tt = tt->t_forw) != p->p_tlist); 264 } 265 } else if (sigismember(&stopdefault, sig)) { 266 /* 267 * This test has a race condition which we can't fix: 268 * By the time the stopping signal is received by 269 * the target process/thread, the signal handler 270 * and/or the detached state might have changed. 271 */ 272 if (PTOU(p)->u_signal[sig-1] == SIG_DFL && 273 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned)) 274 p->p_flag &= ~SSCONT; 275 sigdelq(p, NULL, SIGCONT); 276 sigdelset(&p->p_sig, SIGCONT); 277 sigdelset(&p->p_extsig, SIGCONT); 278 if ((tt = p->p_tlist) != NULL) { 279 do { 280 sigdelq(p, tt, SIGCONT); 281 sigdelset(&tt->t_sig, SIGCONT); 282 sigdelset(&tt->t_extsig, SIGCONT); 283 } while ((tt = tt->t_forw) != p->p_tlist); 284 } 285 } 286 287 if (sig_discardable(p, sig)) { 288 DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist, 289 proc_t *, p, int, sig); 290 return; 291 } 292 293 if (t != NULL) { 294 /* 295 * This is a directed signal, wake up the lwp. 296 */ 297 sigaddset(&t->t_sig, sig); 298 if (ext) 299 sigaddset(&t->t_extsig, sig); 300 thread_lock(t); 301 (void) eat_signal(t, sig); 302 thread_unlock(t); 303 DTRACE_PROC2(signal__send, kthread_t *, t, int, sig); 304 } else if ((tt = p->p_tlist) != NULL) { 305 /* 306 * Make sure that some lwp that already exists 307 * in the process fields the signal soon. 308 * Wake up an interruptibly sleeping lwp if necessary. 309 * For SIGKILL make all of the lwps see the signal; 310 * This is needed to guarantee a sure kill for processes 311 * with a mix of realtime and non-realtime threads. 312 */ 313 int su = 0; 314 315 sigaddset(&p->p_sig, sig); 316 if (ext) 317 sigaddset(&p->p_extsig, sig); 318 do { 319 thread_lock(tt); 320 if (eat_signal(tt, sig) && sig != SIGKILL) { 321 thread_unlock(tt); 322 break; 323 } 324 if (SUSPENDED(tt)) 325 su++; 326 thread_unlock(tt); 327 } while ((tt = tt->t_forw) != p->p_tlist); 328 /* 329 * If the process is deadlocked, make somebody run and die. 330 */ 331 if (sig == SIGKILL && p->p_stat != SIDL && 332 p->p_lwprcnt == 0 && p->p_lwpcnt == su && 333 !(p->p_proc_flag & P_PR_LOCK)) { 334 thread_lock(tt); 335 p->p_lwprcnt++; 336 tt->t_schedflag |= TS_CSTART; 337 setrun_locked(tt); 338 thread_unlock(tt); 339 } 340 341 DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig); 342 } 343 } 344 345 static int 346 isjobstop(int sig) 347 { 348 proc_t *p = ttoproc(curthread); 349 350 ASSERT(MUTEX_HELD(&p->p_lock)); 351 352 if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL && 353 sigismember(&stopdefault, sig)) { 354 /* 355 * If SIGCONT has been posted since we promoted this signal 356 * from pending to current, then don't do a jobcontrol stop. 357 */ 358 if (!(p->p_flag & SSCONT) && 359 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) && 360 curthread != p->p_agenttp) { 361 sigqueue_t *sqp; 362 363 stop(PR_JOBCONTROL, sig); 364 mutex_exit(&p->p_lock); 365 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 366 mutex_enter(&pidlock); 367 /* 368 * Only the first lwp to continue notifies the parent. 369 */ 370 if (p->p_pidflag & CLDCONT) 371 siginfofree(sqp); 372 else { 373 p->p_pidflag |= CLDCONT; 374 p->p_wcode = CLD_CONTINUED; 375 p->p_wdata = SIGCONT; 376 sigcld(p, sqp); 377 } 378 mutex_exit(&pidlock); 379 mutex_enter(&p->p_lock); 380 } 381 return (1); 382 } 383 return (0); 384 } 385 386 /* 387 * Returns true if the current process has a signal to process, and 388 * the signal is not held. The signal to process is put in p_cursig. 389 * This is asked at least once each time a process enters the system 390 * (though this can usually be done without actually calling issig by 391 * checking the pending signal masks). A signal does not do anything 392 * directly to a process; it sets a flag that asks the process to do 393 * something to itself. 394 * 395 * The "why" argument indicates the allowable side-effects of the call: 396 * 397 * FORREAL: Extract the next pending signal from p_sig into p_cursig; 398 * stop the process if a stop has been requested or if a traced signal 399 * is pending. 400 * 401 * JUSTLOOKING: Don't stop the process, just indicate whether or not 402 * a signal might be pending (FORREAL is needed to tell for sure). 403 * 404 * XXX: Changes to the logic in these routines should be propagated 405 * to lm_sigispending(). See bug 1201594. 406 */ 407 408 static int issig_forreal(void); 409 static int issig_justlooking(void); 410 411 int 412 issig(int why) 413 { 414 ASSERT(why == FORREAL || why == JUSTLOOKING); 415 416 return ((why == FORREAL)? issig_forreal() : issig_justlooking()); 417 } 418 419 420 static int 421 issig_justlooking(void) 422 { 423 kthread_t *t = curthread; 424 klwp_t *lwp = ttolwp(t); 425 proc_t *p = ttoproc(t); 426 k_sigset_t set; 427 428 /* 429 * This function answers the question: 430 * "Is there any reason to call issig_forreal()?" 431 * 432 * We have to answer the question w/o grabbing any locks 433 * because we are (most likely) being called after we 434 * put ourselves on the sleep queue. 435 */ 436 437 if (t->t_dtrace_stop | t->t_dtrace_sig) 438 return (1); 439 440 /* 441 * Another piece of complexity in this process. When single-stepping a 442 * process, we don't want an intervening signal or TP_PAUSE request to 443 * suspend the current thread. Otherwise, the controlling process will 444 * hang beacuse we will be stopped with TS_PSTART set in t_schedflag. 445 * We will trigger any remaining signals when we re-enter the kernel on 446 * the single step trap. 447 */ 448 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) 449 return (0); 450 451 if ((lwp->lwp_asleep && MUSTRETURN(p, t)) || 452 (p->p_flag & (SEXITLWPS|SKILLED)) || 453 (lwp->lwp_nostop == 0 && 454 (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) | 455 (t->t_proc_flag & 456 (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) || 457 lwp->lwp_cursig) 458 return (1); 459 460 if (p->p_flag & SVFWAIT) 461 return (0); 462 set = p->p_sig; 463 sigorset(&set, &t->t_sig); 464 if (schedctl_sigblock(t)) /* all blockable signals blocked */ 465 sigandset(&set, &cantmask); 466 else 467 sigdiffset(&set, &t->t_hold); 468 if (p->p_flag & SVFORK) 469 sigdiffset(&set, &holdvfork); 470 471 if (!sigisempty(&set)) { 472 int sig; 473 474 for (sig = 1; sig < NSIG; sig++) { 475 if (sigismember(&set, sig) && 476 (tracing(p, sig) || 477 sigismember(&t->t_sigwait, sig) || 478 !sigismember(&p->p_ignore, sig))) { 479 /* 480 * Don't promote a signal that will stop 481 * the process when lwp_nostop is set. 482 */ 483 if (!lwp->lwp_nostop || 484 PTOU(p)->u_signal[sig-1] != SIG_DFL || 485 !sigismember(&stopdefault, sig)) 486 return (1); 487 } 488 } 489 } 490 491 return (0); 492 } 493 494 static int 495 issig_forreal(void) 496 { 497 int sig = 0, ext = 0; 498 kthread_t *t = curthread; 499 klwp_t *lwp = ttolwp(t); 500 proc_t *p = ttoproc(t); 501 int toproc = 0; 502 int sigcld_found = 0; 503 int nostop_break = 0; 504 505 ASSERT(t->t_state == TS_ONPROC); 506 507 mutex_enter(&p->p_lock); 508 schedctl_finish_sigblock(t); 509 510 if (t->t_dtrace_stop | t->t_dtrace_sig) { 511 if (t->t_dtrace_stop) { 512 /* 513 * If DTrace's "stop" action has been invoked on us, 514 * set TP_PRSTOP. 515 */ 516 t->t_proc_flag |= TP_PRSTOP; 517 } 518 519 if (t->t_dtrace_sig != 0) { 520 k_siginfo_t info; 521 522 /* 523 * Post the signal generated as the result of 524 * DTrace's "raise" action as a normal signal before 525 * the full-fledged signal checking begins. 526 */ 527 bzero(&info, sizeof (info)); 528 info.si_signo = t->t_dtrace_sig; 529 info.si_code = SI_DTRACE; 530 531 sigaddq(p, NULL, &info, KM_NOSLEEP); 532 533 t->t_dtrace_sig = 0; 534 } 535 } 536 537 for (;;) { 538 if (p->p_flag & (SEXITLWPS|SKILLED)) { 539 lwp->lwp_cursig = sig = SIGKILL; 540 lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0; 541 t->t_sig_check = 1; 542 break; 543 } 544 545 /* 546 * Another piece of complexity in this process. When 547 * single-stepping a process, we don't want an intervening 548 * signal or TP_PAUSE request to suspend the current thread. 549 * Otherwise, the controlling process will hang beacuse we will 550 * be stopped with TS_PSTART set in t_schedflag. We will 551 * trigger any remaining signals when we re-enter the kernel on 552 * the single step trap. 553 */ 554 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) { 555 sig = 0; 556 break; 557 } 558 559 /* 560 * Hold the lwp here for watchpoint manipulation. 561 */ 562 if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) { 563 stop(PR_SUSPENDED, SUSPEND_PAUSE); 564 continue; 565 } 566 567 if (lwp->lwp_asleep && MUSTRETURN(p, t)) { 568 if ((sig = lwp->lwp_cursig) != 0) { 569 /* 570 * Make sure we call ISSIG() in post_syscall() 571 * to re-validate this current signal. 572 */ 573 t->t_sig_check = 1; 574 } 575 break; 576 } 577 578 /* 579 * If the request is PR_CHECKPOINT, ignore the rest of signals 580 * or requests. Honor other stop requests or signals later. 581 * Go back to top of loop here to check if an exit or hold 582 * event has occurred while stopped. 583 */ 584 if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) { 585 stop(PR_CHECKPOINT, 0); 586 continue; 587 } 588 589 /* 590 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing 591 * with signals or /proc. Another lwp is executing fork1(), 592 * or is undergoing watchpoint activity (remapping a page), 593 * or is executing lwp_suspend() on this lwp. 594 * Again, go back to top of loop to check if an exit 595 * or hold event has occurred while stopped. 596 */ 597 if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) || 598 (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) { 599 stop(PR_SUSPENDED, SUSPEND_NORMAL); 600 continue; 601 } 602 603 /* 604 * Honor requested stop before dealing with the 605 * current signal; a debugger may change it. 606 * Do not want to go back to loop here since this is a special 607 * stop that means: make incremental progress before the next 608 * stop. The danger is that returning to top of loop would most 609 * likely drop the thread right back here to stop soon after it 610 * was continued, violating the incremental progress request. 611 */ 612 if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop) 613 stop(PR_REQUESTED, 0); 614 615 /* 616 * If a debugger wants us to take a signal it will have 617 * left it in lwp->lwp_cursig. If lwp_cursig has been cleared 618 * or if it's being ignored, we continue on looking for another 619 * signal. Otherwise we return the specified signal, provided 620 * it's not a signal that causes a job control stop. 621 * 622 * When stopped on PR_JOBCONTROL, there is no current 623 * signal; we cancel lwp->lwp_cursig temporarily before 624 * calling isjobstop(). The current signal may be reset 625 * by a debugger while we are stopped in isjobstop(). 626 * 627 * If the current thread is accepting the signal 628 * (via sigwait(), sigwaitinfo(), or sigtimedwait()), 629 * we allow the signal to be accepted, even if it is 630 * being ignored, and without causing a job control stop. 631 */ 632 if ((sig = lwp->lwp_cursig) != 0) { 633 ext = lwp->lwp_extsig; 634 lwp->lwp_cursig = 0; 635 lwp->lwp_extsig = 0; 636 if (sigismember(&t->t_sigwait, sig) || 637 (!sigismember(&p->p_ignore, sig) && 638 !isjobstop(sig))) { 639 if (p->p_flag & (SEXITLWPS|SKILLED)) { 640 sig = SIGKILL; 641 ext = (p->p_flag & SEXTKILLED) != 0; 642 } 643 lwp->lwp_cursig = (uchar_t)sig; 644 lwp->lwp_extsig = (uchar_t)ext; 645 break; 646 } 647 /* 648 * The signal is being ignored or it caused a 649 * job-control stop. If another current signal 650 * has not been established, return the current 651 * siginfo, if any, to the memory manager. 652 */ 653 if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) { 654 siginfofree(lwp->lwp_curinfo); 655 lwp->lwp_curinfo = NULL; 656 } 657 /* 658 * Loop around again in case we were stopped 659 * on a job control signal and a /proc stop 660 * request was posted or another current signal 661 * was established while we were stopped. 662 */ 663 continue; 664 } 665 666 if (p->p_stopsig && !lwp->lwp_nostop && 667 curthread != p->p_agenttp) { 668 /* 669 * Some lwp in the process has already stopped 670 * showing PR_JOBCONTROL. This is a stop in 671 * sympathy with the other lwp, even if this 672 * lwp is blocking the stopping signal. 673 */ 674 stop(PR_JOBCONTROL, p->p_stopsig); 675 continue; 676 } 677 678 /* 679 * Loop on the pending signals until we find a 680 * non-held signal that is traced or not ignored. 681 * First check the signals pending for the lwp, 682 * then the signals pending for the process as a whole. 683 */ 684 for (;;) { 685 if ((sig = fsig(&t->t_sig, t)) != 0) { 686 toproc = 0; 687 if (tracing(p, sig) || 688 sigismember(&t->t_sigwait, sig) || 689 !sigismember(&p->p_ignore, sig)) { 690 if (sigismember(&t->t_extsig, sig)) 691 ext = 1; 692 break; 693 } 694 sigdelset(&t->t_sig, sig); 695 sigdelset(&t->t_extsig, sig); 696 sigdelq(p, t, sig); 697 } else if ((sig = fsig(&p->p_sig, t)) != 0) { 698 if (sig == SIGCLD) 699 sigcld_found = 1; 700 toproc = 1; 701 if (tracing(p, sig) || 702 sigismember(&t->t_sigwait, sig) || 703 !sigismember(&p->p_ignore, sig)) { 704 if (sigismember(&p->p_extsig, sig)) 705 ext = 1; 706 break; 707 } 708 sigdelset(&p->p_sig, sig); 709 sigdelset(&p->p_extsig, sig); 710 sigdelq(p, NULL, sig); 711 } else { 712 /* no signal was found */ 713 break; 714 } 715 } 716 717 if (sig == 0) { /* no signal was found */ 718 if (p->p_flag & (SEXITLWPS|SKILLED)) { 719 lwp->lwp_cursig = SIGKILL; 720 sig = SIGKILL; 721 ext = (p->p_flag & SEXTKILLED) != 0; 722 } 723 break; 724 } 725 726 /* 727 * If we have been informed not to stop (i.e., we are being 728 * called from within a network operation), then don't promote 729 * the signal at this time, just return the signal number. 730 * We will call issig() again later when it is safe. 731 * 732 * fsig() does not return a jobcontrol stopping signal 733 * with a default action of stopping the process if 734 * lwp_nostop is set, so we won't be causing a bogus 735 * EINTR by this action. (Such a signal is eaten by 736 * isjobstop() when we loop around to do final checks.) 737 */ 738 if (lwp->lwp_nostop) { 739 nostop_break = 1; 740 break; 741 } 742 743 /* 744 * Promote the signal from pending to current. 745 * 746 * Note that sigdeq() will set lwp->lwp_curinfo to NULL 747 * if no siginfo_t exists for this signal. 748 */ 749 lwp->lwp_cursig = (uchar_t)sig; 750 lwp->lwp_extsig = (uchar_t)ext; 751 t->t_sig_check = 1; /* so post_syscall will see signal */ 752 ASSERT(lwp->lwp_curinfo == NULL); 753 sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo); 754 755 if (tracing(p, sig)) 756 stop(PR_SIGNALLED, sig); 757 758 /* 759 * Loop around to check for requested stop before 760 * performing the usual current-signal actions. 761 */ 762 } 763 764 mutex_exit(&p->p_lock); 765 766 /* 767 * If SIGCLD was dequeued from the process's signal queue, 768 * search for other pending SIGCLD's from the list of children. 769 */ 770 if (sigcld_found) 771 sigcld_repost(); 772 773 if (sig != 0) 774 (void) undo_watch_step(NULL); 775 776 /* 777 * If we have been blocked since the p_lock was dropped off 778 * above, then this promoted signal might have been handled 779 * already when we were on the way back from sleep queue, so 780 * just ignore it. 781 * If we have been informed not to stop, just return the signal 782 * number. Also see comments above. 783 */ 784 if (!nostop_break) { 785 sig = lwp->lwp_cursig; 786 } 787 788 return (sig != 0); 789 } 790 791 /* 792 * Return true if the process is currently stopped showing PR_JOBCONTROL. 793 * This is true only if all of the process's lwp's are so stopped. 794 * If this is asked by one of the lwps in the process, exclude that lwp. 795 */ 796 int 797 jobstopped(proc_t *p) 798 { 799 kthread_t *t; 800 801 ASSERT(MUTEX_HELD(&p->p_lock)); 802 803 if ((t = p->p_tlist) == NULL) 804 return (0); 805 806 do { 807 thread_lock(t); 808 /* ignore current, zombie and suspended lwps in the test */ 809 if (!(t == curthread || t->t_state == TS_ZOMB || 810 SUSPENDED(t)) && 811 (t->t_state != TS_STOPPED || 812 t->t_whystop != PR_JOBCONTROL)) { 813 thread_unlock(t); 814 return (0); 815 } 816 thread_unlock(t); 817 } while ((t = t->t_forw) != p->p_tlist); 818 819 return (1); 820 } 821 822 /* 823 * Put ourself (curthread) into the stopped state and notify tracers. 824 */ 825 void 826 stop(int why, int what) 827 { 828 kthread_t *t = curthread; 829 proc_t *p = ttoproc(t); 830 klwp_t *lwp = ttolwp(t); 831 kthread_t *tx; 832 lwpent_t *lep; 833 int procstop; 834 int flags = TS_ALLSTART; 835 hrtime_t stoptime; 836 837 /* 838 * Can't stop a system process. 839 */ 840 if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas) 841 return; 842 843 ASSERT(MUTEX_HELD(&p->p_lock)); 844 845 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) { 846 /* 847 * Don't stop an lwp with SIGKILL pending. 848 * Don't stop if the process or lwp is exiting. 849 */ 850 if (lwp->lwp_cursig == SIGKILL || 851 sigismember(&t->t_sig, SIGKILL) || 852 sigismember(&p->p_sig, SIGKILL) || 853 (t->t_proc_flag & TP_LWPEXIT) || 854 (p->p_flag & (SEXITLWPS|SKILLED))) { 855 p->p_stopsig = 0; 856 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP); 857 return; 858 } 859 } 860 861 /* 862 * Make sure we don't deadlock on a recursive call to prstop(). 863 * prstop() sets the lwp_nostop flag. 864 */ 865 if (lwp->lwp_nostop) 866 return; 867 868 /* 869 * Make sure the lwp is in an orderly state for inspection 870 * by a debugger through /proc or for dumping via core(). 871 */ 872 schedctl_finish_sigblock(t); 873 t->t_proc_flag |= TP_STOPPING; /* must set before dropping p_lock */ 874 mutex_exit(&p->p_lock); 875 stoptime = gethrtime(); 876 prstop(why, what); 877 (void) undo_watch_step(NULL); 878 mutex_enter(&p->p_lock); 879 ASSERT(t->t_state == TS_ONPROC); 880 881 switch (why) { 882 case PR_CHECKPOINT: 883 /* 884 * The situation may have changed since we dropped 885 * and reacquired p->p_lock. Double-check now 886 * whether we should stop or not. 887 */ 888 if (!(t->t_proc_flag & TP_CHKPT)) { 889 t->t_proc_flag &= ~TP_STOPPING; 890 return; 891 } 892 t->t_proc_flag &= ~TP_CHKPT; 893 flags &= ~TS_RESUME; 894 break; 895 896 case PR_JOBCONTROL: 897 ASSERT(what == SIGSTOP || what == SIGTSTP || 898 what == SIGTTIN || what == SIGTTOU); 899 flags &= ~TS_XSTART; 900 break; 901 902 case PR_SUSPENDED: 903 ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE); 904 /* 905 * The situation may have changed since we dropped 906 * and reacquired p->p_lock. Double-check now 907 * whether we should stop or not. 908 */ 909 if (what == SUSPEND_PAUSE) { 910 if (!(t->t_proc_flag & TP_PAUSE)) { 911 t->t_proc_flag &= ~TP_STOPPING; 912 return; 913 } 914 flags &= ~TS_UNPAUSE; 915 } else { 916 if (!((t->t_proc_flag & TP_HOLDLWP) || 917 (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) { 918 t->t_proc_flag &= ~TP_STOPPING; 919 return; 920 } 921 /* 922 * If SHOLDFORK is in effect and we are stopping 923 * while asleep (not at the top of the stack), 924 * we return now to allow the hold to take effect 925 * when we reach the top of the kernel stack. 926 */ 927 if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) { 928 t->t_proc_flag &= ~TP_STOPPING; 929 return; 930 } 931 flags &= ~TS_CSTART; 932 } 933 break; 934 935 default: /* /proc stop */ 936 flags &= ~TS_PSTART; 937 /* 938 * Do synchronous stop unless the async-stop flag is set. 939 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set, 940 * then no debugger is present and we also do synchronous stop. 941 */ 942 if ((why != PR_REQUESTED || t->t_dtrace_stop) && 943 !(p->p_proc_flag & P_PR_ASYNC)) { 944 int notify; 945 946 for (tx = t->t_forw; tx != t; tx = tx->t_forw) { 947 notify = 0; 948 thread_lock(tx); 949 if (ISTOPPED(tx) || 950 (tx->t_proc_flag & TP_PRSTOP)) { 951 thread_unlock(tx); 952 continue; 953 } 954 tx->t_proc_flag |= TP_PRSTOP; 955 tx->t_sig_check = 1; 956 if (tx->t_state == TS_SLEEP && 957 (tx->t_flag & T_WAKEABLE)) { 958 /* 959 * Don't actually wake it up if it's 960 * in one of the lwp_*() syscalls. 961 * Mark it virtually stopped and 962 * notify /proc waiters (below). 963 */ 964 if (tx->t_wchan0 == NULL) 965 setrun_locked(tx); 966 else { 967 tx->t_proc_flag |= TP_PRVSTOP; 968 tx->t_stoptime = stoptime; 969 notify = 1; 970 } 971 } 972 973 /* Move waiting thread to run queue */ 974 if (ISWAITING(tx)) 975 setrun_locked(tx); 976 977 /* 978 * force the thread into the kernel 979 * if it is not already there. 980 */ 981 if (tx->t_state == TS_ONPROC && 982 tx->t_cpu != CPU) 983 poke_cpu(tx->t_cpu->cpu_id); 984 thread_unlock(tx); 985 lep = p->p_lwpdir[tx->t_dslot].ld_entry; 986 if (notify && lep->le_trace) 987 prnotify(lep->le_trace); 988 } 989 /* 990 * We do this just in case one of the threads we asked 991 * to stop is in holdlwps() (called from cfork()) or 992 * lwp_suspend(). 993 */ 994 cv_broadcast(&p->p_holdlwps); 995 } 996 break; 997 } 998 999 t->t_stoptime = stoptime; 1000 1001 if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) { 1002 /* 1003 * Determine if the whole process is jobstopped. 1004 */ 1005 if (jobstopped(p)) { 1006 sigqueue_t *sqp; 1007 int sig; 1008 1009 if ((sig = p->p_stopsig) == 0) 1010 p->p_stopsig = (uchar_t)(sig = what); 1011 mutex_exit(&p->p_lock); 1012 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 1013 mutex_enter(&pidlock); 1014 /* 1015 * The last lwp to stop notifies the parent. 1016 * Turn off the CLDCONT flag now so the first 1017 * lwp to continue knows what to do. 1018 */ 1019 p->p_pidflag &= ~CLDCONT; 1020 p->p_wcode = CLD_STOPPED; 1021 p->p_wdata = sig; 1022 sigcld(p, sqp); 1023 /* 1024 * Grab p->p_lock before releasing pidlock so the 1025 * parent and the child don't have a race condition. 1026 */ 1027 mutex_enter(&p->p_lock); 1028 mutex_exit(&pidlock); 1029 p->p_stopsig = 0; 1030 } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) { 1031 /* 1032 * Set p->p_stopsig and wake up sleeping lwps 1033 * so they will stop in sympathy with this lwp. 1034 */ 1035 p->p_stopsig = (uchar_t)what; 1036 pokelwps(p); 1037 /* 1038 * We do this just in case one of the threads we asked 1039 * to stop is in holdlwps() (called from cfork()) or 1040 * lwp_suspend(). 1041 */ 1042 cv_broadcast(&p->p_holdlwps); 1043 } 1044 } 1045 1046 if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) { 1047 /* 1048 * Do process-level notification when all lwps are 1049 * either stopped on events of interest to /proc 1050 * or are stopped showing PR_SUSPENDED or are zombies. 1051 */ 1052 procstop = 1; 1053 for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) { 1054 if (VSTOPPED(tx)) 1055 continue; 1056 thread_lock(tx); 1057 switch (tx->t_state) { 1058 case TS_ZOMB: 1059 break; 1060 case TS_STOPPED: 1061 /* neither ISTOPPED nor SUSPENDED? */ 1062 if ((tx->t_schedflag & 1063 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) == 1064 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) 1065 procstop = 0; 1066 break; 1067 case TS_SLEEP: 1068 /* not paused for watchpoints? */ 1069 if (!(tx->t_flag & T_WAKEABLE) || 1070 tx->t_wchan0 == NULL || 1071 !(tx->t_proc_flag & TP_PAUSE)) 1072 procstop = 0; 1073 break; 1074 default: 1075 procstop = 0; 1076 break; 1077 } 1078 thread_unlock(tx); 1079 } 1080 if (procstop) { 1081 /* there must not be any remapped watched pages now */ 1082 ASSERT(p->p_mapcnt == 0); 1083 if (p->p_proc_flag & P_PR_PTRACE) { 1084 /* ptrace() compatibility */ 1085 mutex_exit(&p->p_lock); 1086 mutex_enter(&pidlock); 1087 p->p_wcode = CLD_TRAPPED; 1088 p->p_wdata = (why == PR_SIGNALLED)? 1089 what : SIGTRAP; 1090 cv_broadcast(&p->p_parent->p_cv); 1091 /* 1092 * Grab p->p_lock before releasing pidlock so 1093 * parent and child don't have a race condition. 1094 */ 1095 mutex_enter(&p->p_lock); 1096 mutex_exit(&pidlock); 1097 } 1098 if (p->p_trace) /* /proc */ 1099 prnotify(p->p_trace); 1100 cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */ 1101 cv_broadcast(&p->p_holdlwps); /* holdwatch() */ 1102 } 1103 if (why != PR_SUSPENDED) { 1104 lep = p->p_lwpdir[t->t_dslot].ld_entry; 1105 if (lep->le_trace) /* /proc */ 1106 prnotify(lep->le_trace); 1107 /* 1108 * Special notification for creation of the agent lwp. 1109 */ 1110 if (t == p->p_agenttp && 1111 (t->t_proc_flag & TP_PRSTOP) && 1112 p->p_trace) 1113 prnotify(p->p_trace); 1114 /* 1115 * The situation may have changed since we dropped 1116 * and reacquired p->p_lock. Double-check now 1117 * whether we should stop or not. 1118 */ 1119 if (!(t->t_proc_flag & TP_STOPPING)) { 1120 if (t->t_proc_flag & TP_PRSTOP) 1121 t->t_proc_flag |= TP_STOPPING; 1122 } 1123 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP); 1124 prnostep(lwp); 1125 } 1126 } 1127 1128 if (why == PR_SUSPENDED) { 1129 1130 /* 1131 * We always broadcast in the case of SUSPEND_PAUSE. This is 1132 * because checks for TP_PAUSE take precedence over checks for 1133 * SHOLDWATCH. If a thread is trying to stop because of 1134 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be 1135 * waiting for the rest of the threads to enter a stopped state. 1136 * If we are stopping for a SUSPEND_PAUSE, we may be the last 1137 * lwp and not know it, so broadcast just in case. 1138 */ 1139 if (what == SUSPEND_PAUSE || 1140 --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP)) 1141 cv_broadcast(&p->p_holdlwps); 1142 1143 } 1144 1145 /* 1146 * Need to do this here (rather than after the thread is officially 1147 * stopped) because we can't call mutex_enter from a stopped thread. 1148 */ 1149 if (why == PR_CHECKPOINT) 1150 del_one_utstop(); 1151 1152 thread_lock(t); 1153 ASSERT((t->t_schedflag & TS_ALLSTART) == 0); 1154 t->t_schedflag |= flags; 1155 t->t_whystop = (short)why; 1156 t->t_whatstop = (short)what; 1157 CL_STOP(t, why, what); 1158 (void) new_mstate(t, LMS_STOPPED); 1159 thread_stop(t); /* set stop state and drop lock */ 1160 1161 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) { 1162 /* 1163 * We may have gotten a SIGKILL or a SIGCONT when 1164 * we released p->p_lock; make one last check. 1165 * Also check for a /proc run-on-last-close. 1166 */ 1167 if (sigismember(&t->t_sig, SIGKILL) || 1168 sigismember(&p->p_sig, SIGKILL) || 1169 (t->t_proc_flag & TP_LWPEXIT) || 1170 (p->p_flag & (SEXITLWPS|SKILLED))) { 1171 p->p_stopsig = 0; 1172 thread_lock(t); 1173 t->t_schedflag |= TS_XSTART | TS_PSTART; 1174 setrun_locked(t); 1175 thread_unlock_nopreempt(t); 1176 } else if (why == PR_JOBCONTROL) { 1177 if (p->p_flag & SSCONT) { 1178 /* 1179 * This resulted from a SIGCONT posted 1180 * while we were not holding p->p_lock. 1181 */ 1182 p->p_stopsig = 0; 1183 thread_lock(t); 1184 t->t_schedflag |= TS_XSTART; 1185 setrun_locked(t); 1186 thread_unlock_nopreempt(t); 1187 } 1188 } else if (!(t->t_proc_flag & TP_STOPPING)) { 1189 /* 1190 * This resulted from a /proc run-on-last-close. 1191 */ 1192 thread_lock(t); 1193 t->t_schedflag |= TS_PSTART; 1194 setrun_locked(t); 1195 thread_unlock_nopreempt(t); 1196 } 1197 } 1198 1199 t->t_proc_flag &= ~TP_STOPPING; 1200 mutex_exit(&p->p_lock); 1201 1202 swtch(); 1203 setallwatch(); /* reestablish any watchpoints set while stopped */ 1204 mutex_enter(&p->p_lock); 1205 prbarrier(p); /* barrier against /proc locking */ 1206 } 1207 1208 /* Interface for resetting user thread stop count. */ 1209 void 1210 utstop_init(void) 1211 { 1212 mutex_enter(&thread_stop_lock); 1213 num_utstop = 0; 1214 mutex_exit(&thread_stop_lock); 1215 } 1216 1217 /* Interface for registering a user thread stop request. */ 1218 void 1219 add_one_utstop(void) 1220 { 1221 mutex_enter(&thread_stop_lock); 1222 num_utstop++; 1223 mutex_exit(&thread_stop_lock); 1224 } 1225 1226 /* Interface for cancelling a user thread stop request */ 1227 void 1228 del_one_utstop(void) 1229 { 1230 mutex_enter(&thread_stop_lock); 1231 num_utstop--; 1232 if (num_utstop == 0) 1233 cv_broadcast(&utstop_cv); 1234 mutex_exit(&thread_stop_lock); 1235 } 1236 1237 /* Interface to wait for all user threads to be stopped */ 1238 void 1239 utstop_timedwait(clock_t ticks) 1240 { 1241 mutex_enter(&thread_stop_lock); 1242 if (num_utstop > 0) 1243 (void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks, 1244 TR_CLOCK_TICK); 1245 mutex_exit(&thread_stop_lock); 1246 } 1247 1248 /* 1249 * Perform the action specified by the current signal. 1250 * The usual sequence is: 1251 * if (issig()) 1252 * psig(); 1253 * The signal bit has already been cleared by issig(), 1254 * the current signal number has been stored in lwp_cursig, 1255 * and the current siginfo is now referenced by lwp_curinfo. 1256 */ 1257 void 1258 psig(void) 1259 { 1260 kthread_t *t = curthread; 1261 proc_t *p = ttoproc(t); 1262 klwp_t *lwp = ttolwp(t); 1263 void (*func)(); 1264 int sig, rc, code, ext; 1265 pid_t pid = -1; 1266 id_t ctid = 0; 1267 zoneid_t zoneid = -1; 1268 sigqueue_t *sqp = NULL; 1269 1270 mutex_enter(&p->p_lock); 1271 schedctl_finish_sigblock(t); 1272 code = CLD_KILLED; 1273 1274 if (p->p_flag & SEXITLWPS) { 1275 lwp_exit(); 1276 return; /* not reached */ 1277 } 1278 sig = lwp->lwp_cursig; 1279 ext = lwp->lwp_extsig; 1280 1281 ASSERT(sig < NSIG); 1282 1283 /* 1284 * Re-check lwp_cursig after we acquire p_lock. Since p_lock was 1285 * dropped between issig() and psig(), a debugger may have cleared 1286 * lwp_cursig via /proc in the intervening window. 1287 */ 1288 if (sig == 0) { 1289 if (lwp->lwp_curinfo) { 1290 siginfofree(lwp->lwp_curinfo); 1291 lwp->lwp_curinfo = NULL; 1292 } 1293 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */ 1294 t->t_flag &= ~T_TOMASK; 1295 t->t_hold = lwp->lwp_sigoldmask; 1296 } 1297 mutex_exit(&p->p_lock); 1298 return; 1299 } 1300 func = PTOU(curproc)->u_signal[sig-1]; 1301 1302 /* 1303 * The signal disposition could have changed since we promoted 1304 * this signal from pending to current (we dropped p->p_lock). 1305 * This can happen only in a multi-threaded process. 1306 */ 1307 if (sigismember(&p->p_ignore, sig) || 1308 (func == SIG_DFL && sigismember(&stopdefault, sig))) { 1309 lwp->lwp_cursig = 0; 1310 lwp->lwp_extsig = 0; 1311 if (lwp->lwp_curinfo) { 1312 siginfofree(lwp->lwp_curinfo); 1313 lwp->lwp_curinfo = NULL; 1314 } 1315 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */ 1316 t->t_flag &= ~T_TOMASK; 1317 t->t_hold = lwp->lwp_sigoldmask; 1318 } 1319 mutex_exit(&p->p_lock); 1320 return; 1321 } 1322 1323 /* 1324 * We check lwp_curinfo first since pr_setsig can actually 1325 * stuff a sigqueue_t there for SIGKILL. 1326 */ 1327 if (lwp->lwp_curinfo) { 1328 sqp = lwp->lwp_curinfo; 1329 } else if (sig == SIGKILL && p->p_killsqp) { 1330 sqp = p->p_killsqp; 1331 } 1332 1333 if (sqp != NULL) { 1334 if (SI_FROMUSER(&sqp->sq_info)) { 1335 pid = sqp->sq_info.si_pid; 1336 ctid = sqp->sq_info.si_ctid; 1337 zoneid = sqp->sq_info.si_zoneid; 1338 } 1339 /* 1340 * If we have a sigqueue_t, its sq_external value 1341 * trumps the lwp_extsig value. It is theoretically 1342 * possible to make lwp_extsig reflect reality, but it 1343 * would unnecessarily complicate things elsewhere. 1344 */ 1345 ext = sqp->sq_external; 1346 } 1347 1348 if (func == SIG_DFL) { 1349 mutex_exit(&p->p_lock); 1350 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *, 1351 NULL, void (*)(void), func); 1352 } else { 1353 k_siginfo_t *sip = NULL; 1354 1355 /* 1356 * If DTrace user-land tracing is active, give DTrace a 1357 * chance to defer the signal until after tracing is 1358 * complete. 1359 */ 1360 if (t->t_dtrace_on && dtrace_safe_defer_signal()) { 1361 mutex_exit(&p->p_lock); 1362 return; 1363 } 1364 1365 /* 1366 * save siginfo pointer here, in case the 1367 * the signal's reset bit is on 1368 * 1369 * The presence of a current signal prevents paging 1370 * from succeeding over a network. We copy the current 1371 * signal information to the side and cancel the current 1372 * signal so that sendsig() will succeed. 1373 */ 1374 if (sigismember(&p->p_siginfo, sig)) { 1375 sip = &lwp->lwp_siginfo; 1376 if (sqp) { 1377 bcopy(&sqp->sq_info, sip, sizeof (*sip)); 1378 /* 1379 * If we were interrupted out of a system call 1380 * due to pthread_cancel(), inform libc. 1381 */ 1382 if (sig == SIGCANCEL && 1383 sip->si_code == SI_LWP && 1384 t->t_sysnum != 0) 1385 schedctl_cancel_eintr(); 1386 } else if (sig == SIGPROF && sip->si_signo == SIGPROF && 1387 t->t_rprof != NULL && t->t_rprof->rp_anystate) { 1388 /* EMPTY */; 1389 } else { 1390 bzero(sip, sizeof (*sip)); 1391 sip->si_signo = sig; 1392 sip->si_code = SI_NOINFO; 1393 } 1394 } 1395 1396 if (t->t_flag & T_TOMASK) 1397 t->t_flag &= ~T_TOMASK; 1398 else 1399 lwp->lwp_sigoldmask = t->t_hold; 1400 sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]); 1401 if (!sigismember(&PTOU(curproc)->u_signodefer, sig)) 1402 sigaddset(&t->t_hold, sig); 1403 if (sigismember(&PTOU(curproc)->u_sigresethand, sig)) 1404 setsigact(sig, SIG_DFL, nullsmask, 0); 1405 1406 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *, 1407 sip, void (*)(void), func); 1408 1409 lwp->lwp_cursig = 0; 1410 lwp->lwp_extsig = 0; 1411 if (lwp->lwp_curinfo) { 1412 /* p->p_killsqp is freed by freeproc */ 1413 siginfofree(lwp->lwp_curinfo); 1414 lwp->lwp_curinfo = NULL; 1415 } 1416 mutex_exit(&p->p_lock); 1417 lwp->lwp_ru.nsignals++; 1418 1419 if (p->p_model == DATAMODEL_NATIVE) 1420 rc = sendsig(sig, sip, func); 1421 #ifdef _SYSCALL32_IMPL 1422 else 1423 rc = sendsig32(sig, sip, func); 1424 #endif /* _SYSCALL32_IMPL */ 1425 if (rc) 1426 return; 1427 sig = lwp->lwp_cursig = SIGSEGV; 1428 ext = 0; /* lwp_extsig was set above */ 1429 pid = -1; 1430 ctid = 0; 1431 } 1432 1433 if (sigismember(&coredefault, sig)) { 1434 /* 1435 * Terminate all LWPs but don't discard them. 1436 * If another lwp beat us to the punch by calling exit(), 1437 * evaporate now. 1438 */ 1439 proc_is_exiting(p); 1440 if (exitlwps(1) != 0) { 1441 mutex_enter(&p->p_lock); 1442 lwp_exit(); 1443 } 1444 /* if we got a SIGKILL from anywhere, no core dump */ 1445 if (p->p_flag & SKILLED) { 1446 sig = SIGKILL; 1447 ext = (p->p_flag & SEXTKILLED) != 0; 1448 } else { 1449 if (audit_active) /* audit core dump */ 1450 audit_core_start(sig); 1451 if (core(sig, ext) == 0) 1452 code = CLD_DUMPED; 1453 if (audit_active) /* audit core dump */ 1454 audit_core_finish(code); 1455 } 1456 } 1457 1458 /* 1459 * Generate a contract event once if the process is killed 1460 * by a signal. 1461 */ 1462 if (ext) { 1463 proc_is_exiting(p); 1464 if (exitlwps(0) != 0) { 1465 mutex_enter(&p->p_lock); 1466 lwp_exit(); 1467 } 1468 contract_process_sig(p->p_ct_process, p, sig, pid, ctid, 1469 zoneid); 1470 } 1471 1472 exit(code, sig); 1473 } 1474 1475 /* 1476 * Find next unheld signal in ssp for thread t. 1477 */ 1478 int 1479 fsig(k_sigset_t *ssp, kthread_t *t) 1480 { 1481 proc_t *p = ttoproc(t); 1482 user_t *up = PTOU(p); 1483 int i; 1484 k_sigset_t temp; 1485 1486 ASSERT(MUTEX_HELD(&p->p_lock)); 1487 1488 /* 1489 * Don't promote any signals for the parent of a vfork()d 1490 * child that hasn't yet released the parent's memory. 1491 */ 1492 if (p->p_flag & SVFWAIT) 1493 return (0); 1494 1495 temp = *ssp; 1496 sigdiffset(&temp, &t->t_hold); 1497 1498 /* 1499 * Don't promote stopping signals (except SIGSTOP) for a child 1500 * of vfork() that hasn't yet released the parent's memory. 1501 */ 1502 if (p->p_flag & SVFORK) 1503 sigdiffset(&temp, &holdvfork); 1504 1505 /* 1506 * Don't promote a signal that will stop 1507 * the process when lwp_nostop is set. 1508 */ 1509 if (ttolwp(t)->lwp_nostop) { 1510 sigdelset(&temp, SIGSTOP); 1511 if (!p->p_pgidp->pid_pgorphaned) { 1512 if (up->u_signal[SIGTSTP-1] == SIG_DFL) 1513 sigdelset(&temp, SIGTSTP); 1514 if (up->u_signal[SIGTTIN-1] == SIG_DFL) 1515 sigdelset(&temp, SIGTTIN); 1516 if (up->u_signal[SIGTTOU-1] == SIG_DFL) 1517 sigdelset(&temp, SIGTTOU); 1518 } 1519 } 1520 1521 /* 1522 * Choose SIGKILL and SIGPROF before all other pending signals. 1523 * The rest are promoted in signal number order. 1524 */ 1525 if (sigismember(&temp, SIGKILL)) 1526 return (SIGKILL); 1527 if (sigismember(&temp, SIGPROF)) 1528 return (SIGPROF); 1529 1530 for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) { 1531 if (temp.__sigbits[i]) 1532 return ((i * NBBY * sizeof (temp.__sigbits[0])) + 1533 lowbit(temp.__sigbits[i])); 1534 } 1535 1536 return (0); 1537 } 1538 1539 void 1540 setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags) 1541 { 1542 proc_t *p = ttoproc(curthread); 1543 kthread_t *t; 1544 1545 ASSERT(MUTEX_HELD(&p->p_lock)); 1546 1547 PTOU(curproc)->u_signal[sig - 1] = disp; 1548 1549 /* 1550 * Honor the SA_SIGINFO flag if the signal is being caught. 1551 * Force the SA_SIGINFO flag if the signal is not being caught. 1552 * This is necessary to make sigqueue() and sigwaitinfo() work 1553 * properly together when the signal is set to default or is 1554 * being temporarily ignored. 1555 */ 1556 if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN) 1557 sigaddset(&p->p_siginfo, sig); 1558 else 1559 sigdelset(&p->p_siginfo, sig); 1560 1561 if (disp != SIG_DFL && disp != SIG_IGN) { 1562 sigdelset(&p->p_ignore, sig); 1563 PTOU(curproc)->u_sigmask[sig - 1] = mask; 1564 if (!sigismember(&cantreset, sig)) { 1565 if (flags & SA_RESETHAND) 1566 sigaddset(&PTOU(curproc)->u_sigresethand, sig); 1567 else 1568 sigdelset(&PTOU(curproc)->u_sigresethand, sig); 1569 } 1570 if (flags & SA_NODEFER) 1571 sigaddset(&PTOU(curproc)->u_signodefer, sig); 1572 else 1573 sigdelset(&PTOU(curproc)->u_signodefer, sig); 1574 if (flags & SA_RESTART) 1575 sigaddset(&PTOU(curproc)->u_sigrestart, sig); 1576 else 1577 sigdelset(&PTOU(curproc)->u_sigrestart, sig); 1578 if (flags & SA_ONSTACK) 1579 sigaddset(&PTOU(curproc)->u_sigonstack, sig); 1580 else 1581 sigdelset(&PTOU(curproc)->u_sigonstack, sig); 1582 } else if (disp == SIG_IGN || 1583 (disp == SIG_DFL && sigismember(&ignoredefault, sig))) { 1584 /* 1585 * Setting the signal action to SIG_IGN results in the 1586 * discarding of all pending signals of that signal number. 1587 * Setting the signal action to SIG_DFL does the same *only* 1588 * if the signal's default behavior is to be ignored. 1589 */ 1590 sigaddset(&p->p_ignore, sig); 1591 sigdelset(&p->p_sig, sig); 1592 sigdelset(&p->p_extsig, sig); 1593 sigdelq(p, NULL, sig); 1594 t = p->p_tlist; 1595 do { 1596 sigdelset(&t->t_sig, sig); 1597 sigdelset(&t->t_extsig, sig); 1598 sigdelq(p, t, sig); 1599 } while ((t = t->t_forw) != p->p_tlist); 1600 } else { 1601 /* 1602 * The signal action is being set to SIG_DFL and the default 1603 * behavior is to do something: make sure it is not ignored. 1604 */ 1605 sigdelset(&p->p_ignore, sig); 1606 } 1607 1608 if (sig == SIGCLD) { 1609 if (flags & SA_NOCLDWAIT) 1610 p->p_flag |= SNOWAIT; 1611 else 1612 p->p_flag &= ~SNOWAIT; 1613 1614 if (flags & SA_NOCLDSTOP) 1615 p->p_flag &= ~SJCTL; 1616 else 1617 p->p_flag |= SJCTL; 1618 1619 if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) { 1620 proc_t *cp, *tp; 1621 1622 mutex_exit(&p->p_lock); 1623 mutex_enter(&pidlock); 1624 for (cp = p->p_child; cp != NULL; cp = tp) { 1625 tp = cp->p_sibling; 1626 if (cp->p_stat == SZOMB && 1627 !(cp->p_pidflag & CLDWAITPID)) 1628 freeproc(cp); 1629 } 1630 mutex_exit(&pidlock); 1631 mutex_enter(&p->p_lock); 1632 } 1633 } 1634 } 1635 1636 /* 1637 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL. 1638 * Called from exec_common() for a process undergoing execve() 1639 * and from cfork() for a newly-created child of vfork(). 1640 * In the vfork() case, 'p' is not the current process. 1641 * In both cases, there is only one thread in the process. 1642 */ 1643 void 1644 sigdefault(proc_t *p) 1645 { 1646 kthread_t *t = p->p_tlist; 1647 struct user *up = PTOU(p); 1648 int sig; 1649 1650 ASSERT(MUTEX_HELD(&p->p_lock)); 1651 1652 for (sig = 1; sig < NSIG; sig++) { 1653 if (up->u_signal[sig - 1] != SIG_DFL && 1654 up->u_signal[sig - 1] != SIG_IGN) { 1655 up->u_signal[sig - 1] = SIG_DFL; 1656 sigemptyset(&up->u_sigmask[sig - 1]); 1657 if (sigismember(&ignoredefault, sig)) { 1658 sigdelq(p, NULL, sig); 1659 sigdelq(p, t, sig); 1660 } 1661 if (sig == SIGCLD) 1662 p->p_flag &= ~(SNOWAIT|SJCTL); 1663 } 1664 } 1665 sigorset(&p->p_ignore, &ignoredefault); 1666 sigfillset(&p->p_siginfo); 1667 sigdiffset(&p->p_siginfo, &cantmask); 1668 sigdiffset(&p->p_sig, &ignoredefault); 1669 sigdiffset(&p->p_extsig, &ignoredefault); 1670 sigdiffset(&t->t_sig, &ignoredefault); 1671 sigdiffset(&t->t_extsig, &ignoredefault); 1672 } 1673 1674 void 1675 sigcld(proc_t *cp, sigqueue_t *sqp) 1676 { 1677 proc_t *pp = cp->p_parent; 1678 1679 ASSERT(MUTEX_HELD(&pidlock)); 1680 1681 switch (cp->p_wcode) { 1682 case CLD_EXITED: 1683 case CLD_DUMPED: 1684 case CLD_KILLED: 1685 ASSERT(cp->p_stat == SZOMB); 1686 /* 1687 * The broadcast on p_srwchan_cv is a kludge to 1688 * wakeup a possible thread in uadmin(A_SHUTDOWN). 1689 */ 1690 cv_broadcast(&cp->p_srwchan_cv); 1691 1692 /* 1693 * Add to newstate list of the parent 1694 */ 1695 add_ns(pp, cp); 1696 1697 cv_broadcast(&pp->p_cv); 1698 if ((pp->p_flag & SNOWAIT) || 1699 PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) { 1700 if (!(cp->p_pidflag & CLDWAITPID)) 1701 freeproc(cp); 1702 } else if (!(cp->p_pidflag & CLDNOSIGCHLD)) { 1703 post_sigcld(cp, sqp); 1704 sqp = NULL; 1705 } 1706 break; 1707 1708 case CLD_STOPPED: 1709 case CLD_CONTINUED: 1710 cv_broadcast(&pp->p_cv); 1711 if (pp->p_flag & SJCTL) { 1712 post_sigcld(cp, sqp); 1713 sqp = NULL; 1714 } 1715 break; 1716 } 1717 1718 if (sqp) 1719 siginfofree(sqp); 1720 } 1721 1722 /* 1723 * Common code called from sigcld() and from 1724 * waitid() and issig_forreal() via sigcld_repost(). 1725 * Give the parent process a SIGCLD if it does not have one pending, 1726 * else mark the child process so a SIGCLD can be posted later. 1727 */ 1728 static void 1729 post_sigcld(proc_t *cp, sigqueue_t *sqp) 1730 { 1731 proc_t *pp = cp->p_parent; 1732 k_siginfo_t info; 1733 1734 ASSERT(MUTEX_HELD(&pidlock)); 1735 mutex_enter(&pp->p_lock); 1736 1737 /* 1738 * If a SIGCLD is pending, then just mark the child process 1739 * so that its SIGCLD will be posted later, when the first 1740 * SIGCLD is taken off the queue or when the parent is ready 1741 * to receive it or accept it, if ever. 1742 */ 1743 if (sigismember(&pp->p_sig, SIGCLD)) { 1744 cp->p_pidflag |= CLDPEND; 1745 } else { 1746 cp->p_pidflag &= ~CLDPEND; 1747 if (sqp == NULL) { 1748 /* 1749 * This can only happen when the parent is init. 1750 * (See call to sigcld(q, NULL) in exit().) 1751 * Use KM_NOSLEEP to avoid deadlock. 1752 */ 1753 ASSERT(pp == proc_init); 1754 winfo(cp, &info, 0); 1755 sigaddq(pp, NULL, &info, KM_NOSLEEP); 1756 } else { 1757 winfo(cp, &sqp->sq_info, 0); 1758 sigaddqa(pp, NULL, sqp); 1759 sqp = NULL; 1760 } 1761 } 1762 1763 mutex_exit(&pp->p_lock); 1764 1765 if (sqp) 1766 siginfofree(sqp); 1767 } 1768 1769 /* 1770 * Search for a child that has a pending SIGCLD for us, the parent. 1771 * The queue of SIGCLD signals is implied by the list of children. 1772 * We post the SIGCLD signals one at a time so they don't get lost. 1773 * When one is dequeued, another is enqueued, until there are no more. 1774 */ 1775 void 1776 sigcld_repost() 1777 { 1778 proc_t *pp = curproc; 1779 proc_t *cp; 1780 sigqueue_t *sqp; 1781 1782 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 1783 mutex_enter(&pidlock); 1784 for (cp = pp->p_child; cp; cp = cp->p_sibling) { 1785 if (cp->p_pidflag & CLDPEND) { 1786 post_sigcld(cp, sqp); 1787 mutex_exit(&pidlock); 1788 return; 1789 } 1790 } 1791 mutex_exit(&pidlock); 1792 kmem_free(sqp, sizeof (sigqueue_t)); 1793 } 1794 1795 /* 1796 * count number of sigqueue send by sigaddqa() 1797 */ 1798 void 1799 sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp) 1800 { 1801 sigqhdr_t *sqh; 1802 1803 sqh = (sigqhdr_t *)sigqp->sq_backptr; 1804 ASSERT(sqh); 1805 1806 mutex_enter(&sqh->sqb_lock); 1807 sqh->sqb_sent++; 1808 mutex_exit(&sqh->sqb_lock); 1809 1810 if (cmd == SN_SEND) 1811 sigaddqa(p, t, sigqp); 1812 else 1813 siginfofree(sigqp); 1814 } 1815 1816 int 1817 sigsendproc(proc_t *p, sigsend_t *pv) 1818 { 1819 struct cred *cr; 1820 proc_t *myprocp = curproc; 1821 1822 ASSERT(MUTEX_HELD(&pidlock)); 1823 1824 if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig)) 1825 return (EPERM); 1826 1827 cr = CRED(); 1828 1829 if (pv->checkperm == 0 || 1830 (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) || 1831 prochasprocperm(p, myprocp, cr)) { 1832 pv->perm++; 1833 if (pv->sig) { 1834 /* Make sure we should be setting si_pid and friends */ 1835 ASSERT(pv->sicode <= 0); 1836 if (SI_CANQUEUE(pv->sicode)) { 1837 sigqueue_t *sqp; 1838 1839 mutex_enter(&myprocp->p_lock); 1840 sqp = sigqalloc(myprocp->p_sigqhdr); 1841 mutex_exit(&myprocp->p_lock); 1842 if (sqp == NULL) 1843 return (EAGAIN); 1844 sqp->sq_info.si_signo = pv->sig; 1845 sqp->sq_info.si_code = pv->sicode; 1846 sqp->sq_info.si_pid = myprocp->p_pid; 1847 sqp->sq_info.si_ctid = PRCTID(myprocp); 1848 sqp->sq_info.si_zoneid = getzoneid(); 1849 sqp->sq_info.si_uid = crgetruid(cr); 1850 sqp->sq_info.si_value = pv->value; 1851 mutex_enter(&p->p_lock); 1852 sigqsend(SN_SEND, p, NULL, sqp); 1853 mutex_exit(&p->p_lock); 1854 } else { 1855 k_siginfo_t info; 1856 bzero(&info, sizeof (info)); 1857 info.si_signo = pv->sig; 1858 info.si_code = pv->sicode; 1859 info.si_pid = myprocp->p_pid; 1860 info.si_ctid = PRCTID(myprocp); 1861 info.si_zoneid = getzoneid(); 1862 info.si_uid = crgetruid(cr); 1863 mutex_enter(&p->p_lock); 1864 /* 1865 * XXX: Should be KM_SLEEP but 1866 * we have to avoid deadlock. 1867 */ 1868 sigaddq(p, NULL, &info, KM_NOSLEEP); 1869 mutex_exit(&p->p_lock); 1870 } 1871 } 1872 } 1873 1874 return (0); 1875 } 1876 1877 int 1878 sigsendset(procset_t *psp, sigsend_t *pv) 1879 { 1880 int error; 1881 1882 error = dotoprocs(psp, sigsendproc, (char *)pv); 1883 if (error == 0 && pv->perm == 0) 1884 return (EPERM); 1885 1886 return (error); 1887 } 1888 1889 /* 1890 * Dequeue a queued siginfo structure. 1891 * If a non-null thread pointer is passed then dequeue from 1892 * the thread queue, otherwise dequeue from the process queue. 1893 */ 1894 void 1895 sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp) 1896 { 1897 sigqueue_t **psqp, *sqp; 1898 1899 ASSERT(MUTEX_HELD(&p->p_lock)); 1900 1901 *qpp = NULL; 1902 1903 if (t != NULL) { 1904 sigdelset(&t->t_sig, sig); 1905 sigdelset(&t->t_extsig, sig); 1906 psqp = &t->t_sigqueue; 1907 } else { 1908 sigdelset(&p->p_sig, sig); 1909 sigdelset(&p->p_extsig, sig); 1910 psqp = &p->p_sigqueue; 1911 } 1912 1913 for (;;) { 1914 if ((sqp = *psqp) == NULL) 1915 return; 1916 if (sqp->sq_info.si_signo == sig) 1917 break; 1918 else 1919 psqp = &sqp->sq_next; 1920 } 1921 *qpp = sqp; 1922 *psqp = sqp->sq_next; 1923 for (sqp = *psqp; sqp; sqp = sqp->sq_next) { 1924 if (sqp->sq_info.si_signo == sig) { 1925 if (t != (kthread_t *)NULL) { 1926 sigaddset(&t->t_sig, sig); 1927 t->t_sig_check = 1; 1928 } else { 1929 sigaddset(&p->p_sig, sig); 1930 set_proc_ast(p); 1931 } 1932 break; 1933 } 1934 } 1935 } 1936 1937 /* 1938 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument. 1939 */ 1940 void 1941 sigcld_delete(k_siginfo_t *ip) 1942 { 1943 proc_t *p = curproc; 1944 int another_sigcld = 0; 1945 sigqueue_t **psqp, *sqp; 1946 1947 ASSERT(ip->si_signo == SIGCLD); 1948 1949 mutex_enter(&p->p_lock); 1950 1951 if (!sigismember(&p->p_sig, SIGCLD)) { 1952 mutex_exit(&p->p_lock); 1953 return; 1954 } 1955 1956 psqp = &p->p_sigqueue; 1957 for (;;) { 1958 if ((sqp = *psqp) == NULL) { 1959 mutex_exit(&p->p_lock); 1960 return; 1961 } 1962 if (sqp->sq_info.si_signo == SIGCLD) { 1963 if (sqp->sq_info.si_pid == ip->si_pid && 1964 sqp->sq_info.si_code == ip->si_code && 1965 sqp->sq_info.si_status == ip->si_status) 1966 break; 1967 another_sigcld = 1; 1968 } 1969 psqp = &sqp->sq_next; 1970 } 1971 *psqp = sqp->sq_next; 1972 1973 siginfofree(sqp); 1974 1975 for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) { 1976 if (sqp->sq_info.si_signo == SIGCLD) 1977 another_sigcld = 1; 1978 } 1979 1980 if (!another_sigcld) { 1981 sigdelset(&p->p_sig, SIGCLD); 1982 sigdelset(&p->p_extsig, SIGCLD); 1983 } 1984 1985 mutex_exit(&p->p_lock); 1986 } 1987 1988 /* 1989 * Delete queued siginfo structures. 1990 * If a non-null thread pointer is passed then delete from 1991 * the thread queue, otherwise delete from the process queue. 1992 */ 1993 void 1994 sigdelq(proc_t *p, kthread_t *t, int sig) 1995 { 1996 sigqueue_t **psqp, *sqp; 1997 1998 /* 1999 * We must be holding p->p_lock unless the process is 2000 * being reaped or has failed to get started on fork. 2001 */ 2002 ASSERT(MUTEX_HELD(&p->p_lock) || 2003 p->p_stat == SIDL || p->p_stat == SZOMB); 2004 2005 if (t != (kthread_t *)NULL) 2006 psqp = &t->t_sigqueue; 2007 else 2008 psqp = &p->p_sigqueue; 2009 2010 while (*psqp) { 2011 sqp = *psqp; 2012 if (sig == 0 || sqp->sq_info.si_signo == sig) { 2013 *psqp = sqp->sq_next; 2014 siginfofree(sqp); 2015 } else 2016 psqp = &sqp->sq_next; 2017 } 2018 } 2019 2020 /* 2021 * Insert a siginfo structure into a queue. 2022 * If a non-null thread pointer is passed then add to the thread queue, 2023 * otherwise add to the process queue. 2024 * 2025 * The function sigaddqins() is called with sigqueue already allocated. 2026 * It is called from sigaddqa() and sigaddq() below. 2027 * 2028 * The value of si_code implicitly indicates whether sigp is to be 2029 * explicitly queued, or to be queued to depth one. 2030 */ 2031 static void 2032 sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp) 2033 { 2034 sigqueue_t **psqp; 2035 int sig = sigqp->sq_info.si_signo; 2036 2037 sigqp->sq_external = (curproc != &p0) && 2038 (curproc->p_ct_process != p->p_ct_process); 2039 2040 /* 2041 * issig_forreal() doesn't bother dequeueing signals if SKILLED 2042 * is set, and even if it did, we would want to avoid situation 2043 * (which would be unique to SIGKILL) where one thread dequeued 2044 * the sigqueue_t and another executed psig(). So we create a 2045 * separate stash for SIGKILL's sigqueue_t. Because a second 2046 * SIGKILL can set SEXTKILLED, we overwrite the existing entry 2047 * if (and only if) it was non-extracontractual. 2048 */ 2049 if (sig == SIGKILL) { 2050 if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) { 2051 if (p->p_killsqp != NULL) 2052 siginfofree(p->p_killsqp); 2053 p->p_killsqp = sigqp; 2054 sigqp->sq_next = NULL; 2055 } else { 2056 siginfofree(sigqp); 2057 } 2058 return; 2059 } 2060 2061 ASSERT(sig >= 1 && sig < NSIG); 2062 if (t != NULL) /* directed to a thread */ 2063 psqp = &t->t_sigqueue; 2064 else /* directed to a process */ 2065 psqp = &p->p_sigqueue; 2066 if (SI_CANQUEUE(sigqp->sq_info.si_code) && 2067 sigismember(&p->p_siginfo, sig)) { 2068 for (; *psqp != NULL; psqp = &(*psqp)->sq_next) 2069 ; 2070 } else { 2071 for (; *psqp != NULL; psqp = &(*psqp)->sq_next) { 2072 if ((*psqp)->sq_info.si_signo == sig) { 2073 siginfofree(sigqp); 2074 return; 2075 } 2076 } 2077 } 2078 *psqp = sigqp; 2079 sigqp->sq_next = NULL; 2080 } 2081 2082 /* 2083 * The function sigaddqa() is called with sigqueue already allocated. 2084 * If signal is ignored, discard but guarantee KILL and generation semantics. 2085 * It is called from sigqueue() and other places. 2086 */ 2087 void 2088 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp) 2089 { 2090 int sig = sigqp->sq_info.si_signo; 2091 2092 ASSERT(MUTEX_HELD(&p->p_lock)); 2093 ASSERT(sig >= 1 && sig < NSIG); 2094 2095 if (sig_discardable(p, sig)) 2096 siginfofree(sigqp); 2097 else 2098 sigaddqins(p, t, sigqp); 2099 2100 sigtoproc(p, t, sig); 2101 } 2102 2103 /* 2104 * Allocate the sigqueue_t structure and call sigaddqins(). 2105 */ 2106 void 2107 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags) 2108 { 2109 sigqueue_t *sqp; 2110 int sig = infop->si_signo; 2111 2112 ASSERT(MUTEX_HELD(&p->p_lock)); 2113 ASSERT(sig >= 1 && sig < NSIG); 2114 2115 /* 2116 * If the signal will be discarded by sigtoproc() or 2117 * if the process isn't requesting siginfo and it isn't 2118 * blocking the signal (it *could* change it's mind while 2119 * the signal is pending) then don't bother creating one. 2120 */ 2121 if (!sig_discardable(p, sig) && 2122 (sigismember(&p->p_siginfo, sig) || 2123 (curproc->p_ct_process != p->p_ct_process) || 2124 (sig == SIGCLD && SI_FROMKERNEL(infop))) && 2125 ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) { 2126 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t)); 2127 sqp->sq_func = NULL; 2128 sqp->sq_next = NULL; 2129 sigaddqins(p, t, sqp); 2130 } 2131 sigtoproc(p, t, sig); 2132 } 2133 2134 /* 2135 * Handle stop-on-fault processing for the debugger. Returns 0 2136 * if the fault is cleared during the stop, nonzero if it isn't. 2137 */ 2138 int 2139 stop_on_fault(uint_t fault, k_siginfo_t *sip) 2140 { 2141 proc_t *p = ttoproc(curthread); 2142 klwp_t *lwp = ttolwp(curthread); 2143 2144 ASSERT(prismember(&p->p_fltmask, fault)); 2145 2146 /* 2147 * Record current fault and siginfo structure so debugger can 2148 * find it. 2149 */ 2150 mutex_enter(&p->p_lock); 2151 lwp->lwp_curflt = (uchar_t)fault; 2152 lwp->lwp_siginfo = *sip; 2153 2154 stop(PR_FAULTED, fault); 2155 2156 fault = lwp->lwp_curflt; 2157 lwp->lwp_curflt = 0; 2158 mutex_exit(&p->p_lock); 2159 return (fault); 2160 } 2161 2162 void 2163 sigorset(k_sigset_t *s1, k_sigset_t *s2) 2164 { 2165 s1->__sigbits[0] |= s2->__sigbits[0]; 2166 s1->__sigbits[1] |= s2->__sigbits[1]; 2167 } 2168 2169 void 2170 sigandset(k_sigset_t *s1, k_sigset_t *s2) 2171 { 2172 s1->__sigbits[0] &= s2->__sigbits[0]; 2173 s1->__sigbits[1] &= s2->__sigbits[1]; 2174 } 2175 2176 void 2177 sigdiffset(k_sigset_t *s1, k_sigset_t *s2) 2178 { 2179 s1->__sigbits[0] &= ~(s2->__sigbits[0]); 2180 s1->__sigbits[1] &= ~(s2->__sigbits[1]); 2181 } 2182 2183 /* 2184 * Return non-zero if curthread->t_sig_check should be set to 1, that is, 2185 * if there are any signals the thread might take on return from the kernel. 2186 * If ksigset_t's were a single word, we would do: 2187 * return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset); 2188 */ 2189 int 2190 sigcheck(proc_t *p, kthread_t *t) 2191 { 2192 sc_shared_t *tdp = t->t_schedctl; 2193 2194 /* 2195 * If signals are blocked via the schedctl interface 2196 * then we only check for the unmaskable signals. 2197 */ 2198 if (tdp != NULL && tdp->sc_sigblock) 2199 return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) & 2200 CANTMASK0); 2201 2202 return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) & 2203 ~t->t_hold.__sigbits[0]) | 2204 (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) & 2205 ~t->t_hold.__sigbits[1]) & FILLSET1)); 2206 } 2207 2208 /* ONC_PLUS EXTRACT START */ 2209 void 2210 sigintr(k_sigset_t *smask, int intable) 2211 { 2212 proc_t *p; 2213 int owned; 2214 k_sigset_t lmask; /* local copy of cantmask */ 2215 klwp_t *lwp = ttolwp(curthread); 2216 2217 /* 2218 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT 2219 * and SIGTERM. (Preserving the existing masks). 2220 * This function supports the -intr nfs and ufs mount option. 2221 */ 2222 2223 /* 2224 * don't do kernel threads 2225 */ 2226 if (lwp == NULL) 2227 return; 2228 2229 /* 2230 * get access to signal mask 2231 */ 2232 p = ttoproc(curthread); 2233 owned = mutex_owned(&p->p_lock); /* this is filthy */ 2234 if (!owned) 2235 mutex_enter(&p->p_lock); 2236 2237 /* 2238 * remember the current mask 2239 */ 2240 schedctl_finish_sigblock(curthread); 2241 *smask = curthread->t_hold; 2242 2243 /* 2244 * mask out all signals 2245 */ 2246 sigfillset(&curthread->t_hold); 2247 2248 /* 2249 * Unmask the non-maskable signals (e.g., KILL), as long as 2250 * they aren't already masked (which could happen at exit). 2251 * The first sigdiffset sets lmask to (cantmask & ~curhold). The 2252 * second sets the current hold mask to (~0 & ~lmask), which reduces 2253 * to (~cantmask | curhold). 2254 */ 2255 lmask = cantmask; 2256 sigdiffset(&lmask, smask); 2257 sigdiffset(&curthread->t_hold, &lmask); 2258 2259 /* 2260 * Re-enable HUP, QUIT, and TERM iff they were originally enabled 2261 * Re-enable INT if it's originally enabled and the NFS mount option 2262 * nointr is not set. 2263 */ 2264 if (!sigismember(smask, SIGHUP)) 2265 sigdelset(&curthread->t_hold, SIGHUP); 2266 if (!sigismember(smask, SIGINT) && intable) 2267 sigdelset(&curthread->t_hold, SIGINT); 2268 if (!sigismember(smask, SIGQUIT)) 2269 sigdelset(&curthread->t_hold, SIGQUIT); 2270 if (!sigismember(smask, SIGTERM)) 2271 sigdelset(&curthread->t_hold, SIGTERM); 2272 2273 /* 2274 * release access to signal mask 2275 */ 2276 if (!owned) 2277 mutex_exit(&p->p_lock); 2278 2279 /* 2280 * Indicate that this lwp is not to be stopped. 2281 */ 2282 lwp->lwp_nostop++; 2283 2284 } 2285 /* ONC_PLUS EXTRACT END */ 2286 2287 void 2288 sigunintr(k_sigset_t *smask) 2289 { 2290 proc_t *p; 2291 int owned; 2292 klwp_t *lwp = ttolwp(curthread); 2293 2294 /* 2295 * Reset previous mask (See sigintr() above) 2296 */ 2297 if (lwp != NULL) { 2298 lwp->lwp_nostop--; /* restore lwp stoppability */ 2299 p = ttoproc(curthread); 2300 owned = mutex_owned(&p->p_lock); /* this is filthy */ 2301 if (!owned) 2302 mutex_enter(&p->p_lock); 2303 curthread->t_hold = *smask; 2304 /* so unmasked signals will be seen */ 2305 curthread->t_sig_check = 1; 2306 if (!owned) 2307 mutex_exit(&p->p_lock); 2308 } 2309 } 2310 2311 void 2312 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask) 2313 { 2314 proc_t *p; 2315 int owned; 2316 /* 2317 * Save current signal mask in oldmask, then 2318 * set it to newmask. 2319 */ 2320 if (ttolwp(curthread) != NULL) { 2321 p = ttoproc(curthread); 2322 owned = mutex_owned(&p->p_lock); /* this is filthy */ 2323 if (!owned) 2324 mutex_enter(&p->p_lock); 2325 schedctl_finish_sigblock(curthread); 2326 if (oldmask != NULL) 2327 *oldmask = curthread->t_hold; 2328 curthread->t_hold = *newmask; 2329 curthread->t_sig_check = 1; 2330 if (!owned) 2331 mutex_exit(&p->p_lock); 2332 } 2333 } 2334 2335 /* 2336 * Return true if the signal number is in range 2337 * and the signal code specifies signal queueing. 2338 */ 2339 int 2340 sigwillqueue(int sig, int code) 2341 { 2342 if (sig >= 0 && sig < NSIG) { 2343 switch (code) { 2344 case SI_QUEUE: 2345 case SI_TIMER: 2346 case SI_ASYNCIO: 2347 case SI_MESGQ: 2348 return (1); 2349 } 2350 } 2351 return (0); 2352 } 2353 2354 #ifndef UCHAR_MAX 2355 #define UCHAR_MAX 255 2356 #endif 2357 2358 /* 2359 * The entire pool (with maxcount entries) is pre-allocated at 2360 * the first sigqueue/signotify call. 2361 */ 2362 sigqhdr_t * 2363 sigqhdralloc(size_t size, uint_t maxcount) 2364 { 2365 size_t i; 2366 sigqueue_t *sq, *next; 2367 sigqhdr_t *sqh; 2368 2369 i = (maxcount * size) + sizeof (sigqhdr_t); 2370 ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX); 2371 sqh = kmem_alloc(i, KM_SLEEP); 2372 sqh->sqb_count = (uchar_t)maxcount; 2373 sqh->sqb_maxcount = (uchar_t)maxcount; 2374 sqh->sqb_size = (ushort_t)i; 2375 sqh->sqb_pexited = 0; 2376 sqh->sqb_sent = 0; 2377 sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1); 2378 for (i = maxcount - 1; i != 0; i--) { 2379 next = (sigqueue_t *)((uintptr_t)sq + size); 2380 sq->sq_next = next; 2381 sq = next; 2382 } 2383 sq->sq_next = NULL; 2384 cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL); 2385 mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL); 2386 return (sqh); 2387 } 2388 2389 static void sigqrel(sigqueue_t *); 2390 2391 /* 2392 * allocate a sigqueue/signotify structure from the per process 2393 * pre-allocated pool. 2394 */ 2395 sigqueue_t * 2396 sigqalloc(sigqhdr_t *sqh) 2397 { 2398 sigqueue_t *sq = NULL; 2399 2400 ASSERT(MUTEX_HELD(&curproc->p_lock)); 2401 2402 if (sqh != NULL) { 2403 mutex_enter(&sqh->sqb_lock); 2404 if (sqh->sqb_count > 0) { 2405 sqh->sqb_count--; 2406 sq = sqh->sqb_free; 2407 sqh->sqb_free = sq->sq_next; 2408 mutex_exit(&sqh->sqb_lock); 2409 bzero(&sq->sq_info, sizeof (k_siginfo_t)); 2410 sq->sq_backptr = sqh; 2411 sq->sq_func = sigqrel; 2412 sq->sq_next = NULL; 2413 sq->sq_external = 0; 2414 } else { 2415 mutex_exit(&sqh->sqb_lock); 2416 } 2417 } 2418 return (sq); 2419 } 2420 2421 /* 2422 * Return a sigqueue structure back to the pre-allocated pool. 2423 */ 2424 static void 2425 sigqrel(sigqueue_t *sq) 2426 { 2427 sigqhdr_t *sqh; 2428 2429 /* make sure that p_lock of the affected process is held */ 2430 2431 sqh = (sigqhdr_t *)sq->sq_backptr; 2432 mutex_enter(&sqh->sqb_lock); 2433 if (sqh->sqb_pexited && sqh->sqb_sent == 1) { 2434 mutex_exit(&sqh->sqb_lock); 2435 cv_destroy(&sqh->sqb_cv); 2436 mutex_destroy(&sqh->sqb_lock); 2437 kmem_free(sqh, sqh->sqb_size); 2438 } else { 2439 sqh->sqb_count++; 2440 sqh->sqb_sent--; 2441 sq->sq_next = sqh->sqb_free; 2442 sq->sq_backptr = NULL; 2443 sqh->sqb_free = sq; 2444 cv_signal(&sqh->sqb_cv); 2445 mutex_exit(&sqh->sqb_lock); 2446 } 2447 } 2448 2449 /* 2450 * Free up the pre-allocated sigqueue headers of sigqueue pool 2451 * and signotify pool, if possible. 2452 * Called only by the owning process during exec() and exit(). 2453 */ 2454 void 2455 sigqfree(proc_t *p) 2456 { 2457 ASSERT(MUTEX_HELD(&p->p_lock)); 2458 2459 if (p->p_sigqhdr != NULL) { /* sigqueue pool */ 2460 sigqhdrfree(p->p_sigqhdr); 2461 p->p_sigqhdr = NULL; 2462 } 2463 if (p->p_signhdr != NULL) { /* signotify pool */ 2464 sigqhdrfree(p->p_signhdr); 2465 p->p_signhdr = NULL; 2466 } 2467 } 2468 2469 /* 2470 * Free up the pre-allocated header and sigq pool if possible. 2471 */ 2472 void 2473 sigqhdrfree(sigqhdr_t *sqh) 2474 { 2475 mutex_enter(&sqh->sqb_lock); 2476 if (sqh->sqb_sent == 0) { 2477 mutex_exit(&sqh->sqb_lock); 2478 cv_destroy(&sqh->sqb_cv); 2479 mutex_destroy(&sqh->sqb_lock); 2480 kmem_free(sqh, sqh->sqb_size); 2481 } else { 2482 sqh->sqb_pexited = 1; 2483 mutex_exit(&sqh->sqb_lock); 2484 } 2485 } 2486 2487 /* 2488 * Free up a single sigqueue structure. 2489 * No other code should free a sigqueue directly. 2490 */ 2491 void 2492 siginfofree(sigqueue_t *sqp) 2493 { 2494 if (sqp != NULL) { 2495 if (sqp->sq_func != NULL) 2496 (sqp->sq_func)(sqp); 2497 else 2498 kmem_free(sqp, sizeof (sigqueue_t)); 2499 } 2500 } 2501 2502 /* 2503 * Generate a synchronous signal caused by a hardware 2504 * condition encountered by an lwp. Called from trap(). 2505 */ 2506 void 2507 trapsig(k_siginfo_t *ip, int restartable) 2508 { 2509 proc_t *p = ttoproc(curthread); 2510 int sig = ip->si_signo; 2511 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 2512 2513 ASSERT(sig > 0 && sig < NSIG); 2514 2515 if (curthread->t_dtrace_on) 2516 dtrace_safe_synchronous_signal(); 2517 2518 mutex_enter(&p->p_lock); 2519 schedctl_finish_sigblock(curthread); 2520 /* 2521 * Avoid a possible infinite loop if the lwp is holding the 2522 * signal generated by a trap of a restartable instruction or 2523 * if the signal so generated is being ignored by the process. 2524 */ 2525 if (restartable && 2526 (sigismember(&curthread->t_hold, sig) || 2527 p->p_user.u_signal[sig-1] == SIG_IGN)) { 2528 sigdelset(&curthread->t_hold, sig); 2529 p->p_user.u_signal[sig-1] = SIG_DFL; 2530 sigdelset(&p->p_ignore, sig); 2531 } 2532 bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t)); 2533 sigaddqa(p, curthread, sqp); 2534 mutex_exit(&p->p_lock); 2535 } 2536 2537 /* 2538 * Dispatch the real time profiling signal in the traditional way, 2539 * honoring all of the /proc tracing mechanism built into issig(). 2540 */ 2541 static void 2542 realsigprof_slow(int sysnum, int nsysarg, int error) 2543 { 2544 kthread_t *t = curthread; 2545 proc_t *p = ttoproc(t); 2546 klwp_t *lwp = ttolwp(t); 2547 k_siginfo_t *sip = &lwp->lwp_siginfo; 2548 void (*func)(); 2549 2550 mutex_enter(&p->p_lock); 2551 func = PTOU(p)->u_signal[SIGPROF - 1]; 2552 if (p->p_rprof_cyclic == CYCLIC_NONE || 2553 func == SIG_DFL || func == SIG_IGN) { 2554 bzero(t->t_rprof, sizeof (*t->t_rprof)); 2555 mutex_exit(&p->p_lock); 2556 return; 2557 } 2558 if (sigismember(&t->t_hold, SIGPROF)) { 2559 mutex_exit(&p->p_lock); 2560 return; 2561 } 2562 sip->si_signo = SIGPROF; 2563 sip->si_code = PROF_SIG; 2564 sip->si_errno = error; 2565 hrt2ts(gethrtime(), &sip->si_tstamp); 2566 sip->si_syscall = sysnum; 2567 sip->si_nsysarg = nsysarg; 2568 sip->si_fault = lwp->lwp_lastfault; 2569 sip->si_faddr = lwp->lwp_lastfaddr; 2570 lwp->lwp_lastfault = 0; 2571 lwp->lwp_lastfaddr = NULL; 2572 sigtoproc(p, t, SIGPROF); 2573 mutex_exit(&p->p_lock); 2574 ASSERT(lwp->lwp_cursig == 0); 2575 if (issig(FORREAL)) 2576 psig(); 2577 sip->si_signo = 0; 2578 bzero(t->t_rprof, sizeof (*t->t_rprof)); 2579 } 2580 2581 /* 2582 * We are not tracing the SIGPROF signal, or doing any other unnatural 2583 * acts, like watchpoints, so dispatch the real time profiling signal 2584 * directly, bypassing all of the overhead built into issig(). 2585 */ 2586 static void 2587 realsigprof_fast(int sysnum, int nsysarg, int error) 2588 { 2589 kthread_t *t = curthread; 2590 proc_t *p = ttoproc(t); 2591 klwp_t *lwp = ttolwp(t); 2592 k_siginfo_t *sip = &lwp->lwp_siginfo; 2593 void (*func)(); 2594 int rc; 2595 int code; 2596 2597 /* 2598 * We don't need to acquire p->p_lock here; 2599 * we are manipulating thread-private data. 2600 */ 2601 func = PTOU(p)->u_signal[SIGPROF - 1]; 2602 if (p->p_rprof_cyclic == CYCLIC_NONE || 2603 func == SIG_DFL || func == SIG_IGN) { 2604 bzero(t->t_rprof, sizeof (*t->t_rprof)); 2605 return; 2606 } 2607 if (lwp->lwp_cursig != 0 || 2608 lwp->lwp_curinfo != NULL || 2609 sigismember(&t->t_hold, SIGPROF)) { 2610 return; 2611 } 2612 sip->si_signo = SIGPROF; 2613 sip->si_code = PROF_SIG; 2614 sip->si_errno = error; 2615 hrt2ts(gethrtime(), &sip->si_tstamp); 2616 sip->si_syscall = sysnum; 2617 sip->si_nsysarg = nsysarg; 2618 sip->si_fault = lwp->lwp_lastfault; 2619 sip->si_faddr = lwp->lwp_lastfaddr; 2620 lwp->lwp_lastfault = 0; 2621 lwp->lwp_lastfaddr = NULL; 2622 if (t->t_flag & T_TOMASK) 2623 t->t_flag &= ~T_TOMASK; 2624 else 2625 lwp->lwp_sigoldmask = t->t_hold; 2626 sigorset(&t->t_hold, &PTOU(p)->u_sigmask[SIGPROF - 1]); 2627 if (!sigismember(&PTOU(p)->u_signodefer, SIGPROF)) 2628 sigaddset(&t->t_hold, SIGPROF); 2629 lwp->lwp_extsig = 0; 2630 lwp->lwp_ru.nsignals++; 2631 if (p->p_model == DATAMODEL_NATIVE) 2632 rc = sendsig(SIGPROF, sip, func); 2633 #ifdef _SYSCALL32_IMPL 2634 else 2635 rc = sendsig32(SIGPROF, sip, func); 2636 #endif /* _SYSCALL32_IMPL */ 2637 sip->si_signo = 0; 2638 bzero(t->t_rprof, sizeof (*t->t_rprof)); 2639 if (rc == 0) { 2640 /* 2641 * sendsig() failed; we must dump core with a SIGSEGV. 2642 * See psig(). This code is copied from there. 2643 */ 2644 lwp->lwp_cursig = SIGSEGV; 2645 code = CLD_KILLED; 2646 proc_is_exiting(p); 2647 if (exitlwps(1) != 0) { 2648 mutex_enter(&p->p_lock); 2649 lwp_exit(); 2650 } 2651 if (audit_active) 2652 audit_core_start(SIGSEGV); 2653 if (core(SIGSEGV, 0) == 0) 2654 code = CLD_DUMPED; 2655 if (audit_active) 2656 audit_core_finish(code); 2657 exit(code, SIGSEGV); 2658 } 2659 } 2660 2661 /* 2662 * Arrange for the real time profiling signal to be dispatched. 2663 */ 2664 void 2665 realsigprof(int sysnum, int nsysarg, int error) 2666 { 2667 kthread_t *t = curthread; 2668 proc_t *p = ttoproc(t); 2669 2670 if (t->t_rprof->rp_anystate == 0) 2671 return; 2672 2673 schedctl_finish_sigblock(t); 2674 2675 /* test for any activity that requires p->p_lock */ 2676 if (tracing(p, SIGPROF) || pr_watch_active(p) || 2677 sigismember(&PTOU(p)->u_sigresethand, SIGPROF)) { 2678 /* do it the classic slow way */ 2679 realsigprof_slow(sysnum, nsysarg, error); 2680 } else { 2681 /* do it the cheating-a-little fast way */ 2682 realsigprof_fast(sysnum, nsysarg, error); 2683 } 2684 } 2685 2686 #ifdef _SYSCALL32_IMPL 2687 2688 /* 2689 * It's tricky to transmit a sigval between 32-bit and 64-bit 2690 * process, since in the 64-bit world, a pointer and an integer 2691 * are different sizes. Since we're constrained by the standards 2692 * world not to change the types, and it's unclear how useful it is 2693 * to send pointers between address spaces this way, we preserve 2694 * the 'int' interpretation for 32-bit processes interoperating 2695 * with 64-bit processes. The full semantics (pointers or integers) 2696 * are available for N-bit processes interoperating with N-bit 2697 * processes. 2698 */ 2699 void 2700 siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest) 2701 { 2702 bzero(dest, sizeof (*dest)); 2703 2704 /* 2705 * The absolute minimum content is si_signo and si_code. 2706 */ 2707 dest->si_signo = src->si_signo; 2708 if ((dest->si_code = src->si_code) == SI_NOINFO) 2709 return; 2710 2711 /* 2712 * A siginfo generated by user level is structured 2713 * differently from one generated by the kernel. 2714 */ 2715 if (SI_FROMUSER(src)) { 2716 dest->si_pid = src->si_pid; 2717 dest->si_ctid = src->si_ctid; 2718 dest->si_zoneid = src->si_zoneid; 2719 dest->si_uid = src->si_uid; 2720 if (SI_CANQUEUE(src->si_code)) 2721 dest->si_value.sival_int = 2722 (int32_t)src->si_value.sival_int; 2723 return; 2724 } 2725 2726 dest->si_errno = src->si_errno; 2727 2728 switch (src->si_signo) { 2729 default: 2730 dest->si_pid = src->si_pid; 2731 dest->si_ctid = src->si_ctid; 2732 dest->si_zoneid = src->si_zoneid; 2733 dest->si_uid = src->si_uid; 2734 dest->si_value.sival_int = (int32_t)src->si_value.sival_int; 2735 break; 2736 case SIGCLD: 2737 dest->si_pid = src->si_pid; 2738 dest->si_ctid = src->si_ctid; 2739 dest->si_zoneid = src->si_zoneid; 2740 dest->si_status = src->si_status; 2741 dest->si_stime = src->si_stime; 2742 dest->si_utime = src->si_utime; 2743 break; 2744 case SIGSEGV: 2745 case SIGBUS: 2746 case SIGILL: 2747 case SIGTRAP: 2748 case SIGFPE: 2749 case SIGEMT: 2750 dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr; 2751 dest->si_trapno = src->si_trapno; 2752 dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc; 2753 break; 2754 case SIGPOLL: 2755 case SIGXFSZ: 2756 dest->si_fd = src->si_fd; 2757 dest->si_band = src->si_band; 2758 break; 2759 case SIGPROF: 2760 dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr; 2761 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec; 2762 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec; 2763 dest->si_syscall = src->si_syscall; 2764 dest->si_nsysarg = src->si_nsysarg; 2765 dest->si_fault = src->si_fault; 2766 break; 2767 } 2768 } 2769 2770 void 2771 siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest) 2772 { 2773 bzero(dest, sizeof (*dest)); 2774 2775 /* 2776 * The absolute minimum content is si_signo and si_code. 2777 */ 2778 dest->si_signo = src->si_signo; 2779 if ((dest->si_code = src->si_code) == SI_NOINFO) 2780 return; 2781 2782 /* 2783 * A siginfo generated by user level is structured 2784 * differently from one generated by the kernel. 2785 */ 2786 if (SI_FROMUSER(src)) { 2787 dest->si_pid = src->si_pid; 2788 dest->si_ctid = src->si_ctid; 2789 dest->si_zoneid = src->si_zoneid; 2790 dest->si_uid = src->si_uid; 2791 if (SI_CANQUEUE(src->si_code)) 2792 dest->si_value.sival_int = 2793 (int)src->si_value.sival_int; 2794 return; 2795 } 2796 2797 dest->si_errno = src->si_errno; 2798 2799 switch (src->si_signo) { 2800 default: 2801 dest->si_pid = src->si_pid; 2802 dest->si_ctid = src->si_ctid; 2803 dest->si_zoneid = src->si_zoneid; 2804 dest->si_uid = src->si_uid; 2805 dest->si_value.sival_int = (int)src->si_value.sival_int; 2806 break; 2807 case SIGCLD: 2808 dest->si_pid = src->si_pid; 2809 dest->si_ctid = src->si_ctid; 2810 dest->si_zoneid = src->si_zoneid; 2811 dest->si_status = src->si_status; 2812 dest->si_stime = src->si_stime; 2813 dest->si_utime = src->si_utime; 2814 break; 2815 case SIGSEGV: 2816 case SIGBUS: 2817 case SIGILL: 2818 case SIGTRAP: 2819 case SIGFPE: 2820 case SIGEMT: 2821 dest->si_addr = (void *)(uintptr_t)src->si_addr; 2822 dest->si_trapno = src->si_trapno; 2823 dest->si_pc = (void *)(uintptr_t)src->si_pc; 2824 break; 2825 case SIGPOLL: 2826 case SIGXFSZ: 2827 dest->si_fd = src->si_fd; 2828 dest->si_band = src->si_band; 2829 break; 2830 case SIGPROF: 2831 dest->si_faddr = (void *)(uintptr_t)src->si_faddr; 2832 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec; 2833 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec; 2834 dest->si_syscall = src->si_syscall; 2835 dest->si_nsysarg = src->si_nsysarg; 2836 dest->si_fault = src->si_fault; 2837 break; 2838 } 2839 } 2840 2841 #endif /* _SYSCALL32_IMPL */ 2842