/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <sys/bitmap.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/* MUST be contiguous */
k_sigset_t nullsmask = {0, 0};

k_sigset_t fillset = {FILLSET0, FILLSET1};

k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
		|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
		(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
		|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
		|sigmask(SIGJVM2))};

k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
		|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
		|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
		|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
		|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
		0};

static	int	isjobstop(int);
static	void	post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variables for counting the number of user thread stop requests
 * posted.  They may not be accurate in some special situations, such as
 * when a virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop
 * requests are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);

/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||		/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig)));	/* and signal not blocked */
}

/*
 * Return true if this thread is going to eat this signal soon.
 * Note that, if the signal is SIGKILL, we force stopped threads to be
 * set running (to make SIGKILL be a sure kill), but only if the process
 * is not currently locked by /proc (the P_PR_LOCK flag).  Code in /proc
 * relies on the fact that a process will not change shape while P_PR_LOCK
 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
 * ensure that the process is not locked by /proc, but prbarrier() drops
 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
		    !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			aston(t);	/* make it do issig promptly */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}
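/*
 * Illustrative sketch (not a caller in this file): outside senders are
 * expected to go through the psignal()/tsignal() wrappers above rather
 * than calling sigtoproc() directly, since the wrappers supply the
 * p_lock protocol that sigtoproc() asserts.  For example, delivering a
 * hangup to a process:
 *
 *	psignal(p, SIGHUP);
 *
 * is equivalent to:
 *
 *	mutex_enter(&p->p_lock);
 *	sigtoproc(p, NULL, SIGHUP);
 *	mutex_exit(&p->p_lock);
 */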
/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (sig <= 0 || sig >= NSIG)
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
		 */
		p->p_flag |= SSCONT;
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 * For SIGKILL make all of the lwps see the signal;
		 * This is needed to guarantee a sure kill for processes
		 * with a mix of realtime and non-realtime threads.
		 */
		int su = 0;

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig) && sig != SIGKILL) {
				thread_unlock(tt);
				break;
			}
			if (SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
		    !(p->p_proc_flag & P_PR_LOCK)) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}

static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
	    sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:	Extract the next pending signal from p_sig into p_cursig;
 *	stop the process if a stop has been requested or if a traced signal
 *	is pending.
 *
 * JUSTLOOKING:	Don't stop the process, just indicate whether or not
 *	a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}
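/*
 * Illustrative sketch, assuming the usual ISSIG()/ISSIG_PENDING()
 * macro wrappers from <sys/proc.h>: the syscall return path does
 * roughly
 *
 *	if (ISSIG(curthread, FORREAL))
 *		psig();
 *
 * while code about to sleep interruptibly uses the cheap JUSTLOOKING
 * form first and repeats the check FORREAL only after being awakened.
 */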
static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 * "Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When
	 * single-stepping a process, we don't want an intervening signal
	 * or TP_PAUSE request to suspend the current thread.  Otherwise,
	 * the controlling process will hang because we will be stopped
	 * with TS_PSTART set in t_schedflag.  We will trigger any
	 * remaining signals when we re-enter the kernel on the single
	 * step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (lwp->lwp_nostop == 0 &&
	    (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag &
	    (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    PTOU(curproc)->u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}

static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;
	int sigcld_found = 0;
	int nostop_break = 0;

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			t->t_sig_check = 1;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang because we
		 * will be stopped with TS_PSTART set in t_schedflag.  We
		 * will trigger any remaining signals when we re-enter the
		 * kernel on the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the rest of the
		 * signals or requests.  Honor other stop requests or signals
		 * later.  Go back to the top of the loop here to check if an
		 * exit or hold event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to top of loop to check if an exit
		 * or hold event has occurred while stopped.
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * Do not want to go back to loop here since this is a special
		 * stop that means: make incremental progress before the next
		 * stop.  The danger is that returning to top of loop would
		 * most likely drop the thread right back here to stop soon
		 * after it was continued, violating the incremental progress
		 * request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig)) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}

		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
		 */
		for (;;) {
			k_sigset_t tsig;

			tsig = t->t_sig;
			if ((sig = fsig(&tsig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 0;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
	 * Don't do it if we are returning SIGCLD and the signal
	 * handler will be reset by psig(); this enables reliable
	 * delivery of SIGCLD even when using the old, broken
	 * signal() interface for setting the signal handler.
	 */
	if (sigcld_found &&
	    (sig != SIGCLD || !sigismember(&PTOU(curproc)->u_sigresethand,
	    SIGCLD)))
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since p_lock was dropped above,
	 * then this promoted signal might have been handled
	 * already when we were on the way back from sleep queue, so
	 * just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number.  Also see comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}
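/*
 * A minimal consumer-side sketch, assuming the cv_wait_sig() style of
 * interruptible sleep: issig(JUSTLOOKING) decides whether waking up is
 * worthwhile, and only the FORREAL pass above actually promotes a
 * signal.  A typical caller looks like:
 *
 *	mutex_enter(&mp);
 *	while (!condition) {
 *		if (cv_wait_sig(&cv, &mp) == 0) {
 *			mutex_exit(&mp);
 *			return (EINTR);
 *		}
 *	}
 *	mutex_exit(&mp);
 *
 * where the EINTR return is the caller's reaction to the pending
 * signal; the signal itself is taken via psig() on the way back to
 * user level.
 */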
/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
 */
int
jobstopped(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) == NULL)
		return (0);

	do {
		thread_lock(t);
		/* ignore current, zombie and suspended lwps in the test */
		if (!(t == curthread || t->t_state == TS_ZOMB ||
		    SUSPENDED(t)) &&
		    (t->t_state != TS_STOPPED ||
		    t->t_whystop != PR_JOBCONTROL)) {
			thread_unlock(t);
			return (0);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (1);
}

/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
	kthread_t	*t = curthread;
	proc_t		*p = ttoproc(t);
	klwp_t		*lwp = ttolwp(t);
	kthread_t	*tx;
	lwpent_t	*lep;
	int		procstop;
	int		flags = TS_ALLSTART;
	hrtime_t	stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop flag.
	 */
	if (lwp->lwp_nostop)
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
		 * then no debugger is present and we also do synchronous
		 * stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}

				/* Move waiting thread to run queue */
				if (ISWAITING(tx))
					setrun_locked(tx);

				/*
				 * force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race
				 * condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock.  Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped
		 * state.  If we are stopping for a SUSPEND_PAUSE, we may be
		 * the last lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);			/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/* Interface for resetting user thread stop count. */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request. */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped. */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_timedwait(&utstop_cv, &thread_stop_lock,
		    ticks + lbolt);
	mutex_exit(&thread_stop_lock);
}
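/*
 * A minimal sketch of the intended utstop protocol, assuming a
 * checkpoint/suspend path as the caller:
 *
 *	utstop_init();
 *	for each lwp to be stopped:
 *		add_one_utstop();
 *		post the TP_CHKPT request to the lwp;
 *	utstop_timedwait(wait_ticks);
 *
 * Each lwp calls del_one_utstop() from stop() (see the PR_CHECKPOINT
 * handling above) as it stops, and the last one broadcasts utstop_cv.
 */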
/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = PTOU(curproc)->u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * Save the siginfo pointer here, in case the
		 * signal's reset bit is on.
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			sip = &lwp->lwp_siginfo;
			if (sqp) {
				bcopy(&sqp->sq_info, sip, sizeof (*sip));
				/*
				 * If we were interrupted out of a system call
				 * due to pthread_cancel(), inform libc.
				 */
				if (sig == SIGCANCEL &&
				    sip->si_code == SI_LWP &&
				    t->t_sysnum != 0)
					schedctl_cancel_eintr();
			} else if (sig == SIGPROF && sip->si_signo == SIGPROF &&
			    t->t_rprof != NULL && t->t_rprof->rp_anystate) {
				/* EMPTY */;
			} else {
				bzero(sip, sizeof (*sip));
				sip->si_signo = sig;
				sip->si_code = SI_NOINFO;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
		if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		proc_is_exiting(p);
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
			if (audit_active)	/* audit core dump */
				audit_core_start(sig);
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
			if (audit_active)	/* audit core dump */
				audit_core_finish(code);
		}
	}

	/*
	 * Generate a contract event once if the process is killed
	 * by a signal.
	 */
	if (ext) {
		proc_is_exiting(p);
		if (exitlwps(0) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);
	}

	exit(code, sig);
}

/*
 * Find next unheld signal in ssp for thread t.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}
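/*
 * A worked example of fsig()'s promotion order, assuming this
 * platform's signal numbering: if SIGINT (2), SIGTERM (15) and
 * SIGPROF (29) are all pending and unheld, SIGPROF is returned first;
 * with SIGPROF gone, lowbit() yields SIGINT, the lowest-numbered
 * remaining signal, before SIGTERM.
 */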
void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	PTOU(curproc)->u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		sigdelset(&p->p_ignore, sig);
		PTOU(curproc)->u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&PTOU(curproc)->u_sigresethand, sig);
			else
				sigdelset(&PTOU(curproc)->u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&PTOU(curproc)->u_signodefer, sig);
		else
			sigdelset(&PTOU(curproc)->u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&PTOU(curproc)->u_sigrestart, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&PTOU(curproc)->u_sigonstack, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigonstack, sig);

	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);

	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB &&
				    !(cp->p_pidflag & CLDWAITPID))
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}

/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
			if (!(cp->p_pidflag & CLDWAITPID))
				freeproc(cp);
		} else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}
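/*
 * An illustrative summary of the SIGCLD flow, assuming a parent that
 * catches the signal: every child state change funnels through
 * sigcld(), which either posts one SIGCLD via post_sigcld() or marks
 * the child CLDPEND; when the parent dequeues that SIGCLD,
 * sigcld_repost() promotes the next CLDPEND child, so at most one
 * SIGCLD is in flight per parent and none are lost.
 */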
/*
 * Common code called from sigcld() and issig_forreal().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
	 * then just mark the child process so that its SIGCLD will
	 * be posted later, when the first SIGCLD is taken off the
	 * queue or when the parent is ready to receive it, if ever.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN ||
	    sigismember(&pp->p_sig, SIGCLD))
		cp->p_pidflag |= CLDPEND;
	else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}

/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	sigqueue_t *sqp;

	/*
	 * Don't bother if SIGCLD is not now being caught.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN)
		return;

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * Count the number of sigqueue_t's sent by sigaddqa().
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}

/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}
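/*
 * Illustrative note: because sigdeq() re-asserts the pending bit when
 * another queued instance of the signal remains, dequeueing one of two
 * queued SI_QUEUE instances of the same signal leaves the signal
 * pending, and the second siginfo is not lost.
 */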
/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}

/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid the situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig().  So we create a
	 * separate stash for SIGKILL's sigqueue_t.  Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else		/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}

/*
 * The function sigaddqa() is called with sigqueue already allocated.
 * If signal is ignored, discard but guarantee KILL and generation semantics.
 * It is called from sigqueue() and other places.
 */
void
sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	int sig = sigqp->sq_info.si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	if (sig_discardable(p, sig))
		siginfofree(sigqp);
	else
		sigaddqins(p, t, sigqp);

	sigtoproc(p, t, sig);
}
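/*
 * Illustrative example of the queueing depth chosen by sigaddqins():
 * two sigqueue()-style sends of the same signal (si_code SI_QUEUE,
 * signal present in p_siginfo) both stay on the queue, while two
 * kill()-style sends (si_code <= 0) of the same signal collapse to a
 * single queued instance.
 */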

/*
 * Allocate the sigqueue_t structure and call sigaddqins().
 */
void
sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
{
    sigqueue_t *sqp;
    int sig = infop->si_signo;

    ASSERT(MUTEX_HELD(&p->p_lock));
    ASSERT(sig >= 1 && sig < NSIG);

    /*
     * If the signal will be discarded by sigtoproc() or
     * if the process isn't requesting siginfo and it isn't
     * blocking the signal (it *could* change its mind while
     * the signal is pending) then don't bother creating one.
     */
    if (!sig_discardable(p, sig) &&
        (sigismember(&p->p_siginfo, sig) ||
        (curproc->p_ct_process != p->p_ct_process) ||
        (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
        ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
        bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
        sqp->sq_func = NULL;
        sqp->sq_next = NULL;
        sigaddqins(p, t, sqp);
    }
    sigtoproc(p, t, sig);
}

/*
 * Handle stop-on-fault processing for the debugger.  Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
 */
int
stop_on_fault(uint_t fault, k_siginfo_t *sip)
{
    proc_t *p = ttoproc(curthread);
    klwp_t *lwp = ttolwp(curthread);

    ASSERT(prismember(&p->p_fltmask, fault));

    /*
     * Record the current fault and siginfo structure so the
     * debugger can find it.
     */
    mutex_enter(&p->p_lock);
    lwp->lwp_curflt = (uchar_t)fault;
    lwp->lwp_siginfo = *sip;

    stop(PR_FAULTED, fault);

    fault = lwp->lwp_curflt;
    lwp->lwp_curflt = 0;
    mutex_exit(&p->p_lock);
    return (fault);
}

void
sigorset(k_sigset_t *s1, k_sigset_t *s2)
{
    s1->__sigbits[0] |= s2->__sigbits[0];
    s1->__sigbits[1] |= s2->__sigbits[1];
}

void
sigandset(k_sigset_t *s1, k_sigset_t *s2)
{
    s1->__sigbits[0] &= s2->__sigbits[0];
    s1->__sigbits[1] &= s2->__sigbits[1];
}

void
sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
{
    s1->__sigbits[0] &= ~(s2->__sigbits[0]);
    s1->__sigbits[1] &= ~(s2->__sigbits[1]);
}

/*
 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
 * if there are any signals the thread might take on return from the kernel.
 * If k_sigset_t's were a single word, we would do:
 *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
 */
int
sigcheck(proc_t *p, kthread_t *t)
{
    sc_shared_t *tdp = t->t_schedctl;

    /*
     * If signals are blocked via the schedctl interface
     * then we only check for the unmaskable signals.
     */
    if (tdp != NULL && tdp->sc_sigblock)
        return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
            CANTMASK0);

    return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
        ~t->t_hold.__sigbits[0]) |
        (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
        ~t->t_hold.__sigbits[1]) & FILLSET1));
}
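
/*
 * Worked example (editorial addition): suppose SIGINT (bit 1 of word 0,
 * since sigmask(n) is 1 << (n - 1)) is pending in p_sig and blocked in
 * t_hold, while SIGKILL is also pending.  The first OR/AND term drops
 * SIGINT but keeps SIGKILL, because SIGKILL is in cantmask and
 * sigprocmask() never lets it into t_hold; sigcheck() therefore returns
 * non-zero and the thread will run issig() on its way back to user level.
 */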

/* ONC_PLUS EXTRACT START */
void
sigintr(k_sigset_t *smask, int intable)
{
    proc_t *p;
    int owned;
    k_sigset_t lmask;		/* local copy of cantmask */
    klwp_t *lwp = ttolwp(curthread);

    /*
     * Mask out all signals except SIGHUP, SIGINT, SIGQUIT, and
     * SIGTERM, preserving the existing masks.  This function
     * supports the -intr nfs and ufs mount option.
     */

    /*
     * don't do kernel threads
     */
    if (lwp == NULL)
        return;

    /*
     * get access to signal mask
     */
    p = ttoproc(curthread);
    owned = mutex_owned(&p->p_lock);	/* this is filthy */
    if (!owned)
        mutex_enter(&p->p_lock);

    /*
     * remember the current mask
     */
    schedctl_finish_sigblock(curthread);
    *smask = curthread->t_hold;

    /*
     * mask out all signals
     */
    sigfillset(&curthread->t_hold);

    /*
     * Unmask the non-maskable signals (e.g., KILL), as long as
     * they aren't already masked (which could happen at exit).
     * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
     * second sets the current hold mask to (~0 & ~lmask), which reduces
     * to (~cantmask | curhold).
     */
    lmask = cantmask;
    sigdiffset(&lmask, smask);
    sigdiffset(&curthread->t_hold, &lmask);

    /*
     * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
     * Re-enable INT if it was originally enabled and the NFS mount
     * option nointr is not set.
     */
    if (!sigismember(smask, SIGHUP))
        sigdelset(&curthread->t_hold, SIGHUP);
    if (!sigismember(smask, SIGINT) && intable)
        sigdelset(&curthread->t_hold, SIGINT);
    if (!sigismember(smask, SIGQUIT))
        sigdelset(&curthread->t_hold, SIGQUIT);
    if (!sigismember(smask, SIGTERM))
        sigdelset(&curthread->t_hold, SIGTERM);

    /*
     * release access to signal mask
     */
    if (!owned)
        mutex_exit(&p->p_lock);

    /*
     * Indicate that this lwp is not to be stopped.
     */
    lwp->lwp_nostop++;
}
/* ONC_PLUS EXTRACT END */

void
sigunintr(k_sigset_t *smask)
{
    proc_t *p;
    int owned;
    klwp_t *lwp = ttolwp(curthread);

    /*
     * Reset the previous mask (see sigintr() above).
     */
    if (lwp != NULL) {
        lwp->lwp_nostop--;	/* restore lwp stoppability */
        p = ttoproc(curthread);
        owned = mutex_owned(&p->p_lock);	/* this is filthy */
        if (!owned)
            mutex_enter(&p->p_lock);
        curthread->t_hold = *smask;
        /* so unmasked signals will be seen */
        curthread->t_sig_check = 1;
        if (!owned)
            mutex_exit(&p->p_lock);
    }
}

void
sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
{
    proc_t *p;
    int owned;

    /*
     * Save the current signal mask in oldmask, then
     * set it to newmask.
     */
    if (ttolwp(curthread) != NULL) {
        p = ttoproc(curthread);
        owned = mutex_owned(&p->p_lock);	/* this is filthy */
        if (!owned)
            mutex_enter(&p->p_lock);
        schedctl_finish_sigblock(curthread);
        if (oldmask != NULL)
            *oldmask = curthread->t_hold;
        curthread->t_hold = *newmask;
        curthread->t_sig_check = 1;
        if (!owned)
            mutex_exit(&p->p_lock);
    }
}

/*
 * Return true if the signal number is in range
 * and the signal code specifies signal queueing.
 */
int
sigwillqueue(int sig, int code)
{
    if (sig >= 0 && sig < NSIG) {
        switch (code) {
        case SI_QUEUE:
        case SI_TIMER:
        case SI_ASYNCIO:
        case SI_MESGQ:
            return (1);
        }
    }
    return (0);
}
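
/*
 * Illustrative sketch (editorial addition): the sigintr()/sigunintr()
 * pair above brackets an interruptible wait in the nfs/ufs -intr path.
 * The condition variable, lock, and intr flag are hypothetical.
 *
 *	k_sigset_t smask;
 *
 *	sigintr(&smask, intr);		-- allow only HUP/INT/QUIT/TERM
 *	error = cv_wait_sig(&cv, &lock) == 0 ? EINTR : 0;
 *	sigunintr(&smask);		-- restore the original mask
 */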

#ifndef UCHAR_MAX
#define	UCHAR_MAX	255
#endif

/*
 * The entire pool (with maxcount entries) is pre-allocated at
 * the first sigqueue/signotify call.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
    size_t i;
    sigqueue_t *sq, *next;
    sigqhdr_t *sqh;

    i = (maxcount * size) + sizeof (sigqhdr_t);
    ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
    sqh = kmem_alloc(i, KM_SLEEP);
    sqh->sqb_count = (uchar_t)maxcount;
    sqh->sqb_maxcount = (uchar_t)maxcount;
    sqh->sqb_size = (ushort_t)i;
    sqh->sqb_pexited = 0;
    sqh->sqb_sent = 0;
    sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
    for (i = maxcount - 1; i != 0; i--) {
        next = (sigqueue_t *)((uintptr_t)sq + size);
        sq->sq_next = next;
        sq = next;
    }
    sq->sq_next = NULL;
    cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
    mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
    return (sqh);
}

static void sigqrel(sigqueue_t *);

/*
 * Allocate a sigqueue/signotify structure from the per-process
 * pre-allocated pool.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
    sigqueue_t *sq = NULL;

    ASSERT(MUTEX_HELD(&curproc->p_lock));

    if (sqh != NULL) {
        mutex_enter(&sqh->sqb_lock);
        if (sqh->sqb_count > 0) {
            sqh->sqb_count--;
            sq = sqh->sqb_free;
            sqh->sqb_free = sq->sq_next;
            mutex_exit(&sqh->sqb_lock);
            bzero(&sq->sq_info, sizeof (k_siginfo_t));
            sq->sq_backptr = sqh;
            sq->sq_func = sigqrel;
            sq->sq_next = NULL;
            sq->sq_external = 0;
        } else {
            mutex_exit(&sqh->sqb_lock);
        }
    }
    return (sq);
}

/*
 * Return a sigqueue structure back to the pre-allocated pool.
 */
static void
sigqrel(sigqueue_t *sq)
{
    sigqhdr_t *sqh;

    /* make sure that p_lock of the affected process is held */

    sqh = (sigqhdr_t *)sq->sq_backptr;
    mutex_enter(&sqh->sqb_lock);
    if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
        mutex_exit(&sqh->sqb_lock);
        cv_destroy(&sqh->sqb_cv);
        mutex_destroy(&sqh->sqb_lock);
        kmem_free(sqh, sqh->sqb_size);
    } else {
        sqh->sqb_count++;
        sqh->sqb_sent--;
        sq->sq_next = sqh->sqb_free;
        sq->sq_backptr = NULL;
        sqh->sqb_free = sq;
        cv_signal(&sqh->sqb_cv);
        mutex_exit(&sqh->sqb_lock);
    }
}

/*
 * Free up the pre-allocated sigqueue headers of the sigqueue pool
 * and the signotify pool, if possible.
 * Called only by the owning process during exec() and exit().
 */
void
sigqfree(proc_t *p)
{
    ASSERT(MUTEX_HELD(&p->p_lock));

    if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
        sigqhdrfree(p->p_sigqhdr);
        p->p_sigqhdr = NULL;
    }
    if (p->p_signhdr != NULL) {	/* signotify pool */
        sigqhdrfree(p->p_signhdr);
        p->p_signhdr = NULL;
    }
}

/*
 * Free up the pre-allocated header and sigq pool if possible.
 */
void
sigqhdrfree(sigqhdr_t *sqh)
{
    mutex_enter(&sqh->sqb_lock);
    if (sqh->sqb_sent == 0) {
        mutex_exit(&sqh->sqb_lock);
        cv_destroy(&sqh->sqb_cv);
        mutex_destroy(&sqh->sqb_lock);
        kmem_free(sqh, sqh->sqb_size);
    } else {
        sqh->sqb_pexited = 1;
        mutex_exit(&sqh->sqb_lock);
    }
}
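
/*
 * Lifecycle sketch (editorial addition): p_sigqhdr is created lazily on
 * the first sigqueue() use and torn down at exec()/exit().  The pool
 * depth shown (_SIGQUEUE_MAX) is illustrative.
 *
 *	if (p->p_sigqhdr == NULL)		-- first use
 *		p->p_sigqhdr = sigqhdralloc(sizeof (sigqueue_t),
 *		    _SIGQUEUE_MAX);		-- whole pool at once
 *	sqp = sigqalloc(p->p_sigqhdr);		-- one entry per signal
 *	...
 *	siginfofree(sqp);	-- sq_func == sigqrel: back to the pool
 *	...
 *	sigqfree(p);		-- exec()/exit(): release both pools
 */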

/*
 * Free up a single sigqueue structure.
 * No other code should free a sigqueue directly.
 */
void
siginfofree(sigqueue_t *sqp)
{
    if (sqp != NULL) {
        if (sqp->sq_func != NULL)
            (sqp->sq_func)(sqp);
        else
            kmem_free(sqp, sizeof (sigqueue_t));
    }
}

/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp.  Called from trap().
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
    proc_t *p = ttoproc(curthread);
    int sig = ip->si_signo;
    sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

    ASSERT(sig > 0 && sig < NSIG);

    if (curthread->t_dtrace_on)
        dtrace_safe_synchronous_signal();

    mutex_enter(&p->p_lock);
    schedctl_finish_sigblock(curthread);
    /*
     * Avoid a possible infinite loop if the lwp is holding the
     * signal generated by a trap of a restartable instruction or
     * if the signal so generated is being ignored by the process.
     */
    if (restartable &&
        (sigismember(&curthread->t_hold, sig) ||
        p->p_user.u_signal[sig-1] == SIG_IGN)) {
        sigdelset(&curthread->t_hold, sig);
        p->p_user.u_signal[sig-1] = SIG_DFL;
        sigdelset(&p->p_ignore, sig);
    }
    bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
    sigaddqa(p, curthread, sqp);
    mutex_exit(&p->p_lock);
}
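
/*
 * Illustrative sketch (editorial addition): how trap() might post a
 * SIGSEGV through trapsig() for a faulting instruction.  The field
 * values and faultaddr variable are examples only.
 *
 *	k_siginfo_t si;
 *
 *	bzero(&si, sizeof (si));
 *	si.si_signo = SIGSEGV;
 *	si.si_code = SEGV_MAPERR;
 *	si.si_addr = (caddr_t)faultaddr;
 *	trapsig(&si, 1);	-- restartable: clears hold/ignore first
 */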

#ifdef _SYSCALL32_IMPL

/*
 * It's tricky to transmit a sigval between 32-bit and 64-bit
 * processes, since in the 64-bit world, a pointer and an integer
 * are different sizes.  Since we're constrained by the standards
 * world not to change the types, and it's unclear how useful it is
 * to send pointers between address spaces this way, we preserve
 * the 'int' interpretation for 32-bit processes interoperating
 * with 64-bit processes.  The full semantics (pointers or integers)
 * are available for N-bit processes interoperating with N-bit
 * processes.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
    bzero(dest, sizeof (*dest));

    /*
     * The absolute minimum content is si_signo and si_code.
     */
    dest->si_signo = src->si_signo;
    if ((dest->si_code = src->si_code) == SI_NOINFO)
        return;

    /*
     * A siginfo generated at user level is structured
     * differently from one generated by the kernel.
     */
    if (SI_FROMUSER(src)) {
        dest->si_pid = src->si_pid;
        dest->si_ctid = src->si_ctid;
        dest->si_zoneid = src->si_zoneid;
        dest->si_uid = src->si_uid;
        if (SI_CANQUEUE(src->si_code))
            dest->si_value.sival_int =
                (int32_t)src->si_value.sival_int;
        return;
    }

    dest->si_errno = src->si_errno;

    switch (src->si_signo) {
    default:
        dest->si_pid = src->si_pid;
        dest->si_ctid = src->si_ctid;
        dest->si_zoneid = src->si_zoneid;
        dest->si_uid = src->si_uid;
        dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
        break;
    case SIGCLD:
        dest->si_pid = src->si_pid;
        dest->si_ctid = src->si_ctid;
        dest->si_zoneid = src->si_zoneid;
        dest->si_status = src->si_status;
        dest->si_stime = src->si_stime;
        dest->si_utime = src->si_utime;
        break;
    case SIGSEGV:
    case SIGBUS:
    case SIGILL:
    case SIGTRAP:
    case SIGFPE:
    case SIGEMT:
        dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
        dest->si_trapno = src->si_trapno;
        dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
        break;
    case SIGPOLL:
    case SIGXFSZ:
        dest->si_fd = src->si_fd;
        dest->si_band = src->si_band;
        break;
    case SIGPROF:
        dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
        dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
        dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
        dest->si_syscall = src->si_syscall;
        dest->si_nsysarg = src->si_nsysarg;
        dest->si_fault = src->si_fault;
        break;
    }
}
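
/*
 * Illustrative sketch (editorial addition): delivering a queued siginfo
 * to a 32-bit lwp.  Only sival_int survives the trip, per the comment
 * above; a 64-bit sender's sival_ptr is deliberately not transmitted.
 * The copyout() destination is hypothetical.
 *
 *	siginfo32_t si32;
 *
 *	siginfo_kto32(&sqp->sq_info, &si32);
 *	-- then copyout(&si32, sp, sizeof (si32)) onto the user stack
 */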

void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
    bzero(dest, sizeof (*dest));

    /*
     * The absolute minimum content is si_signo and si_code.
     */
    dest->si_signo = src->si_signo;
    if ((dest->si_code = src->si_code) == SI_NOINFO)
        return;

    /*
     * A siginfo generated at user level is structured
     * differently from one generated by the kernel.
     */
    if (SI_FROMUSER(src)) {
        dest->si_pid = src->si_pid;
        dest->si_ctid = src->si_ctid;
        dest->si_zoneid = src->si_zoneid;
        dest->si_uid = src->si_uid;
        if (SI_CANQUEUE(src->si_code))
            dest->si_value.sival_int =
                (int)src->si_value.sival_int;
        return;
    }

    dest->si_errno = src->si_errno;

    switch (src->si_signo) {
    default:
        dest->si_pid = src->si_pid;
        dest->si_ctid = src->si_ctid;
        dest->si_zoneid = src->si_zoneid;
        dest->si_uid = src->si_uid;
        dest->si_value.sival_int = (int)src->si_value.sival_int;
        break;
    case SIGCLD:
        dest->si_pid = src->si_pid;
        dest->si_ctid = src->si_ctid;
        dest->si_zoneid = src->si_zoneid;
        dest->si_status = src->si_status;
        dest->si_stime = src->si_stime;
        dest->si_utime = src->si_utime;
        break;
    case SIGSEGV:
    case SIGBUS:
    case SIGILL:
    case SIGTRAP:
    case SIGFPE:
    case SIGEMT:
        dest->si_addr = (void *)(uintptr_t)src->si_addr;
        dest->si_trapno = src->si_trapno;
        dest->si_pc = (void *)(uintptr_t)src->si_pc;
        break;
    case SIGPOLL:
    case SIGXFSZ:
        dest->si_fd = src->si_fd;
        dest->si_band = src->si_band;
        break;
    case SIGPROF:
        dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
        dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
        dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
        dest->si_syscall = src->si_syscall;
        dest->si_nsysarg = src->si_nsysarg;
        dest->si_fault = src->si_fault;
        break;
    }
}

#endif	/* _SYSCALL32_IMPL */