/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <sys/bitmap.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/* MUST be contiguous */
k_sigset_t nullsmask = {0, 0};

k_sigset_t fillset = {FILLSET0, FILLSET1};

k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
			|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
			(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
			|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
			|sigmask(SIGJVM2))};

k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
			|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
			|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
			|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
			|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
			0};

static	int	isjobstop(int);
static	void	post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variable for counting the number of user thread stop requests
 * posted.  It may not be accurate in some special situations, such as
 * when a virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop requests
 * are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);
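/*
 * Illustrative note (an assumption for readers, not authoritative): a
 * k_sigset_t here is two 32-bit words of signal bits (see sigorset()
 * and friends near the bottom of this file), so in the initializers
 * above, signals 1 through 32 populate the first word and signals
 * above 32, such as SIGLWP and SIGJVM1, populate the second.
 */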
/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 *	the signal is not being accepted via sigwait()
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||			/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig) &&	/* and signal not blocked */
	    !sigismember(&t->t_sigwait, sig)));	/* and not being accepted */
}

/*
 * Return true if this thread is going to eat this signal soon.
 * Note that, if the signal is SIGKILL, we force stopped threads to be
 * set running (to make SIGKILL be a sure kill), but only if the process
 * is not currently locked by /proc (the P_PR_LOCK flag).  Code in /proc
 * relies on the fact that a process will not change shape while P_PR_LOCK
 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
 * ensure that the process is not locked by /proc, but prbarrier() drops
 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
		    !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			aston(t);	/* make it do issig promptly */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}
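/*
 * Illustrative sketch (simplified, not authoritative): signal posting
 * flows through the routines above roughly as
 *
 *	psignal(p, sig)			acquires p->p_lock
 *	  -> sigtoproc(p, NULL, sig)	sets the pending bit (p_sig/t_sig)
 *	    -> eat_signal(t, sig)	wakes or pokes a suitable lwp
 *
 * after which the chosen lwp notices t_sig_check on its next trip
 * through the kernel boundary and runs issig()/psig() below.
 */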
/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (sig <= 0 || sig >= NSIG)
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
		 */
		p->p_flag |= SSCONT;
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 * For SIGKILL make all of the lwps see the signal;
		 * this is needed to guarantee a sure kill for processes
		 * with a mix of realtime and non-realtime threads.
		 */
		int su = 0;

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig) && sig != SIGKILL) {
				thread_unlock(tt);
				break;
			}
			if (SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
		    !(p->p_proc_flag & P_PR_LOCK)) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}

static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
	    sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:	Extract the next pending signal from p_sig into p_cursig;
 *	stop the process if a stop has been requested or if a traced signal
 *	is pending.
 *
 * JUSTLOOKING:	Don't stop the process, just indicate whether or not
 *	a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}


static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 * "Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When single-stepping
	 * a process, we don't want an intervening signal or TP_PAUSE request
	 * to suspend the current thread.  Otherwise, the controlling process
	 * will hang because we will be stopped with TS_PSTART set in
	 * t_schedflag.  We will trigger any remaining signals when we
	 * re-enter the kernel on the single step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (lwp->lwp_nostop == 0 &&
	    (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag &
	    (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    sigismember(&t->t_sigwait, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    PTOU(curproc)->u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}

static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;
	int sigcld_found = 0;
	int nostop_break = 0;

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			t->t_sig_check = 1;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang because we
		 * will be stopped with TS_PSTART set in t_schedflag.  We will
		 * trigger any remaining signals when we re-enter the kernel
		 * on the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the remaining
		 * signals and requests.  Honor other stop requests or
		 * signals later.  Go back to top of loop here to check
		 * if an exit or hold event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to top of loop to check if an exit
		 * or hold event has occurred while stopped.
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * Do not want to go back to loop here since this is a special
		 * stop that means: make incremental progress before the next
		 * stop.  The danger is that returning to top of loop would
		 * most likely drop the thread right back here to stop soon
		 * after it was continued, violating the incremental progress
		 * request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 *
		 * If the current thread is accepting the signal
		 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
		 * we allow the signal to be accepted, even if it is
		 * being ignored, and without causing a job control stop.
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (sigismember(&t->t_sigwait, sig) ||
			    (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig))) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}

		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
		 */
		for (;;) {
			if ((sig = fsig(&t->t_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 0;
				if (tracing(p, sig) ||
				    sigismember(&t->t_sigwait, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    sigismember(&t->t_sigwait, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
	 * Don't do it if we are returning SIGCLD and the signal
	 * handler will be reset by psig(); this enables reliable
	 * delivery of SIGCLD even when using the old, broken
	 * signal() interface for setting the signal handler.
	 */
	if (sigcld_found &&
	    (sig != SIGCLD || !sigismember(&PTOU(curproc)->u_sigresethand,
	    SIGCLD)))
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since p_lock was dropped above, then
	 * this promoted signal might have been handled already when we
	 * were on the way back from the sleep queue, so just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number.  Also see comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}
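/*
 * Illustrative note (drawn from the comments above, simplified): the
 * kernel-exit path consumes signals with the pair
 *
 *	if (issig(FORREAL))
 *		psig();
 *
 * while interruptibly sleeping code uses issig(JUSTLOOKING), which
 * takes no locks, merely to decide whether waking up for the FORREAL
 * pass is worthwhile.
 */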
/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
 */
int
jobstopped(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) == NULL)
		return (0);

	do {
		thread_lock(t);
		/* ignore current, zombie and suspended lwps in the test */
		if (!(t == curthread || t->t_state == TS_ZOMB ||
		    SUSPENDED(t)) &&
		    (t->t_state != TS_STOPPED ||
		    t->t_whystop != PR_JOBCONTROL)) {
			thread_unlock(t);
			return (0);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (1);
}

/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
	kthread_t	*t = curthread;
	proc_t		*p = ttoproc(t);
	klwp_t		*lwp = ttolwp(t);
	kthread_t	*tx;
	lwpent_t	*lep;
	int		procstop;
	int		flags = TS_ALLSTART;
	hrtime_t	stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop flag.
	 */
	if (lwp->lwp_nostop)
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
		 * then no debugger is present and we also do synchronous stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}

				/* Move waiting thread to run queue */
				if (ISWAITING(tx))
					setrun_locked(tx);

				/*
				 * force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock.  Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped
		 * state.  If we are stopping for a SUSPEND_PAUSE, we may be
		 * the last lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);			/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/* Interface for resetting user thread stop count. */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request. */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped. */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_timedwait(&utstop_cv, &thread_stop_lock,
		    ticks + lbolt);
	mutex_exit(&thread_stop_lock);
}
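/*
 * Illustrative sketch (an assumption, not a verbatim caller): the
 * checkpoint code is expected to pair the utstop interfaces above
 * roughly as follows, with each target lwp calling del_one_utstop()
 * from stop(PR_CHECKPOINT, ...) as it stops:
 *
 *	utstop_init();
 *	for each lwp to stop:
 *		add_one_utstop();
 *		(post its TP_CHKPT stop request)
 *	utstop_timedwait(hz);		wait up to ~1 second for all stops
 */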
/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = PTOU(curproc)->u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * Save the siginfo pointer here, in case the
		 * signal's reset bit is on.
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			sip = &lwp->lwp_siginfo;
			if (sqp) {
				bcopy(&sqp->sq_info, sip, sizeof (*sip));
				/*
				 * If we were interrupted out of a system call
				 * due to pthread_cancel(), inform libc.
				 */
				if (sig == SIGCANCEL &&
				    sip->si_code == SI_LWP &&
				    t->t_sysnum != 0)
					schedctl_cancel_eintr();
			} else if (sig == SIGPROF && sip->si_signo == SIGPROF &&
			    t->t_rprof != NULL && t->t_rprof->rp_anystate) {
				/* EMPTY */;
			} else {
				bzero(sip, sizeof (*sip));
				sip->si_signo = sig;
				sip->si_code = SI_NOINFO;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
		if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		proc_is_exiting(p);
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
			if (audit_active)	/* audit core dump */
				audit_core_start(sig);
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
			if (audit_active)	/* audit core dump */
				audit_core_finish(code);
		}
	}

	/*
	 * Generate a contract event once if the process is killed
	 * by a signal.
	 */
	if (ext) {
		proc_is_exiting(p);
		if (exitlwps(0) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);
	}

	exit(code, sig);
}
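/*
 * Illustrative example (hypothetical signals, not from the source): if
 * SIGPROF, SIGINT and SIGUSR1 are all pending and unheld, fsig() below
 * returns SIGPROF first, then SIGINT, then SIGUSR1; SIGKILL and SIGPROF
 * jump the queue and the remainder are promoted in ascending signal
 * number order.
 */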
/*
 * Find next unheld signal in ssp for thread t.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}

void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	PTOU(curproc)->u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		sigdelset(&p->p_ignore, sig);
		PTOU(curproc)->u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&PTOU(curproc)->u_sigresethand, sig);
			else
				sigdelset(&PTOU(curproc)->u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&PTOU(curproc)->u_signodefer, sig);
		else
			sigdelset(&PTOU(curproc)->u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&PTOU(curproc)->u_sigrestart, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&PTOU(curproc)->u_sigonstack, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigonstack, sig);

	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);

	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB &&
				    !(cp->p_pidflag & CLDWAITPID))
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}

/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
			if (!(cp->p_pidflag & CLDWAITPID))
				freeproc(cp);
		} else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}
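/*
 * Illustrative sketch (simplified, drawn from this file): the SIGCLD
 * pipeline is
 *
 *	sigcld(cp, sqp)			caller holds pidlock
 *	  -> post_sigcld(cp, sqp)	posts to the parent, or sets
 *					CLDPEND if one is already pending
 *	issig_forreal()
 *	  -> sigcld_repost()		after a SIGCLD is dequeued, posts
 *					the next pending one, if any
 */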
/*
 * Common code called from sigcld() and issig_forreal().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
	 * then just mark the child process so that its SIGCLD will
	 * be posted later, when the first SIGCLD is taken off the
	 * queue or when the parent is ready to receive it, if ever.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN ||
	    sigismember(&pp->p_sig, SIGCLD))
		cp->p_pidflag |= CLDPEND;
	else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}

/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	sigqueue_t *sqp;

	/*
	 * Don't bother if SIGCLD is not now being caught.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN)
		return;

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * Count the number of sigqueues sent by sigaddqa().
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}

/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}

/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}

/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with the sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigqp is to be
 * explicitly queued, or to be queued to depth one.
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid a situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig().  So we create a
	 * separate stash for SIGKILL's sigqueue_t.  Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else		/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}

/*
 * The function sigaddqa() is called with the sigqueue already allocated.
 * If the signal is ignored, discard it, but guarantee KILL and
 * generation semantics.
 * It is called from sigqueue() and other places.
 */
void
sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	int sig = sigqp->sq_info.si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	if (sig_discardable(p, sig))
		siginfofree(sigqp);
	else
		sigaddqins(p, t, sigqp);

	sigtoproc(p, t, sig);
}
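
/*
 * A worked example of the queueing rule in sigaddqins(), under the
 * assumption that p_siginfo tracks the signals for which the process
 * requested extended siginfo: repeated sigqueue() sends (si_code
 * SI_QUEUE) of such a signal append one sigqueue_t per send, while
 * repeated kill()s (si_code SI_USER, which is not SI_CANQUEUE) of the
 * same signal collapse to a single pending entry, because the
 * duplicate-signo scan above frees the later arrivals.
 */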

/*
 * Allocate the sigqueue_t structure and call sigaddqins().
 */
void
sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
{
	sigqueue_t *sqp;
	int sig = infop->si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	/*
	 * If the signal will be discarded by sigtoproc() or
	 * if the process isn't requesting siginfo and it isn't
	 * blocking the signal (it *could* change its mind while
	 * the signal is pending) then don't bother creating one.
	 */
	if (!sig_discardable(p, sig) &&
	    (sigismember(&p->p_siginfo, sig) ||
	    (curproc->p_ct_process != p->p_ct_process) ||
	    (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
	    ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
		bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
		sqp->sq_func = NULL;
		sqp->sq_next = NULL;
		sigaddqins(p, t, sqp);
	}
	sigtoproc(p, t, sig);
}

/*
 * Handle stop-on-fault processing for the debugger.  Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
 */
int
stop_on_fault(uint_t fault, k_siginfo_t *sip)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	ASSERT(prismember(&p->p_fltmask, fault));

	/*
	 * Record the current fault and siginfo structure so the
	 * debugger can find it.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_curflt = (uchar_t)fault;
	lwp->lwp_siginfo = *sip;

	stop(PR_FAULTED, fault);

	fault = lwp->lwp_curflt;
	lwp->lwp_curflt = 0;
	mutex_exit(&p->p_lock);
	return (fault);
}

void
sigorset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] |= s2->__sigbits[0];
	s1->__sigbits[1] |= s2->__sigbits[1];
}

void
sigandset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= s2->__sigbits[0];
	s1->__sigbits[1] &= s2->__sigbits[1];
}

void
sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= ~(s2->__sigbits[0]);
	s1->__sigbits[1] &= ~(s2->__sigbits[1]);
}

/*
 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
 * if there are any signals the thread might take on return from the kernel.
 * If k_sigset_t's were a single word, we would do:
 *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
 */
int
sigcheck(proc_t *p, kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	/*
	 * If signals are blocked via the schedctl interface
	 * then we only check for the unmaskable signals.
	 */
	if (tdp != NULL && tdp->sc_sigblock)
		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
		    CANTMASK0);

	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
	    ~t->t_hold.__sigbits[0]) |
	    (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
	    ~t->t_hold.__sigbits[1]) & FILLSET1));
}
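
/*
 * A minimal usage sketch for sigintr()/sigunintr() below, bracketing
 * an interruptible wait in the style of an NFS client; the names
 * some_cv, some_lock, and intr_allowed are placeholders, not a real
 * call site:
 *
 *	k_sigset_t smask;
 *
 *	sigintr(&smask, intr_allowed);		narrow t_hold
 *	rv = cv_wait_sig(&some_cv, &some_lock);	wait, interruptibly
 *	sigunintr(&smask);			restore t_hold
 */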

/* ONC_PLUS EXTRACT START */
void
sigintr(k_sigset_t *smask, int intable)
{
	proc_t *p;
	int owned;
	k_sigset_t lmask;	/* local copy of cantmask */
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
	 * and SIGTERM, preserving signals that are already masked.
	 * This function supports the intr NFS and UFS mount option.
	 */

	/*
	 * Don't do kernel threads.
	 */
	if (lwp == NULL)
		return;

	/*
	 * Get access to the signal mask.
	 */
	p = ttoproc(curthread);
	owned = mutex_owned(&p->p_lock);	/* this is filthy */
	if (!owned)
		mutex_enter(&p->p_lock);

	/*
	 * Remember the current mask.
	 */
	schedctl_finish_sigblock(curthread);
	*smask = curthread->t_hold;

	/*
	 * Mask out all signals.
	 */
	sigfillset(&curthread->t_hold);

	/*
	 * Unmask the non-maskable signals (e.g., KILL), as long as
	 * they aren't already masked (which could happen at exit).
	 * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
	 * second sets the current hold mask to (~0 & ~lmask), which reduces
	 * to (~cantmask | curhold).
	 */
	lmask = cantmask;
	sigdiffset(&lmask, smask);
	sigdiffset(&curthread->t_hold, &lmask);

	/*
	 * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
	 * Re-enable INT if it was originally enabled and the NFS mount
	 * option nointr is not set.
	 */
	if (!sigismember(smask, SIGHUP))
		sigdelset(&curthread->t_hold, SIGHUP);
	if (!sigismember(smask, SIGINT) && intable)
		sigdelset(&curthread->t_hold, SIGINT);
	if (!sigismember(smask, SIGQUIT))
		sigdelset(&curthread->t_hold, SIGQUIT);
	if (!sigismember(smask, SIGTERM))
		sigdelset(&curthread->t_hold, SIGTERM);

	/*
	 * Release access to the signal mask.
	 */
	if (!owned)
		mutex_exit(&p->p_lock);

	/*
	 * Indicate that this lwp is not to be stopped.
	 */
	lwp->lwp_nostop++;
}
/* ONC_PLUS EXTRACT END */

void
sigunintr(k_sigset_t *smask)
{
	proc_t *p;
	int owned;
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Reset the previous mask (see sigintr() above).
	 */
	if (lwp != NULL) {
		lwp->lwp_nostop--;	/* restore lwp stoppability */
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		curthread->t_hold = *smask;
		/* so unmasked signals will be seen */
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

void
sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
{
	proc_t *p;
	int owned;

	/*
	 * Save the current signal mask in oldmask, then
	 * set it to newmask.
	 */
	if (ttolwp(curthread) != NULL) {
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(curthread);
		if (oldmask != NULL)
			*oldmask = curthread->t_hold;
		curthread->t_hold = *newmask;
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

/*
 * Return true if the signal number is in range
 * and the signal code specifies signal queueing.
 */
int
sigwillqueue(int sig, int code)
{
	if (sig >= 0 && sig < NSIG) {
		switch (code) {
		case SI_QUEUE:
		case SI_TIMER:
		case SI_ASYNCIO:
		case SI_MESGQ:
			return (1);
		}
	}
	return (0);
}

#ifndef	UCHAR_MAX
#define	UCHAR_MAX	255
#endif
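
/*
 * A worked example of the pool geometry set up below, using the
 * illustrative values size = sizeof (sigqueue_t) and maxcount = 32:
 * a single kmem_alloc() of (32 * size + sizeof (sigqhdr_t)) bytes
 * yields the header immediately followed by 32 sigqueue_t entries,
 * which are threaded onto sqb_free.  The uchar_t/ushort_t counter
 * fields are why the ASSERT in sigqhdralloc() bounds maxcount by
 * UCHAR_MAX and the total size by USHRT_MAX.
 */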

/*
 * The entire pool (with maxcount entries) is pre-allocated at
 * the first sigqueue/signotify call.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
	size_t i;
	sigqueue_t *sq, *next;
	sigqhdr_t *sqh;

	i = (maxcount * size) + sizeof (sigqhdr_t);
	ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
	sqh = kmem_alloc(i, KM_SLEEP);
	sqh->sqb_count = (uchar_t)maxcount;
	sqh->sqb_maxcount = (uchar_t)maxcount;
	sqh->sqb_size = (ushort_t)i;
	sqh->sqb_pexited = 0;
	sqh->sqb_sent = 0;
	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
	for (i = maxcount - 1; i != 0; i--) {
		next = (sigqueue_t *)((uintptr_t)sq + size);
		sq->sq_next = next;
		sq = next;
	}
	sq->sq_next = NULL;
	cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
	return (sqh);
}

static void sigqrel(sigqueue_t *);

/*
 * Allocate a sigqueue/signotify structure from the per-process
 * pre-allocated pool.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
	sigqueue_t *sq = NULL;

	ASSERT(MUTEX_HELD(&curproc->p_lock));

	if (sqh != NULL) {
		mutex_enter(&sqh->sqb_lock);
		if (sqh->sqb_count > 0) {
			sqh->sqb_count--;
			sq = sqh->sqb_free;
			sqh->sqb_free = sq->sq_next;
			mutex_exit(&sqh->sqb_lock);
			bzero(&sq->sq_info, sizeof (k_siginfo_t));
			sq->sq_backptr = sqh;
			sq->sq_func = sigqrel;
			sq->sq_next = NULL;
			sq->sq_external = 0;
		} else {
			mutex_exit(&sqh->sqb_lock);
		}
	}
	return (sq);
}

/*
 * Return a sigqueue structure back to the pre-allocated pool.
 */
static void
sigqrel(sigqueue_t *sq)
{
	sigqhdr_t *sqh;

	/* make sure that p_lock of the affected process is held */

	sqh = (sigqhdr_t *)sq->sq_backptr;
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
		mutex_exit(&sqh->sqb_lock);
		cv_destroy(&sqh->sqb_cv);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_count++;
		sqh->sqb_sent--;
		sq->sq_next = sqh->sqb_free;
		sq->sq_backptr = NULL;
		sqh->sqb_free = sq;
		cv_signal(&sqh->sqb_cv);
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up the pre-allocated sigqueue headers of the sigqueue pool
 * and the signotify pool, if possible.
 * Called only by the owning process during exec() and exit().
 */
void
sigqfree(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
		sigqhdrfree(p->p_sigqhdr);
		p->p_sigqhdr = NULL;
	}
	if (p->p_signhdr != NULL) {	/* signotify pool */
		sigqhdrfree(p->p_signhdr);
		p->p_signhdr = NULL;
	}
}

/*
 * Free up the pre-allocated header and sigq pool if possible.
 */
void
sigqhdrfree(sigqhdr_t *sqh)
{
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_sent == 0) {
		mutex_exit(&sqh->sqb_lock);
		cv_destroy(&sqh->sqb_cv);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_pexited = 1;
		mutex_exit(&sqh->sqb_lock);
	}
}
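
/*
 * A lifetime sketch for the pool above, under the assumption that the
 * header is installed on the process at the first sigqueue()/
 * signotify() call and torn down at exec()/exit(); "max" is a
 * placeholder for the per-process limit, and the real call sites are
 * outside this file:
 *
 *	p->p_sigqhdr = sigqhdralloc(sizeof (sigqueue_t), max);
 *	sqp = sigqalloc(p->p_sigqhdr);	take an entry from sqb_free
 *	...				entry travels on a signal queue
 *	siginfofree(sqp);		sq_func == sigqrel, so the
 *					entry is recycled, not freed
 *	sigqfree(p);			release both pools
 */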

/*
 * Free up a single sigqueue structure.
 * No other code should free a sigqueue directly.
 */
void
siginfofree(sigqueue_t *sqp)
{
	if (sqp != NULL) {
		if (sqp->sq_func != NULL)
			(sqp->sq_func)(sqp);
		else
			kmem_free(sqp, sizeof (sigqueue_t));
	}
}

/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp.  Called from trap().
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
	proc_t *p = ttoproc(curthread);
	int sig = ip->si_signo;
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	ASSERT(sig > 0 && sig < NSIG);

	if (curthread->t_dtrace_on)
		dtrace_safe_synchronous_signal();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
	/*
	 * Avoid a possible infinite loop if the lwp is holding the
	 * signal generated by a trap of a restartable instruction or
	 * if the signal so generated is being ignored by the process.
	 */
	if (restartable &&
	    (sigismember(&curthread->t_hold, sig) ||
	    p->p_user.u_signal[sig-1] == SIG_IGN)) {
		sigdelset(&curthread->t_hold, sig);
		p->p_user.u_signal[sig-1] = SIG_DFL;
		sigdelset(&p->p_ignore, sig);
	}
	bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
	sigaddqa(p, curthread, sqp);
	mutex_exit(&p->p_lock);
}
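
/*
 * A minimal sketch of the trap() side of trapsig(), assuming a data
 * fault; the field values are illustrative, not a real call site:
 *
 *	k_siginfo_t si;
 *
 *	bzero(&si, sizeof (si));
 *	si.si_signo = SIGSEGV;
 *	si.si_code = SEGV_MAPERR;
 *	si.si_addr = (caddr_t)badaddr;
 *	trapsig(&si, 1);	the faulting instruction is restartable
 */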

#ifdef _SYSCALL32_IMPL

/*
 * It's tricky to transmit a sigval between 32-bit and 64-bit
 * processes, since in the 64-bit world, a pointer and an integer
 * are different sizes.  Since we're constrained by the standards
 * world not to change the types, and it's unclear how useful it is
 * to send pointers between address spaces this way, we preserve
 * the 'int' interpretation for 32-bit processes interoperating
 * with 64-bit processes.  The full semantics (pointers or integers)
 * are available for N-bit processes interoperating with N-bit
 * processes.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int32_t)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}
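
/*
 * Note that the 64-bit-to-32-bit direction above is lossy for
 * pointer-valued fields: si_addr, si_pc, and si_faddr are narrowed
 * through (caddr32_t)(uintptr_t) casts, so only the low 32 bits
 * survive.  The reverse direction below widens them back with zero
 * extension, which is exact.
 */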

void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (void *)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (void *)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}

#endif	/* _SYSCALL32_IMPL */