/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/* MUST be contiguous */
k_sigset_t nullsmask = {0, 0};

k_sigset_t fillset = {FILLSET0, FILLSET1};

k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
			|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
			(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
			|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
			|sigmask(SIGJVM2))};

k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
			|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
			|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
			|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
			|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
			0};

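/*
 * A rough sketch of how the tables above are used: each k_sigset_t is
 * a pair of bit masks indexed by signal number, which is why every
 * initializer supplies two words -- signals above 32 (SIGLWP, SIGJVM1,
 * etc.) live in the second word.  Typical use, as seen throughout this
 * file:
 *
 *	if (sigismember(&stopdefault, sig))
 *		...	(default action is to stop the process)
 *
 * sigmask(sig) yields the word-relative bit for sig; the initializers
 * place each bit in the proper word by hand.
 */
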
static int isjobstop(int);
static void post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variable for counting the number of user thread stop
 * requests posted.  It may not be accurate in some special situations,
 * such as when a virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop
 * requests are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);

/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||		/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig)));	/* and signal not blocked */
}

/*
 * Return true if this thread is going to eat this signal soon.
 * Note that, if the signal is SIGKILL, we force stopped threads to be
 * set running (to make SIGKILL be a sure kill), but only if the process
 * is not currently locked by /proc (the P_PR_LOCK flag).  Code in /proc
 * relies on the fact that a process will not change shape while P_PR_LOCK
 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
 * ensure that the process is not locked by /proc, but prbarrier() drops
 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (t->t_state == TS_SLEEP && (t->t_flag & T_WAKEABLE)) {
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
		    !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			aston(t);	/* make it do issig promptly */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}

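/*
 * An informal sketch of the common directed-signal path through the
 * functions above and below (all defined in this file):
 *
 *	tsignal(t, sig)			grabs p->p_lock
 *	    sigtoproc(p, t, sig)	posts sig, sets t_sig
 *		eat_signal(t, sig)	wakes or pokes the lwp
 *
 * The target lwp later notices t_sig_check on its way out of the
 * kernel, calls issig(FORREAL) to promote the signal, and psig()
 * to act on it.
 */
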
/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (sig <= 0 || sig >= NSIG)
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
		 */
		p->p_flag |= SSCONT;
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 */
		int su = 0;

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig)) {
				thread_unlock(tt);
				break;
			}
			if (sig == SIGKILL && SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
		    !(p->p_proc_flag & P_PR_LOCK)) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}

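/*
 * Worked example for the undirected case above: in a process with
 * several lwps, the p_tlist walk stops at the first lwp for which
 * eat_signal() returns nonzero (say, one sleeping interruptibly),
 * and that lwp is set running to field the signal.  Lwps that have
 * the signal blocked are skipped.  If the signal is SIGKILL and every
 * lwp turns out to be suspended, one of them is forced to run so the
 * process can die rather than deadlock.
 */
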
static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (u.u_signal[sig-1] == SIG_DFL && sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 * stop the process if a stop has been requested or if a traced signal
 * is pending.
 *
 * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 * a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}

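/*
 * A caller's-eye sketch (the common pattern in this file and in
 * post_syscall(), usually reached via the ISSIG() convenience check
 * mentioned in issig_forreal() below):
 *
 *	if (issig(FORREAL))
 *		psig();
 *
 * issig(JUSTLOOKING) is the lock-free variant used from sleep-queue
 * code, where grabbing p->p_lock is not an option.
 */
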
static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 * "Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When
	 * single-stepping a process, we don't want an intervening signal
	 * or TP_PAUSE request to suspend the current thread.  Otherwise,
	 * the controlling process will hang because we will be stopped
	 * with TS_PSTART set in t_schedflag.  We will trigger any
	 * remaining signals when we re-enter the kernel on the single
	 * step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (!lwp->lwp_nostop_r && ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag & TP_HOLDLWP))) ||
	    (!lwp->lwp_nostop && (p->p_stopsig | (t->t_proc_flag &
	    (TP_PRSTOP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    u.u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}

static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;
	int sigcld_found = 0;
	int nostop_break = 0;

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang because we
		 * will be stopped with TS_PSTART set in t_schedflag.  We
		 * will trigger any remaining signals when we re-enter the
		 * kernel on the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the rest of the
		 * signals or requests.  Honor other stop requests or
		 * signals later.  Go back to the top of the loop here to
		 * check if an exit or hold event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to the top of the loop to check if an exit
		 * or hold event has occurred while stopped.
		 * We explicitly allow this form of stopping of one
		 * lwp in a process by another lwp in the same process,
		 * even if lwp->lwp_nostop is set, because otherwise a
		 * process can become deadlocked on a fork1().
		 * Allow this only if lwp_nostop_r is not set,
		 * to avoid a recursive call to prstop().
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop_r) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor a requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * We do not want to go back to the loop here since this is
		 * a special stop that means: make incremental progress
		 * before the next stop.  The danger is that returning to
		 * the top of the loop would most likely drop the thread
		 * right back here to stop soon after it was continued,
		 * violating the incremental progress request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig)) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}

		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
		 */
		for (;;) {
			k_sigset_t tsig;

			tsig = t->t_sig;
			if ((sig = fsig(&tsig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 0;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
	 * Don't do it if we are returning SIGCLD and the signal
	 * handler will be reset by psig(); this enables reliable
	 * delivery of SIGCLD even when using the old, broken
	 * signal() interface for setting the signal handler.
	 */
	if (sigcld_found &&
	    (sig != SIGCLD || !sigismember(&u.u_sigresethand, SIGCLD)))
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since p_lock was dropped above, this
	 * promoted signal might already have been handled while we were
	 * on the way back from the sleep queue, so just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number.  Also see the comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}

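/*
 * In short, issig_forreal() returns nonzero iff there is a signal for
 * the caller to act on: normally the one promoted into lwp_cursig, or,
 * when lwp_nostop forced an early exit, just the pending signal number.
 * It returns zero once every pending signal has been discarded, eaten
 * by a job-control stop, or otherwise disposed of.
 */
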
/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
 */
int
jobstopped(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) == NULL)
		return (0);

	do {
		thread_lock(t);
		/* ignore current, zombie and suspended lwps in the test */
		if (!(t == curthread || t->t_state == TS_ZOMB ||
		    SUSPENDED(t)) &&
		    (t->t_state != TS_STOPPED ||
		    t->t_whystop != PR_JOBCONTROL)) {
			thread_unlock(t);
			return (0);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (1);
}

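/*
 * For reference, the (why, what) pairs passed to stop() within this
 * file are: (PR_JOBCONTROL, stopping signal), (PR_SUSPENDED,
 * SUSPEND_NORMAL or SUSPEND_PAUSE), (PR_CHECKPOINT, 0),
 * (PR_REQUESTED, 0), (PR_SIGNALLED, traced signal), and, from
 * stop_on_fault() below, (PR_FAULTED, fault number).  For example,
 * isjobstop() above issues:
 *
 *	stop(PR_JOBCONTROL, sig);
 */
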
/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	kthread_t *tx;
	lwpent_t *lep;
	int procstop;
	int flags = TS_ALLSTART;
	hrtime_t stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop_r flag and increments lwp_nostop.
	 */
	if (lwp->lwp_nostop_r ||
	    (lwp->lwp_nostop &&
	    (why != PR_SUSPENDED || what != SUSPEND_NORMAL)))
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
		 * then no debugger is present and we also do synchronous stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}
				/*
				 * Force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock.  Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped
		 * state.  If we are stopping for a SUSPEND_PAUSE, we may be
		 * the last lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);		/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/* Interface for resetting the user thread stop count. */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request. */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped. */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_timedwait(&utstop_cv, &thread_stop_lock,
		    ticks + lbolt);
	mutex_exit(&thread_stop_lock);
}

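/*
 * A sketch of the stop-request protocol implied by the four interfaces
 * above: the requesting thread calls utstop_init(), then
 * add_one_utstop() once per lwp it asks to stop, then
 * utstop_timedwait(ticks) to wait.  Each lwp stopping for
 * PR_CHECKPOINT calls del_one_utstop() from stop() above, and the one
 * that brings the count to zero broadcasts utstop_cv.
 */
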
/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = u.u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * Save the siginfo pointer here, in case the
		 * signal's reset bit is on.
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			if (sqp) {
				bcopy(&sqp->sq_info, &lwp->lwp_siginfo,
				    sizeof (k_siginfo_t));
				sip = &lwp->lwp_siginfo;
			} else if (sig == SIGPROF &&
			    t->t_rprof != NULL &&
			    t->t_rprof->rp_anystate &&
			    lwp->lwp_siginfo.si_signo == SIGPROF) {
				sip = &lwp->lwp_siginfo;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &u.u_sigmask[sig-1]);
		if (!sigismember(&u.u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&u.u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		proc_is_exiting(p);
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_start(sig);
#endif
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_finish(code);
#endif
		}
	}
	if (ext)
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);

	exit(code, sig);
}

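/*
 * A note on the sendsig() failure path in psig() above: if the signal
 * frame cannot be pushed (e.g., a bad user stack), sendsig()/sendsig32()
 * return 0, lwp_cursig is set to SIGSEGV, and control falls through to
 * the termination code.  Since SIGSEGV is in coredefault, the process
 * then exits with a core dump attempt instead of looping on delivery.
 */
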
/*
 * Find next unheld signal in ssp for thread t.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}

void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	u.u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		sigdelset(&p->p_ignore, sig);
		u.u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&u.u_sigresethand, sig);
			else
				sigdelset(&u.u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&u.u_signodefer, sig);
		else
			sigdelset(&u.u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&u.u_sigrestart, sig);
		else
			sigdelset(&u.u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&u.u_sigonstack, sig);
		else
			sigdelset(&u.u_sigonstack, sig);

	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);

	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if (p->p_flag & SNOWAIT || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB)
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}

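/*
 * Informal example of how setsigact() is typically reached (via the
 * sigaction(2) system call, which holds p->p_lock around the call):
 * a user-level request such as
 *
 *	sa.sa_handler = handler;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * ends up here as setsigact(SIGUSR1, handler, mask, SA_RESTART),
 * which is why u_signal[], u_sigmask[] and the per-flag sets are all
 * updated together under the one lock.
 */
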
/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to the newstate list of the parent.
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    (PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN))
			freeproc(cp);
		else {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}

/*
 * Common code called from sigcld() and issig_forreal().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
	 * then just mark the child process so that its SIGCLD will
	 * be posted later, when the first SIGCLD is taken off the
	 * queue or when the parent is ready to receive it, if ever.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN ||
	    sigismember(&pp->p_sig, SIGCLD))
		cp->p_pidflag |= CLDPEND;
	else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}

/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	sigqueue_t *sqp;

	/*
	 * Don't bother if SIGCLD is not now being caught.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN)
		return;

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * Count the number of sigqueues sent by sigaddqa().
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}

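/*
 * sigsendset() is the engine behind the set-directed signalling
 * system calls (e.g., sigsend(2) and sigsendset(2)): dotoprocs()
 * applies sigsendproc() to every process in *psp, and EPERM is
 * reported only if no process at all accepted the signal (pv->perm
 * remained 0).
 */
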
/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}

/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}

/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with the sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
 */
2020  */
2021 static void
2022 sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2023 {
2024 	sigqueue_t **psqp;
2025 	int sig = sigqp->sq_info.si_signo;
2026 
2027 	sigqp->sq_external = (curproc != &p0) &&
2028 	    (curproc->p_ct_process != p->p_ct_process);
2029 
2030 	/*
2031 	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
2032 	 * is set, and even if it did, we would want to avoid a situation
2033 	 * (which would be unique to SIGKILL) where one thread dequeued
2034 	 * the sigqueue_t and another executed psig(). So we create a
2035 	 * separate stash for SIGKILL's sigqueue_t. Because a second
2036 	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
2037 	 * if (and only if) it was non-extracontractual.
2038 	 */
2039 	if (sig == SIGKILL) {
2040 		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
2041 			if (p->p_killsqp != NULL)
2042 				siginfofree(p->p_killsqp);
2043 			p->p_killsqp = sigqp;
2044 			sigqp->sq_next = NULL;
2045 		} else {
2046 			siginfofree(sigqp);
2047 		}
2048 		return;
2049 	}
2050 
2051 	ASSERT(sig >= 1 && sig < NSIG);
2052 	if (t != NULL)	/* directed to a thread */
2053 		psqp = &t->t_sigqueue;
2054 	else		/* directed to a process */
2055 		psqp = &p->p_sigqueue;
2056 	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
2057 	    sigismember(&p->p_siginfo, sig)) {
2058 		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
2059 			;
2060 	} else {
2061 		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
2062 			if ((*psqp)->sq_info.si_signo == sig) {
2063 				siginfofree(sigqp);
2064 				return;
2065 			}
2066 		}
2067 	}
2068 	*psqp = sigqp;
2069 	sigqp->sq_next = NULL;
2070 }
2071 
2072 /*
2073  * The function sigaddqa() is called with the sigqueue already allocated.
2074  * If the signal is ignored, discard it but guarantee KILL and
2075  * generation semantics. It is called from sigqueue() and other places.
2076  */
2077 void
2078 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2079 {
2080 	int sig = sigqp->sq_info.si_signo;
2081 
2082 	ASSERT(MUTEX_HELD(&p->p_lock));
2083 	ASSERT(sig >= 1 && sig < NSIG);
2084 
2085 	if (sig_discardable(p, sig))
2086 		siginfofree(sigqp);
2087 	else
2088 		sigaddqins(p, t, sigqp);
2089 
2090 	sigtoproc(p, t, sig);
2091 }
2092 
2093 /*
2094  * Allocate the sigqueue_t structure and call sigaddqins().
2095  */
2096 void
2097 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2098 {
2099 	sigqueue_t *sqp;
2100 	int sig = infop->si_signo;
2101 
2102 	ASSERT(MUTEX_HELD(&p->p_lock));
2103 	ASSERT(sig >= 1 && sig < NSIG);
2104 
2105 	/*
2106 	 * If the signal will be discarded by sigtoproc() or
2107 	 * if the process isn't requesting siginfo and it isn't
2108 	 * blocking the signal (it *could* change its mind while
2109 	 * the signal is pending) then don't bother creating one.
2110 	 */
2111 	if (!sig_discardable(p, sig) &&
2112 	    (sigismember(&p->p_siginfo, sig) ||
2113 	    (curproc->p_ct_process != p->p_ct_process) ||
2114 	    (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2115 	    ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2116 		bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2117 		sqp->sq_func = NULL;
2118 		sqp->sq_next = NULL;
2119 		sigaddqins(p, t, sqp);
2120 	}
2121 	sigtoproc(p, t, sig);
2122 }
2123 
2124 /*
2125  * Handle stop-on-fault processing for the debugger. Returns 0
2126  * if the fault is cleared during the stop, nonzero if it isn't.
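 *
 * A sketch of the expected calling pattern (hypothetical; the real
 * callers live in the trap/fault code, not in this file; si is a
 * caller-built k_siginfo_t):
 *
 *	if (prismember(&p->p_fltmask, FLTBPT) &&
 *	    stop_on_fault(FLTBPT, &si) == 0)
 *		return;
 *
 * A zero return means the debugger cleared the fault during the stop,
 * so no signal need be posted.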
2127  */
2128 int
2129 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2130 {
2131 	proc_t *p = ttoproc(curthread);
2132 	klwp_t *lwp = ttolwp(curthread);
2133 
2134 	ASSERT(prismember(&p->p_fltmask, fault));
2135 
2136 	/*
2137 	 * Record the current fault and siginfo structure so the
2138 	 * debugger can find them.
2139 	 */
2140 	mutex_enter(&p->p_lock);
2141 	lwp->lwp_curflt = (uchar_t)fault;
2142 	lwp->lwp_siginfo = *sip;
2143 
2144 	stop(PR_FAULTED, fault);
2145 
2146 	fault = lwp->lwp_curflt;
2147 	lwp->lwp_curflt = 0;
2148 	mutex_exit(&p->p_lock);
2149 	return (fault);
2150 }
2151 
2152 void
2153 sigorset(k_sigset_t *s1, k_sigset_t *s2)
2154 {
2155 	s1->__sigbits[0] |= s2->__sigbits[0];
2156 	s1->__sigbits[1] |= s2->__sigbits[1];
2157 }
2158 
2159 void
2160 sigandset(k_sigset_t *s1, k_sigset_t *s2)
2161 {
2162 	s1->__sigbits[0] &= s2->__sigbits[0];
2163 	s1->__sigbits[1] &= s2->__sigbits[1];
2164 }
2165 
2166 void
2167 sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
2168 {
2169 	s1->__sigbits[0] &= ~(s2->__sigbits[0]);
2170 	s1->__sigbits[1] &= ~(s2->__sigbits[1]);
2171 }
2172 
2173 /*
2174  * Return non-zero if curthread->t_sig_check should be set to 1, that is,
2175  * if there are any signals the thread might take on return from the kernel.
2176  * If k_sigset_t's were a single word, we would do:
2177  *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
2178  */
2179 int
2180 sigcheck(proc_t *p, kthread_t *t)
2181 {
2182 	sc_shared_t *tdp = t->t_schedctl;
2183 
2184 	/*
2185 	 * If signals are blocked via the schedctl interface
2186 	 * then we only check for the unmaskable signals.
2187 	 */
2188 	if (tdp != NULL && tdp->sc_sigblock)
2189 		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2190 		    CANTMASK0);
2191 
2192 	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2193 	    ~t->t_hold.__sigbits[0]) |
2194 	    (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
2195 	    ~t->t_hold.__sigbits[1]) & FILLSET1));
2196 }
2197 
2198 /* ONC_PLUS EXTRACT START */
2199 void
2200 sigintr(k_sigset_t *smask, int intable)
2201 {
2202 	proc_t *p;
2203 	int owned;
2204 	k_sigset_t lmask;	/* local copy of cantmask */
2205 	klwp_t *lwp = ttolwp(curthread);
2206 
2207 	/*
2208 	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
2209 	 * and SIGTERM, preserving the existing masks.
2210 	 * This function supports the -intr nfs and ufs mount option.
2211 	 */
2212 
2213 	/*
2214 	 * don't do kernel threads
2215 	 */
2216 	if (lwp == NULL)
2217 		return;
2218 
2219 	/*
2220 	 * get access to signal mask
2221 	 */
2222 	p = ttoproc(curthread);
2223 	owned = mutex_owned(&p->p_lock);	/* this is filthy */
2224 	if (!owned)
2225 		mutex_enter(&p->p_lock);
2226 
2227 	/*
2228 	 * remember the current mask
2229 	 */
2230 	schedctl_finish_sigblock(curthread);
2231 	*smask = curthread->t_hold;
2232 
2233 	/*
2234 	 * mask out all signals
2235 	 */
2236 	sigfillset(&curthread->t_hold);
2237 
2238 	/*
2239 	 * Unmask the non-maskable signals (e.g., KILL), as long as
2240 	 * they aren't already masked (which could happen at exit).
2241 	 * The first sigdiffset sets lmask to (cantmask & ~curhold). The
2242 	 * second sets the current hold mask to (~0 & ~lmask), which reduces
2243 	 * to (~cantmask | curhold).
2244 	 */
2245 	lmask = cantmask;
2246 	sigdiffset(&lmask, smask);
2247 	sigdiffset(&curthread->t_hold, &lmask);
2248 
2249 	/*
2250 	 * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
2251 	 * Re-enable INT if it was originally enabled and the NFS mount
2252 	 * option nointr is not set.
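	 *
	 * Worked example (illustrative values; assume for the sake of
	 * the example that cantmask is KILL|STOP): if the saved *smask
	 * already contains KILL, then lmask = cantmask & ~*smask = STOP,
	 * and the second sigdiffset above left t_hold = fillset & ~STOP,
	 * i.e. everything except STOP stays masked until the sigdelset
	 * calls below re-enable HUP, QUIT, TERM and, when intable, INT.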
2253 	 */
2254 	if (!sigismember(smask, SIGHUP))
2255 		sigdelset(&curthread->t_hold, SIGHUP);
2256 	if (!sigismember(smask, SIGINT) && intable)
2257 		sigdelset(&curthread->t_hold, SIGINT);
2258 	if (!sigismember(smask, SIGQUIT))
2259 		sigdelset(&curthread->t_hold, SIGQUIT);
2260 	if (!sigismember(smask, SIGTERM))
2261 		sigdelset(&curthread->t_hold, SIGTERM);
2262 
2263 	/*
2264 	 * release access to signal mask
2265 	 */
2266 	if (!owned)
2267 		mutex_exit(&p->p_lock);
2268 
2269 	/*
2270 	 * Indicate that this lwp is not to be stopped.
2271 	 */
2272 	lwp->lwp_nostop++;
2273 
2274 }
2275 /* ONC_PLUS EXTRACT END */
2276 
2277 void
2278 sigunintr(k_sigset_t *smask)
2279 {
2280 	proc_t *p;
2281 	int owned;
2282 	klwp_t *lwp = ttolwp(curthread);
2283 
2284 	/*
2285 	 * Reset the previous mask (see sigintr() above).
2286 	 */
2287 	if (lwp != NULL) {
2288 		lwp->lwp_nostop--;	/* restore lwp stoppability */
2289 		p = ttoproc(curthread);
2290 		owned = mutex_owned(&p->p_lock);	/* this is filthy */
2291 		if (!owned)
2292 			mutex_enter(&p->p_lock);
2293 		curthread->t_hold = *smask;
2294 		/* so unmasked signals will be seen */
2295 		curthread->t_sig_check = 1;
2296 		if (!owned)
2297 			mutex_exit(&p->p_lock);
2298 	}
2299 }
2300 
2301 void
2302 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
2303 {
2304 	proc_t *p;
2305 	int owned;
2306 	/*
2307 	 * Save the current signal mask in oldmask, then
2308 	 * set it to newmask.
2309 	 */
2310 	if (ttolwp(curthread) != NULL) {
2311 		p = ttoproc(curthread);
2312 		owned = mutex_owned(&p->p_lock);	/* this is filthy */
2313 		if (!owned)
2314 			mutex_enter(&p->p_lock);
2315 		schedctl_finish_sigblock(curthread);
2316 		if (oldmask != NULL)
2317 			*oldmask = curthread->t_hold;
2318 		curthread->t_hold = *newmask;
2319 		curthread->t_sig_check = 1;
2320 		if (!owned)
2321 			mutex_exit(&p->p_lock);
2322 	}
2323 }
2324 
2325 /*
2326  * Return true if the signal number is in range
2327  * and the signal code specifies signal queueing.
2328  */
2329 int
2330 sigwillqueue(int sig, int code)
2331 {
2332 	if (sig >= 0 && sig < NSIG) {
2333 		switch (code) {
2334 		case SI_QUEUE:
2335 		case SI_TIMER:
2336 		case SI_ASYNCIO:
2337 		case SI_MESGQ:
2338 			return (1);
2339 		}
2340 	}
2341 	return (0);
2342 }
2343 
2344 #ifndef	UCHAR_MAX
2345 #define	UCHAR_MAX	255
2346 #endif
2347 
2348 /*
2349  * The entire pool (with maxcount entries) is pre-allocated at
2350  * the first sigqueue/signotify call.
2351  */
2352 sigqhdr_t *
2353 sigqhdralloc(size_t size, uint_t maxcount)
2354 {
2355 	size_t i;
2356 	sigqueue_t *sq, *next;
2357 	sigqhdr_t *sqh;
2358 
2359 	i = (maxcount * size) + sizeof (sigqhdr_t);
2360 	ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
2361 	sqh = kmem_alloc(i, KM_SLEEP);
2362 	sqh->sqb_count = (uchar_t)maxcount;
2363 	sqh->sqb_maxcount = (uchar_t)maxcount;
2364 	sqh->sqb_size = (ushort_t)i;
2365 	sqh->sqb_pexited = 0;
2366 	sqh->sqb_sent = 0;
2367 	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
2368 	for (i = maxcount - 1; i != 0; i--) {
2369 		next = (sigqueue_t *)((uintptr_t)sq + size);
2370 		sq->sq_next = next;
2371 		sq = next;
2372 	}
2373 	sq->sq_next = NULL;
2374 	cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
2375 	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
2376 	return (sqh);
2377 }
2378 
2379 static void sigqrel(sigqueue_t *);
2380 
2381 /*
2382  * Allocate a sigqueue/signotify structure from the per-process
2383  * pre-allocated pool.
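 *
 * The expected calling pattern, as in sigsendproc() above (the pool
 * may be absent or exhausted, so a NULL return must be handled):
 *
 *	mutex_enter(&curproc->p_lock);
 *	sqp = sigqalloc(curproc->p_sigqhdr);
 *	mutex_exit(&curproc->p_lock);
 *	if (sqp == NULL)
 *		return (EAGAIN);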
2384  */
2385 sigqueue_t *
2386 sigqalloc(sigqhdr_t *sqh)
2387 {
2388 	sigqueue_t *sq = NULL;
2389 
2390 	ASSERT(MUTEX_HELD(&curproc->p_lock));
2391 
2392 	if (sqh != NULL) {
2393 		mutex_enter(&sqh->sqb_lock);
2394 		if (sqh->sqb_count > 0) {
2395 			sqh->sqb_count--;
2396 			sq = sqh->sqb_free;
2397 			sqh->sqb_free = sq->sq_next;
2398 			mutex_exit(&sqh->sqb_lock);
2399 			bzero(&sq->sq_info, sizeof (k_siginfo_t));
2400 			sq->sq_backptr = sqh;
2401 			sq->sq_func = sigqrel;
2402 			sq->sq_next = NULL;
2403 			sq->sq_external = 0;
2404 		} else {
2405 			mutex_exit(&sqh->sqb_lock);
2406 		}
2407 	}
2408 	return (sq);
2409 }
2410 
2411 /*
2412  * Return a sigqueue structure back to the pre-allocated pool.
2413  */
2414 static void
2415 sigqrel(sigqueue_t *sq)
2416 {
2417 	sigqhdr_t *sqh;
2418 
2419 	/* make sure that p_lock of the affected process is held */
2420 
2421 	sqh = (sigqhdr_t *)sq->sq_backptr;
2422 	mutex_enter(&sqh->sqb_lock);
2423 	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
2424 		mutex_exit(&sqh->sqb_lock);
2425 		cv_destroy(&sqh->sqb_cv);
2426 		mutex_destroy(&sqh->sqb_lock);
2427 		kmem_free(sqh, sqh->sqb_size);
2428 	} else {
2429 		sqh->sqb_count++;
2430 		sqh->sqb_sent--;
2431 		sq->sq_next = sqh->sqb_free;
2432 		sq->sq_backptr = NULL;
2433 		sqh->sqb_free = sq;
2434 		cv_signal(&sqh->sqb_cv);
2435 		mutex_exit(&sqh->sqb_lock);
2436 	}
2437 }
2438 
2439 /*
2440  * Free up the pre-allocated headers of the sigqueue pool
2441  * and the signotify pool, if possible.
2442  * Called only by the owning process during exec() and exit().
2443  */
2444 void
2445 sigqfree(proc_t *p)
2446 {
2447 	ASSERT(MUTEX_HELD(&p->p_lock));
2448 
2449 	if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
2450 		sigqhdrfree(p->p_sigqhdr);
2451 		p->p_sigqhdr = NULL;
2452 	}
2453 	if (p->p_signhdr != NULL) {	/* signotify pool */
2454 		sigqhdrfree(p->p_signhdr);
2455 		p->p_signhdr = NULL;
2456 	}
2457 }
2458 
2459 /*
2460  * Free up the pre-allocated header and sigq pool if possible.
2461  */
2462 void
2463 sigqhdrfree(sigqhdr_t *sqh)
2464 {
2465 	mutex_enter(&sqh->sqb_lock);
2466 	if (sqh->sqb_sent == 0) {
2467 		mutex_exit(&sqh->sqb_lock);
2468 		cv_destroy(&sqh->sqb_cv);
2469 		mutex_destroy(&sqh->sqb_lock);
2470 		kmem_free(sqh, sqh->sqb_size);
2471 	} else {
2472 		sqh->sqb_pexited = 1;
2473 		mutex_exit(&sqh->sqb_lock);
2474 	}
2475 }
2476 
2477 /*
2478  * Free up a single sigqueue structure.
2479  * No other code should free a sigqueue directly.
2480  */
2481 void
2482 siginfofree(sigqueue_t *sqp)
2483 {
2484 	if (sqp != NULL) {
2485 		if (sqp->sq_func != NULL)
2486 			(sqp->sq_func)(sqp);
2487 		else
2488 			kmem_free(sqp, sizeof (sigqueue_t));
2489 	}
2490 }
2491 
2492 /*
2493  * Generate a synchronous signal caused by a hardware
2494  * condition encountered by an lwp. Called from trap().
2495  */
2496 void
2497 trapsig(k_siginfo_t *ip, int restartable)
2498 {
2499 	proc_t *p = ttoproc(curthread);
2500 	int sig = ip->si_signo;
2501 	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
2502 
2503 	ASSERT(sig > 0 && sig < NSIG);
2504 
2505 	if (curthread->t_dtrace_on)
2506 		dtrace_safe_synchronous_signal();
2507 
2508 	mutex_enter(&p->p_lock);
2509 	schedctl_finish_sigblock(curthread);
2510 	/*
2511 	 * Avoid a possible infinite loop if the lwp is holding the
2512 	 * signal generated by a trap of a restartable instruction or
2513 	 * if the signal so generated is being ignored by the process.
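	 *
	 * Concretely: a restartable fault would simply recur on return
	 * if its signal were left held or ignored, so the code below
	 * resets the disposition to SIG_DFL and unblocks the signal,
	 * guaranteeing that the lwp makes progress (or is killed by
	 * the default action).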
2514 	 */
2515 	if (restartable &&
2516 	    (sigismember(&curthread->t_hold, sig) ||
2517 	    p->p_user.u_signal[sig-1] == SIG_IGN)) {
2518 		sigdelset(&curthread->t_hold, sig);
2519 		p->p_user.u_signal[sig-1] = SIG_DFL;
2520 		sigdelset(&p->p_ignore, sig);
2521 	}
2522 	bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
2523 	sigaddqa(p, curthread, sqp);
2524 	mutex_exit(&p->p_lock);
2525 }
2526 
2527 #ifdef _SYSCALL32_IMPL
2528 
2529 /*
2530  * It's tricky to transmit a sigval between 32-bit and 64-bit
2531  * processes, since in the 64-bit world, a pointer and an integer
2532  * are different sizes. Since we're constrained by the standards
2533  * world not to change the types, and it's unclear how useful it is
2534  * to send pointers between address spaces this way, we preserve
2535  * the 'int' interpretation for 32-bit processes interoperating
2536  * with 64-bit processes. The full semantics (pointers or integers)
2537  * are available for N-bit processes interoperating with N-bit
2538  * processes.
2539  */
2540 void
2541 siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
2542 {
2543 	bzero(dest, sizeof (*dest));
2544 
2545 	/*
2546 	 * The absolute minimum content is si_signo and si_code.
2547 	 */
2548 	dest->si_signo = src->si_signo;
2549 	if ((dest->si_code = src->si_code) == SI_NOINFO)
2550 		return;
2551 
2552 	/*
2553 	 * A siginfo generated by user level is structured
2554 	 * differently from one generated by the kernel.
2555 	 */
2556 	if (SI_FROMUSER(src)) {
2557 		dest->si_pid = src->si_pid;
2558 		dest->si_ctid = src->si_ctid;
2559 		dest->si_zoneid = src->si_zoneid;
2560 		dest->si_uid = src->si_uid;
2561 		if (SI_CANQUEUE(src->si_code))
2562 			dest->si_value.sival_int =
2563 			    (int32_t)src->si_value.sival_int;
2564 		return;
2565 	}
2566 
2567 	dest->si_errno = src->si_errno;
2568 
2569 	switch (src->si_signo) {
2570 	default:
2571 		dest->si_pid = src->si_pid;
2572 		dest->si_ctid = src->si_ctid;
2573 		dest->si_zoneid = src->si_zoneid;
2574 		dest->si_uid = src->si_uid;
2575 		dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
2576 		break;
2577 	case SIGCLD:
2578 		dest->si_pid = src->si_pid;
2579 		dest->si_ctid = src->si_ctid;
2580 		dest->si_zoneid = src->si_zoneid;
2581 		dest->si_status = src->si_status;
2582 		dest->si_stime = src->si_stime;
2583 		dest->si_utime = src->si_utime;
2584 		break;
2585 	case SIGSEGV:
2586 	case SIGBUS:
2587 	case SIGILL:
2588 	case SIGTRAP:
2589 	case SIGFPE:
2590 	case SIGEMT:
2591 		dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
2592 		dest->si_trapno = src->si_trapno;
2593 		dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
2594 		break;
2595 	case SIGPOLL:
2596 	case SIGXFSZ:
2597 		dest->si_fd = src->si_fd;
2598 		dest->si_band = src->si_band;
2599 		break;
2600 	case SIGPROF:
2601 		dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
2602 		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2603 		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2604 		dest->si_syscall = src->si_syscall;
2605 		dest->si_nsysarg = src->si_nsysarg;
2606 		dest->si_fault = src->si_fault;
2607 		break;
2608 	}
2609 }
2610 
2611 void
2612 siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
2613 {
2614 	bzero(dest, sizeof (*dest));
2615 
2616 	/*
2617 	 * The absolute minimum content is si_signo and si_code.
2618 	 */
2619 	dest->si_signo = src->si_signo;
2620 	if ((dest->si_code = src->si_code) == SI_NOINFO)
2621 		return;
2622 
2623 	/*
2624 	 * A siginfo generated by user level is structured
2625 	 * differently from one generated by the kernel.
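	 * As in siginfo_kto32() above, only the 'int' interpretation
	 * of a queued sigval is preserved across the conversion.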
2626 	 */
2627 	if (SI_FROMUSER(src)) {
2628 		dest->si_pid = src->si_pid;
2629 		dest->si_ctid = src->si_ctid;
2630 		dest->si_zoneid = src->si_zoneid;
2631 		dest->si_uid = src->si_uid;
2632 		if (SI_CANQUEUE(src->si_code))
2633 			dest->si_value.sival_int =
2634 			    (int)src->si_value.sival_int;
2635 		return;
2636 	}
2637 
2638 	dest->si_errno = src->si_errno;
2639 
2640 	switch (src->si_signo) {
2641 	default:
2642 		dest->si_pid = src->si_pid;
2643 		dest->si_ctid = src->si_ctid;
2644 		dest->si_zoneid = src->si_zoneid;
2645 		dest->si_uid = src->si_uid;
2646 		dest->si_value.sival_int = (int)src->si_value.sival_int;
2647 		break;
2648 	case SIGCLD:
2649 		dest->si_pid = src->si_pid;
2650 		dest->si_ctid = src->si_ctid;
2651 		dest->si_zoneid = src->si_zoneid;
2652 		dest->si_status = src->si_status;
2653 		dest->si_stime = src->si_stime;
2654 		dest->si_utime = src->si_utime;
2655 		break;
2656 	case SIGSEGV:
2657 	case SIGBUS:
2658 	case SIGILL:
2659 	case SIGTRAP:
2660 	case SIGFPE:
2661 	case SIGEMT:
2662 		dest->si_addr = (void *)(uintptr_t)src->si_addr;
2663 		dest->si_trapno = src->si_trapno;
2664 		dest->si_pc = (void *)(uintptr_t)src->si_pc;
2665 		break;
2666 	case SIGPOLL:
2667 	case SIGXFSZ:
2668 		dest->si_fd = src->si_fd;
2669 		dest->si_band = src->si_band;
2670 		break;
2671 	case SIGPROF:
2672 		dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
2673 		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2674 		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2675 		dest->si_syscall = src->si_syscall;
2676 		dest->si_nsysarg = src->si_nsysarg;
2677 		dest->si_fault = src->si_fault;
2678 		break;
2679 	}
2680 }
2681 
2682 #endif	/* _SYSCALL32_IMPL */
2683 