1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 28 /* All Rights Reserved */ 29 30 31 #pragma ident "%Z%%M% %I% %E% SMI" 32 33 #include <sys/param.h> 34 #include <sys/types.h> 35 #include <sys/bitmap.h> 36 #include <sys/sysmacros.h> 37 #include <sys/systm.h> 38 #include <sys/cred.h> 39 #include <sys/user.h> 40 #include <sys/errno.h> 41 #include <sys/proc.h> 42 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */ 43 #include <sys/signal.h> 44 #include <sys/siginfo.h> 45 #include <sys/fault.h> 46 #include <sys/ucontext.h> 47 #include <sys/procfs.h> 48 #include <sys/wait.h> 49 #include <sys/class.h> 50 #include <sys/mman.h> 51 #include <sys/procset.h> 52 #include <sys/kmem.h> 53 #include <sys/cpuvar.h> 54 #include <sys/prsystm.h> 55 #include <sys/debug.h> 56 #include <vm/as.h> 57 #include <sys/bitmap.h> 58 #include <c2/audit.h> 59 #include <sys/core.h> 60 #include <sys/schedctl.h> 61 #include <sys/contract/process_impl.h> 62 #include <sys/dtrace.h> 63 #include <sys/sdt.h> 64 65 /* 
MUST be contiguous */
k_sigset_t nullsmask = {0, 0};		/* the empty signal set */

k_sigset_t fillset = {FILLSET0, FILLSET1};	/* every signal */

/*
 * Signals that can never be blocked by schedctl fast-path blocking
 * (see signal_is_blocked(), below).
 */
k_sigset_t cantmask = {CANTMASK0, CANTMASK1};

/*
 * NOTE(review): per the name, handlers for these are presumably not
 * reset to SIG_DFL on delivery; not referenced in this file -- confirm
 * at the point of use (psig()).
 */
k_sigset_t cantreset = {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0};

/*
 * NOTE(review): per the name, signals whose default disposition is to
 * be ignored; not referenced in this file -- confirm at point of use.
 */
k_sigset_t ignoredefault = {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
			|sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
			(sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
			|sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
			|sigmask(SIGJVM2))};

/*
 * Signals whose default action stops the process; sigtoproc() and
 * isjobstop() use membership here to recognize job-control stops.
 */
k_sigset_t stopdefault = {(sigmask(SIGSTOP)|sigmask(SIGTSTP)
			|sigmask(SIGTTOU)|sigmask(SIGTTIN)), 0};

/*
 * NOTE(review): per the name, signals whose default action dumps core;
 * not referenced in this file -- confirm at point of use (core()).
 */
k_sigset_t coredefault = {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)
			|sigmask(SIGIOT)|sigmask(SIGEMT)|sigmask(SIGFPE)
			|sigmask(SIGBUS)|sigmask(SIGSEGV)|sigmask(SIGSYS)
			|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0};

/*
 * Job-control stop signals held across vfork (see SVFORK handling in
 * issig_justlooking()).
 */
k_sigset_t holdvfork = {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)),
			0};

static	int	isjobstop(int);
static	void	post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variables for counting number of user thread stop requests posted.
 * They may not be accurate at some special situation such as that a virtually
 * stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop requests
 * are processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;	/* protects num_utstop/utstop_cv */
void del_one_utstop(void);

/*
 * Send the specified signal to the specified process.
 * Convenience wrapper: takes p->p_lock around sigtoproc().
 */
void
psignal(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, NULL, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 * Convenience wrapper: takes the owning process's p_lock around a
 * thread-directed sigtoproc().
 */
void
tsignal(kthread_t *t, int sig)
{
	proc_t *p = ttoproc(t);

	mutex_enter(&p->p_lock);
	sigtoproc(p, t, sig);
	mutex_exit(&p->p_lock);
}

/*
 * Return non-zero if thread t currently has sig blocked: either the
 * signal is set in the thread's own hold mask (t_hold), or the thread
 * has engaged schedctl fast-path blocking, which blocks everything
 * except the cantmask signals.
 */
int
signal_is_blocked(kthread_t *t, int sig)
{
	return (sigismember(&t->t_hold, sig) ||
	    (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *	the signal is being ignored
 *	the process is single-threaded
 *	the signal is not being traced by /proc
 *	the signal is not blocked by the process
 */
static int
sig_discardable(proc_t *p, int sig)
{
	kthread_t *t = p->p_tlist;

	return (t == NULL ||		/* if zombie or ... */
	    (sigismember(&p->p_ignore, sig) &&	/* signal is ignored */
	    t->t_forw == t &&			/* and single-threaded */
	    !tracing(p, sig) &&			/* and no /proc tracing */
	    !signal_is_blocked(t, sig)));	/* and signal not blocked */
}

/*
 * Return true if this thread is going to eat this signal soon.
 * Note that, if the signal is SIGKILL, we force stopped threads to be
 * set running (to make SIGKILL be a sure kill), but only if the process
 * is not currently locked by /proc (the P_PR_LOCK flag).  Code in /proc
 * relies on the fact that a process will not change shape while P_PR_LOCK
 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
 * ensure that the process is not locked by /proc, but prbarrier() drops
 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
 */
int
eat_signal(kthread_t *t, int sig)
{
	int rval = 0;
	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * Do not do anything if the target thread has the signal blocked.
	 */
	if (!signal_is_blocked(t, sig)) {
		t->t_sig_check = 1;	/* have thread do an issig */
		if (t->t_state == TS_SLEEP && (t->t_flag & T_WAKEABLE)) {
			/* wake an interruptibly-sleeping thread */
			setrun_locked(t);
			rval = 1;
		} else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
		    !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
			/* make SIGKILL a sure kill (see comment above) */
			ttoproc(t)->p_stopsig = 0;
			t->t_dtrace_stop = 0;
			t->t_schedflag |= TS_XSTART | TS_PSTART;
			setrun_locked(t);
		} else if (t != curthread && t->t_state == TS_ONPROC) {
			aston(t);	/* make it do issig promptly */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
			rval = 1;
		} else if (t->t_state == TS_RUN) {
			rval = 1;
		}
	}

	return (rval);
}

/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 * Caller must hold p->p_lock (asserted below).
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
	kthread_t *tt;
	/* "external": sender is not a system process and is in a
	 * different process contract than the target. */
	int ext = !(curproc->p_flag & SSYS) &&
	    (curproc->p_ct_process != p->p_ct_process);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (sig <= 0 || sig >= NSIG)
		return;

	/*
	 * Regardless of origin or directedness,
	 * SIGKILL kills all lwps in the process immediately
	 * and jobcontrol signals affect all lwps in the process.
	 */
	if (sig == SIGKILL) {
		p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
		t = NULL;
	} else if (sig == SIGCONT) {
		/*
		 * The SSCONT flag will remain set until a stopping
		 * signal comes in (below).  This is harmless.
		 */
		p->p_flag |= SSCONT;
		/* discard all pending job-control stop signals ... */
		sigdelq(p, NULL, SIGSTOP);
		sigdelq(p, NULL, SIGTSTP);
		sigdelq(p, NULL, SIGTTOU);
		sigdelq(p, NULL, SIGTTIN);
		sigdiffset(&p->p_sig, &stopdefault);
		sigdiffset(&p->p_extsig, &stopdefault);
		p->p_stopsig = 0;
		/* ... both process-wide and on every lwp ... */
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGSTOP);
				sigdelq(p, tt, SIGTSTP);
				sigdelq(p, tt, SIGTTOU);
				sigdelq(p, tt, SIGTTIN);
				sigdiffset(&tt->t_sig, &stopdefault);
				sigdiffset(&tt->t_extsig, &stopdefault);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
		/* ... and restart any lwps stopped for job control. */
		if ((tt = p->p_tlist) != NULL) {
			do {
				thread_lock(tt);
				if (tt->t_state == TS_STOPPED &&
				    tt->t_whystop == PR_JOBCONTROL) {
					tt->t_schedflag |= TS_XSTART;
					setrun_locked(tt);
				}
				thread_unlock(tt);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	} else if (sigismember(&stopdefault, sig)) {
		/*
		 * This test has a race condition which we can't fix:
		 * By the time the stopping signal is received by
		 * the target process/thread, the signal handler
		 * and/or the detached state might have changed.
		 */
		if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
			p->p_flag &= ~SSCONT;
		/* a stopping signal cancels any pending SIGCONT */
		sigdelq(p, NULL, SIGCONT);
		sigdelset(&p->p_sig, SIGCONT);
		sigdelset(&p->p_extsig, SIGCONT);
		if ((tt = p->p_tlist) != NULL) {
			do {
				sigdelq(p, tt, SIGCONT);
				sigdelset(&tt->t_sig, SIGCONT);
				sigdelset(&tt->t_extsig, SIGCONT);
			} while ((tt = tt->t_forw) != p->p_tlist);
		}
	}

	if (sig_discardable(p, sig)) {
		DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
		    proc_t *, p, int, sig);
		return;
	}

	if (t != NULL) {
		/*
		 * This is a directed signal, wake up the lwp.
		 */
		sigaddset(&t->t_sig, sig);
		if (ext)
			sigaddset(&t->t_extsig, sig);
		thread_lock(t);
		(void) eat_signal(t, sig);
		thread_unlock(t);
		DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
	} else if ((tt = p->p_tlist) != NULL) {
		/*
		 * Make sure that some lwp that already exists
		 * in the process fields the signal soon.
		 * Wake up an interruptibly sleeping lwp if necessary.
		 */
		int su = 0;	/* count of suspended lwps (SIGKILL only) */

		sigaddset(&p->p_sig, sig);
		if (ext)
			sigaddset(&p->p_extsig, sig);
		do {
			thread_lock(tt);
			if (eat_signal(tt, sig)) {
				thread_unlock(tt);
				break;
			}
			if (sig == SIGKILL && SUSPENDED(tt))
				su++;
			thread_unlock(tt);
		} while ((tt = tt->t_forw) != p->p_tlist);
		/*
		 * If the process is deadlocked, make somebody run and die.
		 * (All lwps suspended, none locked by /proc; tt has wrapped
		 * back around to p->p_tlist here.)
		 */
		if (sig == SIGKILL && p->p_stat != SIDL &&
		    p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
		    !(p->p_proc_flag & P_PR_LOCK)) {
			thread_lock(tt);
			p->p_lwprcnt++;
			tt->t_schedflag |= TS_CSTART;
			setrun_locked(tt);
			thread_unlock(tt);
		}

		DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
	}
}

/*
 * If sig is a default-disposition job-control stop signal for the
 * current process, perform the job-control stop (unless SIGCONT has
 * intervened, the process group is orphaned, or this is the agent lwp)
 * and notify the parent of the eventual continue; return 1.
 * Return 0 if sig is not a job-control stop for this process.
 * Called with p->p_lock held; drops and reacquires it around the
 * parent notification.
 * NOTE(review): uses u.u_signal where sigtoproc() uses PTOU(p)->u_signal;
 * both refer to the current process here.
 */
static int
isjobstop(int sig)
{
	proc_t *p = ttoproc(curthread);

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (u.u_signal[sig-1] == SIG_DFL && sigismember(&stopdefault, sig)) {
		/*
		 * If SIGCONT has been posted since we promoted this signal
		 * from pending to current, then don't do a jobcontrol stop.
		 */
		if (!(p->p_flag & SSCONT) &&
		    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
		    curthread != p->p_agenttp) {
			sigqueue_t *sqp;

			stop(PR_JOBCONTROL, sig);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * Only the first lwp to continue notifies the parent.
			 */
			if (p->p_pidflag & CLDCONT)
				siginfofree(sqp);
			else {
				p->p_pidflag |= CLDCONT;
				p->p_wcode = CLD_CONTINUED;
				p->p_wdata = SIGCONT;
				sigcld(p, sqp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
		return (1);
	}
	return (0);
}

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 * stop the process if a stop has been requested or if a traced signal
 * is pending.
 *
 * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 * a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
	ASSERT(why == FORREAL || why == JUSTLOOKING);

	return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}


static int
issig_justlooking(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	k_sigset_t set;

	/*
	 * This function answers the question:
	 * "Is there any reason to call issig_forreal()?"
	 *
	 * We have to answer the question w/o grabbing any locks
	 * because we are (most likely) being called after we
	 * put ourselves on the sleep queue.
	 */

	if (t->t_dtrace_stop | t->t_dtrace_sig)
		return (1);

	/*
	 * Another piece of complexity in this process.  When single-stepping a
	 * process, we don't want an intervening signal or TP_PAUSE request to
	 * suspend the current thread.  Otherwise, the controlling process will
	 * hang beacuse we will be stopped with TS_PSTART set in t_schedflag.
	 * We will trigger any remaining signals when we re-enter the kernel on
	 * the single step trap.
	 */
	if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
		return (0);

	/*
	 * Conservative "might have a reason" checks: forced return from
	 * sleep, process killed/exiting, hold requests, stop requests,
	 * or a current signal already established.
	 */
	if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
	    (p->p_flag & (SEXITLWPS|SKILLED)) ||
	    (!lwp->lwp_nostop_r && ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
	    (t->t_proc_flag & TP_HOLDLWP))) ||
	    (!lwp->lwp_nostop && (p->p_stopsig | (t->t_proc_flag &
	    (TP_PRSTOP|TP_CHKPT|TP_PAUSE)))) ||
	    lwp->lwp_cursig)
		return (1);

	if (p->p_flag & SVFWAIT)
		return (0);
	/* compute the effective pending-and-unblocked set */
	set = p->p_sig;
	sigorset(&set, &t->t_sig);
	if (schedctl_sigblock(t))	/* all blockable signals blocked */
		sigandset(&set, &cantmask);
	else
		sigdiffset(&set, &t->t_hold);
	if (p->p_flag & SVFORK)
		sigdiffset(&set, &holdvfork);

	if (!sigisempty(&set)) {
		int sig;

		for (sig = 1; sig < NSIG; sig++) {
			if (sigismember(&set, sig) &&
			    (tracing(p, sig) ||
			    !sigismember(&p->p_ignore, sig))) {
				/*
				 * Don't promote a signal that will stop
				 * the process when lwp_nostop is set.
				 */
				if (!lwp->lwp_nostop ||
				    u.u_signal[sig-1] != SIG_DFL ||
				    !sigismember(&stopdefault, sig))
					return (1);
			}
		}
	}

	return (0);
}

static int
issig_forreal(void)
{
	int sig = 0, ext = 0;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int toproc = 0;		/* found signal was process-wide, not lwp */
	int sigcld_found = 0;
	int nostop_break = 0;	/* broke out because lwp_nostop was set */

	ASSERT(t->t_state == TS_ONPROC);

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);

	if (t->t_dtrace_stop | t->t_dtrace_sig) {
		if (t->t_dtrace_stop) {
			/*
			 * If DTrace's "stop" action has been invoked on us,
			 * set TP_PRSTOP.
			 */
			t->t_proc_flag |= TP_PRSTOP;
		}

		if (t->t_dtrace_sig != 0) {
			k_siginfo_t info;

			/*
			 * Post the signal generated as the result of
			 * DTrace's "raise" action as a normal signal before
			 * the full-fledged signal checking begins.
			 */
			bzero(&info, sizeof (info));
			info.si_signo = t->t_dtrace_sig;
			info.si_code = SI_DTRACE;

			sigaddq(p, NULL, &info, KM_NOSLEEP);

			t->t_dtrace_sig = 0;
		}
	}

	for (;;) {
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			lwp->lwp_cursig = sig = SIGKILL;
			lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
			break;
		}

		/*
		 * Another piece of complexity in this process.  When
		 * single-stepping a process, we don't want an intervening
		 * signal or TP_PAUSE request to suspend the current thread.
		 * Otherwise, the controlling process will hang beacuse we will
		 * be stopped with TS_PSTART set in t_schedflag.  We will
		 * trigger any remaining signals when we re-enter the kernel on
		 * the single step trap.
		 */
		if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
			sig = 0;
			break;
		}

		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}

		if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
			if ((sig = lwp->lwp_cursig) != 0) {
				/*
				 * Make sure we call ISSIG() in post_syscall()
				 * to re-validate this current signal.
				 */
				t->t_sig_check = 1;
			}
			break;
		}

		/*
		 * If the request is PR_CHECKPOINT, ignore the rest of signals
		 * or requests.  Honor other stop requests or signals later.
		 * Go back to top of loop here to check if an exit or hold
		 * event has occurred while stopped.
		 */
		if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}

		/*
		 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
		 * with signals or /proc.  Another lwp is executing fork1(),
		 * or is undergoing watchpoint activity (remapping a page),
		 * or is executing lwp_suspend() on this lwp.
		 * Again, go back to top of loop to check if an exit
		 * or hold event has occurred while stopped.
		 * We explicitly allow this form of stopping of one
		 * lwp in a process by another lwp in the same process,
		 * even if lwp->lwp_nostop is set, because otherwise a
		 * process can become deadlocked on a fork1().
		 * Allow this only if lwp_nostop_r is not set,
		 * to avoid a recursive call to prstop().
		 */
		if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop_r) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}

		/*
		 * Honor requested stop before dealing with the
		 * current signal; a debugger may change it.
		 * Do not want to go back to loop here since this is a special
		 * stop that means: make incremental progress before the next
		 * stop.  The danger is that returning to top of loop would most
		 * likely drop the thread right back here to stop soon after it
		 * was continued, violating the incremental progress request.
		 */
		if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
			stop(PR_REQUESTED, 0);

		/*
		 * If a debugger wants us to take a signal it will have
		 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
		 * or if it's being ignored, we continue on looking for another
		 * signal.  Otherwise we return the specified signal, provided
		 * it's not a signal that causes a job control stop.
		 *
		 * When stopped on PR_JOBCONTROL, there is no current
		 * signal; we cancel lwp->lwp_cursig temporarily before
		 * calling isjobstop().  The current signal may be reset
		 * by a debugger while we are stopped in isjobstop().
		 */
		if ((sig = lwp->lwp_cursig) != 0) {
			ext = lwp->lwp_extsig;
			lwp->lwp_cursig = 0;
			lwp->lwp_extsig = 0;
			if (!sigismember(&p->p_ignore, sig) &&
			    !isjobstop(sig)) {
				if (p->p_flag & (SEXITLWPS|SKILLED)) {
					sig = SIGKILL;
					ext = (p->p_flag & SEXTKILLED) != 0;
				}
				lwp->lwp_cursig = (uchar_t)sig;
				lwp->lwp_extsig = (uchar_t)ext;
				break;
			}
			/*
			 * The signal is being ignored or it caused a
			 * job-control stop.  If another current signal
			 * has not been established, return the current
			 * siginfo, if any, to the memory manager.
			 */
			if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
				siginfofree(lwp->lwp_curinfo);
				lwp->lwp_curinfo = NULL;
			}
			/*
			 * Loop around again in case we were stopped
			 * on a job control signal and a /proc stop
			 * request was posted or another current signal
			 * was established while we were stopped.
			 */
			continue;
		}

		if (p->p_stopsig && !lwp->lwp_nostop &&
		    curthread != p->p_agenttp) {
			/*
			 * Some lwp in the process has already stopped
			 * showing PR_JOBCONTROL.  This is a stop in
			 * sympathy with the other lwp, even if this
			 * lwp is blocking the stopping signal.
			 */
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}

		/*
		 * Loop on the pending signals until we find a
		 * non-held signal that is traced or not ignored.
		 * First check the signals pending for the lwp,
		 * then the signals pending for the process as a whole.
		 */
		for (;;) {
			k_sigset_t tsig;

			tsig = t->t_sig;
			if ((sig = fsig(&tsig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 0;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&t->t_extsig, sig))
						ext = 1;
					break;
				}
				/* ignored and untraced: silently consume */
				sigdelset(&t->t_sig, sig);
				sigdelset(&t->t_extsig, sig);
				sigdelq(p, t, sig);
			} else if ((sig = fsig(&p->p_sig, t)) != 0) {
				if (sig == SIGCLD)
					sigcld_found = 1;
				toproc = 1;
				if (tracing(p, sig) ||
				    !sigismember(&p->p_ignore, sig)) {
					if (sigismember(&p->p_extsig, sig))
						ext = 1;
					break;
				}
				/* ignored and untraced: silently consume */
				sigdelset(&p->p_sig, sig);
				sigdelset(&p->p_extsig, sig);
				sigdelq(p, NULL, sig);
			} else {
				/* no signal was found */
				break;
			}
		}

		if (sig == 0) {	/* no signal was found */
			if (p->p_flag & (SEXITLWPS|SKILLED)) {
				lwp->lwp_cursig = SIGKILL;
				sig = SIGKILL;
				ext = (p->p_flag & SEXTKILLED) != 0;
			}
			break;
		}

		/*
		 * If we have been informed not to stop (i.e., we are being
		 * called from within a network operation), then don't promote
		 * the signal at this time, just return the signal number.
		 * We will call issig() again later when it is safe.
		 *
		 * fsig() does not return a jobcontrol stopping signal
		 * with a default action of stopping the process if
		 * lwp_nostop is set, so we won't be causing a bogus
		 * EINTR by this action.  (Such a signal is eaten by
		 * isjobstop() when we loop around to do final checks.)
		 */
		if (lwp->lwp_nostop) {
			nostop_break = 1;
			break;
		}

		/*
		 * Promote the signal from pending to current.
		 *
		 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
		 * if no siginfo_t exists for this signal.
		 */
		lwp->lwp_cursig = (uchar_t)sig;
		lwp->lwp_extsig = (uchar_t)ext;
		t->t_sig_check = 1;	/* so post_syscall will see signal */
		ASSERT(lwp->lwp_curinfo == NULL);
		sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

		if (tracing(p, sig))
			stop(PR_SIGNALLED, sig);

		/*
		 * Loop around to check for requested stop before
		 * performing the usual current-signal actions.
		 */
	}

	mutex_exit(&p->p_lock);

	/*
	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
	 * Don't do it if we are returning SIGCLD and the signal
	 * handler will be reset by psig(); this enables reliable
	 * delivery of SIGCLD even when using the old, broken
	 * signal() interface for setting the signal handler.
	 */
	if (sigcld_found &&
	    (sig != SIGCLD || !sigismember(&u.u_sigresethand, SIGCLD)))
		sigcld_repost();

	if (sig != 0)
		(void) undo_watch_step(NULL);

	/*
	 * If we have been blocked since the p_lock was dropped off
	 * above, then this promoted signal might have been handled
	 * already when we were on the way back from sleep queue, so
	 * just ignore it.
	 * If we have been informed not to stop, just return the signal
	 * number. Also see comments above.
	 */
	if (!nostop_break) {
		sig = lwp->lwp_cursig;
	}

	return (sig != 0);
}

/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
796 */ 797 int 798 jobstopped(proc_t *p) 799 { 800 kthread_t *t; 801 802 ASSERT(MUTEX_HELD(&p->p_lock)); 803 804 if ((t = p->p_tlist) == NULL) 805 return (0); 806 807 do { 808 thread_lock(t); 809 /* ignore current, zombie and suspended lwps in the test */ 810 if (!(t == curthread || t->t_state == TS_ZOMB || 811 SUSPENDED(t)) && 812 (t->t_state != TS_STOPPED || 813 t->t_whystop != PR_JOBCONTROL)) { 814 thread_unlock(t); 815 return (0); 816 } 817 thread_unlock(t); 818 } while ((t = t->t_forw) != p->p_tlist); 819 820 return (1); 821 } 822 823 /* 824 * Put ourself (curthread) into the stopped state and notify tracers. 825 */ 826 void 827 stop(int why, int what) 828 { 829 kthread_t *t = curthread; 830 proc_t *p = ttoproc(t); 831 klwp_t *lwp = ttolwp(t); 832 kthread_t *tx; 833 lwpent_t *lep; 834 int procstop; 835 int flags = TS_ALLSTART; 836 hrtime_t stoptime; 837 838 /* 839 * Can't stop a system process. 840 */ 841 if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas) 842 return; 843 844 ASSERT(MUTEX_HELD(&p->p_lock)); 845 846 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) { 847 /* 848 * Don't stop an lwp with SIGKILL pending. 849 * Don't stop if the process or lwp is exiting. 850 */ 851 if (lwp->lwp_cursig == SIGKILL || 852 sigismember(&t->t_sig, SIGKILL) || 853 sigismember(&p->p_sig, SIGKILL) || 854 (t->t_proc_flag & TP_LWPEXIT) || 855 (p->p_flag & (SEXITLWPS|SKILLED))) { 856 p->p_stopsig = 0; 857 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP); 858 return; 859 } 860 } 861 862 /* 863 * Make sure we don't deadlock on a recursive call to prstop(). 864 * prstop() sets the lwp_nostop_r flag and increments lwp_nostop. 865 */ 866 if (lwp->lwp_nostop_r || 867 (lwp->lwp_nostop && 868 (why != PR_SUSPENDED || what != SUSPEND_NORMAL))) 869 return; 870 871 /* 872 * Make sure the lwp is in an orderly state for inspection 873 * by a debugger through /proc or for dumping via core(). 
874 */ 875 schedctl_finish_sigblock(t); 876 t->t_proc_flag |= TP_STOPPING; /* must set before dropping p_lock */ 877 mutex_exit(&p->p_lock); 878 stoptime = gethrtime(); 879 prstop(why, what); 880 (void) undo_watch_step(NULL); 881 mutex_enter(&p->p_lock); 882 ASSERT(t->t_state == TS_ONPROC); 883 884 switch (why) { 885 case PR_CHECKPOINT: 886 /* 887 * The situation may have changed since we dropped 888 * and reacquired p->p_lock. Double-check now 889 * whether we should stop or not. 890 */ 891 if (!(t->t_proc_flag & TP_CHKPT)) { 892 t->t_proc_flag &= ~TP_STOPPING; 893 return; 894 } 895 t->t_proc_flag &= ~TP_CHKPT; 896 flags &= ~TS_RESUME; 897 break; 898 899 case PR_JOBCONTROL: 900 ASSERT(what == SIGSTOP || what == SIGTSTP || 901 what == SIGTTIN || what == SIGTTOU); 902 flags &= ~TS_XSTART; 903 break; 904 905 case PR_SUSPENDED: 906 ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE); 907 /* 908 * The situation may have changed since we dropped 909 * and reacquired p->p_lock. Double-check now 910 * whether we should stop or not. 911 */ 912 if (what == SUSPEND_PAUSE) { 913 if (!(t->t_proc_flag & TP_PAUSE)) { 914 t->t_proc_flag &= ~TP_STOPPING; 915 return; 916 } 917 flags &= ~TS_UNPAUSE; 918 } else { 919 if (!((t->t_proc_flag & TP_HOLDLWP) || 920 (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) { 921 t->t_proc_flag &= ~TP_STOPPING; 922 return; 923 } 924 /* 925 * If SHOLDFORK is in effect and we are stopping 926 * while asleep (not at the top of the stack), 927 * we return now to allow the hold to take effect 928 * when we reach the top of the kernel stack. 929 */ 930 if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) { 931 t->t_proc_flag &= ~TP_STOPPING; 932 return; 933 } 934 flags &= ~TS_CSTART; 935 } 936 break; 937 938 default: /* /proc stop */ 939 flags &= ~TS_PSTART; 940 /* 941 * Do synchronous stop unless the async-stop flag is set. 942 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set, 943 * then no debugger is present and we also do synchronous stop. 
944 */ 945 if ((why != PR_REQUESTED || t->t_dtrace_stop) && 946 !(p->p_proc_flag & P_PR_ASYNC)) { 947 int notify; 948 949 for (tx = t->t_forw; tx != t; tx = tx->t_forw) { 950 notify = 0; 951 thread_lock(tx); 952 if (ISTOPPED(tx) || 953 (tx->t_proc_flag & TP_PRSTOP)) { 954 thread_unlock(tx); 955 continue; 956 } 957 tx->t_proc_flag |= TP_PRSTOP; 958 tx->t_sig_check = 1; 959 if (tx->t_state == TS_SLEEP && 960 (tx->t_flag & T_WAKEABLE)) { 961 /* 962 * Don't actually wake it up if it's 963 * in one of the lwp_*() syscalls. 964 * Mark it virtually stopped and 965 * notify /proc waiters (below). 966 */ 967 if (tx->t_wchan0 == NULL) 968 setrun_locked(tx); 969 else { 970 tx->t_proc_flag |= TP_PRVSTOP; 971 tx->t_stoptime = stoptime; 972 notify = 1; 973 } 974 } 975 /* 976 * force the thread into the kernel 977 * if it is not already there. 978 */ 979 if (tx->t_state == TS_ONPROC && 980 tx->t_cpu != CPU) 981 poke_cpu(tx->t_cpu->cpu_id); 982 thread_unlock(tx); 983 lep = p->p_lwpdir[tx->t_dslot].ld_entry; 984 if (notify && lep->le_trace) 985 prnotify(lep->le_trace); 986 } 987 /* 988 * We do this just in case one of the threads we asked 989 * to stop is in holdlwps() (called from cfork()) or 990 * lwp_suspend(). 991 */ 992 cv_broadcast(&p->p_holdlwps); 993 } 994 break; 995 } 996 997 t->t_stoptime = stoptime; 998 999 if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) { 1000 /* 1001 * Determine if the whole process is jobstopped. 1002 */ 1003 if (jobstopped(p)) { 1004 sigqueue_t *sqp; 1005 int sig; 1006 1007 if ((sig = p->p_stopsig) == 0) 1008 p->p_stopsig = (uchar_t)(sig = what); 1009 mutex_exit(&p->p_lock); 1010 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 1011 mutex_enter(&pidlock); 1012 /* 1013 * The last lwp to stop notifies the parent. 1014 * Turn off the CLDCONT flag now so the first 1015 * lwp to continue knows what to do. 
1016 */ 1017 p->p_pidflag &= ~CLDCONT; 1018 p->p_wcode = CLD_STOPPED; 1019 p->p_wdata = sig; 1020 sigcld(p, sqp); 1021 /* 1022 * Grab p->p_lock before releasing pidlock so the 1023 * parent and the child don't have a race condition. 1024 */ 1025 mutex_enter(&p->p_lock); 1026 mutex_exit(&pidlock); 1027 p->p_stopsig = 0; 1028 } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) { 1029 /* 1030 * Set p->p_stopsig and wake up sleeping lwps 1031 * so they will stop in sympathy with this lwp. 1032 */ 1033 p->p_stopsig = (uchar_t)what; 1034 pokelwps(p); 1035 /* 1036 * We do this just in case one of the threads we asked 1037 * to stop is in holdlwps() (called from cfork()) or 1038 * lwp_suspend(). 1039 */ 1040 cv_broadcast(&p->p_holdlwps); 1041 } 1042 } 1043 1044 if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) { 1045 /* 1046 * Do process-level notification when all lwps are 1047 * either stopped on events of interest to /proc 1048 * or are stopped showing PR_SUSPENDED or are zombies. 1049 */ 1050 procstop = 1; 1051 for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) { 1052 if (VSTOPPED(tx)) 1053 continue; 1054 thread_lock(tx); 1055 switch (tx->t_state) { 1056 case TS_ZOMB: 1057 break; 1058 case TS_STOPPED: 1059 /* neither ISTOPPED nor SUSPENDED? */ 1060 if ((tx->t_schedflag & 1061 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) == 1062 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) 1063 procstop = 0; 1064 break; 1065 case TS_SLEEP: 1066 /* not paused for watchpoints? 
*/ 1067 if (!(tx->t_flag & T_WAKEABLE) || 1068 tx->t_wchan0 == NULL || 1069 !(tx->t_proc_flag & TP_PAUSE)) 1070 procstop = 0; 1071 break; 1072 default: 1073 procstop = 0; 1074 break; 1075 } 1076 thread_unlock(tx); 1077 } 1078 if (procstop) { 1079 /* there must not be any remapped watched pages now */ 1080 ASSERT(p->p_mapcnt == 0); 1081 if (p->p_proc_flag & P_PR_PTRACE) { 1082 /* ptrace() compatibility */ 1083 mutex_exit(&p->p_lock); 1084 mutex_enter(&pidlock); 1085 p->p_wcode = CLD_TRAPPED; 1086 p->p_wdata = (why == PR_SIGNALLED)? 1087 what : SIGTRAP; 1088 cv_broadcast(&p->p_parent->p_cv); 1089 /* 1090 * Grab p->p_lock before releasing pidlock so 1091 * parent and child don't have a race condition. 1092 */ 1093 mutex_enter(&p->p_lock); 1094 mutex_exit(&pidlock); 1095 } 1096 if (p->p_trace) /* /proc */ 1097 prnotify(p->p_trace); 1098 cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */ 1099 cv_broadcast(&p->p_holdlwps); /* holdwatch() */ 1100 } 1101 if (why != PR_SUSPENDED) { 1102 lep = p->p_lwpdir[t->t_dslot].ld_entry; 1103 if (lep->le_trace) /* /proc */ 1104 prnotify(lep->le_trace); 1105 /* 1106 * Special notification for creation of the agent lwp. 1107 */ 1108 if (t == p->p_agenttp && 1109 (t->t_proc_flag & TP_PRSTOP) && 1110 p->p_trace) 1111 prnotify(p->p_trace); 1112 /* 1113 * The situation may have changed since we dropped 1114 * and reacquired p->p_lock. Double-check now 1115 * whether we should stop or not. 1116 */ 1117 if (!(t->t_proc_flag & TP_STOPPING)) { 1118 if (t->t_proc_flag & TP_PRSTOP) 1119 t->t_proc_flag |= TP_STOPPING; 1120 } 1121 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP); 1122 prnostep(lwp); 1123 } 1124 } 1125 1126 if (why == PR_SUSPENDED) { 1127 1128 /* 1129 * We always broadcast in the case of SUSPEND_PAUSE. This is 1130 * because checks for TP_PAUSE take precedence over checks for 1131 * SHOLDWATCH. 
If a thread is trying to stop because of 1132 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be 1133 * waiting for the rest of the threads to enter a stopped state. 1134 * If we are stopping for a SUSPEND_PAUSE, we may be the last 1135 * lwp and not know it, so broadcast just in case. 1136 */ 1137 if (what == SUSPEND_PAUSE || 1138 --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP)) 1139 cv_broadcast(&p->p_holdlwps); 1140 1141 } 1142 1143 /* 1144 * Need to do this here (rather than after the thread is officially 1145 * stopped) because we can't call mutex_enter from a stopped thread. 1146 */ 1147 if (why == PR_CHECKPOINT) 1148 del_one_utstop(); 1149 1150 thread_lock(t); 1151 ASSERT((t->t_schedflag & TS_ALLSTART) == 0); 1152 t->t_schedflag |= flags; 1153 t->t_whystop = (short)why; 1154 t->t_whatstop = (short)what; 1155 CL_STOP(t, why, what); 1156 (void) new_mstate(t, LMS_STOPPED); 1157 thread_stop(t); /* set stop state and drop lock */ 1158 1159 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) { 1160 /* 1161 * We may have gotten a SIGKILL or a SIGCONT when 1162 * we released p->p_lock; make one last check. 1163 * Also check for a /proc run-on-last-close. 1164 */ 1165 if (sigismember(&t->t_sig, SIGKILL) || 1166 sigismember(&p->p_sig, SIGKILL) || 1167 (t->t_proc_flag & TP_LWPEXIT) || 1168 (p->p_flag & (SEXITLWPS|SKILLED))) { 1169 p->p_stopsig = 0; 1170 thread_lock(t); 1171 t->t_schedflag |= TS_XSTART | TS_PSTART; 1172 setrun_locked(t); 1173 thread_unlock_nopreempt(t); 1174 } else if (why == PR_JOBCONTROL) { 1175 if (p->p_flag & SSCONT) { 1176 /* 1177 * This resulted from a SIGCONT posted 1178 * while we were not holding p->p_lock. 1179 */ 1180 p->p_stopsig = 0; 1181 thread_lock(t); 1182 t->t_schedflag |= TS_XSTART; 1183 setrun_locked(t); 1184 thread_unlock_nopreempt(t); 1185 } 1186 } else if (!(t->t_proc_flag & TP_STOPPING)) { 1187 /* 1188 * This resulted from a /proc run-on-last-close. 
 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}

/*
 * Interface for resetting user thread stop count.
 * num_utstop is protected by thread_stop_lock throughout.
 */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}

/*
 * Interface for cancelling a user thread stop request.
 * Wakes utstop_timedwait() waiters when the last request is cancelled.
 */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}

/*
 * Interface to wait for all user threads to be stopped.
 * Bounded wait: returns after 'ticks' even if requests remain outstanding.
 */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_timedwait(&utstop_cv, &thread_stop_lock,
		    ticks + lbolt);
	mutex_exit(&thread_stop_lock);
}

/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = u.u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		/* signal is now ignored or job-control default: discard it */
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * save siginfo pointer here, in case the
		 * the signal's reset bit is on
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			if (sqp) {
				bcopy(&sqp->sq_info, &lwp->lwp_siginfo,
				    sizeof (k_siginfo_t));
				sip = &lwp->lwp_siginfo;
			} else if (sig == SIGPROF &&
			    t->t_rprof != NULL &&
			    t->t_rprof->rp_anystate &&
			    lwp->lwp_siginfo.si_signo == SIGPROF) {
				sip = &lwp->lwp_siginfo;
			}
		}

		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		/* block the handler's sigaction mask, plus sig itself */
		sigorset(&t->t_hold, &u.u_sigmask[sig-1]);
		if (!sigismember(&u.u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&u.u_sigresethand, sig))
			setsigact(sig, SIG_DFL, nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		/*
		 * sendsig() failed to build the signal frame:
		 * fall through and terminate with SIGSEGV.
		 */
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		proc_is_exiting(p);
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_start(sig);
#endif
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
#ifdef C2_AUDIT
			if (audit_active)	/* audit core dump */
				audit_core_finish(code);
#endif
		}
	}
	if (ext)
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);

	exit(code, sig);
}

/*
 * Find next unheld signal in ssp for thread t.
 * Returns the signal number, or 0 if none is deliverable.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
 * The rest are promoted in signal number order.
 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	/* scan remaining pending signals in ascending signal-number order */
	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}

/*
 * Establish the action (disposition, handler mask and SA_* flags) for
 * signal 'sig' in the current process.  Caller must hold p->p_lock.
 * Setting the action to SIG_IGN (or SIG_DFL for a default-ignored
 * signal) discards all pending instances of the signal.
 */
void
setsigact(int sig, void (*disp)(), k_sigset_t mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	u.u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		/* signal is being caught: record mask and per-flag sets */
		sigdelset(&p->p_ignore, sig);
		u.u_sigmask[sig - 1] = mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&u.u_sigresethand, sig);
			else
				sigdelset(&u.u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&u.u_signodefer, sig);
		else
			sigdelset(&u.u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&u.u_sigrestart, sig);
		else
			sigdelset(&u.u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&u.u_sigonstack, sig);
		else
			sigdelset(&u.u_sigonstack, sig);

	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);

	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
			proc_t *cp, *tp;

			/*
			 * Not waiting on children: reap any zombie
			 * children now.  Lock order requires dropping
			 * p_lock before taking pidlock.
			 */
			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB &&
				    !(cp->p_pidflag & CLDWAITPID))
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}

/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	/* drop pending instances of default-ignored signals everywhere */
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}

/*
 * Notify the parent of child cp of a state change (cp->p_wcode).
 * Wakes wait()ers on the parent's p_cv and, depending on the parent's
 * SIGCLD disposition, either reaps the zombie, posts a SIGCLD via
 * post_sigcld(), or just marks the child.  Consumes sqp (frees it if
 * not handed off).  Caller must hold pidlock.
 */
void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
			if (!(cp->p_pidflag & CLDWAITPID))
				freeproc(cp);
		} else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;
		}
		break;
	}

	if (sqp)
		siginfofree(sqp);
}

/*
 * Common code called from sigcld() and issig_forreal()
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
	 * then just mark the child process so that its SIGCLD will
	 * be posted later, when the first SIGCLD is taken off the
	 * queue or when the parent is ready to receive it, if ever.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN ||
	    sigismember(&pp->p_sig, SIGCLD))
		cp->p_pidflag |= CLDPEND;
	else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}

/*
 * Search for a child that has a pending SIGCLD for us, the parent.
 * The queue of SIGCLD signals is implied by the list of children.
 * We post the SIGCLD signals one at a time so they don't get lost.
 * When one is dequeued, another is enqueued, until there are no more.
 */
void
sigcld_repost()
{
	proc_t *pp = curproc;
	proc_t *cp;
	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
	sigqueue_t *sqp;

	/*
	 * Don't bother if SIGCLD is not now being caught.
	 */
	if (handler == SIG_DFL || handler == SIG_IGN)
		return;

	/* pre-allocate outside pidlock; post_sigcld() consumes sqp */
	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&pidlock);
	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
		if (cp->p_pidflag & CLDPEND) {
			post_sigcld(cp, sqp);
			mutex_exit(&pidlock);
			return;
		}
	}
	mutex_exit(&pidlock);
	kmem_free(sqp, sizeof (sigqueue_t));
}

/*
 * count number of sigqueue send by sigaddqa()
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	/* bump the pool's sent count under its own lock */
	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}

/*
 * Send the signal described by pv to process p on behalf of curproc,
 * performing the permission check unless pv->checkperm is clear.
 * Bumps pv->perm when permitted.  Caller holds pidlock.
 * Returns 0, EPERM, or EAGAIN (per-process sigqueue pool exhausted).
 */
int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	/* never allow init to receive an unmaskable signal this way */
	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}

/*
 * Send a signal to every process selected by procset psp.
 * Returns EPERM if no process in the set accepted the signal.
 */
int
sigsendset(procset_t *psp, sigsend_t *pv)
{
	int error;

	error = dotoprocs(psp, sigsendproc, (char *)pv);
	if (error == 0 && pv->perm == 0)
		return (EPERM);

	return (error);
}

/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	/* find and unlink the first queued entry for sig */
	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	*qpp = sqp;
	*psqp = sqp->sq_next;
	/* if another instance remains queued, re-assert the pending bit */
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}

/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	/* locate the entry matching si_pid, si_code and si_status */
	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	/* clear the pending bit only when no other SIGCLD remains queued */
	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}

/*
 * Delete queued siginfo structures.
 * If a non-null thread pointer is passed then delete from
 * the thread queue, otherwise delete from the process queue.
 * A sig of 0 deletes every queued entry.
 */
void
sigdelq(proc_t *p, kthread_t *t, int sig)
{
	sigqueue_t **psqp, *sqp;

	/*
	 * We must be holding p->p_lock unless the process is
	 * being reaped or has failed to get started on fork.
	 */
	ASSERT(MUTEX_HELD(&p->p_lock) ||
	    p->p_stat == SIDL || p->p_stat == SZOMB);

	if (t != (kthread_t *)NULL)
		psqp = &t->t_sigqueue;
	else
		psqp = &p->p_sigqueue;

	while (*psqp) {
		sqp = *psqp;
		if (sig == 0 || sqp->sq_info.si_signo == sig) {
			*psqp = sqp->sq_next;
			siginfofree(sqp);
		} else
			psqp = &sqp->sq_next;
	}
}

/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	/* external == sent from outside the target's process contract */
	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig().  So we create a
	 * separate stash for SIGKILL's sigqueue_t.  Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else	/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		/* queueable: append at the tail, unbounded depth */
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		/* depth one: discard if an entry for sig is already queued */
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}

/*
 * The function sigaddqa() is called with sigqueue already allocated.
 * If signal is ignored, discard but guarantee KILL and generation semantics.
 * It is called from sigqueue() and other places.
 */
void
sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	int sig = sigqp->sq_info.si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	if (sig_discardable(p, sig))
		siginfofree(sigqp);
	else
		sigaddqins(p, t, sigqp);

	sigtoproc(p, t, sig);
}

/*
 * Allocate the sigqueue_t structure and call sigaddqins().
 */
void
sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
{
	sigqueue_t *sqp;
	int sig = infop->si_signo;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(sig >= 1 && sig < NSIG);

	/*
	 * If the signal will be discarded by sigtoproc() or
	 * if the process isn't requesting siginfo and it isn't
	 * blocking the signal (it *could* change it's mind while
	 * the signal is pending) then don't bother creating one.
 */
	if (!sig_discardable(p, sig) &&
	    (sigismember(&p->p_siginfo, sig) ||
	    (curproc->p_ct_process != p->p_ct_process) ||
	    (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
	    ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
		bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
		sqp->sq_func = NULL;
		sqp->sq_next = NULL;
		sigaddqins(p, t, sqp);
	}
	/* the signal itself is posted even if no siginfo was queued */
	sigtoproc(p, t, sig);
}

/*
 * Handle stop-on-fault processing for the debugger.  Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
 */
int
stop_on_fault(uint_t fault, k_siginfo_t *sip)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	ASSERT(prismember(&p->p_fltmask, fault));

	/*
	 * Record current fault and siginfo structure so debugger can
	 * find it.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_curflt = (uchar_t)fault;
	lwp->lwp_siginfo = *sip;

	stop(PR_FAULTED, fault);

	/* re-read: the debugger may have cleared lwp_curflt while stopped */
	fault = lwp->lwp_curflt;
	lwp->lwp_curflt = 0;
	mutex_exit(&p->p_lock);
	return (fault);
}

/* s1 |= s2, word by word */
void
sigorset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] |= s2->__sigbits[0];
	s1->__sigbits[1] |= s2->__sigbits[1];
}

/* s1 &= s2, word by word */
void
sigandset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= s2->__sigbits[0];
	s1->__sigbits[1] &= s2->__sigbits[1];
}

/* s1 &= ~s2, word by word */
void
sigdiffset(k_sigset_t *s1, k_sigset_t *s2)
{
	s1->__sigbits[0] &= ~(s2->__sigbits[0]);
	s1->__sigbits[1] &= ~(s2->__sigbits[1]);
}

/*
 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
 * if there are any signals the thread might take on return from the kernel.
 * If ksigset_t's were a single word, we would do:
 *	return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
 */
int
sigcheck(proc_t *p, kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	/*
	 * If signals are blocked via the schedctl interface
	 * then we only check for the unmaskable signals.
	 */
	if (tdp != NULL && tdp->sc_sigblock)
		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
		    CANTMASK0);

	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
	    ~t->t_hold.__sigbits[0]) |
	    (((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
	    ~t->t_hold.__sigbits[1]) & FILLSET1));
}

/* ONC_PLUS EXTRACT START */
void
sigintr(k_sigset_t *smask, int intable)
{
	proc_t *p;
	int owned;
	k_sigset_t lmask;		/* local copy of cantmask */
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
	 * and SIGTERM. (Preserving the existing masks).
	 * This function supports the -intr nfs and ufs mount option.
	 */

	/*
	 * don't do kernel threads
	 */
	if (lwp == NULL)
		return;

	/*
	 * get access to signal mask
	 */
	p = ttoproc(curthread);
	owned = mutex_owned(&p->p_lock);	/* this is filthy */
	if (!owned)
		mutex_enter(&p->p_lock);

	/*
	 * remember the current mask
	 */
	schedctl_finish_sigblock(curthread);
	*smask = curthread->t_hold;

	/*
	 * mask out all signals
	 */
	sigfillset(&curthread->t_hold);

	/*
	 * Unmask the non-maskable signals (e.g., KILL), as long as
	 * they aren't already masked (which could happen at exit).
	 * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
	 * second sets the current hold mask to (~0 & ~lmask), which reduces
	 * to (~cantmask | curhold).
2246 */ 2247 lmask = cantmask; 2248 sigdiffset(&lmask, smask); 2249 sigdiffset(&curthread->t_hold, &lmask); 2250 2251 /* 2252 * Re-enable HUP, QUIT, and TERM iff they were originally enabled 2253 * Re-enable INT if it's originally enabled and the NFS mount option 2254 * nointr is not set. 2255 */ 2256 if (!sigismember(smask, SIGHUP)) 2257 sigdelset(&curthread->t_hold, SIGHUP); 2258 if (!sigismember(smask, SIGINT) && intable) 2259 sigdelset(&curthread->t_hold, SIGINT); 2260 if (!sigismember(smask, SIGQUIT)) 2261 sigdelset(&curthread->t_hold, SIGQUIT); 2262 if (!sigismember(smask, SIGTERM)) 2263 sigdelset(&curthread->t_hold, SIGTERM); 2264 2265 /* 2266 * release access to signal mask 2267 */ 2268 if (!owned) 2269 mutex_exit(&p->p_lock); 2270 2271 /* 2272 * Indicate that this lwp is not to be stopped. 2273 */ 2274 lwp->lwp_nostop++; 2275 2276 } 2277 /* ONC_PLUS EXTRACT END */ 2278 2279 void 2280 sigunintr(k_sigset_t *smask) 2281 { 2282 proc_t *p; 2283 int owned; 2284 klwp_t *lwp = ttolwp(curthread); 2285 2286 /* 2287 * Reset previous mask (See sigintr() above) 2288 */ 2289 if (lwp != NULL) { 2290 lwp->lwp_nostop--; /* restore lwp stoppability */ 2291 p = ttoproc(curthread); 2292 owned = mutex_owned(&p->p_lock); /* this is filthy */ 2293 if (!owned) 2294 mutex_enter(&p->p_lock); 2295 curthread->t_hold = *smask; 2296 /* so unmasked signals will be seen */ 2297 curthread->t_sig_check = 1; 2298 if (!owned) 2299 mutex_exit(&p->p_lock); 2300 } 2301 } 2302 2303 void 2304 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask) 2305 { 2306 proc_t *p; 2307 int owned; 2308 /* 2309 * Save current signal mask in oldmask, then 2310 * set it to newmask. 
	 * oldmask may be NULL if the caller does not need the old mask;
	 * no-op for kernel threads (no lwp).
	 */
	if (ttolwp(curthread) != NULL) {
		p = ttoproc(curthread);
		owned = mutex_owned(&p->p_lock);	/* this is filthy */
		if (!owned)
			mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(curthread);
		if (oldmask != NULL)
			*oldmask = curthread->t_hold;
		curthread->t_hold = *newmask;
		curthread->t_sig_check = 1;
		if (!owned)
			mutex_exit(&p->p_lock);
	}
}

/*
 * Return true if the signal number is in range
 * and the signal code specifies signal queueing.
 */
int
sigwillqueue(int sig, int code)
{
	if (sig >= 0 && sig < NSIG) {
		switch (code) {
		case SI_QUEUE:
		case SI_TIMER:
		case SI_ASYNCIO:
		case SI_MESGQ:
			return (1);
		}
	}
	return (0);
}

#ifndef	UCHAR_MAX
#define	UCHAR_MAX	255
#endif

/*
 * The entire pool (with maxcount entries) is pre-allocated at
 * the first sigqueue/signotify call.
 *
 * Allocates one contiguous chunk: a sigqhdr_t header immediately
 * followed by maxcount entries of 'size' bytes each, threaded onto
 * the header's free list.  The counts are narrowed to uchar_t/ushort_t,
 * hence the ASSERT bounds.
 * NOTE(review): the threading loop assumes maxcount >= 1; a zero
 * maxcount would wrap the loop counter — callers appear to guarantee
 * this, but it is not enforced here.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
	size_t i;
	sigqueue_t *sq, *next;
	sigqhdr_t *sqh;

	i = (maxcount * size) + sizeof (sigqhdr_t);
	ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
	sqh = kmem_alloc(i, KM_SLEEP);
	sqh->sqb_count = (uchar_t)maxcount;
	sqh->sqb_maxcount = (uchar_t)maxcount;
	sqh->sqb_size = (ushort_t)i;
	sqh->sqb_pexited = 0;
	sqh->sqb_sent = 0;
	/* first entry starts directly after the header */
	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
	for (i = maxcount - 1; i != 0; i--) {
		next = (sigqueue_t *)((uintptr_t)sq + size);
		sq->sq_next = next;
		sq = next;
	}
	sq->sq_next = NULL;
	cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
	return (sqh);
}

static void sigqrel(sigqueue_t *);

/*
 * allocate a sigqueue/signotify structure from the per process
 * pre-allocated pool.
 * Returns NULL if the pool is absent or exhausted.
 * The caller must hold curproc->p_lock.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
	sigqueue_t *sq = NULL;

	ASSERT(MUTEX_HELD(&curproc->p_lock));

	if (sqh != NULL) {
		mutex_enter(&sqh->sqb_lock);
		if (sqh->sqb_count > 0) {
			sqh->sqb_count--;
			sq = sqh->sqb_free;
			sqh->sqb_free = sq->sq_next;
			mutex_exit(&sqh->sqb_lock);
			/* initialize the entry after dropping the pool lock */
			bzero(&sq->sq_info, sizeof (k_siginfo_t));
			sq->sq_backptr = sqh;
			sq->sq_func = sigqrel;	/* destructor: back to pool */
			sq->sq_next = NULL;
			sq->sq_external = 0;
		} else {
			mutex_exit(&sqh->sqb_lock);
		}
	}
	return (sq);
}

/*
 * Return a sigqueue structure back to the pre-allocated pool.
 * If the owning process has already exited (sqb_pexited) and this is
 * the last outstanding entry (sqb_sent == 1), the whole pool header
 * is destroyed and freed here instead.
 */
static void
sigqrel(sigqueue_t *sq)
{
	sigqhdr_t *sqh;

	/* make sure that p_lock of the affected process is held */

	sqh = (sigqhdr_t *)sq->sq_backptr;
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
		/* last reference: tear down the whole pool */
		mutex_exit(&sqh->sqb_lock);
		cv_destroy(&sqh->sqb_cv);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		sqh->sqb_count++;
		sqh->sqb_sent--;
		sq->sq_next = sqh->sqb_free;
		sq->sq_backptr = NULL;
		sqh->sqb_free = sq;
		/* wake anyone waiting for a free pool entry */
		cv_signal(&sqh->sqb_cv);
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up the pre-allocated sigqueue headers of sigqueue pool
 * and signotify pool, if possible.
 * Called only by the owning process during exec() and exit().
 */
void
sigqfree(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (p->p_sigqhdr != NULL) {	/* sigqueue pool */
		sigqhdrfree(p->p_sigqhdr);
		p->p_sigqhdr = NULL;
	}
	if (p->p_signhdr != NULL) {	/* signotify pool */
		sigqhdrfree(p->p_signhdr);
		p->p_signhdr = NULL;
	}
}

/*
 * Free up the pre-allocated header and sigq pool if possible.
 * If entries are still outstanding (sqb_sent != 0), only mark the
 * pool as owner-exited; the final sigqrel() will free it.
 */
void
sigqhdrfree(sigqhdr_t *sqh)
{
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_sent == 0) {
		/* nothing outstanding: destroy the pool now */
		mutex_exit(&sqh->sqb_lock);
		cv_destroy(&sqh->sqb_cv);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		/* defer destruction to the last sigqrel() */
		sqh->sqb_pexited = 1;
		mutex_exit(&sqh->sqb_lock);
	}
}

/*
 * Free up a single sigqueue structure.
 * No other code should free a sigqueue directly.
 * Pool-allocated entries carry sq_func (sigqrel); standalone
 * kmem-allocated entries (sq_func == NULL) are freed here.
 */
void
siginfofree(sigqueue_t *sqp)
{
	if (sqp != NULL) {
		if (sqp->sq_func != NULL)
			(sqp->sq_func)(sqp);
		else
			kmem_free(sqp, sizeof (sigqueue_t));
	}
}

/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp.  Called from trap().
 * Copies *ip into a freshly zeroed sigqueue entry and queues it
 * directly at curthread via sigaddqa().
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
	proc_t *p = ttoproc(curthread);
	int sig = ip->si_signo;
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	ASSERT(sig > 0 && sig < NSIG);

	if (curthread->t_dtrace_on)
		dtrace_safe_synchronous_signal();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
	/*
	 * Avoid a possible infinite loop if the lwp is holding the
	 * signal generated by a trap of a restartable instruction or
	 * if the signal so generated is being ignored by the process.
	 * In that case, force the default disposition so the signal
	 * actually terminates (or stops) the lwp.
	 */
	if (restartable &&
	    (sigismember(&curthread->t_hold, sig) ||
	    p->p_user.u_signal[sig-1] == SIG_IGN)) {
		sigdelset(&curthread->t_hold, sig);
		p->p_user.u_signal[sig-1] = SIG_DFL;
		sigdelset(&p->p_ignore, sig);
	}
	bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
	sigaddqa(p, curthread, sqp);
	mutex_exit(&p->p_lock);
}

#ifdef _SYSCALL32_IMPL

/*
 * It's tricky to transmit a sigval between 32-bit and 64-bit
 * process, since in the 64-bit world, a pointer and an integer
 * are different sizes.
 * Since we're constrained by the standards
 * world not to change the types, and it's unclear how useful it is
 * to send pointers between address spaces this way, we preserve
 * the 'int' interpretation for 32-bit processes interoperating
 * with 64-bit processes.  The full semantics (pointers or integers)
 * are available for N-bit processes interoperating with N-bit
 * processes.
 */
/*
 * Translate a native (64-bit) k_siginfo_t into the 32-bit siginfo32_t
 * layout.  Pointers (si_addr, si_pc, si_faddr) are truncated to
 * caddr32_t; sival is carried as an int per the comment above.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int32_t)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	/* kernel-generated: layout depends on the signal number */
	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case
	    SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}

/*
 * Inverse of siginfo_kto32(): expand a 32-bit siginfo32_t into the
 * native k_siginfo_t.  32-bit address fields are widened through
 * uintptr_t into pointers; sival keeps its 'int' interpretation.
 */
void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	/* kernel-generated: layout depends on the signal number */
	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		dest->si_addr = (void *)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (void *)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case
	    SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}

#endif	/* _SYSCALL32_IMPL */