1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_compat.h" 41 #include "opt_ktrace.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/signalvar.h> 46 #include <sys/vnode.h> 47 #include <sys/acct.h> 48 #include <sys/condvar.h> 49 #include <sys/event.h> 50 #include <sys/fcntl.h> 51 #include <sys/kernel.h> 52 #include <sys/kse.h> 53 #include <sys/ktr.h> 54 #include <sys/ktrace.h> 55 #include <sys/lock.h> 56 #include <sys/malloc.h> 57 #include <sys/mutex.h> 58 #include <sys/namei.h> 59 #include <sys/proc.h> 60 #include <sys/pioctl.h> 61 #include <sys/resourcevar.h> 62 #include <sys/sched.h> 63 #include <sys/sleepqueue.h> 64 #include <sys/smp.h> 65 #include <sys/stat.h> 66 #include <sys/sx.h> 67 #include <sys/syscallsubr.h> 68 #include <sys/sysctl.h> 69 #include <sys/sysent.h> 70 #include <sys/syslog.h> 71 #include <sys/sysproto.h> 72 #include <sys/timers.h> 73 #include <sys/unistd.h> 74 #include <sys/wait.h> 75 #include <vm/vm.h> 76 #include <vm/vm_extern.h> 77 #include <vm/uma.h> 78 79 #include <machine/cpu.h> 80 81 #if defined (__alpha__) && !defined(COMPAT_43) 82 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)" 83 #endif 84 85 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. 
 */

/* Forward declarations for this file's static helpers. */
static int	coredump(struct thread *);
static char	*expand_name(const char *, uid_t, pid_t);
static int	killpg1(struct thread *td, int sig, int pgid, int all);
static int	issignal(struct thread *p);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *td, int sig, sig_t action);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, int prop);
static int	kern_sigtimedwait(struct thread *, sigset_t,
			ksiginfo_t *, struct timespec *);
static int	do_tdsignal(struct thread *, int, ksiginfo_t *, sigtarget_t);
static void	sigqueue_start(void);
static int	psignal_common(struct proc *p, int sig, ksiginfo_t *ksi);

/* UMA zone backing ksiginfo allocations; created lazily by sigqueue_start(). */
static uma_zone_t	ksiginfo_zone = NULL;
struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

/* Tunables controlling the POSIX real-time signal queue. */
SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0, "POSIX real time signal");

/* Cap on queued ksiginfo per process; excess enqueues fail with EAGAIN. */
static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	queue_rt_signal_only = 1;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, queue_rt_signal_only, CTLFLAG_RW,
    &queue_rt_signal_only, 0, "Only rt signal is queued");

/* Number of ksiginfo slots preallocated at boot (kern.sigqueue.preallocate). */
static int	preallocate_siginfo = 1024;
TUNABLE_INT("kern.sigqueue.preallocate", &preallocate_siginfo);
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RD,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

/* Read-only counters: queue-overflow and allocation-failure events. */
static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, signal_overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals overflew");

static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, signal_alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "signals failed to be allocated");

SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);

/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
    &sugid_coredump, 0, "Enable coredumping set user/group ID processes");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
    &do_coredump, 0, "Enable/Disable coredumps");

static int	set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
    0, "Enable setting the NODUMP flag on coredump files");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SA_KILL		0x01		/* terminates process by default */
#define	SA_CORE		0x02		/* ditto and coredumps */
#define	SA_STOP		0x04		/* suspend process */
#define	SA_TTYSTOP	0x08		/* ditto, from tty */
#define	SA_IGNORE	0x10		/* ignore by default */
#define	SA_CONT		0x20		/* continue if suspended */
#define	SA_CANTMASK	0x40		/* non-maskable, catchable */
#define	SA_PROC		0x80		/* deliverable to any thread */

/* Indexed by _SIG_IDX(sig), i.e. sig - 1; see sigprop(). */
static int sigproptbl[NSIG] = {
	SA_KILL|SA_PROC,		/* SIGHUP */
	SA_KILL|SA_PROC,		/* SIGINT */
	SA_KILL|SA_CORE|SA_PROC,	/* SIGQUIT */
	SA_KILL|SA_CORE,		/* SIGILL */
	SA_KILL|SA_CORE,		/* SIGTRAP */
	SA_KILL|SA_CORE,		/* SIGABRT */
	SA_KILL|SA_CORE|SA_PROC,	/* SIGEMT */
	SA_KILL|SA_CORE,		/* SIGFPE */
	SA_KILL|SA_PROC,		/* SIGKILL */
	SA_KILL|SA_CORE,		/* SIGBUS */
	SA_KILL|SA_CORE,		/* SIGSEGV */
	SA_KILL|SA_CORE,		/* SIGSYS */
	SA_KILL|SA_PROC,		/* SIGPIPE */
	SA_KILL|SA_PROC,		/* SIGALRM */
	SA_KILL|SA_PROC,		/* SIGTERM */
	SA_IGNORE|SA_PROC,		/* SIGURG */
	SA_STOP|SA_PROC,		/* SIGSTOP */
	SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTSTP */
	SA_IGNORE|SA_CONT|SA_PROC,	/* SIGCONT */
	SA_IGNORE|SA_PROC,		/* SIGCHLD */
	SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTIN */
	SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTOU */
	SA_IGNORE|SA_PROC,		/* SIGIO */
	SA_KILL,			/* SIGXCPU */
	SA_KILL,			/* SIGXFSZ */
	SA_KILL|SA_PROC,		/* SIGVTALRM */
	SA_KILL|SA_PROC,		/* SIGPROF */
	SA_IGNORE|SA_PROC,		/* SIGWINCH */
	SA_IGNORE|SA_PROC,		/* SIGINFO */
	SA_KILL|SA_PROC,		/* SIGUSR1 */
	SA_KILL|SA_PROC,		/* SIGUSR2 */
};

/*
 * SYSINIT hook: create the ksiginfo UMA zone and preallocate
 * "preallocate_siginfo" entries so signals can be queued before
 * (and without) further allocation.
 */
static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
}

/*
 * Allocate a zeroed ksiginfo (M_NOWAIT).  Returns NULL if the zone has
 * not been created yet or the allocation fails.
 */
ksiginfo_t *
ksiginfo_alloc(void)
{
	if (ksiginfo_zone != NULL)
		return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, M_NOWAIT | M_ZERO));
	return (NULL);
}

/* Return a ksiginfo to the zone. */
void
ksiginfo_free(ksiginfo_t *ksi)
{
	uma_zfree(ksiginfo_zone, ksi);
}

/*
 * Free ksi unless it is externally owned (KSI_EXT).
 * Returns 1 if the ksiginfo was freed, 0 if the caller must not free it.
 */
static __inline int
ksiginfo_tryfree(ksiginfo_t *ksi)
{
	if (!(ksi->ksi_flags & KSI_EXT)) {
		uma_zfree(ksiginfo_zone, ksi);
		return (1);
	}
	return (0);
}

/*
 * Initialize a signal queue.  p is the owning process, or NULL for a
 * temporary queue that carries no p_pendingcnt accounting.
 */
void
sigqueue_init(sigqueue_t *list, struct proc *p)
{
	SIGEMPTYSET(list->sq_signals);
	TAILQ_INIT(&list->sq_list);
	list->sq_proc = p;
	list->sq_flags = SQ_INIT;
}

/*
 * Get a signal's ksiginfo.
 * Return:
 *	0	-	signal not found
 *	others	-	signal number
 */
int
sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi, *next;
	int count = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (!SIGISMEMBER(sq->sq_signals, signo))
		return (0);

	/*
	 * Dequeue and copy out only the first matching entry; keep
	 * counting so we know whether more instances remain queued.
	 */
	for (ksi = TAILQ_FIRST(&sq->sq_list); ksi != NULL; ksi = next) {
		next = TAILQ_NEXT(ksi, ksi_link);
		if (ksi->ksi_signo == signo) {
			if (count == 0) {
				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
				ksi->ksi_sigq = NULL;
				ksiginfo_copy(ksi, si);
				if (ksiginfo_tryfree(ksi) && p != NULL)
					p->p_pendingcnt--;
			}
			count++;
		}
	}

	/* Clear the pending bit only when no further instances remain. */
	if (count <= 1)
		SIGDELSET(sq->sq_signals, signo);
	si->ksi_signo = signo;
	return (signo);
}

/*
 * Remove a specific ksiginfo from whatever queue it is on, fixing up
 * the pending-signal bit if it was the last instance of that signal.
 * No-op when the ksiginfo is not queued.
 */
void
sigqueue_take(ksiginfo_t *ksi)
{
	struct ksiginfo *kp;
	struct proc *p;
	sigqueue_t *sq;

	if ((sq = ksi->ksi_sigq) == NULL)
		return;

	p = sq->sq_proc;
	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
	ksi->ksi_sigq = NULL;
	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
		p->p_pendingcnt--;

	/* Scan for another queued instance of the same signal. */
	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
	     kp = TAILQ_NEXT(kp, ksi_link)) {
		if (kp->ksi_signo == ksi->ksi_signo)
			break;
	}
	if (kp == NULL)
		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
}

/*
 * Add signal signo (with optional info si) to queue sq.
 * Returns 0 on success or EAGAIN if the per-process pending limit is
 * reached or no ksiginfo could be allocated; in either failure case
 * the pending bit is still set for KSI_TRAP signals.
 */
int
sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi;
	int ret = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	/* SIGKILL/SIGSTOP (and infoless sends) only need the pending bit. */
	if (signo == SIGKILL || signo == SIGSTOP || si == NULL)
		goto out_set_bit;

	/* directly insert the ksi, don't copy it */
	if (si->ksi_flags & KSI_INS) {
		TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
		si->ksi_sigq = sq;
		goto out_set_bit;
	}

	/* Zone not ready yet (early boot): degrade to bit-only delivery. */
	if (__predict_false(ksiginfo_zone == NULL))
		goto out_set_bit;

	if (p != NULL && p->p_pendingcnt > max_pending_per_proc) {
		signal_overflow++;
		ret = EAGAIN;
	} else if ((ksi = ksiginfo_alloc()) == NULL) {
		signal_alloc_fail++;
		ret = EAGAIN;
	} else {
		if (p != NULL)
			p->p_pendingcnt++;
		ksiginfo_copy(si, ksi);
		ksi->ksi_signo = signo;
		TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = sq;
	}

	/* Trap-generated signals must never be lost: force success. */
	if ((si->ksi_flags & KSI_TRAP) != 0) {
		ret = 0;
		goto out_set_bit;
	}

	if (ret != 0)
		return (ret);

out_set_bit:
	SIGADDSET(sq->sq_signals, signo);
	return (ret);
}

/* Discard every queued ksiginfo and clear all pending bits. */
void
sigqueue_flush(sigqueue_t *sq)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (p != NULL)
		PROC_LOCK_ASSERT(p, MA_OWNED);

	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = NULL;
		if (ksiginfo_tryfree(ksi) && p != NULL)
			p->p_pendingcnt--;
	}

	SIGEMPTYSET(sq->sq_signals);
}

/* OR the signal number of every queued ksiginfo into *set. */
void
sigqueue_collect_set(sigqueue_t *sq, sigset_t *set)
{
	ksiginfo_t *ksi;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	TAILQ_FOREACH(ksi, &sq->sq_list, ksi_link)
		SIGADDSET(*set, ksi->ksi_signo);
}

/*
 * Move all queued instances of the signals in *setp from src to dst,
 * updating both queues' pending bits and the owning processes'
 * p_pendingcnt accounting.
 */
void
sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, sigset_t *setp)
{
	sigset_t tmp, set;
	struct proc *p1, *p2;
	ksiginfo_t *ksi, *next;

	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
	/*
	 * make a copy, this allows setp to point to src or dst
	 * sq_signals without trouble.
	 */
	set = *setp;
	p1 = src->sq_proc;
	p2 = dst->sq_proc;
	/* Move siginfo to target list */
	for (ksi = TAILQ_FIRST(&src->sq_list); ksi != NULL; ksi = next) {
		next = TAILQ_NEXT(ksi, ksi_link);
		if (SIGISMEMBER(set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
			if (p1 != NULL)
				p1->p_pendingcnt--;
			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = dst;
			if (p2 != NULL)
				p2->p_pendingcnt++;
		}
	}

	/* Move pending bits to target list */
	tmp = src->sq_signals;
	SIGSETAND(tmp, set);
	SIGSETOR(dst->sq_signals, tmp);
	SIGSETNAND(src->sq_signals, tmp);

	/* Finally, rescan src queue and set pending bits for it */
	sigqueue_collect_set(src, &src->sq_signals);
}

/* Single-signal convenience wrapper around sigqueue_move_set(). */
void
sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_move_set(src, dst, &set);
}

/*
 * Drop all queued instances of the signals in *set from sq and clear
 * the corresponding pending bits.
 */
void
sigqueue_delete_set(sigqueue_t *sq, sigset_t *set)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi, *next;

	KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));

	/* Remove siginfo queue */
	for (ksi = TAILQ_FIRST(&sq->sq_list); ksi != NULL; ksi = next) {
		next = TAILQ_NEXT(ksi, ksi_link);
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = NULL;
			if (ksiginfo_tryfree(ksi) && p != NULL)
				p->p_pendingcnt--;
		}
	}
	SIGSETNAND(sq->sq_signals, *set);
	/* Finally, rescan queue and set pending bits for it */
	sigqueue_collect_set(sq, &sq->sq_signals);
}

/* Single-signal convenience wrapper around sigqueue_delete_set(). */
void
sigqueue_delete(sigqueue_t *sq, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set(sq, &set);
}

/* Remove a set of signals for a process */
void
sigqueue_delete_set_proc(struct proc *p, sigset_t *set)
{
	sigqueue_t worklist;
	struct thread *td0;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Collect matching entries from the process queue and every
	 * thread queue onto a temporary list, then free them all.
	 */
	sigqueue_init(&worklist, NULL);
	sigqueue_move_set(&p->p_sigqueue, &worklist, set);

	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td0)
		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
	mtx_unlock_spin(&sched_lock);

	sigqueue_flush(&worklist);
}

/* Single-signal convenience wrapper around sigqueue_delete_set_proc(). */
void
sigqueue_delete_proc(struct proc *p, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set_proc(p, &set);
}

/* Discard all pending stop signals process-wide (used on SIGCONT). */
void
sigqueue_delete_stopmask_proc(struct proc *p)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, SIGSTOP);
	SIGADDSET(set, SIGTSTP);
	SIGADDSET(set, SIGTTIN);
	SIGADDSET(set, SIGTTOU);
	sigqueue_delete_set_proc(p, &set);
}

/*
 * Determine signal that should be delivered to process p, the current
 * process, 0 if none.  If there is a pending stop signal with default
 * action, the process stops in issignal().
 *
 * MP SAFE.
 */
int
cursig(struct thread *td)
{
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	return (SIGPENDING(td) ? issignal(td) : 0);
}

/*
 * Arrange for ast() to handle unmasked pending signals on return to user
 * mode.  This must be called whenever a signal is added to td_sigqueue or
 * unmasked in td_sigmask.
 */
void
signotify(struct thread *td)
{
	struct proc *p;
	sigset_t set, saved;

	p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If our mask changed we may have to move signal that were
	 * previously masked by all threads to our sigqueue.
	 */
	set = p->p_sigqueue.sq_signals;
	if (p->p_flag & P_SA)
		saved = p->p_sigqueue.sq_signals;
	SIGSETNAND(set, td->td_sigmask);
	if (! SIGISEMPTY(set))
		sigqueue_move_set(&p->p_sigqueue, &td->td_sigqueue, &set);
	if (SIGPENDING(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
	}
	/* KSE (P_SA) processes get an upcall event when the set changes. */
	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
		if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) {
			/* pending set changed */
			p->p_flag |= P_SIGEVENT;
			wakeup(&p->p_siglist);
		}
	}
}

/*
 * Return non-zero if sp lies on the current thread's alternate signal
 * stack (always 0 when no altstack is configured).  With COMPAT_43 an
 * ss_size of 0 means "trust the SS_ONSTACK flag" instead.
 */
int
sigonstack(size_t sp)
{
	struct thread *td = curthread;

	return ((td->td_pflags & TDP_ALTSTACK) ?
#if defined(COMPAT_43)
	    ((td->td_sigstk.ss_size == 0) ?
		(td->td_sigstk.ss_flags & SS_ONSTACK) :
		((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
#else
	    ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
#endif
	    : 0);
}

/* Look up the SA_* property flags for sig; 0 for out-of-range signals. */
static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < NSIG)
		return (sigproptbl[_SIG_IDX(sig)]);
	return (0);
}

/* Find the lowest signal number set in *set, or 0 if the set is empty. */
int
sig_ffs(sigset_t *set)
{
	int i;

	for (i = 0; i < _SIG_WORDS; i++)
		if (set->__bits[i])
			return (ffs(set->__bits[i]) + (i * 32));
	return (0);
}

/*
 * kern_sigaction
 * sigaction
 * freebsd4_sigaction
 * osigaction
 *
 * MPSAFE
 */
int
kern_sigaction(td, sig, act, oact, flags)
	struct thread *td;
	register int sig;
	struct sigaction *act, *oact;
	int flags;
{
	struct sigacts *ps;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/* Report the current disposition before (possibly) changing it. */
	if (oact) {
		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		oact->sa_flags = 0;
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig))
			oact->sa_flags |= SA_SIGINFO;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		/* SIGKILL and SIGSTOP may never be caught or ignored. */
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */

		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (act->sa_flags & SA_SIGINFO) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!(act->sa_flags & SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (act->sa_flags & SA_ONSTACK)
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (act->sa_flags & SA_RESETHAND)
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (act->sa_flags & SA_NODEFER)
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore. However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SA_IGNORE &&
		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			if ((p->p_flag & P_SA) &&
			     SIGISMEMBER(p->p_sigqueue.sq_signals, sig)) {
				p->p_flag |= P_SIGEVENT;
				wakeup(&p->p_siglist);
			}
			/* never to be seen again */
			sigqueue_delete_proc(p, sig);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
/*
 * MPSAFE
 */
int
sigaction(td, uap)
	struct thread *td;
	register struct sigaction_args *uap;
{
	struct sigaction act, oact;
	register struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}

#ifdef COMPAT_FREEBSD4
#ifndef _SYS_SYSPROTO_H_
struct freebsd4_sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
/*
 * MPSAFE
 */
int
freebsd4_sigaction(td, uap)
	struct thread *td;
	register struct freebsd4_sigaction_args *uap;
{
	struct sigaction act, oact;
	register struct sigaction *actp, *oactp;
	int error;


	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}
#endif	/* COMAPT_FREEBSD4 */

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigaction_args {
	int	signum;
	struct	osigaction *nsa;
	struct	osigaction *osa;
};
#endif
/*
 * MPSAFE
 */
int
osigaction(td, uap)
	struct thread *td;
	register struct osigaction_args *uap;
{
	struct osigaction sa;
	struct sigaction nsa, osa;
	register struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);

	nsap = (uap->nsa != NULL) ? &nsa : NULL;
	osap = (uap->osa != NULL) ? &osa : NULL;

	/* Convert between the old (osigset_t) and new sigaction layouts. */
	if (nsap) {
		error = copyin(uap->nsa, &sa, sizeof(sa));
		if (error)
			return (error);
		nsap->sa_handler = sa.sa_handler;
		nsap->sa_flags = sa.sa_flags;
		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		sa.sa_handler = osap->sa_handler;
		sa.sa_flags = osap->sa_flags;
		SIG2OSIG(osap->sa_mask, sa.sa_mask);
		error = copyout(&sa, uap->osa, sizeof(sa));
	}
	return (error);
}

#if !defined(__i386__) && !defined(__alpha__)
/* Avoid replicating the same stub everywhere */
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args *uap;
{

	return (nosys(td, (struct nosys_args *)uap));
}
#endif
#endif /* COMPAT_43 */

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(p)
	struct proc *p;
{
	register int i;
	struct sigacts *ps;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	for (i = 1; i <= NSIG; i++)
		if (sigprop(i) & SA_IGNORE && i != SIGCONT)
			SIGADDSET(ps->ps_sigignore, i);
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int sig;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	td = FIRST_THREAD_IN_PROC(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	while (SIGNOTEMPTY(ps->ps_sigcatch)) {
		sig = sig_ffs(&ps->ps_sigcatch);
		SIGDELSET(ps->ps_sigcatch, sig);
		if (sigprop(sig) & SA_IGNORE) {
			if (sig != SIGCONT)
				SIGADDSET(ps->ps_sigignore, sig);
			sigqueue_delete_proc(p, sig);
		}
		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset no zombies if child dies flag as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}

/*
 * kern_sigprocmask()
 *
 *	Manipulate signal mask.  "old" selects the pre-POSIX behaviour
 *	where SIG_SETMASK only replaces the low word (SIGSETLO) for the
 *	osigsetmask()-style compat syscalls.
 */
int
kern_sigprocmask(td, how, set, oset, old)
	struct thread *td;
	int how;
	sigset_t *set, *oset;
	int old;
{
	int error;

	PROC_LOCK(td->td_proc);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			SIGSETOR(td->td_sigmask, *set);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			/* Newly unmasked signals may now be deliverable. */
			signotify(td);
			break;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			if (old)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			signotify(td);
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	PROC_UNLOCK(td->td_proc);
	return (error);
}

/*
 * sigprocmask() - MP SAFE
 */

#ifndef _SYS_SYSPROTO_H_
struct sigprocmask_args {
	int	how;
	const sigset_t *set;
	sigset_t *oset;
};
#endif
int
sigprocmask(td, uap)
	register struct thread *td;
	struct sigprocmask_args *uap;
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * osigprocmask() - MP SAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct osigprocmask_args {
	int	how;
	osigset_t mask;
};
#endif
int
osigprocmask(td, uap)
	register struct thread *td;
	struct osigprocmask_args *uap;
{
	sigset_t set, oset;
	int error;

	OSIG2SIG(uap->mask, set);
	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
	/* Old ABI returns the previous mask as the syscall return value. */
	SIG2OSIG(oset, td->td_retval[0]);
	return (error);
}
#endif /* COMPAT_43 */

/*
 * MPSAFE
 *
 * Note: per the sigwait() contract, errors are reported via the return
 * value stored in td_retval[0]; the syscall itself "succeeds" (returns 0)
 * except for ERESTART.
 */
int
sigwait(struct thread *td, struct sigwait_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error) {
		td->td_retval[0] = error;
		return (0);
	}

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error) {
		if (error == ERESTART)
			return (error);
		td->td_retval[0] = error;
		return (0);
	}

	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
	td->td_retval[0] = error;
	return (0);
}
/*
 * MPSAFE
 */
int
sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	ksiginfo_t ksi;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		timeout = &ts;
	} else
		timeout = NULL;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, timeout);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

/*
 * MPSAFE
 */
int
sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	/* No timeout: block until one of the signals in set is pending. */
	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

/*
 * Common backend for sigwait()/sigwaitinfo()/sigtimedwait(): wait for
 * one of the signals in waitset to become pending and dequeue it into
 * *ksi.  timeout == NULL means wait forever.  Returns 0 on success,
 * EAGAIN on timeout, EINVAL for a bad timespec, or the msleep() error.
 */
static int
kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
	struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t savedmask;
	struct proc *p;
	int error, sig, hz, i, timevalid = 0;
	struct timespec rts, ets, ts;
	struct timeval tv;

	p = td->td_proc;
	error = 0;
	sig = 0;
	SIG_CANTMASK(waitset);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	savedmask = td->td_sigmask;
	if (timeout) {
		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			getnanouptime(&rts);
		 	ets = rts;
			timespecadd(&ets, timeout);
		}
	}

again:
	for (i = 1; i <= _SIG_MAXSIG; ++i) {
		if (!SIGISMEMBER(waitset, i))
			continue;
		if (SIGISMEMBER(td->td_sigqueue.sq_signals, i)) {
			/*
			 * Unmask only this signal and let cursig()/issignal()
			 * perform the actual delivery bookkeeping; i = 0
			 * restarts the scan afterwards.
			 */
			SIGFILLSET(td->td_sigmask);
			SIG_CANTMASK(td->td_sigmask);
			SIGDELSET(td->td_sigmask, i);
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			i = 0;
			mtx_unlock(&ps->ps_mtx);
		} else if (SIGISMEMBER(p->p_sigqueue.sq_signals, i)) {
			if (p->p_flag & P_SA) {
				p->p_flag |= P_SIGEVENT;
				wakeup(&p->p_siglist);
			}
			/* Pull the process-wide signal onto this thread. */
			sigqueue_move(&p->p_sigqueue, &td->td_sigqueue, i);
			SIGFILLSET(td->td_sigmask);
			SIG_CANTMASK(td->td_sigmask);
			SIGDELSET(td->td_sigmask, i);
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			i = 0;
			mtx_unlock(&ps->ps_mtx);
		}
		if (sig)
			goto out;
	}
	if (error)
		goto out;

	/*
	 * POSIX says this must be checked after looking for pending
	 * signals.
	 */
	if (timeout) {
		if (!timevalid) {
			error = EINVAL;
			goto out;
		}
		getnanouptime(&rts);
		if (timespeccmp(&rts, &ets, >=)) {
			error = EAGAIN;
			goto out;
		}
		ts = ets;
		timespecsub(&ts, &rts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		hz = tvtohz(&tv);
	} else
		hz = 0;

	td->td_sigmask = savedmask;
	SIGSETNAND(td->td_sigmask, waitset);
	signotify(td);
	error = msleep(&ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz);
	if (timeout) {
		if (error == ERESTART) {
			/* timeout can not be restarted. */
			error = EINTR;
		} else if (error == EAGAIN) {
			/* will calculate timeout by ourself. */
			error = 0;
		}
	}
	goto again;

out:
	if (sig) {
		sig_t action;

		ksiginfo_init(ksi);
		sigqueue_get(&td->td_sigqueue, sig, ksi);
		ksi->ksi_signo = sig;
		if (ksi->ksi_code == SI_TIMER)
			itimer_accept(p, ksi->ksi_timerid, ksi);
		error = 0;
		mtx_lock(&ps->ps_mtx);
		action = ps->ps_sigact[_SIG_IDX(sig)];
		mtx_unlock(&ps->ps_mtx);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG))
			ktrpsig(sig, action, &td->td_sigmask, 0);
#endif
		_STOPEVENT(p, S_SIG, sig);

	}
	/* Restore the caller's signal mask before returning. */
	td->td_sigmask = savedmask;
	signotify(td);
	PROC_UNLOCK(p);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sigpending_args {
	sigset_t	*set;
};
#endif
/*
 * MPSAFE
 */
int
sigpending(td, uap)
	struct thread *td;
	struct sigpending_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	/* Union of process-wide and per-thread pending signals. */
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	return (copyout(&pending, uap->set, sizeof(sigset_t)));
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
/*
 * MPSAFE
 */
int
osigpending(td, uap)
	struct thread *td;
	struct osigpending_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	/* Old ABI: pending set is returned in the syscall return value. */
	SIG2OSIG(pending, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
/*
 * Generalized interface signal handler, 4.3-compatible.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigvec_args {
	int	signum;
	struct	sigvec *nsv;
	struct	sigvec *osv;
};
#endif
/*
 * osigvec: 4.3BSD sigvec(2) implemented on top of kern_sigaction().
 * The old SV_INTERRUPT flag is the inverse of SA_RESTART, hence the
 * XOR when translating flags in each direction.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
osigvec(td, uap)
	struct thread *td;
	register struct osigvec_args *uap;
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	register struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		vec.sv_flags &= ~SA_NOCLDWAIT;
		vec.sv_flags ^= SA_RESTART;
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct osigblock_args {
	int	mask;
};
#endif
/*
 * osigblock: 4.3BSD sigblock(2).  OR the given old-style mask into
 * the thread's signal mask; the previous mask is the return value.
 *
 * MPSAFE
 */
int
osigblock(td, uap)
	register struct thread *td;
	struct osigblock_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t set;

	OSIG2SIG(uap->mask, set);
	SIG_CANTMASK(set);
	PROC_LOCK(p);
	SIG2OSIG(td->td_sigmask, td->td_retval[0]);
	SIGSETOR(td->td_sigmask, set);
	PROC_UNLOCK(p);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct osigsetmask_args {
	int	mask;
};
#endif
/*
 * osigsetmask: 4.3BSD sigsetmask(2).  Replace the low (old-style)
 * part of the thread's signal mask; the previous mask is the return
 * value.
 *
 * MPSAFE
 */
int
osigsetmask(td, uap)
	struct thread *td;
	struct osigsetmask_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t set;

	OSIG2SIG(uap->mask, set);
	SIG_CANTMASK(set);
	PROC_LOCK(p);
	SIG2OSIG(td->td_sigmask, td->td_retval[0]);
	/* Only the low (old-style) bits of the mask are replaced. */
	SIGSETLO(td->td_sigmask, set);
	signotify(td);
	PROC_UNLOCK(p);
	return (0);
}
#endif /* COMPAT_43 */

/*
 * Suspend calling thread until signal, providing mask to be set
 * in the meantime.
 */
#ifndef _SYS_SYSPROTO_H_
struct sigsuspend_args {
	const sigset_t *sigmask;
};
#endif
/*
 * sigsuspend(2): copyin wrapper around kern_sigsuspend().
 *
 * MPSAFE
 */
/* ARGSUSED */
int
sigsuspend(td, uap)
	struct thread *td;
	struct sigsuspend_args *uap;
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);
	return (kern_sigsuspend(td, mask));
}

int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	PROC_LOCK(p);
	td->td_oldsigmask = td->td_sigmask;
	td->td_pflags |= TDP_OLDMASK;
	SIG_CANTMASK(mask);
	td->td_sigmask = mask;
	signotify(td);
	/*
	 * PCATCH: only a caught signal ends the sleep with a non-zero
	 * error; a zero return is a spurious wakeup, so sleep again.
	 */
	while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	PROC_UNLOCK(p);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 * convention: libc stub passes mask, not pointer, to save a copyin.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigsuspend_args {
	osigset_t mask;
};
#endif
/*
 * osigsuspend: old sigsuspend(2); mask arrives by value (see above).
 *
 * MPSAFE
 */
/* ARGSUSED */
int
osigsuspend(td, uap)
	struct thread *td;
	struct osigsuspend_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t mask;

	PROC_LOCK(p);
	td->td_oldsigmask = td->td_sigmask;
	td->td_pflags |= TDP_OLDMASK;
	OSIG2SIG(uap->mask, mask);
	SIG_CANTMASK(mask);
	/* Only the low (old-style) part of the mask is replaced. */
	SIGSETLO(td->td_sigmask, mask);
	signotify(td);
	/* Sleep until a signal interrupts us (PCATCH); see kern_sigsuspend(). */
	while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
		/* void */;
	PROC_UNLOCK(p);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct	sigstack *nss;
	struct	sigstack *oss;
};
#endif
/*
 * osigstack: 4.3BSD sigstack(2).  The old interface carries no stack
 * size, so ss_size is recorded as 0.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
osigstack(td, uap)
	struct thread *td;
	register struct osigstack_args *uap;
{
	struct sigstack nss, oss;
	int error = 0;

	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	oss.ss_sp = td->td_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		td->td_sigstk.ss_sp = nss.ss_sp;
		td->td_sigstk.ss_size = 0;
		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		td->td_pflags |= TDP_ALTSTACK;
	}
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t	*ss;
	stack_t	*oss;
};
#endif
/*
 * sigaltstack(2): copyin/copyout wrapper around kern_sigaltstack().
 *
 * MPSAFE
 */
/* ARGSUSED */
int
sigaltstack(td, uap)
	struct thread *td;
	register struct sigaltstack_args *uap;
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}

/*
 * Install and/or report the thread's alternate signal stack.
 * Errors: EPERM if currently executing on the alternate stack,
 * EINVAL for unsupported flags, ENOMEM if the new stack is smaller
 * than the ABI's minimum signal stack size.
 */
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		/* Cannot change the stack while running on it. */
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
				return (ENOMEM);

			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
static int
killpg1(td, sig, pgid, all)
	register struct thread *td;
	int sig, pgid, all;
{
	register struct proc *p;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			/* Skip init, system processes, and ourselves. */
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == td->td_proc) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				/* sig == 0 means permission-check only. */
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
	} else {
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			/* pgfind() returns the group locked on success. */
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				/* sig == 0 means permission-check only. */
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pgrp);
	}
	return (nfound ? 0 : ESRCH);
}

#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int	pid;
	int	signum;
};
#endif
/*
 * kill(2): pid > 0 names a single process (zombies included, per
 * POSIX); pid == 0 the caller's process group; pid == -1 broadcast;
 * any other negative pid names the process group -pid.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
kill(td, uap)
	register struct thread *td;
	register struct kill_args *uap;
{
	register struct proc *p;
	int error;

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	if (uap->pid > 0) {
		/* kill single process */
		/* pfind()/zpfind() return the process locked. */
		if ((p = pfind(uap->pid)) == NULL) {
			if ((p = zpfind(uap->pid)) == NULL)
				return (ESRCH);
		}
		error = p_cansignal(td, p, uap->signum);
		if (error == 0 && uap->signum)
			psignal(p, uap->signum);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (uap->pid) {
	case -1:		/* broadcast signal */
		return (killpg1(td, uap->signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(td, uap->signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(td, uap->signum, -uap->pid, 0));
	}
	/* NOTREACHED */
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct okillpg_args {
	int	pgid;
	int	signum;
};
#endif
/*
 * Old killpg(2) syscall: signal a process group via killpg1().
 *
 * MPSAFE
 */
/* ARGSUSED */
int
okillpg(td, uap)
	struct thread *td;
	register struct okillpg_args *uap;
{

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	return (killpg1(td, uap->signum, uap->pgid, 0));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigqueue_args {
	pid_t pid;
	int signum;
	/* union sigval */ void *value;
};
#endif

/*
 * sigqueue(2): queue a signal with an attached value to a single
 * process.  POSIX restricts this interface to single-process
 * targets, so pid <= 0 is rejected.
 */
int
sigqueue(struct thread *td, struct sigqueue_args *uap)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	/*
	 * Specification says sigqueue can only send signal to
	 * single process.
	 */
	if (uap->pid <= 0)
		return (EINVAL);

	/* pfind()/zpfind() return the process locked. */
	if ((p = pfind(uap->pid)) == NULL) {
		if ((p = zpfind(uap->pid)) == NULL)
			return (ESRCH);
	}
	error = p_cansignal(td, p, uap->signum);
	if (error == 0 && uap->signum != 0) {
		ksiginfo_init(&ksi);
		ksi.ksi_signo = uap->signum;
		ksi.ksi_code = SI_QUEUE;
		ksi.ksi_pid = td->td_proc->p_pid;
		ksi.ksi_uid = td->td_ucred->cr_ruid;
		ksi.ksi_value.sigval_ptr = uap->value;
		error = psignal_info(p, &ksi);
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(pgid, sig)
	int pgid, sig;
{
	struct pgrp *pgrp;

	if (pgid != 0) {
		sx_slock(&proctree_lock);
		pgrp = pgfind(pgid);	/* returns the pgrp locked */
		sx_sunlock(&proctree_lock);
		if (pgrp != NULL) {
			pgsignal(pgrp, sig, 0);
			PGRP_UNLOCK(pgrp);
		}
	}
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(pgrp, sig, checkctty)
	struct pgrp *pgrp;
	int sig, checkctty;
{
	register struct proc *p;

	if (pgrp) {
		/* Caller must hold the pgrp lock (see gsignal()). */
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 *
 * MPSAFE
 */
void
trapsignal(struct thread *td, ksiginfo_t *ksi)
{
	struct sigacts *ps;
	struct proc *p;
	int error;
	int sig;
	int code;

	p = td->td_proc;
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	KASSERT(_SIG_VALID(sig), ("invalid signal"));

	if (td->td_pflags & TDP_SA) {
		if (td->td_mailbox == NULL)
			thread_user_enter(td);
		PROC_LOCK(p);
		SIGDELSET(td->td_sigmask, sig);
		mtx_lock_spin(&sched_lock);
		/*
		 * Force scheduling an upcall, so UTS has chance to
		 * process the signal before thread runs again in
		 * userland.
		 */
		if (td->td_upcall)
			td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	} else {
		PROC_LOCK(p);
	}
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(td->td_sigmask, sig)) {
		/* Caught, unmasked, and not traced: deliver directly. */
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		if (!(td->td_pflags & TDP_SA))
			(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
			    ksi, &td->td_sigmask);
		else if (td->td_mailbox == NULL) {
			mtx_unlock(&ps->ps_mtx);
			/* UTS caused a sync signal */
			p->p_code = code;	/* XXX for core dump/debugger */
			p->p_sig = sig;		/* XXX to verify code */
			sigexit(td, sig);
		} else {
			/* SA thread: hand the siginfo to the UTS mailbox. */
			mtx_unlock(&ps->ps_mtx);
			SIGADDSET(td->td_sigmask, sig);
			PROC_UNLOCK(p);
			error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig,
			    sizeof(siginfo_t));
			PROC_LOCK(p);
			/* UTS memory corrupted */
			if (error)
				sigexit(td, SIGSEGV);
			mtx_lock(&ps->ps_mtx);
		}
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
	} else {
		/* Not immediately deliverable: post it the normal way. */
		mtx_unlock(&ps->ps_mtx);
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;		/* XXX to verify code */
		tdsignal(td, sig, ksi, SIGTARGET_TD);
	}
	PROC_UNLOCK(p);
}

/*
 * Select a thread for delivery of a process-directed signal: prefer
 * the current thread if it does not block the signal, otherwise the
 * first thread found that does not block it, falling back to the
 * first thread in the process.
 */
static struct thread *
sigtd(struct proc *p, int sig, int prop)
{
	struct thread *td, *signal_td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Check if current thread can handle the signal without
	 * switching context to another thread.
	 */
	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
		return (curthread);
	signal_td = NULL;
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (!SIGISMEMBER(td->td_sigmask, sig)) {
			signal_td = td;
			break;
		}
	}
	if (signal_td == NULL)
		signal_td = FIRST_THREAD_IN_PROC(p);
	mtx_unlock_spin(&sched_lock);
	return (signal_td);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * MPSAFE
 */
void
psignal(struct proc *p, int sig)
{
	(void) psignal_common(p, sig, NULL);
}

/*
 * Like psignal(), but with an attached ksiginfo; propagates the
 * error from queueing (see sigqueue_add() via do_tdsignal()).
 */
int
psignal_info(struct proc *p, ksiginfo_t *ksi)
{
	return (psignal_common(p, ksi->ksi_signo, ksi));
}

/*
 * Shared backend of psignal() and psignal_info(): pick a delivery
 * thread and hand the signal to tdsignal() as a process-directed
 * (SIGTARGET_P) signal.  Caller holds the proc lock.
 */
static int
psignal_common(struct proc *p, int sig, ksiginfo_t *ksi)
{
	struct thread *td;
	int prop;

	if (!_SIG_VALID(sig))
		panic("psignal(): invalid signal");

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * IEEE Std 1003.1-2001: return success when killing a zombie.
	 */
	if (p->p_state == PRS_ZOMBIE)
		return (0);
	prop = sigprop(sig);

	/*
	 * Find a thread to deliver the signal to.
	 */
	td = sigtd(p, sig, prop);

	return (tdsignal(td, sig, ksi, SIGTARGET_P));
}

/*
 * MPSAFE
 */
int
tdsignal(struct thread *td, int sig, ksiginfo_t *ksi, sigtarget_t target)
{
	sigset_t saved;
	struct proc *p = td->td_proc;
	int ret;

	/*
	 * For SA (KSE) processes, snapshot the pending set so we can
	 * tell afterwards whether it changed and the UTS must be
	 * notified via P_SIGEVENT/wakeup.
	 */
	if (p->p_flag & P_SA)
		saved = p->p_sigqueue.sq_signals;
	ret = do_tdsignal(td, sig, ksi, target);
	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
		if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) {
			/* pending set changed */
			p->p_flag |= P_SIGEVENT;
			wakeup(&p->p_siglist);
		}
	}
	return (ret);
}

/*
 * Deliver a signal to a thread: queue it on the right sigqueue and
 * perform the immediate process-wide side effects (stop, continue,
 * wakeup).  Caller holds the proc lock.  Returns 0 or the error from
 * sigqueue_add().
 */
static int
do_tdsignal(struct thread *td, int sig, ksiginfo_t *ksi, sigtarget_t target)
{
	struct proc *p;
	sig_t action;
	sigqueue_t *sigqueue;
	struct thread *td0;
	int prop;
	struct sigacts *ps;
	int ret = 0;

	if (!_SIG_VALID(sig))
		panic("do_tdsignal(): invalid signal");

	p = td->td_proc;
	ps = p->p_sigacts;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig);

	prop = sigprop(sig);

	/*
	 * If the signal is blocked and not destined for this thread, then
	 * assign it to the process so that we can find it later in the first
	 * thread that unblocks it.  Otherwise, assign it to this thread now.
	 */
	if (target == SIGTARGET_TD) {
		sigqueue = &td->td_sigqueue;
	} else {
		if (!SIGISMEMBER(td->td_sigmask, sig))
			sigqueue = &td->td_sigqueue;
		else
			sigqueue = &p->p_sigqueue;
	}

	/*
	 * If the signal is being ignored,
	 * or process is exiting or thread is exiting,
	 * then we forget about it immediately.
	 * (Note: we don't set SIGCONT in ps_sigignore,
	 * and if it is set to SIG_IGN,
	 * action will be SIG_DFL here.)
	 */
	mtx_lock(&ps->ps_mtx);
	if (SIGISMEMBER(ps->ps_sigignore, sig) ||
	    (p->p_flag & P_WEXIT)) {
		mtx_unlock(&ps->ps_mtx);
		return (ret);
	}
	if (SIGISMEMBER(td->td_sigmask, sig))
		action = SIG_HOLD;
	else if (SIGISMEMBER(ps->ps_sigcatch, sig))
		action = SIG_CATCH;
	else
		action = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);

	if (prop & SA_CONT)
		/* A continue cancels any pending stop signals. */
		sigqueue_delete_stopmask_proc(p);
	else if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if ((prop & SA_TTYSTOP) &&
		    (p->p_pgrp->pg_jobc == 0) &&
		    (action == SIG_DFL))
			return (ret);
		/* A stop cancels any pending SIGCONT. */
		sigqueue_delete_proc(p, SIGCONT);
		p->p_flag &= ~P_CONTINUED;
	}

	ret = sigqueue_add(sigqueue, sig, ksi);
	if (ret != 0)
		return (ret);
	signotify(td);			/* uses schedlock */
	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
		return (ret);
	/*
	 * SIGKILL: Remove procfs STOPEVENTs.
	 */
	if (sig == SIGKILL) {
		/* from procfs_ioctl.c: PIOCBIC */
		p->p_stops = 0;
		/* from procfs_ioctl.c: PIOCCONT */
		p->p_step = 0;
		wakeup(&p->p_step);
	}
	/*
	 * Some signals have a process-wide effect and a per-thread
	 * component.  Most processing occurs when the process next
	 * tries to cross the user boundary, however there are some
	 * times when processing needs to be done immediately, such as
	 * waking up threads so that they can cross the user boundary.
	 * We try do the per-process part here.
	 */
	if (P_SHOULDSTOP(p)) {
		/*
		 * The process is in stopped mode.  All the threads should be
		 * either winding down or already on the suspended queue.
		 */
		if (p->p_flag & P_TRACED) {
			/*
			 * The traced process is already stopped,
			 * so no further action is necessary.
			 * No signal can restart us.
			 */
			goto out;
		}

		if (sig == SIGKILL) {
			/*
			 * SIGKILL sets process running.
			 * It will die elsewhere.
			 * All threads must be restarted.
			 */
			p->p_flag &= ~P_STOPPED_SIG;
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in sigqueue as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * sigqueue.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			p->p_flag &= ~P_STOPPED_SIG;
			p->p_flag |= P_CONTINUED;
			if (action == SIG_DFL) {
				sigqueue_delete(sigqueue, sig);
			} else if (action == SIG_CATCH) {
				/*
				 * The process wants to catch it so it needs
				 * to run at least one thread, but which one?
				 * It would seem that the answer would be to
				 * run an upcall in the next KSE to run, and
				 * deliver the signal that way.  In a NON KSE
				 * process, we need to make sure that the
				 * single thread is runnable asap.
				 * XXXKSE for now however, make them all run.
				 */
				goto runfast;
			}
			/*
			 * The signal is not ignored or caught.
			 */
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again
			 * (If we did the shell could get confused).
			 * Just make sure the signal STOP bit set.
			 */
			p->p_flag |= P_STOPPED_SIG;
			sigqueue_delete(sigqueue, sig);
			goto out;
		}

		/*
		 * All other kinds of signals:
		 * If a thread is sleeping interruptibly, simulate a
		 * wakeup so that when it is continued it will be made
		 * runnable and can look at the signal.  However, don't make
		 * the PROCESS runnable, leave it stopped.
		 * It may run a bit until it hits a thread_suspend_check().
		 */
		mtx_lock_spin(&sched_lock);
		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
			sleepq_abort(td);
		mtx_unlock_spin(&sched_lock);
		goto out;
		/*
		 * Mutexes are short lived. Threads waiting on them will
		 * hit thread_suspend_check() soon.
		 */
	} else if (p->p_state == PRS_NORMAL) {
		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
			mtx_lock_spin(&sched_lock);
			tdsigwakeup(td, sig, action);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}

		MPASS(action == SIG_DFL);

		if (prop & SA_STOP) {
			/* Don't stop a process vforking its parent's space. */
			if (p->p_flag & P_PPWAIT)
				goto out;
			p->p_flag |= P_STOPPED_SIG;
			p->p_xstat = sig;
			p->p_xthread = td;
			mtx_lock_spin(&sched_lock);
			/*
			 * Suspend interruptibly-sleeping siblings now;
			 * everyone else stops at the next AST.
			 */
			FOREACH_THREAD_IN_PROC(p, td0) {
				if (TD_IS_SLEEPING(td0) &&
				    (td0->td_flags & TDF_SINTR) &&
				    !TD_IS_SUSPENDED(td0)) {
					thread_suspend_one(td0);
				} else if (td != td0) {
					td0->td_flags |= TDF_ASTPENDING;
				}
			}
			thread_stopped(p);
			if (p->p_numthreads == p->p_suspcount) {
				mtx_unlock_spin(&sched_lock);
				sigqueue_delete_proc(p, p->p_xstat);
			} else
				mtx_unlock_spin(&sched_lock);
			goto out;
		}
		else
			goto runfast;
		/* NOTREACHED */
	} else {
		/* Not in "NORMAL" state. discard the signal. */
		sigqueue_delete(sigqueue, sig);
		goto out;
	}

	/*
	 * The process is not stopped so we need to apply the signal to all the
	 * running threads.
	 */

runfast:
	mtx_lock_spin(&sched_lock);
	tdsigwakeup(td, sig, action);
	thread_unsuspend(p);
	mtx_unlock_spin(&sched_lock);
out:
	/* If we jump here, sched_lock should not be owned. */
	mtx_assert(&sched_lock, MA_NOTOWNED);
	return (ret);
}

/*
 * The force of a signal has been directed against a single
 * thread.  We need to see what we can do about knocking it
 * out of any sleep it may be in etc.
 */
static void
tdsigwakeup(struct thread *td, int sig, sig_t action)
{
	struct proc *p = td->td_proc;
	register int prop;

	/* Called with both the proc lock and sched_lock held. */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	prop = sigprop(sig);

	/*
	 * Bring the priority of a thread up if we want it to get
	 * killed in this lifetime.
	 */
	if (action == SIG_DFL && (prop & SA_KILL)) {
		if (p->p_nice > 0)
			sched_nice(td->td_proc, 0);
		if (td->td_priority > PUSER)
			sched_prio(td, PUSER);
	}

	if (TD_ON_SLEEPQ(td)) {
		/*
		 * If thread is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((td->td_flags & TDF_SINTR) == 0)
			return;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			/* sigqueue_delete() may sleep; drop the spin lock. */
			mtx_unlock_spin(&sched_lock);
			sigqueue_delete(&p->p_sigqueue, sig);
			/*
			 * It may be on either list in this state.
			 * Remove from both for now.
			 */
			sigqueue_delete(&td->td_sigqueue, sig);
			mtx_lock_spin(&sched_lock);
			return;
		}

		/*
		 * Give low priority threads a better chance to run.
		 */
		if (td->td_priority > PUSER)
			sched_prio(td, PUSER);

		sleepq_abort(td);
	} else {
		/*
		 * Other states do nothing with the signal immediately,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
#ifdef SMP
		if (TD_IS_RUNNING(td) && td != curthread)
			forward_signal(td);
#endif
	}
}

/*
 * Stop the current thread for the debugger: mark the process
 * stopped/traced, suspend interruptibly-sleeping siblings, and park
 * until the tracer detaches or resumes us (TDF_XSIG cleared).
 * Returns td_xsig, which the tracer may have replaced while we were
 * stopped (NOTE(review): replacement happens in the ptrace code --
 * confirm there).
 */
int
ptracestop(struct thread *td, int sig)
{
	struct proc *p = td->td_proc;
	struct thread *td0;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
	    &p->p_mtx.mtx_object, "Stopping for traced signal");

	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_XSIG;
	mtx_unlock_spin(&sched_lock);
	td->td_xsig = sig;
	while ((p->p_flag & P_TRACED) && (td->td_flags & TDF_XSIG)) {
		/* A single-threading exit overrides the stop. */
		if (p->p_flag & P_SINGLE_EXIT) {
			mtx_lock_spin(&sched_lock);
			td->td_flags &= ~TDF_XSIG;
			mtx_unlock_spin(&sched_lock);
			return (sig);
		}
		/*
		 * Just make wait() to work, the last stopped thread
		 * will win.
		 */
		p->p_xstat = sig;
		p->p_xthread = td;
		p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (TD_IS_SLEEPING(td0) &&
			    (td0->td_flags & TDF_SINTR) &&
			    !TD_IS_SUSPENDED(td0)) {
				thread_suspend_one(td0);
			} else if (td != td0) {
				td0->td_flags |= TDF_ASTPENDING;
			}
		}
stopme:
		/* Park here; mi_switch() returns when we are resumed. */
		thread_stopped(p);
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		DROP_GIANT();
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		if (!(p->p_flag & P_TRACED))
			break;
		if (td->td_flags & TDF_DBSUSPEND) {
			if (p->p_flag & P_SINGLE_EXIT)
				break;
			mtx_lock_spin(&sched_lock);
			goto stopme;
		}
	}
	return (td->td_xsig);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.
 * This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in cursig.) The normal call
 * sequence is
 *
 *	while (sig = cursig(curthread))
 *		postsig(sig);
 */
static int
issignal(td)
	struct thread *td;
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t sigpending;
	int sig, prop, newsig;
	struct thread *td0;

	p = td->td_proc;
	ps = p->p_sigacts;
	/* Caller must hold both the sigacts mutex and the proc lock. */
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	for (;;) {
		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);

		sigpending = td->td_sigqueue.sq_signals;
		SIGSETNAND(sigpending, td->td_sigmask);

		/* While the parent vfork()-waits, only stop signals apply. */
		if (p->p_flag & P_PPWAIT)
			SIG_STOPSIGMASK(sigpending);
		if (SIGISEMPTY(sigpending))	/* no signal to send */
			return (0);
		sig = sig_ffs(&sigpending);

		if (p->p_stops & S_SIG) {
			mtx_unlock(&ps->ps_mtx);
			stopevent(p, S_SIG, sig);
			mtx_lock(&ps->ps_mtx);
		}

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
			sigqueue_delete(&td->td_sigqueue, sig);
			if (td->td_pflags & TDP_SA)
				SIGADDSET(td->td_sigmask, sig);
			continue;
		}
		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop.
			 */
			mtx_unlock(&ps->ps_mtx);
			newsig = ptracestop(td, sig);
			mtx_lock(&ps->ps_mtx);

			if (td->td_pflags & TDP_SA)
				SIGADDSET(td->td_sigmask, sig);

			if (sig != newsig) {
				/*
				 * clear old signal.
				 * XXX shrug off debugger, it causes siginfo to
				 * be thrown away.
				 */
				sigqueue_delete(&td->td_sigqueue, sig);

				/*
				 * If parent wants us to take the signal,
				 * then it will leave it in p->p_xstat;
				 * otherwise we just look for signals again.
				 */
				if (newsig == 0)
					continue;
				sig = newsig;

				/*
				 * Put the new signal into td_sigqueue. If the
				 * signal is being masked, look for other signals.
				 */
				SIGADDSET(td->td_sigqueue.sq_signals, sig);
				if (td->td_pflags & TDP_SA)
					SIGDELSET(td->td_sigmask, sig);
				if (SIGISMEMBER(td->td_sigmask, sig))
					continue;
				signotify(td);
			}

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals. This ensures
			 * that p_sig* and p_sigact are consistent.
			 */
			if ((p->p_flag & P_TRACED) == 0)
				continue;
		}

		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {

		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %lu) got signal %d\n",
					(u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal. However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				     prop & SA_TTYSTOP))
					break;	/* == ignore */
				mtx_unlock(&ps->ps_mtx);
				WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
				    &p->p_mtx.mtx_object, "Catching SIGSTOP");
				p->p_flag |= P_STOPPED_SIG;
				p->p_xstat = sig;
				p->p_xthread = td;
				mtx_lock_spin(&sched_lock);
				/* Same suspend-siblings dance as ptracestop(). */
				FOREACH_THREAD_IN_PROC(p, td0) {
					if (TD_IS_SLEEPING(td0) &&
					    (td0->td_flags & TDF_SINTR) &&
					    !TD_IS_SUSPENDED(td0)) {
						thread_suspend_one(td0);
					} else if (td != td0) {
						td0->td_flags |= TDF_ASTPENDING;
					}
				}
				thread_stopped(p);
				thread_suspend_one(td);
				PROC_UNLOCK(p);
				DROP_GIANT();
				mi_switch(SW_INVOL, NULL);
				mtx_unlock_spin(&sched_lock);
				PICKUP_GIANT();
				PROC_LOCK(p);
				mtx_lock(&ps->ps_mtx);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				return (sig);
			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return (sig);
		}
		sigqueue_delete(&td->td_sigqueue, sig);	/* take the signal! */
	}
	/* NOTREACHED */
}

/*
 * Called when a stopping thread suspends: if every thread of a
 * P_STOPPED_SIG process is now suspended (counting the calling thread,
 * which is about to suspend), notify the parent.
 *
 * MPSAFE
 */
void
thread_stopped(struct proc *p)
{
	struct proc *p1 = curthread->td_proc;
	struct sigacts *ps;
	int n;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	n = p->p_suspcount;
	/* The current thread counts as suspended: it is on its way there. */
	if (p == p1)
		n++;
	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
		/* Drop sched_lock before taking sleepable locks below. */
		mtx_unlock_spin(&sched_lock);
		p->p_flag &= ~P_WAITED;
		PROC_LOCK(p->p_pptr);
		/*
		 * Wake up parent sleeping in kern_wait(), also send
		 * SIGCHLD to parent, but SIGCHLD does not guarantee
		 * that parent will awake, because parent may masked
		 * the signal.
		 */
		p->p_pptr->p_flag |= P_STATCHILD;
		wakeup(p->p_pptr);
		ps = p->p_pptr->p_sigacts;
		mtx_lock(&ps->ps_mtx);
		if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
			mtx_unlock(&ps->ps_mtx);
			psignal(p->p_pptr, SIGCHLD);
		} else
			mtx_unlock(&ps->ps_mtx);
		PROC_UNLOCK(p->p_pptr);
		/* Re-acquire sched_lock for the caller. */
		mtx_lock_spin(&sched_lock);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(sig)
	register int sig;
{
	struct thread *td = curthread;
	register struct proc *p = td->td_proc;
	struct sigacts *ps;
	sig_t action;
	ksiginfo_t ksi;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	ksiginfo_init(&ksi);
	sigqueue_get(&td->td_sigqueue, sig, &ksi);
	ksi.ksi_signo = sig;
	/* POSIX timer signal: let the timer code fill in/recycle the info. */
	if (ksi.ksi_code == SI_TIMER)
		itimer_accept(p, ksi.ksi_timerid, &ksi);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(td, KTR_PSIG))
		ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
		    &td->td_oldsigmask : &td->td_sigmask, 0);
#endif
	if (p->p_stops & S_SIG) {
		mtx_unlock(&ps->ps_mtx);
		stopevent(p, S_SIG, sig);
		mtx_lock(&ps->ps_mtx);
	}

	if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		mtx_unlock(&ps->ps_mtx);
		sigexit(td, sig);
		/* NOTREACHED */
	} else {
		/* SA threads: SIGKILL is fatal even with an upcall handler. */
		if (td->td_pflags & TDP_SA) {
			if (sig == SIGKILL) {
				mtx_unlock(&ps->ps_mtx);
				sigexit(td, sig);
			}
		}

		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
		    ("postsig action"));
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (td->td_pflags & TDP_OLDMASK) {
			returnmask = td->td_oldsigmask;
			td->td_pflags &= ~TDP_OLDMASK;
		} else
			returnmask = td->td_sigmask;

		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);

		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		p->p_stats->p_ru.ru_nsignals++;
		/*
		 * NOTE(review): 'code' is computed (and p_sig/p_code
		 * consumed) but never used afterwards here -- looks
		 * vestigial; confirm before removing.
		 */
		if (p->p_sig != sig) {
			code = 0;
		} else {
			code = p->p_code;
			p->p_code = 0;
			p->p_sig = 0;
		}
		if (td->td_pflags & TDP_SA)
			thread_signal_add(curthread, &ksi);
		else
			(*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
	}
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(p, why)
	struct proc *p;
	char *why;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
		p, p->p_pid, p->p_comm);
	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
		p->p_ucred ? p->p_ucred->cr_uid : -1, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 *
 * MPSAFE
 */
void
sigexit(td, sig)
	struct thread *td;
	int sig;
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_acflag |= AXSIG;
	/*
	 * We must be single-threading to generate a core dump.  This
	 * ensures that the registers in the core file are up-to-date.
	 * Also, the ELF dump handler assumes that the thread list doesn't
	 * change out from under it.
	 *
	 * XXX If another thread attempts to single-thread before us
	 *     (e.g. via fork()), we won't get a dump at all.
	 */
	if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) {
		p->p_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 * Note that coredump() drops proc lock.
		 */
		if (coredump(td) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    td->td_ucred ? td->td_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	} else
		PROC_UNLOCK(p);
	exit1(td, W_EXITCODE(0, sig));
	/* NOTREACHED */
}

static char corefilename[MAXPATHLEN] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
	      sizeof(corefilename), "process corefile name format string");

/*
 * expand_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; they can be disabled completely
 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
2838 */ 2839 2840 static char * 2841 expand_name(name, uid, pid) 2842 const char *name; 2843 uid_t uid; 2844 pid_t pid; 2845 { 2846 const char *format, *appendstr; 2847 char *temp; 2848 char buf[11]; /* Buffer for pid/uid -- max 4B */ 2849 size_t i, l, n; 2850 2851 format = corefilename; 2852 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO); 2853 if (temp == NULL) 2854 return (NULL); 2855 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) { 2856 switch (format[i]) { 2857 case '%': /* Format character */ 2858 i++; 2859 switch (format[i]) { 2860 case '%': 2861 appendstr = "%"; 2862 break; 2863 case 'N': /* process name */ 2864 appendstr = name; 2865 break; 2866 case 'P': /* process id */ 2867 sprintf(buf, "%u", pid); 2868 appendstr = buf; 2869 break; 2870 case 'U': /* user id */ 2871 sprintf(buf, "%u", uid); 2872 appendstr = buf; 2873 break; 2874 default: 2875 appendstr = ""; 2876 log(LOG_ERR, 2877 "Unknown format character %c in `%s'\n", 2878 format[i], format); 2879 } 2880 l = strlen(appendstr); 2881 if ((n + l) >= MAXPATHLEN) 2882 goto toolong; 2883 memcpy(temp + n, appendstr, l); 2884 n += l; 2885 break; 2886 default: 2887 temp[n++] = format[i]; 2888 } 2889 } 2890 if (format[i] != '\0') 2891 goto toolong; 2892 return (temp); 2893 toolong: 2894 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n", 2895 (long)pid, name, (u_long)uid); 2896 free(temp, M_TEMP); 2897 return (NULL); 2898 } 2899 2900 /* 2901 * Dump a process' core. The main routine does some 2902 * policy checking, and creates the name of the coredump; 2903 * then it passes on a vnode and a size limit to the process-specific 2904 * coredump routine if there is one; if there _is not_ one, it returns 2905 * ENOSYS; otherwise it returns the error from the process-specific routine. 
2906 */ 2907 2908 static int 2909 coredump(struct thread *td) 2910 { 2911 struct proc *p = td->td_proc; 2912 register struct vnode *vp; 2913 register struct ucred *cred = td->td_ucred; 2914 struct flock lf; 2915 struct nameidata nd; 2916 struct vattr vattr; 2917 int error, error1, flags, locked; 2918 struct mount *mp; 2919 char *name; /* name of corefile */ 2920 off_t limit; 2921 2922 PROC_LOCK_ASSERT(p, MA_OWNED); 2923 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td); 2924 _STOPEVENT(p, S_CORE, 0); 2925 2926 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) { 2927 PROC_UNLOCK(p); 2928 return (EFAULT); 2929 } 2930 2931 /* 2932 * Note that the bulk of limit checking is done after 2933 * the corefile is created. The exception is if the limit 2934 * for corefiles is 0, in which case we don't bother 2935 * creating the corefile at all. This layout means that 2936 * a corefile is truncated instead of not being created, 2937 * if it is larger than the limit. 2938 */ 2939 limit = (off_t)lim_cur(p, RLIMIT_CORE); 2940 PROC_UNLOCK(p); 2941 if (limit == 0) 2942 return (EFBIG); 2943 2944 mtx_lock(&Giant); 2945 restart: 2946 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid); 2947 if (name == NULL) { 2948 mtx_unlock(&Giant); 2949 return (EINVAL); 2950 } 2951 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */ 2952 flags = O_CREAT | FWRITE | O_NOFOLLOW; 2953 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR, -1); 2954 free(name, M_TEMP); 2955 if (error) { 2956 mtx_unlock(&Giant); 2957 return (error); 2958 } 2959 NDFREE(&nd, NDF_ONLY_PNBUF); 2960 vp = nd.ni_vp; 2961 2962 /* Don't dump to non-regular files or files with links. 
*/ 2963 if (vp->v_type != VREG || 2964 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) { 2965 VOP_UNLOCK(vp, 0, td); 2966 error = EFAULT; 2967 goto out; 2968 } 2969 2970 VOP_UNLOCK(vp, 0, td); 2971 lf.l_whence = SEEK_SET; 2972 lf.l_start = 0; 2973 lf.l_len = 0; 2974 lf.l_type = F_WRLCK; 2975 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0); 2976 2977 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2978 lf.l_type = F_UNLCK; 2979 if (locked) 2980 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); 2981 if ((error = vn_close(vp, FWRITE, cred, td)) != 0) 2982 return (error); 2983 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0) 2984 return (error); 2985 goto restart; 2986 } 2987 2988 VATTR_NULL(&vattr); 2989 vattr.va_size = 0; 2990 if (set_core_nodump_flag) 2991 vattr.va_flags = UF_NODUMP; 2992 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 2993 VOP_LEASE(vp, td, cred, LEASE_WRITE); 2994 VOP_SETATTR(vp, &vattr, cred, td); 2995 VOP_UNLOCK(vp, 0, td); 2996 PROC_LOCK(p); 2997 p->p_acflag |= ACORE; 2998 PROC_UNLOCK(p); 2999 3000 error = p->p_sysent->sv_coredump ? 3001 p->p_sysent->sv_coredump(td, vp, limit) : 3002 ENOSYS; 3003 3004 if (locked) { 3005 lf.l_type = F_UNLCK; 3006 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); 3007 } 3008 vn_finished_write(mp); 3009 out: 3010 error1 = vn_close(vp, FWRITE, cred, td); 3011 mtx_unlock(&Giant); 3012 if (error == 0) 3013 error = error1; 3014 return (error); 3015 } 3016 3017 /* 3018 * Nonexistent system call-- signal process (may want to handle it). 3019 * Flag error in case process won't see signal immediately (blocked or ignored). 
3020 */ 3021 #ifndef _SYS_SYSPROTO_H_ 3022 struct nosys_args { 3023 int dummy; 3024 }; 3025 #endif 3026 /* 3027 * MPSAFE 3028 */ 3029 /* ARGSUSED */ 3030 int 3031 nosys(td, args) 3032 struct thread *td; 3033 struct nosys_args *args; 3034 { 3035 struct proc *p = td->td_proc; 3036 3037 PROC_LOCK(p); 3038 psignal(p, SIGSYS); 3039 PROC_UNLOCK(p); 3040 return (ENOSYS); 3041 } 3042 3043 /* 3044 * Send a SIGIO or SIGURG signal to a process or process group using 3045 * stored credentials rather than those of the current process. 3046 */ 3047 void 3048 pgsigio(sigiop, sig, checkctty) 3049 struct sigio **sigiop; 3050 int sig, checkctty; 3051 { 3052 struct sigio *sigio; 3053 3054 SIGIO_LOCK(); 3055 sigio = *sigiop; 3056 if (sigio == NULL) { 3057 SIGIO_UNLOCK(); 3058 return; 3059 } 3060 if (sigio->sio_pgid > 0) { 3061 PROC_LOCK(sigio->sio_proc); 3062 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred)) 3063 psignal(sigio->sio_proc, sig); 3064 PROC_UNLOCK(sigio->sio_proc); 3065 } else if (sigio->sio_pgid < 0) { 3066 struct proc *p; 3067 3068 PGRP_LOCK(sigio->sio_pgrp); 3069 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) { 3070 PROC_LOCK(p); 3071 if (CANSIGIO(sigio->sio_ucred, p->p_ucred) && 3072 (checkctty == 0 || (p->p_flag & P_CONTROLT))) 3073 psignal(p, sig); 3074 PROC_UNLOCK(p); 3075 } 3076 PGRP_UNLOCK(sigio->sio_pgrp); 3077 } 3078 SIGIO_UNLOCK(); 3079 } 3080 3081 static int 3082 filt_sigattach(struct knote *kn) 3083 { 3084 struct proc *p = curproc; 3085 3086 kn->kn_ptr.p_proc = p; 3087 kn->kn_flags |= EV_CLEAR; /* automatically set */ 3088 3089 knlist_add(&p->p_klist, kn, 0); 3090 3091 return (0); 3092 } 3093 3094 static void 3095 filt_sigdetach(struct knote *kn) 3096 { 3097 struct proc *p = kn->kn_ptr.p_proc; 3098 3099 knlist_remove(&p->p_klist, kn, 0); 3100 } 3101 3102 /* 3103 * signal knotes are shared with proc knotes, so we apply a mask to 3104 * the hint in order to differentiate them from process hints. 
This 3105 * could be avoided by using a signal-specific knote list, but probably 3106 * isn't worth the trouble. 3107 */ 3108 static int 3109 filt_signal(struct knote *kn, long hint) 3110 { 3111 3112 if (hint & NOTE_SIGNAL) { 3113 hint &= ~NOTE_SIGNAL; 3114 3115 if (kn->kn_id == hint) 3116 kn->kn_data++; 3117 } 3118 return (kn->kn_data != 0); 3119 } 3120 3121 struct sigacts * 3122 sigacts_alloc(void) 3123 { 3124 struct sigacts *ps; 3125 3126 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO); 3127 ps->ps_refcnt = 1; 3128 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF); 3129 return (ps); 3130 } 3131 3132 void 3133 sigacts_free(struct sigacts *ps) 3134 { 3135 3136 mtx_lock(&ps->ps_mtx); 3137 ps->ps_refcnt--; 3138 if (ps->ps_refcnt == 0) { 3139 mtx_destroy(&ps->ps_mtx); 3140 free(ps, M_SUBPROC); 3141 } else 3142 mtx_unlock(&ps->ps_mtx); 3143 } 3144 3145 struct sigacts * 3146 sigacts_hold(struct sigacts *ps) 3147 { 3148 mtx_lock(&ps->ps_mtx); 3149 ps->ps_refcnt++; 3150 mtx_unlock(&ps->ps_mtx); 3151 return (ps); 3152 } 3153 3154 void 3155 sigacts_copy(struct sigacts *dest, struct sigacts *src) 3156 { 3157 3158 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest")); 3159 mtx_lock(&src->ps_mtx); 3160 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt)); 3161 mtx_unlock(&src->ps_mtx); 3162 } 3163 3164 int 3165 sigacts_shared(struct sigacts *ps) 3166 { 3167 int shared; 3168 3169 mtx_lock(&ps->ps_mtx); 3170 shared = ps->ps_refcnt > 1; 3171 mtx_unlock(&ps->ps_mtx); 3172 return (shared); 3173 } 3174