1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_compat.h" 41 #include "opt_ktrace.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/signalvar.h> 46 #include <sys/vnode.h> 47 #include <sys/acct.h> 48 #include <sys/condvar.h> 49 #include <sys/event.h> 50 #include <sys/fcntl.h> 51 #include <sys/kernel.h> 52 #include <sys/kse.h> 53 #include <sys/ktr.h> 54 #include <sys/ktrace.h> 55 #include <sys/lock.h> 56 #include <sys/malloc.h> 57 #include <sys/mutex.h> 58 #include <sys/namei.h> 59 #include <sys/proc.h> 60 #include <sys/pioctl.h> 61 #include <sys/resourcevar.h> 62 #include <sys/sleepqueue.h> 63 #include <sys/smp.h> 64 #include <sys/stat.h> 65 #include <sys/sx.h> 66 #include <sys/syscallsubr.h> 67 #include <sys/sysctl.h> 68 #include <sys/sysent.h> 69 #include <sys/syslog.h> 70 #include <sys/sysproto.h> 71 #include <sys/unistd.h> 72 #include <sys/wait.h> 73 74 #include <machine/cpu.h> 75 76 #if defined (__alpha__) && !defined(COMPAT_43) 77 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)" 78 #endif 79 80 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. 
 */

/* Forward declarations for helpers defined later in this file. */
static int	coredump(struct thread *);
static char	*expand_name(const char *, uid_t, pid_t);
static int	killpg1(struct thread *td, int sig, int pgid, int all);
static int	issignal(struct thread *p);
static int	sigprop(int sig);
static void	stop(struct proc *);
static void	tdsigwakeup(struct thread *td, int sig, sig_t action);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, int prop);
static int	kern_sigtimedwait(struct thread *td, sigset_t set,
			siginfo_t *info, struct timespec *timeout);
static void	do_tdsignal(struct thread *td, int sig, sigtarget_t target);

/* kqueue(2) filter hooks used for EVFILT_SIGNAL knotes. */
struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
    &sugid_coredump, 0, "Enable coredumping set user/group ID processes");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
	&do_coredump, 0, "Enable/Disable coredumps");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SA_KILL		0x01		/* terminates process by default */
#define	SA_CORE		0x02		/* ditto and coredumps */
#define	SA_STOP		0x04		/* suspend process */
#define	SA_TTYSTOP	0x08		/* ditto, from tty */
#define	SA_IGNORE	0x10		/* ignore by default */
#define	SA_CONT		0x20		/* continue if suspended */
#define	SA_CANTMASK	0x40		/* non-maskable, catchable */
#define	SA_PROC		0x80		/* deliverable to any thread */

/* Indexed by _SIG_IDX(sig); one SA_* property word per signal. */
static int sigproptbl[NSIG] = {
        SA_KILL|SA_PROC,		/* SIGHUP */
        SA_KILL|SA_PROC,		/* SIGINT */
        SA_KILL|SA_CORE|SA_PROC,	/* SIGQUIT */
        SA_KILL|SA_CORE,		/* SIGILL */
        SA_KILL|SA_CORE,		/* SIGTRAP */
        SA_KILL|SA_CORE,		/* SIGABRT */
        SA_KILL|SA_CORE|SA_PROC,	/* SIGEMT */
        SA_KILL|SA_CORE,		/* SIGFPE */
        SA_KILL|SA_PROC,		/* SIGKILL */
        SA_KILL|SA_CORE,		/* SIGBUS */
        SA_KILL|SA_CORE,		/* SIGSEGV */
        SA_KILL|SA_CORE,		/* SIGSYS */
        SA_KILL|SA_PROC,		/* SIGPIPE */
        SA_KILL|SA_PROC,		/* SIGALRM */
        SA_KILL|SA_PROC,		/* SIGTERM */
        SA_IGNORE|SA_PROC,		/* SIGURG */
        SA_STOP|SA_PROC,		/* SIGSTOP */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTSTP */
        SA_IGNORE|SA_CONT|SA_PROC,	/* SIGCONT */
        SA_IGNORE|SA_PROC,		/* SIGCHLD */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTIN */
        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTOU */
        SA_IGNORE|SA_PROC,		/* SIGIO */
        SA_KILL,			/* SIGXCPU */
        SA_KILL,			/* SIGXFSZ */
        SA_KILL|SA_PROC,		/* SIGVTALRM */
        SA_KILL|SA_PROC,		/* SIGPROF */
        SA_IGNORE|SA_PROC,		/* SIGWINCH */
        SA_IGNORE|SA_PROC,		/* SIGINFO */
        SA_KILL|SA_PROC,		/* SIGUSR1 */
        SA_KILL|SA_PROC,		/* SIGUSR2 */
};

/*
 * Determine signal that should be delivered to process p, the current
 * process, 0 if none.  If there is a pending stop signal with default
 * action, the process stops in issignal().
 * XXXKSE   the check for a pending stop is not done under KSE
 *
 * MP SAFE.
 */
int
cursig(struct thread *td)
{
	/* Caller must hold the proc lock and ps_mtx, and not sched_lock. */
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	return (SIGPENDING(td) ? issignal(td) : 0);
}

/*
 * Arrange for ast() to handle unmasked pending signals on return to user
 * mode.  This must be called whenever a signal is added to td_siglist or
 * unmasked in td_sigmask.
 */
void
signotify(struct thread *td)
{
	struct proc *p;
	sigset_t set, saved;

	p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If our mask changed we may have to move signal that were
	 * previously masked by all threads to our siglist.
	 */
	set = p->p_siglist;
	if (p->p_flag & P_SA)
		saved = p->p_siglist;
	/* Signals in "set" are pending on the process but unmasked by us. */
	SIGSETNAND(set, td->td_sigmask);
	SIGSETNAND(p->p_siglist, set);
	SIGSETOR(td->td_siglist, set);

	if (SIGPENDING(td)) {
		/* td_flags is protected by sched_lock. */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
	}
	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
		if (!SIGSETEQ(saved, p->p_siglist)) {
			/* pending set changed */
			p->p_flag |= P_SIGEVENT;
			wakeup(&p->p_siglist);
		}
	}
}

/*
 * Return non-zero if stack pointer "sp" lies on the current thread's
 * alternate signal stack (or, under COMPAT_43, if SS_ONSTACK is set).
 */
int
sigonstack(size_t sp)
{
	struct thread *td = curthread;

	return ((td->td_pflags & TDP_ALTSTACK) ?
#if defined(COMPAT_43)
	    ((td->td_sigstk.ss_size == 0) ?
		(td->td_sigstk.ss_flags & SS_ONSTACK) :
		((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
#else
	    ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
#endif
	    : 0);
}

/*
 * Return the SA_* property word for signal "sig", or 0 if out of range.
 */
static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < NSIG)
		return (sigproptbl[_SIG_IDX(sig)]);
	return (0);
}

/*
 * Find the first signal set in "set"; returns the signal number
 * (1-based) or 0 if the set is empty.
 */
int
sig_ffs(sigset_t *set)
{
	int i;

	for (i = 0; i < _SIG_WORDS; i++)
		if (set->__bits[i])
			return (ffs(set->__bits[i]) + (i * 32));
	return (0);
}

/*
 * kern_sigaction
 * sigaction
 * freebsd4_sigaction
 * osigaction
 *
 * MPSAFE
 */
int
kern_sigaction(td, sig, act, oact, flags)
	struct thread *td;
	register int sig;
	struct sigaction *act, *oact;
	int flags;
{
	struct sigacts *ps;
	struct thread *td0;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if (oact) {
		/* Report the current disposition before any change. */
		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		oact->sa_flags = 0;
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig))
			oact->sa_flags |= SA_SIGINFO;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		/* SIGKILL and SIGSTOP may not be caught or ignored. */
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */

		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (act->sa_flags & SA_SIGINFO) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!(act->sa_flags & SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (act->sa_flags & SA_ONSTACK)
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (act->sa_flags & SA_RESETHAND)
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (act->sa_flags & SA_NODEFER)
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore. However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SA_IGNORE &&
		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			if ((p->p_flag & P_SA) &&
			    SIGISMEMBER(p->p_siglist, sig)) {
				p->p_flag |= P_SIGEVENT;
				wakeup(&p->p_siglist);
			}
			/* never to be seen again */
			SIGDELSET(p->p_siglist, sig);
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(p, td0)
				SIGDELSET(td0->td_siglist, sig);
			mtx_unlock_spin(&sched_lock);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		/* Track which handlers want the FreeBSD 4 sigreturn ABI. */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		/* Track which handlers were installed via the osigset ABI. */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
/*
 * MPSAFE
 */
int
sigaction(td, uap)
	struct thread *td;
	register struct sigaction_args *uap;
{
	struct sigaction act, oact;
	register struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}

#ifdef COMPAT_FREEBSD4
#ifndef _SYS_SYSPROTO_H_
struct freebsd4_sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
/*
 * MPSAFE
 */
int
freebsd4_sigaction(td, uap)
	struct thread *td;
	register struct freebsd4_sigaction_args *uap;
{
	struct sigaction act, oact;
	register struct sigaction *actp, *oactp;
	int error;


	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}
#endif	/* COMPAT_FREEBSD4 */

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigaction_args {
	int	signum;
	struct	osigaction *nsa;
	struct	osigaction *osa;
};
#endif
/*
 * MPSAFE
 */
int
osigaction(td, uap)
	struct thread *td;
	register struct osigaction_args *uap;
{
	struct osigaction sa;
	struct sigaction nsa, osa;
	register struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);

	nsap = (uap->nsa != NULL) ? &nsa : NULL;
	osap = (uap->osa != NULL) ? &osa : NULL;

	if (nsap) {
		/* Translate the old osigaction layout to struct sigaction. */
		error = copyin(uap->nsa, &sa, sizeof(sa));
		if (error)
			return (error);
		nsap->sa_handler = sa.sa_handler;
		nsap->sa_flags = sa.sa_flags;
		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		sa.sa_handler = osap->sa_handler;
		sa.sa_flags = osap->sa_flags;
		SIG2OSIG(osap->sa_mask, sa.sa_mask);
		error = copyout(&sa, uap->osa, sizeof(sa));
	}
	return (error);
}

#if !defined(__i386__) && !defined(__alpha__)
/* Avoid replicating the same stub everywhere */
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args *uap;
{

	return (nosys(td, (struct nosys_args *)uap));
}
#endif
#endif /* COMPAT_43 */

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(p)
	struct proc *p;
{
	register int i;
	struct sigacts *ps;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/* sigprop() returns 0 for out-of-range i, so i == NSIG is benign. */
	for (i = 1; i <= NSIG; i++)
		if (sigprop(i) & SA_IGNORE && i != SIGCONT)
			SIGADDSET(ps->ps_sigignore, i);
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int sig;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	td = FIRST_THREAD_IN_PROC(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	while (SIGNOTEMPTY(ps->ps_sigcatch)) {
		sig = sig_ffs(&ps->ps_sigcatch);
		SIGDELSET(ps->ps_sigcatch, sig);
		if (sigprop(sig) & SA_IGNORE) {
			if (sig != SIGCONT)
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(p->p_siglist, sig);
			/*
			 * There is only one thread at this point.
			 */
			SIGDELSET(td->td_siglist, sig);
		}
		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset no zombies if child dies flag as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}

/*
 * kern_sigprocmask()
 *
 *	Manipulate signal mask.  "old" selects the historical osigset
 *	semantics for SIG_SETMASK (only the low word is replaced).
 */
int
kern_sigprocmask(td, how, set, oset, old)
	struct thread *td;
	int how;
	sigset_t *set, *oset;
	int old;
{
	int error;

	PROC_LOCK(td->td_proc);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			/* Blocking cannot unmask anything: no signotify. */
			SIG_CANTMASK(*set);
			SIGSETOR(td->td_sigmask, *set);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			break;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			if (old)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			signotify(td);
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	PROC_UNLOCK(td->td_proc);
	return (error);
}

/*
 * sigprocmask() - MP SAFE
 */

#ifndef _SYS_SYSPROTO_H_
struct sigprocmask_args {
	int	how;
	const sigset_t *set;
	sigset_t *oset;
};
#endif
int
sigprocmask(td, uap)
	register struct thread *td;
	struct sigprocmask_args *uap;
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * osigprocmask() - MP SAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct osigprocmask_args {
	int	how;
	osigset_t mask;
};
#endif
int
osigprocmask(td, uap)
	register struct thread *td;
	struct osigprocmask_args *uap;
{
	sigset_t set, oset;
	int error;

	OSIG2SIG(uap->mask, set);
	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
	/* Old ABI returns the previous mask in the return value. */
	SIG2OSIG(oset, td->td_retval[0]);
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
/* Argument structure for sigpending(), defined further below. */
struct sigpending_args {
	sigset_t	*set;
};
#endif
/*
 * MPSAFE
 */
int
sigwait(struct thread *td, struct sigwait_args *uap)
{
	siginfo_t info;
	sigset_t set;
	int error;

	/* sigwait(2) reports errors via the return value, not errno. */
	error = copyin(uap->set, &set, sizeof(set));
	if (error) {
		td->td_retval[0] = error;
		return (0);
	}

	error = kern_sigtimedwait(td, set, &info, NULL);
	if (error) {
		if (error == ERESTART)
			return (error);
		td->td_retval[0] = error;
		return (0);
	}

	error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
	/* Repost if we got an error. */
	if (error && info.si_signo) {
		PROC_LOCK(td->td_proc);
		tdsignal(td, info.si_signo, SIGTARGET_TD);
		PROC_UNLOCK(td->td_proc);
	}
	td->td_retval[0] = error;
	return (0);
}
/*
 * MPSAFE
 */
int
sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	siginfo_t info;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		timeout = &ts;
	} else
		timeout = NULL;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &info, timeout);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&info, uap->info, sizeof(info));
	/* Repost if we got an error. */
	if (error && info.si_signo) {
		PROC_LOCK(td->td_proc);
		tdsignal(td, info.si_signo, SIGTARGET_TD);
		PROC_UNLOCK(td->td_proc);
	} else {
		td->td_retval[0] = info.si_signo;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
{
	siginfo_t info;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &info, NULL);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&info, uap->info, sizeof(info));
	/* Repost if we got an error. */
	if (error && info.si_signo) {
		PROC_LOCK(td->td_proc);
		tdsignal(td, info.si_signo, SIGTARGET_TD);
		PROC_UNLOCK(td->td_proc);
	} else {
		td->td_retval[0] = info.si_signo;
	}
	return (error);
}

/*
 * Common backend for sigwait/sigtimedwait/sigwaitinfo: sleep until one
 * of the signals in "waitset" is pending, accept it, and fill in *info.
 */
static int
kern_sigtimedwait(struct thread *td, sigset_t waitset, siginfo_t *info,
	struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t savedmask, sigset;
	struct proc *p;
	int error;
	int sig;
	int hz;
	int i;

	p = td->td_proc;
	error = 0;
	sig = 0;
	SIG_CANTMASK(waitset);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	savedmask = td->td_sigmask;

again:
	for (i = 1; i <= _SIG_MAXSIG; ++i) {
		if (!SIGISMEMBER(waitset, i))
			continue;
		if (SIGISMEMBER(td->td_siglist, i)) {
			/*
			 * Temporarily mask everything but signal i so
			 * that cursig() selects it; i = 0 restarts the
			 * scan from the beginning afterwards.
			 */
			SIGFILLSET(td->td_sigmask);
			SIG_CANTMASK(td->td_sigmask);
			SIGDELSET(td->td_sigmask, i);
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			i = 0;
			mtx_unlock(&ps->ps_mtx);
		} else if (SIGISMEMBER(p->p_siglist, i)) {
			if (p->p_flag & P_SA) {
				p->p_flag |= P_SIGEVENT;
				wakeup(&p->p_siglist);
			}
			/* Move the signal from the process to this thread. */
			SIGDELSET(p->p_siglist, i);
			SIGADDSET(td->td_siglist, i);
			SIGFILLSET(td->td_sigmask);
			SIG_CANTMASK(td->td_sigmask);
			SIGDELSET(td->td_sigmask, i);
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			i = 0;
			mtx_unlock(&ps->ps_mtx);
		}
		if (sig) {
			td->td_sigmask = savedmask;
			signotify(td);
			goto out;
		}
	}
	if (error)
		goto out;

	td->td_sigmask = savedmask;
	signotify(td);
	sigset = td->td_siglist;
	SIGSETOR(sigset, p->p_siglist);
	SIGSETAND(sigset, waitset);
	if (!SIGISEMPTY(sigset))
		goto again;

	/*
	 * POSIX says this must be checked after looking for pending
	 * signals.
	 */
	if (timeout) {
		struct timeval tv;

		/*
		 * NOTE(review): POSIX requires tv_nsec in [0, 999999999];
		 * this accepts tv_nsec == 1000000000 — presumably the
		 * comparison should be ">= 1000000000".  Confirm before
		 * changing.
		 */
		if (timeout->tv_nsec < 0 || timeout->tv_nsec > 1000000000) {
			error = EINVAL;
			goto out;
		}
		if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
			error = EAGAIN;
			goto out;
		}
		TIMESPEC_TO_TIMEVAL(&tv, timeout);
		hz = tvtohz(&tv);
	} else
		hz = 0;

	td->td_waitset = &waitset;
	error = msleep(&ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz);
	td->td_waitset = NULL;
	if (error == 0) /* surplus wakeup ? */
		error = EINTR;
	goto again;

out:
	if (sig) {
		sig_t action;

		/* A signal was accepted: clear any sleep error. */
		error = 0;
		mtx_lock(&ps->ps_mtx);
		action = ps->ps_sigact[_SIG_IDX(sig)];
		mtx_unlock(&ps->ps_mtx);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG))
			ktrpsig(sig, action, &td->td_sigmask, 0);
#endif
		_STOPEVENT(p, S_SIG, sig);

		SIGDELSET(td->td_siglist, sig);
		info->si_signo = sig;
		info->si_code = 0;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * MPSAFE
 */
int
sigpending(td, uap)
	struct thread *td;
	struct sigpending_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t siglist;

	PROC_LOCK(p);
	/* Report the union of process-wide and thread-private pending sets. */
	siglist = p->p_siglist;
	SIGSETOR(siglist, td->td_siglist);
	PROC_UNLOCK(p);
	return (copyout(&siglist, uap->set, sizeof(sigset_t)));
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
/*
 * MPSAFE
 */
int
osigpending(td, uap)
	struct thread *td;
	struct osigpending_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t siglist;

	PROC_LOCK(p);
	siglist = p->p_siglist;
	SIGSETOR(siglist, td->td_siglist);
	PROC_UNLOCK(p);
	SIG2OSIG(siglist, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
/*
 * Generalized interface signal handler, 4.3-compatible.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigvec_args {
	int	signum;
	struct	sigvec *nsv;
	struct	sigvec *osv;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
osigvec(td, uap)
	struct thread *td;
	register struct osigvec_args *uap;
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	register struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		/* Translate struct sigvec to struct sigaction. */
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		vec.sv_flags &= ~SA_NOCLDWAIT;
		vec.sv_flags ^= SA_RESTART;
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct osigblock_args {
	int	mask;
};
#endif
/*
 * MPSAFE
 */
int
osigblock(td, uap)
	register struct thread *td;
	struct osigblock_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t set;

	OSIG2SIG(uap->mask, set);
	SIG_CANTMASK(set);
	PROC_LOCK(p);
	/* Old ABI: return the previous mask in the return value. */
	SIG2OSIG(td->td_sigmask, td->td_retval[0]);
	SIGSETOR(td->td_sigmask, set);
	PROC_UNLOCK(p);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct osigsetmask_args {
	int	mask;
};
#endif
/*
 * MPSAFE
 */
int
osigsetmask(td, uap)
	struct thread *td;
	struct osigsetmask_args *uap;
{
	struct proc *p = td->td_proc;
	sigset_t set;

	OSIG2SIG(uap->mask, set);
	SIG_CANTMASK(set);
	PROC_LOCK(p);
	SIG2OSIG(td->td_sigmask, td->td_retval[0]);
	/* Only the low (osigset) word of the mask is replaced. */
	SIGSETLO(td->td_sigmask, set);
	signotify(td);
	PROC_UNLOCK(p);
	return (0);
}
#endif /* COMPAT_43 */

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.
 ***** XXXKSE this doesn't make sense under KSE.
 ***** Do we suspend the thread or all threads in the process?
 ***** How do we suspend threads running NOW on another processor?
 */
#ifndef _SYS_SYSPROTO_H_
struct sigsuspend_args {
	const sigset_t *sigmask;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
sigsuspend(td, uap)
	struct thread *td;
	struct sigsuspend_args *uap;
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);
	return (kern_sigsuspend(td, mask));
}

int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	PROC_LOCK(p);
	td->td_oldsigmask = td->td_sigmask;
	td->td_pflags |= TDP_OLDMASK;
	SIG_CANTMASK(mask);
	td->td_sigmask = mask;
	signotify(td);
	/* Sleep until a caught signal interrupts us (msleep != 0). */
	while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	PROC_UNLOCK(p);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 * convention: libc stub passes mask, not pointer, to save a copyin.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigsuspend_args {
        osigset_t mask;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
osigsuspend(td, uap)
        struct thread *td;
        struct osigsuspend_args *uap;
{
        struct proc *p = td->td_proc;
        sigset_t mask;

        PROC_LOCK(p);
        /*
         * Save the current mask so postsig() can restore it after the
         * handler runs; TDP_OLDMASK tells postsig() a saved mask exists.
         */
        td->td_oldsigmask = td->td_sigmask;
        td->td_pflags |= TDP_OLDMASK;
        OSIG2SIG(uap->mask, mask);
        SIG_CANTMASK(mask);
        SIGSETLO(td->td_sigmask, mask);
        signotify(td);
        /* Sleep until interrupted by a signal (PCATCH). */
        while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
                /* void */;
        PROC_UNLOCK(p);
        /* always return EINTR rather than ERESTART... */
        return (EINTR);
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
        struct sigstack *nss;
        struct sigstack *oss;
};
#endif
/*
 * Old 4.3BSD sigstack(2): install and/or fetch the signal stack.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
osigstack(td, uap)
        struct thread *td;
        register struct osigstack_args *uap;
{
        struct sigstack nss, oss;
        int error = 0;

        if (uap->nss != NULL) {
                error = copyin(uap->nss, &nss, sizeof(nss));
                if (error)
                        return (error);
        }
        /* Snapshot the old state before any modification. */
        oss.ss_sp = td->td_sigstk.ss_sp;
        oss.ss_onstack = sigonstack(cpu_getstack(td));
        if (uap->nss != NULL) {
                td->td_sigstk.ss_sp = nss.ss_sp;
                /* sigstack(2) carries no size; record zero. */
                td->td_sigstk.ss_size = 0;
                td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
                td->td_pflags |= TDP_ALTSTACK;
        }
        if (uap->oss != NULL)
                error = copyout(&oss, uap->oss, sizeof(oss));

        return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
        stack_t *ss;
        stack_t *oss;
};
#endif
/*
 * sigaltstack(2) system call: thin copyin/copyout wrapper around
 * kern_sigaltstack().
 *
 * MPSAFE
 */
/* ARGSUSED */
int
sigaltstack(td, uap)
        struct thread *td;
        register struct sigaltstack_args *uap;
{
        stack_t ss, oss;
        int error;

        if (uap->ss != NULL) {
                error = copyin(uap->ss, &ss, sizeof(ss));
                if (error)
                        return (error);
        }
        error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
            (uap->oss != NULL) ? &oss : NULL);
        if (error)
                return (error);
        if (uap->oss != NULL)
                error = copyout(&oss, uap->oss, sizeof(stack_t));
        return (error);
}

/*
 * Kernel-internal sigaltstack: install the alternate signal stack
 * described by ss (may be NULL) and/or report the previous one via
 * oss (may be NULL).  Returns EPERM if currently running on the
 * alternate stack, EINVAL for bad flags, ENOMEM if the new stack is
 * smaller than the ABI minimum.
 */
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
        struct proc *p = td->td_proc;
        int oonstack;

        oonstack = sigonstack(cpu_getstack(td));

        if (oss != NULL) {
                *oss = td->td_sigstk;
                oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
                    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        }

        if (ss != NULL) {
                /* Cannot modify an active alternate stack. */
                if (oonstack)
                        return (EPERM);
                /* SS_DISABLE is the only flag callers may set. */
                if ((ss->ss_flags & ~SS_DISABLE) != 0)
                        return (EINVAL);
                if (!(ss->ss_flags & SS_DISABLE)) {
                        if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
                                return (ENOMEM);
                        }
                        td->td_sigstk = *ss;
                        td->td_pflags |= TDP_ALTSTACK;
                } else {
                        td->td_pflags &= ~TDP_ALTSTACK;
                }
        }
        return (0);
}

/*
 * Common code for kill process group/broadcast kill.
 * td is the calling thread.
 */
static int
killpg1(td, sig, pgid, all)
        register struct thread *td;
        int sig, pgid, all;
{
        register struct proc *p;
        struct pgrp *pgrp;
        int nfound = 0;

        if (all) {
                /*
                 * broadcast
                 */
                sx_slock(&allproc_lock);
                LIST_FOREACH(p, &allproc, p_list) {
                        PROC_LOCK(p);
                        /* Skip init, system processes, and ourselves. */
                        if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
                            p == td->td_proc) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        if (p_cansignal(td, p, sig) == 0) {
                                nfound++;
                                /* sig == 0 means permission probe only. */
                                if (sig)
                                        psignal(p, sig);
                        }
                        PROC_UNLOCK(p);
                }
                sx_sunlock(&allproc_lock);
        } else {
                sx_slock(&proctree_lock);
                if (pgid == 0) {
                        /*
                         * zero pgid means send to my process group.
                         */
                        pgrp = td->td_proc->p_pgrp;
                        PGRP_LOCK(pgrp);
                } else {
                        /* NOTE(review): pgfind() appears to return the pgrp
                         * locked (we PGRP_UNLOCK below without locking). */
                        pgrp = pgfind(pgid);
                        if (pgrp == NULL) {
                                sx_sunlock(&proctree_lock);
                                return (ESRCH);
                        }
                }
                sx_sunlock(&proctree_lock);
                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                        PROC_LOCK(p);
                        /* Skip init and system processes. */
                        if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        if (p->p_state == PRS_ZOMBIE) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        if (p_cansignal(td, p, sig) == 0) {
                                nfound++;
                                /* sig == 0 means permission probe only. */
                                if (sig)
                                        psignal(p, sig);
                        }
                        PROC_UNLOCK(p);
                }
                PGRP_UNLOCK(pgrp);
        }
        return (nfound ? 0 : ESRCH);
}

#ifndef _SYS_SYSPROTO_H_
struct kill_args {
        int pid;
        int signum;
};
#endif
/*
 * kill(2) system call.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
kill(td, uap)
        register struct thread *td;
        register struct kill_args *uap;
{
        register struct proc *p;
        int error;

        if ((u_int)uap->signum > _SIG_MAXSIG)
                return (EINVAL);

        if (uap->pid > 0) {
                /* kill single process */
                if ((p = pfind(uap->pid)) == NULL) {
                        if ((p = zpfind(uap->pid)) != NULL) {
                                /*
                                 * IEEE Std 1003.1-2001: return success
                                 * when killing a zombie.
                                 */
                                PROC_UNLOCK(p);
                                return (0);
                        }
                        return (ESRCH);
                }
                error = p_cansignal(td, p, uap->signum);
                if (error == 0 && uap->signum)
                        psignal(p, uap->signum);
                PROC_UNLOCK(p);
                return (error);
        }
        switch (uap->pid) {
        case -1:                /* broadcast signal */
                return (killpg1(td, uap->signum, 0, 1));
        case 0:                 /* signal own process group */
                return (killpg1(td, uap->signum, 0, 0));
        default:                /* negative explicit process group */
                return (killpg1(td, uap->signum, -uap->pid, 0));
        }
        /* NOTREACHED */
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct okillpg_args {
        int pgid;
        int signum;
};
#endif
/*
 * Old 4.3BSD killpg(2): signal an explicit process group.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
okillpg(td, uap)
        struct thread *td;
        register struct okillpg_args *uap;
{

        if ((u_int)uap->signum > _SIG_MAXSIG)
                return (EINVAL);
        return (killpg1(td, uap->signum, uap->pgid, 0));
}
#endif /* COMPAT_43 */

/*
 * Send a signal to a process group.  A pgid of 0 is ignored.
 */
void
gsignal(pgid, sig)
        int pgid, sig;
{
        struct pgrp *pgrp;

        if (pgid != 0) {
                sx_slock(&proctree_lock);
                /* NOTE(review): pgfind() appears to return the pgrp locked;
                 * pgsignal() asserts it and we drop the lock afterwards. */
                pgrp = pgfind(pgid);
                sx_sunlock(&proctree_lock);
                if (pgrp != NULL) {
                        pgsignal(pgrp, sig, 0);
                        PGRP_UNLOCK(pgrp);
                }
        }
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 * The caller must hold the pgrp lock.
 */
void
pgsignal(pgrp, sig, checkctty)
        struct pgrp *pgrp;
        int sig, checkctty;
{
        register struct proc *p;

        if (pgrp) {
                PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                        PROC_LOCK(p);
                        if (checkctty == 0 || p->p_flag & P_CONTROLT)
                                psignal(p, sig);
                        PROC_UNLOCK(p);
                }
        }
}

/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 *
 * MPSAFE
 */
void
trapsignal(struct thread *td, int sig, u_long code)
{
        struct sigacts *ps;
        struct proc *p;
        siginfo_t siginfo;
        int error;

        p = td->td_proc;
        if (td->td_pflags & TDP_SA) {
                /* KSE (scheduler-activations) thread. */
                if (td->td_mailbox == NULL)
                        thread_user_enter(p, td);
                PROC_LOCK(p);
                SIGDELSET(td->td_sigmask, sig);
                mtx_lock_spin(&sched_lock);
                /*
                 * Force scheduling an upcall, so UTS has chance to
                 * process the signal before thread runs again in
                 * userland.
                 */
                if (td->td_upcall)
                        td->td_upcall->ku_flags |= KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);
        } else {
                PROC_LOCK(p);
        }
        ps = p->p_sigacts;
        mtx_lock(&ps->ps_mtx);
        if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
            !SIGISMEMBER(td->td_sigmask, sig)) {
                /* Caught and unmasked: deliver directly with the trap code. */
                p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
                if (KTRPOINT(curthread, KTR_PSIG))
                        ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
                            &td->td_sigmask, code);
#endif
                if (!(td->td_pflags & TDP_SA))
                        (*p->p_sysent->sv_sendsig)(
                                ps->ps_sigact[_SIG_IDX(sig)], sig,
                                &td->td_sigmask, code);
                else if (td->td_mailbox == NULL) {
                        mtx_unlock(&ps->ps_mtx);
                        /* UTS caused a sync signal */
                        p->p_code = code;       /* XXX for core dump/debugger */
                        p->p_sig = sig;         /* XXX to verify code */
                        sigexit(td, sig);
                } else {
                        /* Hand the siginfo to the userland scheduler. */
                        cpu_thread_siginfo(sig, code, &siginfo);
                        mtx_unlock(&ps->ps_mtx);
                        SIGADDSET(td->td_sigmask, sig);
                        PROC_UNLOCK(p);
                        error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
                            sizeof(siginfo));
                        PROC_LOCK(p);
                        /* UTS memory corrupted */
                        if (error)
                                sigexit(td, SIGSEGV);
                        mtx_lock(&ps->ps_mtx);
                }
                SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
                if (!SIGISMEMBER(ps->ps_signodefer, sig))
                        SIGADDSET(td->td_sigmask, sig);
                if (SIGISMEMBER(ps->ps_sigreset, sig)) {
                        /*
                         * See kern_sigaction() for origin of this code.
                         */
                        SIGDELSET(ps->ps_sigcatch, sig);
                        if (sig != SIGCONT &&
                            sigprop(sig) & SA_IGNORE)
                                SIGADDSET(ps->ps_sigignore, sig);
                        ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
                }
                mtx_unlock(&ps->ps_mtx);
        } else {
                /* Not directly deliverable: post it normally. */
                mtx_unlock(&ps->ps_mtx);
                p->p_code = code;       /* XXX for core dump/debugger */
                p->p_sig = sig;         /* XXX to verify code */
                tdsignal(td, sig, SIGTARGET_TD);
        }
        PROC_UNLOCK(p);
}

/*
 * Pick the thread in p that should receive sig.  Called with the proc
 * lock held; returns a thread pointer (never NULL).
 */
static struct thread *
sigtd(struct proc *p, int sig, int prop)
{
        struct thread *td, *signal_td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        /*
         * First find a thread in sigwait state and signal belongs to
         * its wait set. POSIX's argument is that speed of delivering signal
         * to sigwait thread is faster than delivering signal to user stack.
         * If we can not find sigwait thread, then find the first thread in
         * the proc that doesn't have this signal masked, an exception is
         * if current thread is sending signal to its process, and it does not
         * mask the signal, it should get the signal, this is another fast
         * way to deliver signal.
         */
        signal_td = NULL;
        mtx_lock_spin(&sched_lock);
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_waitset != NULL &&
                    SIGISMEMBER(*(td->td_waitset), sig)) {
                        mtx_unlock_spin(&sched_lock);
                        return (td);
                }
                if (!SIGISMEMBER(td->td_sigmask, sig)) {
                        if (td == curthread)
                                signal_td = curthread;
                        else if (signal_td == NULL)
                                signal_td = td;
                }
        }
        if (signal_td == NULL)
                signal_td = FIRST_THREAD_IN_PROC(p);
        mtx_unlock_spin(&sched_lock);
        return (signal_td);
}

/*
 * Send the signal to the process.
 * If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * MPSAFE
 */
void
psignal(struct proc *p, int sig)
{
        struct thread *td;
        int prop;

        if (!_SIG_VALID(sig))
                panic("psignal(): invalid signal");

        PROC_LOCK_ASSERT(p, MA_OWNED);
        prop = sigprop(sig);

        /*
         * Find a thread to deliver the signal to.
         */
        td = sigtd(p, sig, prop);

        tdsignal(td, sig, SIGTARGET_P);
}

/*
 * Deliver sig to thread td.  Wraps do_tdsignal() so that, for KSE
 * processes, a change to the process-pending set wakes up any thread
 * sleeping in kse_thr_interrupt()/sigwait-style waits on p_siglist.
 *
 * MPSAFE
 */
void
tdsignal(struct thread *td, int sig, sigtarget_t target)
{
        sigset_t saved;
        struct proc *p = td->td_proc;

        if (p->p_flag & P_SA)
                saved = p->p_siglist;
        do_tdsignal(td, sig, target);
        if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
                if (!SIGSETEQ(saved, p->p_siglist)) {
                        /* pending set changed */
                        p->p_flag |= P_SIGEVENT;
                        wakeup(&p->p_siglist);
                }
        }
}

/*
 * Core of signal posting: decide which pending set receives sig,
 * compute the effective action, and perform the process-wide part
 * (stop/continue/wakeup).  Called with the proc lock held.
 */
static void
do_tdsignal(struct thread *td, int sig, sigtarget_t target)
{
        struct proc *p;
        register sig_t action;
        sigset_t *siglist;
        struct thread *td0;
        register int prop;
        struct sigacts *ps;

        if (!_SIG_VALID(sig))
                panic("do_tdsignal(): invalid signal");

        p = td->td_proc;
        ps = p->p_sigacts;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        KNOTE(&p->p_klist, NOTE_SIGNAL | sig);

        prop = sigprop(sig);

        /*
         * If the signal is blocked and not destined for this thread, then
         * assign it to the process so that we can find it later in the first
         * thread that unblocks it.  Otherwise, assign it to this thread now.
         */
        if (target == SIGTARGET_TD) {
                siglist = &td->td_siglist;
        } else {
                if (!SIGISMEMBER(td->td_sigmask, sig))
                        siglist = &td->td_siglist;
                else if (td->td_waitset != NULL &&
                    SIGISMEMBER(*(td->td_waitset), sig))
                        siglist = &td->td_siglist;
                else
                        siglist = &p->p_siglist;
        }

        /*
         * If proc is traced, always give parent a chance;
         * if signal event is tracked by procfs, give *that*
         * a chance, as well.
         */
        if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
                action = SIG_DFL;
        } else {
                /*
                 * If the signal is being ignored,
                 * then we forget about it immediately.
                 * (Note: we don't set SIGCONT in ps_sigignore,
                 * and if it is set to SIG_IGN,
                 * action will be SIG_DFL here.)
                 */
                mtx_lock(&ps->ps_mtx);
                if (SIGISMEMBER(ps->ps_sigignore, sig) ||
                    (p->p_flag & P_WEXIT)) {
                        mtx_unlock(&ps->ps_mtx);
                        return;
                }
                /* Masked and not in the thread's sigwait set => hold. */
                if (((td->td_waitset == NULL) &&
                    SIGISMEMBER(td->td_sigmask, sig)) ||
                    ((td->td_waitset != NULL) &&
                    SIGISMEMBER(td->td_sigmask, sig) &&
                    !SIGISMEMBER(*(td->td_waitset), sig)))
                        action = SIG_HOLD;
                else if (SIGISMEMBER(ps->ps_sigcatch, sig))
                        action = SIG_CATCH;
                else
                        action = SIG_DFL;
                mtx_unlock(&ps->ps_mtx);
        }

        if (prop & SA_CONT) {
                /* A continue cancels any pending stop signals. */
                SIG_STOPSIGMASK(p->p_siglist);
                /*
                 * XXX Should investigate leaving STOP and CONT sigs only in
                 * the proc's siglist.
                 */
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td0)
                        SIG_STOPSIGMASK(td0->td_siglist);
                mtx_unlock_spin(&sched_lock);
        }

        if (prop & SA_STOP) {
                /*
                 * If sending a tty stop signal to a member of an orphaned
                 * process group, discard the signal here if the action
                 * is default; don't stop the process below if sleeping,
                 * and don't clear any pending SIGCONT.
                 */
                if ((prop & SA_TTYSTOP) &&
                    (p->p_pgrp->pg_jobc == 0) &&
                    (action == SIG_DFL))
                        return;
                /* A stop cancels any pending continue signals. */
                SIG_CONTSIGMASK(p->p_siglist);
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td0)
                        SIG_CONTSIGMASK(td0->td_siglist);
                mtx_unlock_spin(&sched_lock);
                p->p_flag &= ~P_CONTINUED;
        }

        SIGADDSET(*siglist, sig);
        signotify(td);                  /* uses schedlock */
        if (siglist == &td->td_siglist && (td->td_waitset != NULL) &&
            action != SIG_HOLD) {
                td->td_waitset = NULL;
        }

        /*
         * Defer further processing for signals which are held,
         * except that stopped processes must be continued by SIGCONT.
         */
        if (action == SIG_HOLD &&
            !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
                return;
        /*
         * Some signals have a process-wide effect and a per-thread
         * component.  Most processing occurs when the process next
         * tries to cross the user boundary, however there are some
         * times when processing needs to be done immediately, such as
         * waking up threads so that they can cross the user boundary.
         * We try to do the per-process part here.
         */
        if (P_SHOULDSTOP(p)) {
                /*
                 * The process is in stopped mode. All the threads should be
                 * either winding down or already on the suspended queue.
                 */
                if (p->p_flag & P_TRACED) {
                        /*
                         * The traced process is already stopped,
                         * so no further action is necessary.
                         * No signal can restart us.
                         */
                        goto out;
                }

                if (sig == SIGKILL) {
                        /*
                         * SIGKILL sets process running.
                         * It will die elsewhere.
                         * All threads must be restarted.
                         */
                        p->p_flag &= ~P_STOPPED;
                        goto runfast;
                }

                if (prop & SA_CONT) {
                        /*
                         * If SIGCONT is default (or ignored), we continue the
                         * process but don't leave the signal in siglist as
                         * it has no further action.  If SIGCONT is held, we
                         * continue the process and leave the signal in
                         * siglist.  If the process catches SIGCONT, let it
                         * handle the signal itself.  If it isn't waiting on
                         * an event, it goes back to run state.
                         * Otherwise, process goes back to sleep state.
                         */
                        p->p_flag &= ~P_STOPPED_SIG;
                        p->p_flag |= P_CONTINUED;
                        if (action == SIG_DFL) {
                                SIGDELSET(*siglist, sig);
                        } else if (action == SIG_CATCH) {
                                /*
                                 * The process wants to catch it so it needs
                                 * to run at least one thread, but which one?
                                 * It would seem that the answer would be to
                                 * run an upcall in the next KSE to run, and
                                 * deliver the signal that way. In a NON KSE
                                 * process, we need to make sure that the
                                 * single thread is runnable asap.
                                 * XXXKSE for now however, make them all run.
                                 */
                                goto runfast;
                        }
                        /*
                         * The signal is not ignored or caught.
                         */
                        mtx_lock_spin(&sched_lock);
                        thread_unsuspend(p);
                        mtx_unlock_spin(&sched_lock);
                        goto out;
                }

                if (prop & SA_STOP) {
                        /*
                         * Already stopped, don't need to stop again
                         * (If we did the shell could get confused).
                         * Just make sure the signal STOP bit set.
                         */
                        p->p_flag |= P_STOPPED_SIG;
                        SIGDELSET(*siglist, sig);
                        goto out;
                }

                /*
                 * All other kinds of signals:
                 * If a thread is sleeping interruptibly, simulate a
                 * wakeup so that when it is continued it will be made
                 * runnable and can look at the signal.  However, don't make
                 * the PROCESS runnable, leave it stopped.
                 * It may run a bit until it hits a thread_suspend_check().
                 */
                mtx_lock_spin(&sched_lock);
                if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
                        sleepq_abort(td);
                mtx_unlock_spin(&sched_lock);
                goto out;
                /*
                 * Mutexes are short lived. Threads waiting on them will
                 * hit thread_suspend_check() soon.
                 */
        } else if (p->p_state == PRS_NORMAL) {
                if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
                    !(prop & SA_STOP)) {
                        mtx_lock_spin(&sched_lock);
                        tdsigwakeup(td, sig, action);
                        mtx_unlock_spin(&sched_lock);
                        goto out;
                }
                if (prop & SA_STOP) {
                        /* Don't stop while the parent waits in vfork(). */
                        if (p->p_flag & P_PPWAIT)
                                goto out;
                        p->p_flag |= P_STOPPED_SIG;
                        p->p_xstat = sig;
                        p->p_xthread = td;
                        mtx_lock_spin(&sched_lock);
                        FOREACH_THREAD_IN_PROC(p, td0) {
                                if (TD_IS_SLEEPING(td0) &&
                                    (td0->td_flags & TDF_SINTR) &&
                                    !TD_IS_SUSPENDED(td0)) {
                                        thread_suspend_one(td0);
                                } else if (td != td0) {
                                        td0->td_flags |= TDF_ASTPENDING;
                                }
                        }
                        thread_stopped(p);
                        if (p->p_numthreads == p->p_suspcount) {
                                SIGDELSET(p->p_siglist, p->p_xstat);
                                FOREACH_THREAD_IN_PROC(p, td0)
                                        SIGDELSET(td0->td_siglist, p->p_xstat);
                        }
                        mtx_unlock_spin(&sched_lock);
                        goto out;
                }
                else
                        goto runfast;
                /* NOTREACHED */
        } else {
                /* Not in "NORMAL" state. discard the signal. */
                SIGDELSET(*siglist, sig);
                goto out;
        }

        /*
         * The process is not stopped so we need to apply the signal to all the
         * running threads.
         */

runfast:
        mtx_lock_spin(&sched_lock);
        tdsigwakeup(td, sig, action);
        thread_unsuspend(p);
        mtx_unlock_spin(&sched_lock);
out:
        /* If we jump here, sched_lock should not be owned. */
        mtx_assert(&sched_lock, MA_NOTOWNED);
}

/*
 * The force of a signal has been directed against a single
 * thread.  We need to see what we can do about knocking it
 * out of any sleep it may be in etc.
 */
static void
tdsigwakeup(struct thread *td, int sig, sig_t action)
{
        struct proc *p = td->td_proc;
        register int prop;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED);
        prop = sigprop(sig);

        /*
         * Bring the priority of a thread up if we want it to get
         * killed in this lifetime.
         */
        if (action == SIG_DFL && (prop & SA_KILL)) {
                if (td->td_priority > PUSER)
                        td->td_priority = PUSER;
        }

        if (TD_ON_SLEEPQ(td)) {
                /*
                 * If thread is sleeping uninterruptibly
                 * we can't interrupt the sleep... the signal will
                 * be noticed when the process returns through
                 * trap() or syscall().
                 */
                if ((td->td_flags & TDF_SINTR) == 0)
                        return;
                /*
                 * Process is sleeping and traced.  Make it runnable
                 * so it can discover the signal in issignal() and stop
                 * for its parent.
                 */
                if (p->p_flag & P_TRACED) {
                        p->p_flag &= ~P_STOPPED_TRACE;
                } else {
                        /*
                         * If SIGCONT is default (or ignored) and process is
                         * asleep, we are finished; the process should not
                         * be awakened.
                         */
                        if ((prop & SA_CONT) && action == SIG_DFL) {
                                SIGDELSET(p->p_siglist, sig);
                                /*
                                 * It may be on either list in this state.
                                 * Remove from both for now.
                                 */
                                SIGDELSET(td->td_siglist, sig);
                                return;
                        }

                        /*
                         * Give low priority threads a better chance to run.
                         */
                        if (td->td_priority > PUSER)
                                td->td_priority = PUSER;
                }
                sleepq_abort(td);
        } else {
                /*
                 * Other states do nothing with the signal immediately,
                 * other than kicking ourselves if we are running.
2000 * It will either never be noticed, or noticed very soon. 2001 */ 2002 #ifdef SMP 2003 if (TD_IS_RUNNING(td) && td != curthread) 2004 forward_signal(td); 2005 #endif 2006 } 2007 } 2008 2009 int 2010 ptracestop(struct thread *td, int sig) 2011 { 2012 struct proc *p = td->td_proc; 2013 struct thread *td0; 2014 2015 PROC_LOCK_ASSERT(p, MA_OWNED); 2016 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 2017 &p->p_mtx.mtx_object, "Stopping for traced signal"); 2018 2019 mtx_lock_spin(&sched_lock); 2020 td->td_flags |= TDF_XSIG; 2021 mtx_unlock_spin(&sched_lock); 2022 td->td_xsig = sig; 2023 while ((p->p_flag & P_TRACED) && (td->td_flags & TDF_XSIG)) { 2024 if (p->p_flag & P_SINGLE_EXIT) { 2025 mtx_lock_spin(&sched_lock); 2026 td->td_flags &= ~TDF_XSIG; 2027 mtx_unlock_spin(&sched_lock); 2028 return (sig); 2029 } 2030 /* 2031 * Just make wait() to work, the last stopped thread 2032 * will win. 2033 */ 2034 p->p_xstat = sig; 2035 p->p_xthread = td; 2036 p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE); 2037 mtx_lock_spin(&sched_lock); 2038 FOREACH_THREAD_IN_PROC(p, td0) { 2039 if (TD_IS_SLEEPING(td0) && 2040 (td0->td_flags & TDF_SINTR) && 2041 !TD_IS_SUSPENDED(td0)) { 2042 thread_suspend_one(td0); 2043 } else if (td != td0) { 2044 td0->td_flags |= TDF_ASTPENDING; 2045 } 2046 } 2047 stopme: 2048 thread_stopped(p); 2049 thread_suspend_one(td); 2050 PROC_UNLOCK(p); 2051 DROP_GIANT(); 2052 mi_switch(SW_VOL, NULL); 2053 mtx_unlock_spin(&sched_lock); 2054 PICKUP_GIANT(); 2055 PROC_LOCK(p); 2056 if (!(p->p_flag & P_TRACED)) 2057 break; 2058 if (td->td_flags & TDF_DBSUSPEND) { 2059 if (p->p_flag & P_SINGLE_EXIT) 2060 break; 2061 mtx_lock_spin(&sched_lock); 2062 goto stopme; 2063 } 2064 } 2065 return (td->td_xsig); 2066 } 2067 2068 /* 2069 * If the current process has received a signal (should be caught or cause 2070 * termination, should interrupt current syscall), return the signal number. 
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in cursig.) The normal call
 * sequence is
 *
 *	while (sig = cursig(curthread))
 *		postsig(sig);
 */
static int
issignal(td)
        struct thread *td;
{
        struct proc *p;
        struct sigacts *ps;
        sigset_t sigpending;
        int sig, prop, newsig;
        struct thread *td0;

        p = td->td_proc;
        ps = p->p_sigacts;
        mtx_assert(&ps->ps_mtx, MA_OWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        for (;;) {
                int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);

                sigpending = td->td_siglist;
                SIGSETNAND(sigpending, td->td_sigmask);

                /* Don't process stop signals while parent waits in vfork(). */
                if (p->p_flag & P_PPWAIT)
                        SIG_STOPSIGMASK(sigpending);
                if (SIGISEMPTY(sigpending))     /* no signal to send */
                        return (0);
                sig = sig_ffs(&sigpending);

                if (p->p_stops & S_SIG) {
                        mtx_unlock(&ps->ps_mtx);
                        stopevent(p, S_SIG, sig);
                        mtx_lock(&ps->ps_mtx);
                }

                /*
                 * We should see pending but ignored signals
                 * only if P_TRACED was on when they were posted.
                 */
                if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
                        SIGDELSET(td->td_siglist, sig);
                        if (td->td_pflags & TDP_SA)
                                SIGADDSET(td->td_sigmask, sig);
                        continue;
                }
                if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
                        /*
                         * If traced, always stop.
                         */
                        mtx_unlock(&ps->ps_mtx);
                        newsig = ptracestop(td, sig);
                        mtx_lock(&ps->ps_mtx);

                        /*
                         * If parent wants us to take the signal,
                         * then it will leave it in p->p_xstat;
                         * otherwise we just look for signals again.
                         */
                        SIGDELSET(td->td_siglist, sig); /* clear old signal */
                        if (td->td_pflags & TDP_SA)
                                SIGADDSET(td->td_sigmask, sig);
                        if (newsig == 0)
                                continue;
                        sig = newsig;
                        /*
                         * If the traced bit got turned off, go back up
                         * to the top to rescan signals.  This ensures
                         * that p_sig* and p_sigact are consistent.
                         */
                        if ((p->p_flag & P_TRACED) == 0)
                                continue;

                        /*
                         * Put the new signal into td_siglist.  If the
                         * signal is being masked, look for other signals.
                         */
                        SIGADDSET(td->td_siglist, sig);
                        if (td->td_pflags & TDP_SA)
                                SIGDELSET(td->td_sigmask, sig);
                        if (SIGISMEMBER(td->td_sigmask, sig))
                                continue;
                        signotify(td);
                }

                prop = sigprop(sig);

                /*
                 * Decide whether the signal should be returned.
                 * Return the signal's number, or fall through
                 * to clear it from the pending mask.
                 */
                switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {

                case (intptr_t)SIG_DFL:
                        /*
                         * Don't take default actions on system processes.
                         */
                        if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
                                /*
                                 * Are you sure you want to ignore SIGSEGV
                                 * in init? XXX
                                 */
                                printf("Process (pid %lu) got signal %d\n",
                                    (u_long)p->p_pid, sig);
#endif
                                break;          /* == ignore */
                        }
                        /*
                         * If there is a pending stop signal to process
                         * with default action, stop here,
                         * then clear the signal.  However,
                         * if process is member of an orphaned
                         * process group, ignore tty stop signals.
                         */
                        if (prop & SA_STOP) {
                                if (p->p_flag & P_TRACED ||
                                    (p->p_pgrp->pg_jobc == 0 &&
                                     prop & SA_TTYSTOP))
                                        break;  /* == ignore */
                                mtx_unlock(&ps->ps_mtx);
                                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
                                    &p->p_mtx.mtx_object, "Catching SIGSTOP");
                                p->p_flag |= P_STOPPED_SIG;
                                p->p_xstat = sig;
                                p->p_xthread = td;
                                mtx_lock_spin(&sched_lock);
                                FOREACH_THREAD_IN_PROC(p, td0) {
                                        if (TD_IS_SLEEPING(td0) &&
                                            (td0->td_flags & TDF_SINTR) &&
                                            !TD_IS_SUSPENDED(td0)) {
                                                thread_suspend_one(td0);
                                        } else if (td != td0) {
                                                td0->td_flags |= TDF_ASTPENDING;
                                        }
                                }
                                thread_stopped(p);
                                thread_suspend_one(td);
                                PROC_UNLOCK(p);
                                DROP_GIANT();
                                mi_switch(SW_INVOL, NULL);
                                mtx_unlock_spin(&sched_lock);
                                PICKUP_GIANT();
                                PROC_LOCK(p);
                                mtx_lock(&ps->ps_mtx);
                                break;
                        } else if (prop & SA_IGNORE) {
                                /*
                                 * Except for SIGCONT, shouldn't get here.
                                 * Default action is to ignore; drop it.
                                 */
                                break;          /* == ignore */
                        } else
                                return (sig);
                        /*NOTREACHED*/

                case (intptr_t)SIG_IGN:
                        /*
                         * Masking above should prevent us ever trying
                         * to take action on an ignored signal other
                         * than SIGCONT, unless process is traced.
                         */
                        if ((prop & SA_CONT) == 0 &&
                            (p->p_flag & P_TRACED) == 0)
                                printf("issignal\n");
                        break;          /* == ignore */

                default:
                        /*
                         * This signal has an action, let
                         * postsig() process it.
                         */
                        return (sig);
                }
                SIGDELSET(td->td_siglist, sig);         /* take the signal! */
        }
        /* NOTREACHED */
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.  Must be called with the proc p locked.
 */
static void
stop(struct proc *p)
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_flag |= P_STOPPED_SIG;
        p->p_flag &= ~P_WAITED;
        wakeup(p->p_pptr);
}

/*
 * Called (with the proc lock and sched_lock held) when a thread of a
 * stopping process suspends: once the last thread is accounted for,
 * mark the process stopped and notify the parent (SIGCHLD unless the
 * parent set SA_NOCLDSTOP).  Drops and re-takes sched_lock.
 *
 * MPSAFE
 */
void
thread_stopped(struct proc *p)
{
        struct proc *p1 = curthread->td_proc;
        struct sigacts *ps;
        int n;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED);
        n = p->p_suspcount;
        /* The current thread is about to suspend but isn't counted yet. */
        if (p == p1)
                n++;
        if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
                mtx_unlock_spin(&sched_lock);
                stop(p);
                PROC_LOCK(p->p_pptr);
                ps = p->p_pptr->p_sigacts;
                mtx_lock(&ps->ps_mtx);
                if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
                        mtx_unlock(&ps->ps_mtx);
                        psignal(p->p_pptr, SIGCHLD);
                } else
                        mtx_unlock(&ps->ps_mtx);
                PROC_UNLOCK(p->p_pptr);
                mtx_lock_spin(&sched_lock);
        }
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(sig)
        register int sig;
{
        struct thread *td = curthread;
        register struct proc *p = td->td_proc;
        struct sigacts *ps;
        sig_t action;
        sigset_t returnmask;
        int code;

        KASSERT(sig != 0, ("postsig"));

        PROC_LOCK_ASSERT(p, MA_OWNED);
        ps = p->p_sigacts;
        mtx_assert(&ps->ps_mtx, MA_OWNED);
        SIGDELSET(td->td_siglist, sig);
        action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
        if (KTRPOINT(td, KTR_PSIG))
                ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
                    &td->td_oldsigmask : &td->td_sigmask, 0);
#endif
        if (p->p_stops & S_SIG) {
                mtx_unlock(&ps->ps_mtx);
                stopevent(p, S_SIG, sig);
                mtx_lock(&ps->ps_mtx);
        }

        if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) {
                /*
                 * Default action, where the default is to kill
                 * the process.  (Other cases were ignored above.)
                 */
                mtx_unlock(&ps->ps_mtx);
                sigexit(td, sig);
                /* NOTREACHED */
        } else {
                if (td->td_pflags & TDP_SA) {
                        /* SIGKILL is never handed to the UTS. */
                        if (sig == SIGKILL) {
                                mtx_unlock(&ps->ps_mtx);
                                sigexit(td, sig);
                        }
                }

                /*
                 * If we get here, the signal must be caught.
                 */
                KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
                    ("postsig action"));
                /*
                 * Set the new mask value and also defer further
                 * occurrences of this signal.
                 *
                 * Special case: user has done a sigsuspend.  Here the
                 * current mask is not of interest, but rather the
                 * mask from before the sigsuspend is what we want
                 * restored after the signal processing is completed.
                 */
                if (td->td_pflags & TDP_OLDMASK) {
                        returnmask = td->td_oldsigmask;
                        td->td_pflags &= ~TDP_OLDMASK;
                } else
                        returnmask = td->td_sigmask;

                SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
                if (!SIGISMEMBER(ps->ps_signodefer, sig))
                        SIGADDSET(td->td_sigmask, sig);

                if (SIGISMEMBER(ps->ps_sigreset, sig)) {
                        /*
                         * See kern_sigaction() for origin of this code.
                         */
                        SIGDELSET(ps->ps_sigcatch, sig);
                        if (sig != SIGCONT &&
                            sigprop(sig) & SA_IGNORE)
                                SIGADDSET(ps->ps_sigignore, sig);
                        ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
                }
                p->p_stats->p_ru.ru_nsignals++;
                /* Pass the trap code along only if it belongs to this sig. */
                if (p->p_sig != sig) {
                        code = 0;
                } else {
                        code = p->p_code;
                        p->p_code = 0;
                        p->p_sig = 0;
                }
                if (td->td_pflags & TDP_SA)
                        thread_signal_add(curthread, sig);
                else
                        (*p->p_sysent->sv_sendsig)(action, sig,
                            &returnmask, code);
        }
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(p, why)
        struct proc *p;
        char *why;
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
                p, p->p_pid, p->p_comm);
        log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
                p->p_ucred ? p->p_ucred->cr_uid : -1, why);
        psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 *
 * MPSAFE
 */
void
sigexit(td, sig)
        struct thread *td;
        int sig;
{
        struct proc *p = td->td_proc;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_acflag |= AXSIG;
        if (sigprop(sig) & SA_CORE) {
                p->p_sig = sig;
                /*
                 * Log signals which would cause core dumps
                 * (Log as LOG_INFO to appease those who don't want
                 * these messages.)
                 * XXX : Todo, as well as euid, write out ruid too
                 * Note that coredump() drops proc lock.
                 */
                if (coredump(td) == 0)
                        sig |= WCOREFLAG;
                if (kern_logsigexit)
                        log(LOG_INFO,
                            "pid %d (%s), uid %d: exited on signal %d%s\n",
                            p->p_pid, p->p_comm,
                            td->td_ucred ? td->td_ucred->cr_uid : -1,
                            sig &~ WCOREFLAG,
                            sig & WCOREFLAG ? " (core dumped)" : "");
        } else
                PROC_UNLOCK(p);
        exit1(td, W_EXITCODE(0, sig));
        /* NOTREACHED */
}

static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
              sizeof(corefilename), "process corefile name format string");

/*
 * expand_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; they can be disabled completely
 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 *
 * Returns a malloc(M_TEMP)ed path the caller must free, or NULL on
 * allocation failure or if the expansion exceeds MAXPATHLEN.
 */

static char *
expand_name(name, uid, pid)
        const char *name;
        uid_t uid;
        pid_t pid;
{
        const char *format, *appendstr;
        char *temp;
        char buf[11];           /* Buffer for pid/uid -- max 4B */
        size_t i, l, n;

        format = corefilename;
        temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
        if (temp == NULL)
                return (NULL);
        for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
                switch (format[i]) {
                case '%':       /* Format character */
                        i++;
                        switch (format[i]) {
                        case '%':
                                appendstr = "%";
                                break;
                        case 'N':       /* process name */
                                appendstr = name;
                                break;
                        case 'P':       /* process id */
                                sprintf(buf, "%u", pid);
                                appendstr = buf;
                                break;
                        case 'U':       /* user id */
                                sprintf(buf, "%u", uid);
                                appendstr = buf;
                                break;
                        default:
                                appendstr = "";
                                log(LOG_ERR,
                                    "Unknown format character %c in `%s'\n",
                                    format[i], format);
                        }
                        l = strlen(appendstr);
                        if ((n + l) >= MAXPATHLEN)
                                goto toolong;
                        memcpy(temp + n, appendstr, l);
                        n += l;
                        break;
                default:
                        temp[n++] = format[i];
                }
        }
        /* Loop can also end because n hit MAXPATHLEN mid-format. */
        if (format[i] != '\0')
                goto toolong;
        return (temp);
toolong:
        log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
            (long)pid, name, (u_long)uid);
        free(temp, M_TEMP);
        return (NULL);
}

/*
 * Dump a process' core.  The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 */

static int
coredump(struct thread *td)
{
        struct proc *p = td->td_proc;
        register struct vnode *vp;
        register struct ucred *cred = td->td_ucred;
        struct flock lf;
        struct nameidata nd;
        struct vattr vattr;
        int error, error1, flags, locked;
        struct mount *mp;
        char *name;                     /* name of corefile */
        off_t limit;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        _STOPEVENT(p, S_CORE, 0);

        /* Honor the sugid_coredump and do_coredump policy knobs. */
        if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
                PROC_UNLOCK(p);
                return (EFAULT);
        }

        /*
         * Note that the bulk of limit checking is done after
         * the corefile is created. The exception is if the limit
         * for corefiles is 0, in which case we don't bother
         * creating the corefile at all.  This layout means that
         * a corefile is truncated instead of not being created,
         * if it is larger than the limit.
2574 */ 2575 limit = (off_t)lim_cur(p, RLIMIT_CORE); 2576 PROC_UNLOCK(p); 2577 if (limit == 0) 2578 return (EFBIG); 2579 2580 mtx_lock(&Giant); 2581 restart: 2582 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid); 2583 if (name == NULL) { 2584 mtx_unlock(&Giant); 2585 return (EINVAL); 2586 } 2587 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */ 2588 flags = O_CREAT | FWRITE | O_NOFOLLOW; 2589 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR, -1); 2590 free(name, M_TEMP); 2591 if (error) { 2592 mtx_unlock(&Giant); 2593 return (error); 2594 } 2595 NDFREE(&nd, NDF_ONLY_PNBUF); 2596 vp = nd.ni_vp; 2597 2598 /* Don't dump to non-regular files or files with links. */ 2599 if (vp->v_type != VREG || 2600 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) { 2601 VOP_UNLOCK(vp, 0, td); 2602 error = EFAULT; 2603 goto out; 2604 } 2605 2606 VOP_UNLOCK(vp, 0, td); 2607 lf.l_whence = SEEK_SET; 2608 lf.l_start = 0; 2609 lf.l_len = 0; 2610 lf.l_type = F_WRLCK; 2611 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0); 2612 2613 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2614 lf.l_type = F_UNLCK; 2615 if (locked) 2616 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); 2617 if ((error = vn_close(vp, FWRITE, cred, td)) != 0) 2618 return (error); 2619 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0) 2620 return (error); 2621 goto restart; 2622 } 2623 2624 VATTR_NULL(&vattr); 2625 vattr.va_size = 0; 2626 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 2627 VOP_LEASE(vp, td, cred, LEASE_WRITE); 2628 VOP_SETATTR(vp, &vattr, cred, td); 2629 VOP_UNLOCK(vp, 0, td); 2630 PROC_LOCK(p); 2631 p->p_acflag |= ACORE; 2632 PROC_UNLOCK(p); 2633 2634 error = p->p_sysent->sv_coredump ? 
2635 p->p_sysent->sv_coredump(td, vp, limit) : 2636 ENOSYS; 2637 2638 if (locked) { 2639 lf.l_type = F_UNLCK; 2640 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); 2641 } 2642 vn_finished_write(mp); 2643 out: 2644 error1 = vn_close(vp, FWRITE, cred, td); 2645 mtx_unlock(&Giant); 2646 if (error == 0) 2647 error = error1; 2648 return (error); 2649 } 2650 2651 /* 2652 * Nonexistent system call-- signal process (may want to handle it). 2653 * Flag error in case process won't see signal immediately (blocked or ignored). 2654 */ 2655 #ifndef _SYS_SYSPROTO_H_ 2656 struct nosys_args { 2657 int dummy; 2658 }; 2659 #endif 2660 /* 2661 * MPSAFE 2662 */ 2663 /* ARGSUSED */ 2664 int 2665 nosys(td, args) 2666 struct thread *td; 2667 struct nosys_args *args; 2668 { 2669 struct proc *p = td->td_proc; 2670 2671 PROC_LOCK(p); 2672 psignal(p, SIGSYS); 2673 PROC_UNLOCK(p); 2674 return (ENOSYS); 2675 } 2676 2677 /* 2678 * Send a SIGIO or SIGURG signal to a process or process group using 2679 * stored credentials rather than those of the current process. 
 */
void
pgsigio(sigiop, sig, checkctty)
	struct sigio **sigiop;
	int sig, checkctty;
{
	struct sigio *sigio;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		/* No async-I/O recipient registered; nothing to do. */
		SIGIO_UNLOCK();
		return;
	}
	if (sigio->sio_pgid > 0) {
		/* Positive sio_pgid: deliver to a single process. */
		PROC_LOCK(sigio->sio_proc);
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		/*
		 * Negative sio_pgid: deliver to every member of the
		 * process group, optionally skipping processes without
		 * a controlling terminal (checkctty != 0).
		 */
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}

/*
 * kqueue EVFILT_SIGNAL attach routine: hook the knote onto the current
 * process' knote list.  EV_CLEAR is forced so the accumulated signal
 * count (kn_data) resets after each retrieval.
 */
static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	PROC_LOCK(p);
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	PROC_UNLOCK(p);

	return (0);
}

/*
 * kqueue EVFILT_SIGNAL detach routine: unhook the knote from the
 * process' knote list.
 */
static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	PROC_LOCK(p);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	PROC_UNLOCK(p);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
2745 */ 2746 static int 2747 filt_signal(struct knote *kn, long hint) 2748 { 2749 2750 if (hint & NOTE_SIGNAL) { 2751 hint &= ~NOTE_SIGNAL; 2752 2753 if (kn->kn_id == hint) 2754 kn->kn_data++; 2755 } 2756 return (kn->kn_data != 0); 2757 } 2758 2759 struct sigacts * 2760 sigacts_alloc(void) 2761 { 2762 struct sigacts *ps; 2763 2764 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO); 2765 ps->ps_refcnt = 1; 2766 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF); 2767 return (ps); 2768 } 2769 2770 void 2771 sigacts_free(struct sigacts *ps) 2772 { 2773 2774 mtx_lock(&ps->ps_mtx); 2775 ps->ps_refcnt--; 2776 if (ps->ps_refcnt == 0) { 2777 mtx_destroy(&ps->ps_mtx); 2778 free(ps, M_SUBPROC); 2779 } else 2780 mtx_unlock(&ps->ps_mtx); 2781 } 2782 2783 struct sigacts * 2784 sigacts_hold(struct sigacts *ps) 2785 { 2786 mtx_lock(&ps->ps_mtx); 2787 ps->ps_refcnt++; 2788 mtx_unlock(&ps->ps_mtx); 2789 return (ps); 2790 } 2791 2792 void 2793 sigacts_copy(struct sigacts *dest, struct sigacts *src) 2794 { 2795 2796 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest")); 2797 mtx_lock(&src->ps_mtx); 2798 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt)); 2799 mtx_unlock(&src->ps_mtx); 2800 } 2801 2802 int 2803 sigacts_shared(struct sigacts *ps) 2804 { 2805 int shared; 2806 2807 mtx_lock(&ps->ps_mtx); 2808 shared = ps->ps_refcnt > 1; 2809 mtx_unlock(&ps->ps_mtx); 2810 return (shared); 2811 } 2812