1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 39 * $FreeBSD$ 40 */ 41 42 #include "opt_compat.h" 43 #include "opt_ktrace.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/signalvar.h> 48 #include <sys/vnode.h> 49 #include <sys/acct.h> 50 #include <sys/condvar.h> 51 #include <sys/event.h> 52 #include <sys/fcntl.h> 53 #include <sys/kernel.h> 54 #include <sys/ktr.h> 55 #include <sys/ktrace.h> 56 #include <sys/lock.h> 57 #include <sys/malloc.h> 58 #include <sys/mutex.h> 59 #include <sys/namei.h> 60 #include <sys/proc.h> 61 #include <sys/pioctl.h> 62 #include <sys/resourcevar.h> 63 #include <sys/smp.h> 64 #include <sys/stat.h> 65 #include <sys/sx.h> 66 #include <sys/syscallsubr.h> 67 #include <sys/sysctl.h> 68 #include <sys/sysent.h> 69 #include <sys/syslog.h> 70 #include <sys/sysproto.h> 71 #include <sys/unistd.h> 72 #include <sys/wait.h> 73 74 #include <machine/cpu.h> 75 76 #if defined (__alpha__) && !defined(COMPAT_43) 77 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)" 78 #endif 79 80 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. 
*/ 81 82 static int coredump(struct thread *); 83 static char *expand_name(const char *, uid_t, pid_t); 84 static int killpg1(struct thread *td, int sig, int pgid, int all); 85 static int issignal(struct thread *p); 86 static int sigprop(int sig); 87 static void stop(struct proc *); 88 static void tdsigwakeup(struct thread *td, int sig, sig_t action); 89 static int filt_sigattach(struct knote *kn); 90 static void filt_sigdetach(struct knote *kn); 91 static int filt_signal(struct knote *kn, long hint); 92 static struct thread *sigtd(struct proc *p, int sig, int prop); 93 static int kern_sigtimedwait(struct thread *td, sigset_t set, 94 siginfo_t *info, struct timespec *timeout); 95 96 struct filterops sig_filtops = 97 { 0, filt_sigattach, filt_sigdetach, filt_signal }; 98 99 static int kern_logsigexit = 1; 100 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW, 101 &kern_logsigexit, 0, 102 "Log processes quitting on abnormal signals to syslog(3)"); 103 104 /* 105 * Policy -- Can ucred cr1 send SIGIO to process cr2? 106 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG 107 * in the right situations. 108 */ 109 #define CANSIGIO(cr1, cr2) \ 110 ((cr1)->cr_uid == 0 || \ 111 (cr1)->cr_ruid == (cr2)->cr_ruid || \ 112 (cr1)->cr_uid == (cr2)->cr_ruid || \ 113 (cr1)->cr_ruid == (cr2)->cr_uid || \ 114 (cr1)->cr_uid == (cr2)->cr_uid) 115 116 int sugid_coredump; 117 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW, 118 &sugid_coredump, 0, "Enable coredumping set user/group ID processes"); 119 120 static int do_coredump = 1; 121 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW, 122 &do_coredump, 0, "Enable/Disable coredumps"); 123 124 /* 125 * Signal properties and actions. 
126 * The array below categorizes the signals and their default actions 127 * according to the following properties: 128 */ 129 #define SA_KILL 0x01 /* terminates process by default */ 130 #define SA_CORE 0x02 /* ditto and coredumps */ 131 #define SA_STOP 0x04 /* suspend process */ 132 #define SA_TTYSTOP 0x08 /* ditto, from tty */ 133 #define SA_IGNORE 0x10 /* ignore by default */ 134 #define SA_CONT 0x20 /* continue if suspended */ 135 #define SA_CANTMASK 0x40 /* non-maskable, catchable */ 136 #define SA_PROC 0x80 /* deliverable to any thread */ 137 138 static int sigproptbl[NSIG] = { 139 SA_KILL|SA_PROC, /* SIGHUP */ 140 SA_KILL|SA_PROC, /* SIGINT */ 141 SA_KILL|SA_CORE|SA_PROC, /* SIGQUIT */ 142 SA_KILL|SA_CORE, /* SIGILL */ 143 SA_KILL|SA_CORE, /* SIGTRAP */ 144 SA_KILL|SA_CORE, /* SIGABRT */ 145 SA_KILL|SA_CORE|SA_PROC, /* SIGEMT */ 146 SA_KILL|SA_CORE, /* SIGFPE */ 147 SA_KILL|SA_PROC, /* SIGKILL */ 148 SA_KILL|SA_CORE, /* SIGBUS */ 149 SA_KILL|SA_CORE, /* SIGSEGV */ 150 SA_KILL|SA_CORE, /* SIGSYS */ 151 SA_KILL|SA_PROC, /* SIGPIPE */ 152 SA_KILL|SA_PROC, /* SIGALRM */ 153 SA_KILL|SA_PROC, /* SIGTERM */ 154 SA_IGNORE|SA_PROC, /* SIGURG */ 155 SA_STOP|SA_PROC, /* SIGSTOP */ 156 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTSTP */ 157 SA_IGNORE|SA_CONT|SA_PROC, /* SIGCONT */ 158 SA_IGNORE|SA_PROC, /* SIGCHLD */ 159 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTIN */ 160 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTOU */ 161 SA_IGNORE|SA_PROC, /* SIGIO */ 162 SA_KILL, /* SIGXCPU */ 163 SA_KILL, /* SIGXFSZ */ 164 SA_KILL|SA_PROC, /* SIGVTALRM */ 165 SA_KILL|SA_PROC, /* SIGPROF */ 166 SA_IGNORE|SA_PROC, /* SIGWINCH */ 167 SA_IGNORE|SA_PROC, /* SIGINFO */ 168 SA_KILL|SA_PROC, /* SIGUSR1 */ 169 SA_KILL|SA_PROC, /* SIGUSR2 */ 170 }; 171 172 /* 173 * Determine signal that should be delivered to process p, the current 174 * process, 0 if none. If there is a pending stop signal with default 175 * action, the process stops in issignal(). 
176 * XXXKSE the check for a pending stop is not done under KSE 177 * 178 * MP SAFE. 179 */ 180 int 181 cursig(struct thread *td) 182 { 183 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 184 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED); 185 mtx_assert(&sched_lock, MA_NOTOWNED); 186 return (SIGPENDING(td) ? issignal(td) : 0); 187 } 188 189 /* 190 * Arrange for ast() to handle unmasked pending signals on return to user 191 * mode. This must be called whenever a signal is added to td_siglist or 192 * unmasked in td_sigmask. 193 */ 194 void 195 signotify(struct thread *td) 196 { 197 struct proc *p; 198 sigset_t set; 199 200 p = td->td_proc; 201 202 PROC_LOCK_ASSERT(p, MA_OWNED); 203 204 /* 205 * If our mask changed we may have to move signal that were 206 * previously masked by all threads to our siglist. 207 */ 208 set = p->p_siglist; 209 SIGSETNAND(set, td->td_sigmask); 210 SIGSETNAND(p->p_siglist, set); 211 SIGSETOR(td->td_siglist, set); 212 213 if (SIGPENDING(td)) { 214 mtx_lock_spin(&sched_lock); 215 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING; 216 mtx_unlock_spin(&sched_lock); 217 } 218 } 219 220 int 221 sigonstack(size_t sp) 222 { 223 struct proc *p = curthread->td_proc; 224 225 PROC_LOCK_ASSERT(p, MA_OWNED); 226 return ((p->p_flag & P_ALTSTACK) ? 227 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 228 ((p->p_sigstk.ss_size == 0) ? 
(p->p_sigstk.ss_flags & SS_ONSTACK) : 229 ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size)) 230 #else 231 ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size) 232 #endif 233 : 0); 234 } 235 236 static __inline int 237 sigprop(int sig) 238 { 239 240 if (sig > 0 && sig < NSIG) 241 return (sigproptbl[_SIG_IDX(sig)]); 242 return (0); 243 } 244 245 int 246 sig_ffs(sigset_t *set) 247 { 248 int i; 249 250 for (i = 0; i < _SIG_WORDS; i++) 251 if (set->__bits[i]) 252 return (ffs(set->__bits[i]) + (i * 32)); 253 return (0); 254 } 255 256 /* 257 * kern_sigaction 258 * sigaction 259 * freebsd4_sigaction 260 * osigaction 261 * 262 * MPSAFE 263 */ 264 int 265 kern_sigaction(td, sig, act, oact, flags) 266 struct thread *td; 267 register int sig; 268 struct sigaction *act, *oact; 269 int flags; 270 { 271 struct sigacts *ps; 272 struct thread *td0; 273 struct proc *p = td->td_proc; 274 275 if (!_SIG_VALID(sig)) 276 return (EINVAL); 277 278 PROC_LOCK(p); 279 ps = p->p_sigacts; 280 mtx_lock(&ps->ps_mtx); 281 if (oact) { 282 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)]; 283 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)]; 284 oact->sa_flags = 0; 285 if (SIGISMEMBER(ps->ps_sigonstack, sig)) 286 oact->sa_flags |= SA_ONSTACK; 287 if (!SIGISMEMBER(ps->ps_sigintr, sig)) 288 oact->sa_flags |= SA_RESTART; 289 if (SIGISMEMBER(ps->ps_sigreset, sig)) 290 oact->sa_flags |= SA_RESETHAND; 291 if (SIGISMEMBER(ps->ps_signodefer, sig)) 292 oact->sa_flags |= SA_NODEFER; 293 if (SIGISMEMBER(ps->ps_siginfo, sig)) 294 oact->sa_flags |= SA_SIGINFO; 295 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP) 296 oact->sa_flags |= SA_NOCLDSTOP; 297 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT) 298 oact->sa_flags |= SA_NOCLDWAIT; 299 } 300 if (act) { 301 if ((sig == SIGKILL || sig == SIGSTOP) && 302 act->sa_handler != SIG_DFL) { 303 mtx_unlock(&ps->ps_mtx); 304 PROC_UNLOCK(p); 305 return (EINVAL); 306 } 307 308 /* 309 * Change setting atomically. 
310 */ 311 312 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask; 313 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]); 314 if (act->sa_flags & SA_SIGINFO) { 315 ps->ps_sigact[_SIG_IDX(sig)] = 316 (__sighandler_t *)act->sa_sigaction; 317 SIGADDSET(ps->ps_siginfo, sig); 318 } else { 319 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler; 320 SIGDELSET(ps->ps_siginfo, sig); 321 } 322 if (!(act->sa_flags & SA_RESTART)) 323 SIGADDSET(ps->ps_sigintr, sig); 324 else 325 SIGDELSET(ps->ps_sigintr, sig); 326 if (act->sa_flags & SA_ONSTACK) 327 SIGADDSET(ps->ps_sigonstack, sig); 328 else 329 SIGDELSET(ps->ps_sigonstack, sig); 330 if (act->sa_flags & SA_RESETHAND) 331 SIGADDSET(ps->ps_sigreset, sig); 332 else 333 SIGDELSET(ps->ps_sigreset, sig); 334 if (act->sa_flags & SA_NODEFER) 335 SIGADDSET(ps->ps_signodefer, sig); 336 else 337 SIGDELSET(ps->ps_signodefer, sig); 338 #ifdef COMPAT_SUNOS 339 if (act->sa_flags & SA_USERTRAMP) 340 SIGADDSET(ps->ps_usertramp, sig); 341 else 342 SIGDELSET(ps->ps_usertramp, sig); 343 #endif 344 if (sig == SIGCHLD) { 345 if (act->sa_flags & SA_NOCLDSTOP) 346 ps->ps_flag |= PS_NOCLDSTOP; 347 else 348 ps->ps_flag &= ~PS_NOCLDSTOP; 349 if (act->sa_flags & SA_NOCLDWAIT) { 350 /* 351 * Paranoia: since SA_NOCLDWAIT is implemented 352 * by reparenting the dying child to PID 1 (and 353 * trust it to reap the zombie), PID 1 itself 354 * is forbidden to set SA_NOCLDWAIT. 355 */ 356 if (p->p_pid == 1) 357 ps->ps_flag &= ~PS_NOCLDWAIT; 358 else 359 ps->ps_flag |= PS_NOCLDWAIT; 360 } else 361 ps->ps_flag &= ~PS_NOCLDWAIT; 362 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) 363 ps->ps_flag |= PS_CLDSIGIGN; 364 else 365 ps->ps_flag &= ~PS_CLDSIGIGN; 366 } 367 /* 368 * Set bit in ps_sigignore for signals that are set to SIG_IGN, 369 * and for signals set to SIG_DFL where the default is to 370 * ignore. However, don't put SIGCONT in ps_sigignore, as we 371 * have to restart the process. 
372 */ 373 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 374 (sigprop(sig) & SA_IGNORE && 375 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) { 376 /* never to be seen again */ 377 SIGDELSET(p->p_siglist, sig); 378 FOREACH_THREAD_IN_PROC(p, td0) 379 SIGDELSET(td0->td_siglist, sig); 380 if (sig != SIGCONT) 381 /* easier in psignal */ 382 SIGADDSET(ps->ps_sigignore, sig); 383 SIGDELSET(ps->ps_sigcatch, sig); 384 } else { 385 SIGDELSET(ps->ps_sigignore, sig); 386 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL) 387 SIGDELSET(ps->ps_sigcatch, sig); 388 else 389 SIGADDSET(ps->ps_sigcatch, sig); 390 } 391 #ifdef COMPAT_FREEBSD4 392 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 393 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || 394 (flags & KSA_FREEBSD4) == 0) 395 SIGDELSET(ps->ps_freebsd4, sig); 396 else 397 SIGADDSET(ps->ps_freebsd4, sig); 398 #endif 399 #ifdef COMPAT_43 400 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 401 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || 402 (flags & KSA_OSIGSET) == 0) 403 SIGDELSET(ps->ps_osigset, sig); 404 else 405 SIGADDSET(ps->ps_osigset, sig); 406 #endif 407 } 408 mtx_unlock(&ps->ps_mtx); 409 PROC_UNLOCK(p); 410 return (0); 411 } 412 413 #ifndef _SYS_SYSPROTO_H_ 414 struct sigaction_args { 415 int sig; 416 struct sigaction *act; 417 struct sigaction *oact; 418 }; 419 #endif 420 /* 421 * MPSAFE 422 */ 423 int 424 sigaction(td, uap) 425 struct thread *td; 426 register struct sigaction_args *uap; 427 { 428 struct sigaction act, oact; 429 register struct sigaction *actp, *oactp; 430 int error; 431 432 actp = (uap->act != NULL) ? &act : NULL; 433 oactp = (uap->oact != NULL) ? 
&oact : NULL; 434 if (actp) { 435 error = copyin(uap->act, actp, sizeof(act)); 436 if (error) 437 return (error); 438 } 439 error = kern_sigaction(td, uap->sig, actp, oactp, 0); 440 if (oactp && !error) 441 error = copyout(oactp, uap->oact, sizeof(oact)); 442 return (error); 443 } 444 445 #ifdef COMPAT_FREEBSD4 446 #ifndef _SYS_SYSPROTO_H_ 447 struct freebsd4_sigaction_args { 448 int sig; 449 struct sigaction *act; 450 struct sigaction *oact; 451 }; 452 #endif 453 /* 454 * MPSAFE 455 */ 456 int 457 freebsd4_sigaction(td, uap) 458 struct thread *td; 459 register struct freebsd4_sigaction_args *uap; 460 { 461 struct sigaction act, oact; 462 register struct sigaction *actp, *oactp; 463 int error; 464 465 466 actp = (uap->act != NULL) ? &act : NULL; 467 oactp = (uap->oact != NULL) ? &oact : NULL; 468 if (actp) { 469 error = copyin(uap->act, actp, sizeof(act)); 470 if (error) 471 return (error); 472 } 473 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4); 474 if (oactp && !error) 475 error = copyout(oactp, uap->oact, sizeof(oact)); 476 return (error); 477 } 478 #endif /* COMAPT_FREEBSD4 */ 479 480 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 481 #ifndef _SYS_SYSPROTO_H_ 482 struct osigaction_args { 483 int signum; 484 struct osigaction *nsa; 485 struct osigaction *osa; 486 }; 487 #endif 488 /* 489 * MPSAFE 490 */ 491 int 492 osigaction(td, uap) 493 struct thread *td; 494 register struct osigaction_args *uap; 495 { 496 struct osigaction sa; 497 struct sigaction nsa, osa; 498 register struct sigaction *nsap, *osap; 499 int error; 500 501 if (uap->signum <= 0 || uap->signum >= ONSIG) 502 return (EINVAL); 503 504 nsap = (uap->nsa != NULL) ? &nsa : NULL; 505 osap = (uap->osa != NULL) ? 
&osa : NULL; 506 507 if (nsap) { 508 error = copyin(uap->nsa, &sa, sizeof(sa)); 509 if (error) 510 return (error); 511 nsap->sa_handler = sa.sa_handler; 512 nsap->sa_flags = sa.sa_flags; 513 OSIG2SIG(sa.sa_mask, nsap->sa_mask); 514 } 515 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET); 516 if (osap && !error) { 517 sa.sa_handler = osap->sa_handler; 518 sa.sa_flags = osap->sa_flags; 519 SIG2OSIG(osap->sa_mask, sa.sa_mask); 520 error = copyout(&sa, uap->osa, sizeof(sa)); 521 } 522 return (error); 523 } 524 525 #if !defined(__i386__) && !defined(__alpha__) 526 /* Avoid replicating the same stub everywhere */ 527 int 528 osigreturn(td, uap) 529 struct thread *td; 530 struct osigreturn_args *uap; 531 { 532 533 return (nosys(td, (struct nosys_args *)uap)); 534 } 535 #endif 536 #endif /* COMPAT_43 */ 537 538 /* 539 * Initialize signal state for process 0; 540 * set to ignore signals that are ignored by default. 541 */ 542 void 543 siginit(p) 544 struct proc *p; 545 { 546 register int i; 547 struct sigacts *ps; 548 549 PROC_LOCK(p); 550 ps = p->p_sigacts; 551 mtx_lock(&ps->ps_mtx); 552 for (i = 1; i <= NSIG; i++) 553 if (sigprop(i) & SA_IGNORE && i != SIGCONT) 554 SIGADDSET(ps->ps_sigignore, i); 555 mtx_unlock(&ps->ps_mtx); 556 PROC_UNLOCK(p); 557 } 558 559 /* 560 * Reset signals for an exec of the specified process. 561 */ 562 void 563 execsigs(p) 564 register struct proc *p; 565 { 566 register struct sigacts *ps; 567 register int sig; 568 569 /* 570 * Reset caught signals. Held signals remain held 571 * through td_sigmask (unless they were caught, 572 * and are now ignored by default). 
573 */ 574 PROC_LOCK_ASSERT(p, MA_OWNED); 575 ps = p->p_sigacts; 576 mtx_lock(&ps->ps_mtx); 577 while (SIGNOTEMPTY(ps->ps_sigcatch)) { 578 sig = sig_ffs(&ps->ps_sigcatch); 579 SIGDELSET(ps->ps_sigcatch, sig); 580 if (sigprop(sig) & SA_IGNORE) { 581 if (sig != SIGCONT) 582 SIGADDSET(ps->ps_sigignore, sig); 583 SIGDELSET(p->p_siglist, sig); 584 /* 585 * There is only one thread at this point. 586 */ 587 SIGDELSET(FIRST_THREAD_IN_PROC(p)->td_siglist, sig); 588 } 589 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 590 } 591 /* 592 * Clear out the td's sigmask. Normal processes use the proc sigmask. 593 */ 594 SIGEMPTYSET(FIRST_THREAD_IN_PROC(p)->td_sigmask); 595 /* 596 * Reset stack state to the user stack. 597 * Clear set of signals caught on the signal stack. 598 */ 599 p->p_sigstk.ss_flags = SS_DISABLE; 600 p->p_sigstk.ss_size = 0; 601 p->p_sigstk.ss_sp = 0; 602 p->p_flag &= ~P_ALTSTACK; 603 /* 604 * Reset no zombies if child dies flag as Solaris does. 605 */ 606 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN); 607 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) 608 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL; 609 mtx_unlock(&ps->ps_mtx); 610 } 611 612 /* 613 * kern_sigprocmask() 614 * 615 * Manipulate signal mask. 
616 */ 617 int 618 kern_sigprocmask(td, how, set, oset, old) 619 struct thread *td; 620 int how; 621 sigset_t *set, *oset; 622 int old; 623 { 624 int error; 625 626 PROC_LOCK(td->td_proc); 627 if (oset != NULL) 628 *oset = td->td_sigmask; 629 630 error = 0; 631 if (set != NULL) { 632 switch (how) { 633 case SIG_BLOCK: 634 SIG_CANTMASK(*set); 635 SIGSETOR(td->td_sigmask, *set); 636 break; 637 case SIG_UNBLOCK: 638 SIGSETNAND(td->td_sigmask, *set); 639 signotify(td); 640 break; 641 case SIG_SETMASK: 642 SIG_CANTMASK(*set); 643 if (old) 644 SIGSETLO(td->td_sigmask, *set); 645 else 646 td->td_sigmask = *set; 647 signotify(td); 648 break; 649 default: 650 error = EINVAL; 651 break; 652 } 653 } 654 PROC_UNLOCK(td->td_proc); 655 return (error); 656 } 657 658 /* 659 * sigprocmask() - MP SAFE 660 */ 661 662 #ifndef _SYS_SYSPROTO_H_ 663 struct sigprocmask_args { 664 int how; 665 const sigset_t *set; 666 sigset_t *oset; 667 }; 668 #endif 669 int 670 sigprocmask(td, uap) 671 register struct thread *td; 672 struct sigprocmask_args *uap; 673 { 674 sigset_t set, oset; 675 sigset_t *setp, *osetp; 676 int error; 677 678 setp = (uap->set != NULL) ? &set : NULL; 679 osetp = (uap->oset != NULL) ? 
&oset : NULL; 680 if (setp) { 681 error = copyin(uap->set, setp, sizeof(set)); 682 if (error) 683 return (error); 684 } 685 error = kern_sigprocmask(td, uap->how, setp, osetp, 0); 686 if (osetp && !error) { 687 error = copyout(osetp, uap->oset, sizeof(oset)); 688 } 689 return (error); 690 } 691 692 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 693 /* 694 * osigprocmask() - MP SAFE 695 */ 696 #ifndef _SYS_SYSPROTO_H_ 697 struct osigprocmask_args { 698 int how; 699 osigset_t mask; 700 }; 701 #endif 702 int 703 osigprocmask(td, uap) 704 register struct thread *td; 705 struct osigprocmask_args *uap; 706 { 707 sigset_t set, oset; 708 int error; 709 710 OSIG2SIG(uap->mask, set); 711 error = kern_sigprocmask(td, uap->how, &set, &oset, 1); 712 SIG2OSIG(oset, td->td_retval[0]); 713 return (error); 714 } 715 #endif /* COMPAT_43 */ 716 717 #ifndef _SYS_SYSPROTO_H_ 718 struct sigpending_args { 719 sigset_t *set; 720 }; 721 #endif 722 /* 723 * MPSAFE 724 */ 725 int 726 sigwait(struct thread *td, struct sigwait_args *uap) 727 { 728 siginfo_t info; 729 sigset_t set; 730 int error; 731 732 error = copyin(uap->set, &set, sizeof(set)); 733 if (error) 734 return (error); 735 736 error = kern_sigtimedwait(td, set, &info, NULL); 737 if (error) 738 return (error); 739 740 error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo)); 741 /* Repost if we got an error. 
*/ 742 if (error && info.si_signo) { 743 PROC_LOCK(td->td_proc); 744 tdsignal(td, info.si_signo); 745 PROC_UNLOCK(td->td_proc); 746 } 747 return (error); 748 } 749 /* 750 * MPSAFE 751 */ 752 int 753 sigtimedwait(struct thread *td, struct sigtimedwait_args *uap) 754 { 755 struct timespec ts; 756 struct timespec *timeout; 757 sigset_t set; 758 siginfo_t info; 759 int error; 760 761 if (uap->timeout) { 762 error = copyin(uap->timeout, &ts, sizeof(ts)); 763 if (error) 764 return (error); 765 766 timeout = &ts; 767 } else 768 timeout = NULL; 769 770 error = copyin(uap->set, &set, sizeof(set)); 771 if (error) 772 return (error); 773 774 error = kern_sigtimedwait(td, set, &info, timeout); 775 if (error) 776 return (error); 777 778 error = copyout(&info, uap->info, sizeof(info)); 779 /* Repost if we got an error. */ 780 if (error && info.si_signo) { 781 PROC_LOCK(td->td_proc); 782 tdsignal(td, info.si_signo); 783 PROC_UNLOCK(td->td_proc); 784 } 785 return (error); 786 } 787 788 /* 789 * MPSAFE 790 */ 791 int 792 sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap) 793 { 794 siginfo_t info; 795 sigset_t set; 796 int error; 797 798 error = copyin(uap->set, &set, sizeof(set)); 799 if (error) 800 return (error); 801 802 error = kern_sigtimedwait(td, set, &info, NULL); 803 if (error) 804 return (error); 805 806 error = copyout(&info, uap->info, sizeof(info)); 807 /* Repost if we got an error. 
*/ 808 if (error && info.si_signo) { 809 PROC_LOCK(td->td_proc); 810 tdsignal(td, info.si_signo); 811 PROC_UNLOCK(td->td_proc); 812 } 813 return (error); 814 } 815 816 static int 817 kern_sigtimedwait(struct thread *td, sigset_t set, siginfo_t *info, 818 struct timespec *timeout) 819 { 820 register struct sigacts *ps; 821 sigset_t oldmask; 822 struct proc *p; 823 int error; 824 int sig; 825 int hz; 826 827 p = td->td_proc; 828 error = 0; 829 sig = 0; 830 SIG_CANTMASK(set); 831 832 PROC_LOCK(p); 833 ps = p->p_sigacts; 834 oldmask = td->td_sigmask; 835 td->td_sigmask = set; 836 signotify(td); 837 838 mtx_lock(&ps->ps_mtx); 839 sig = cursig(td); 840 if (sig) 841 goto out; 842 843 /* 844 * POSIX says this must be checked after looking for pending 845 * signals. 846 */ 847 if (timeout) { 848 struct timeval tv; 849 850 if (timeout->tv_nsec > 1000000000) { 851 error = EINVAL; 852 goto out; 853 } 854 TIMESPEC_TO_TIMEVAL(&tv, timeout); 855 hz = tvtohz(&tv); 856 } else 857 hz = 0; 858 859 mtx_unlock(&ps->ps_mtx); 860 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "pause", hz); 861 mtx_lock(&ps->ps_mtx); 862 if (error == EINTR) 863 error = 0; 864 else if (error) 865 goto out; 866 867 sig = cursig(td); 868 out: 869 td->td_sigmask = oldmask; 870 if (sig) { 871 sig_t action; 872 873 action = ps->ps_sigact[_SIG_IDX(sig)]; 874 mtx_unlock(&ps->ps_mtx); 875 #ifdef KTRACE 876 if (KTRPOINT(td, KTR_PSIG)) 877 ktrpsig(sig, action, td->td_flags & TDF_OLDMASK ? 
878 &td->td_oldsigmask : &td->td_sigmask, 0); 879 #endif 880 _STOPEVENT(p, S_SIG, sig); 881 882 if (action == SIG_DFL) 883 sigexit(td, sig); 884 /* NOTREACHED */ 885 886 SIGDELSET(td->td_siglist, sig); 887 info->si_signo = sig; 888 info->si_code = 0; 889 } else 890 mtx_unlock(&ps->ps_mtx); 891 PROC_UNLOCK(p); 892 return (error); 893 } 894 895 /* 896 * MPSAFE 897 */ 898 int 899 sigpending(td, uap) 900 struct thread *td; 901 struct sigpending_args *uap; 902 { 903 struct proc *p = td->td_proc; 904 sigset_t siglist; 905 906 PROC_LOCK(p); 907 siglist = p->p_siglist; 908 SIGSETOR(siglist, td->td_siglist); 909 PROC_UNLOCK(p); 910 return (copyout(&siglist, uap->set, sizeof(sigset_t))); 911 } 912 913 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 914 #ifndef _SYS_SYSPROTO_H_ 915 struct osigpending_args { 916 int dummy; 917 }; 918 #endif 919 /* 920 * MPSAFE 921 */ 922 int 923 osigpending(td, uap) 924 struct thread *td; 925 struct osigpending_args *uap; 926 { 927 struct proc *p = td->td_proc; 928 sigset_t siglist; 929 930 PROC_LOCK(p); 931 siglist = p->p_siglist; 932 SIGSETOR(siglist, td->td_siglist); 933 PROC_UNLOCK(p); 934 SIG2OSIG(siglist, td->td_retval[0]); 935 return (0); 936 } 937 #endif /* COMPAT_43 */ 938 939 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 940 /* 941 * Generalized interface signal handler, 4.3-compatible. 942 */ 943 #ifndef _SYS_SYSPROTO_H_ 944 struct osigvec_args { 945 int signum; 946 struct sigvec *nsv; 947 struct sigvec *osv; 948 }; 949 #endif 950 /* 951 * MPSAFE 952 */ 953 /* ARGSUSED */ 954 int 955 osigvec(td, uap) 956 struct thread *td; 957 register struct osigvec_args *uap; 958 { 959 struct sigvec vec; 960 struct sigaction nsa, osa; 961 register struct sigaction *nsap, *osap; 962 int error; 963 964 if (uap->signum <= 0 || uap->signum >= ONSIG) 965 return (EINVAL); 966 nsap = (uap->nsv != NULL) ? &nsa : NULL; 967 osap = (uap->osv != NULL) ? 
&osa : NULL; 968 if (nsap) { 969 error = copyin(uap->nsv, &vec, sizeof(vec)); 970 if (error) 971 return (error); 972 nsap->sa_handler = vec.sv_handler; 973 OSIG2SIG(vec.sv_mask, nsap->sa_mask); 974 nsap->sa_flags = vec.sv_flags; 975 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */ 976 #ifdef COMPAT_SUNOS 977 nsap->sa_flags |= SA_USERTRAMP; 978 #endif 979 } 980 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET); 981 if (osap && !error) { 982 vec.sv_handler = osap->sa_handler; 983 SIG2OSIG(osap->sa_mask, vec.sv_mask); 984 vec.sv_flags = osap->sa_flags; 985 vec.sv_flags &= ~SA_NOCLDWAIT; 986 vec.sv_flags ^= SA_RESTART; 987 #ifdef COMPAT_SUNOS 988 vec.sv_flags &= ~SA_NOCLDSTOP; 989 #endif 990 error = copyout(&vec, uap->osv, sizeof(vec)); 991 } 992 return (error); 993 } 994 995 #ifndef _SYS_SYSPROTO_H_ 996 struct osigblock_args { 997 int mask; 998 }; 999 #endif 1000 /* 1001 * MPSAFE 1002 */ 1003 int 1004 osigblock(td, uap) 1005 register struct thread *td; 1006 struct osigblock_args *uap; 1007 { 1008 struct proc *p = td->td_proc; 1009 sigset_t set; 1010 1011 OSIG2SIG(uap->mask, set); 1012 SIG_CANTMASK(set); 1013 PROC_LOCK(p); 1014 SIG2OSIG(td->td_sigmask, td->td_retval[0]); 1015 SIGSETOR(td->td_sigmask, set); 1016 PROC_UNLOCK(p); 1017 return (0); 1018 } 1019 1020 #ifndef _SYS_SYSPROTO_H_ 1021 struct osigsetmask_args { 1022 int mask; 1023 }; 1024 #endif 1025 /* 1026 * MPSAFE 1027 */ 1028 int 1029 osigsetmask(td, uap) 1030 struct thread *td; 1031 struct osigsetmask_args *uap; 1032 { 1033 struct proc *p = td->td_proc; 1034 sigset_t set; 1035 1036 OSIG2SIG(uap->mask, set); 1037 SIG_CANTMASK(set); 1038 PROC_LOCK(p); 1039 SIG2OSIG(td->td_sigmask, td->td_retval[0]); 1040 SIGSETLO(td->td_sigmask, set); 1041 signotify(td); 1042 PROC_UNLOCK(p); 1043 return (0); 1044 } 1045 #endif /* COMPAT_43 || COMPAT_SUNOS */ 1046 1047 /* 1048 * Suspend process until signal, providing mask to be set 1049 * in the meantime. 
Note nonstandard calling convention: 1050 * libc stub passes mask, not pointer, to save a copyin. 1051 ***** XXXKSE this doesn't make sense under KSE. 1052 ***** Do we suspend the thread or all threads in the process? 1053 ***** How do we suspend threads running NOW on another processor? 1054 */ 1055 #ifndef _SYS_SYSPROTO_H_ 1056 struct sigsuspend_args { 1057 const sigset_t *sigmask; 1058 }; 1059 #endif 1060 /* 1061 * MPSAFE 1062 */ 1063 /* ARGSUSED */ 1064 int 1065 sigsuspend(td, uap) 1066 struct thread *td; 1067 struct sigsuspend_args *uap; 1068 { 1069 sigset_t mask; 1070 int error; 1071 1072 error = copyin(uap->sigmask, &mask, sizeof(mask)); 1073 if (error) 1074 return (error); 1075 return (kern_sigsuspend(td, mask)); 1076 } 1077 1078 int 1079 kern_sigsuspend(struct thread *td, sigset_t mask) 1080 { 1081 struct proc *p = td->td_proc; 1082 1083 /* 1084 * When returning from sigsuspend, we want 1085 * the old mask to be restored after the 1086 * signal handler has finished. Thus, we 1087 * save it here and mark the sigacts structure 1088 * to indicate this. 1089 */ 1090 PROC_LOCK(p); 1091 td->td_oldsigmask = td->td_sigmask; 1092 mtx_lock_spin(&sched_lock); 1093 td->td_flags |= TDF_OLDMASK; 1094 mtx_unlock_spin(&sched_lock); 1095 SIG_CANTMASK(mask); 1096 td->td_sigmask = mask; 1097 signotify(td); 1098 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0) 1099 /* void */; 1100 PROC_UNLOCK(p); 1101 /* always return EINTR rather than ERESTART... 
*/ 1102 return (EINTR); 1103 } 1104 1105 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 1106 #ifndef _SYS_SYSPROTO_H_ 1107 struct osigsuspend_args { 1108 osigset_t mask; 1109 }; 1110 #endif 1111 /* 1112 * MPSAFE 1113 */ 1114 /* ARGSUSED */ 1115 int 1116 osigsuspend(td, uap) 1117 struct thread *td; 1118 struct osigsuspend_args *uap; 1119 { 1120 struct proc *p = td->td_proc; 1121 sigset_t mask; 1122 1123 PROC_LOCK(p); 1124 td->td_oldsigmask = td->td_sigmask; 1125 mtx_lock_spin(&sched_lock); 1126 td->td_flags |= TDF_OLDMASK; 1127 mtx_unlock_spin(&sched_lock); 1128 OSIG2SIG(uap->mask, mask); 1129 SIG_CANTMASK(mask); 1130 SIGSETLO(td->td_sigmask, mask); 1131 signotify(td); 1132 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0) 1133 /* void */; 1134 PROC_UNLOCK(p); 1135 /* always return EINTR rather than ERESTART... */ 1136 return (EINTR); 1137 } 1138 #endif /* COMPAT_43 */ 1139 1140 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 1141 #ifndef _SYS_SYSPROTO_H_ 1142 struct osigstack_args { 1143 struct sigstack *nss; 1144 struct sigstack *oss; 1145 }; 1146 #endif 1147 /* 1148 * MPSAFE 1149 */ 1150 /* ARGSUSED */ 1151 int 1152 osigstack(td, uap) 1153 struct thread *td; 1154 register struct osigstack_args *uap; 1155 { 1156 struct proc *p = td->td_proc; 1157 struct sigstack nss, oss; 1158 int error = 0; 1159 1160 if (uap->nss != NULL) { 1161 error = copyin(uap->nss, &nss, sizeof(nss)); 1162 if (error) 1163 return (error); 1164 } 1165 PROC_LOCK(p); 1166 oss.ss_sp = p->p_sigstk.ss_sp; 1167 oss.ss_onstack = sigonstack(cpu_getstack(td)); 1168 if (uap->nss != NULL) { 1169 p->p_sigstk.ss_sp = nss.ss_sp; 1170 p->p_sigstk.ss_size = 0; 1171 p->p_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK; 1172 p->p_flag |= P_ALTSTACK; 1173 } 1174 PROC_UNLOCK(p); 1175 if (uap->oss != NULL) 1176 error = copyout(&oss, uap->oss, sizeof(oss)); 1177 1178 return (error); 1179 } 1180 #endif /* COMPAT_43 || COMPAT_SUNOS */ 1181 1182 #ifndef _SYS_SYSPROTO_H_ 1183 struct sigaltstack_args { 
1184 stack_t *ss; 1185 stack_t *oss; 1186 }; 1187 #endif 1188 /* 1189 * MPSAFE 1190 */ 1191 /* ARGSUSED */ 1192 int 1193 sigaltstack(td, uap) 1194 struct thread *td; 1195 register struct sigaltstack_args *uap; 1196 { 1197 stack_t ss, oss; 1198 int error; 1199 1200 if (uap->ss != NULL) { 1201 error = copyin(uap->ss, &ss, sizeof(ss)); 1202 if (error) 1203 return (error); 1204 } 1205 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL, 1206 (uap->oss != NULL) ? &oss : NULL); 1207 if (error) 1208 return (error); 1209 if (uap->oss != NULL) 1210 error = copyout(&oss, uap->oss, sizeof(stack_t)); 1211 return (error); 1212 } 1213 1214 int 1215 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss) 1216 { 1217 struct proc *p = td->td_proc; 1218 int oonstack; 1219 1220 PROC_LOCK(p); 1221 oonstack = sigonstack(cpu_getstack(td)); 1222 1223 if (oss != NULL) { 1224 *oss = p->p_sigstk; 1225 oss->ss_flags = (p->p_flag & P_ALTSTACK) 1226 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 1227 } 1228 1229 if (ss != NULL) { 1230 if (oonstack) { 1231 PROC_UNLOCK(p); 1232 return (EPERM); 1233 } 1234 if ((ss->ss_flags & ~SS_DISABLE) != 0) { 1235 PROC_UNLOCK(p); 1236 return (EINVAL); 1237 } 1238 if (!(ss->ss_flags & SS_DISABLE)) { 1239 if (ss->ss_size < p->p_sysent->sv_minsigstksz) { 1240 PROC_UNLOCK(p); 1241 return (ENOMEM); 1242 } 1243 p->p_sigstk = *ss; 1244 p->p_flag |= P_ALTSTACK; 1245 } else { 1246 p->p_flag &= ~P_ALTSTACK; 1247 } 1248 } 1249 PROC_UNLOCK(p); 1250 return (0); 1251 } 1252 1253 /* 1254 * Common code for kill process group/broadcast kill. 1255 * cp is calling process. 
1256 */ 1257 static int 1258 killpg1(td, sig, pgid, all) 1259 register struct thread *td; 1260 int sig, pgid, all; 1261 { 1262 register struct proc *p; 1263 struct pgrp *pgrp; 1264 int nfound = 0; 1265 1266 if (all) { 1267 /* 1268 * broadcast 1269 */ 1270 sx_slock(&allproc_lock); 1271 LIST_FOREACH(p, &allproc, p_list) { 1272 PROC_LOCK(p); 1273 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || 1274 p == td->td_proc) { 1275 PROC_UNLOCK(p); 1276 continue; 1277 } 1278 if (p_cansignal(td, p, sig) == 0) { 1279 nfound++; 1280 if (sig) 1281 psignal(p, sig); 1282 } 1283 PROC_UNLOCK(p); 1284 } 1285 sx_sunlock(&allproc_lock); 1286 } else { 1287 sx_slock(&proctree_lock); 1288 if (pgid == 0) { 1289 /* 1290 * zero pgid means send to my process group. 1291 */ 1292 pgrp = td->td_proc->p_pgrp; 1293 PGRP_LOCK(pgrp); 1294 } else { 1295 pgrp = pgfind(pgid); 1296 if (pgrp == NULL) { 1297 sx_sunlock(&proctree_lock); 1298 return (ESRCH); 1299 } 1300 } 1301 sx_sunlock(&proctree_lock); 1302 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 1303 PROC_LOCK(p); 1304 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) { 1305 PROC_UNLOCK(p); 1306 continue; 1307 } 1308 if (p->p_state == PRS_ZOMBIE) { 1309 PROC_UNLOCK(p); 1310 continue; 1311 } 1312 if (p_cansignal(td, p, sig) == 0) { 1313 nfound++; 1314 if (sig) 1315 psignal(p, sig); 1316 } 1317 PROC_UNLOCK(p); 1318 } 1319 PGRP_UNLOCK(pgrp); 1320 } 1321 return (nfound ? 
0 : ESRCH); 1322 } 1323 1324 #ifndef _SYS_SYSPROTO_H_ 1325 struct kill_args { 1326 int pid; 1327 int signum; 1328 }; 1329 #endif 1330 /* 1331 * MPSAFE 1332 */ 1333 /* ARGSUSED */ 1334 int 1335 kill(td, uap) 1336 register struct thread *td; 1337 register struct kill_args *uap; 1338 { 1339 register struct proc *p; 1340 int error; 1341 1342 if ((u_int)uap->signum > _SIG_MAXSIG) 1343 return (EINVAL); 1344 1345 if (uap->pid > 0) { 1346 /* kill single process */ 1347 if ((p = pfind(uap->pid)) == NULL) 1348 return (ESRCH); 1349 error = p_cansignal(td, p, uap->signum); 1350 if (error == 0 && uap->signum) 1351 psignal(p, uap->signum); 1352 PROC_UNLOCK(p); 1353 return (error); 1354 } 1355 switch (uap->pid) { 1356 case -1: /* broadcast signal */ 1357 return (killpg1(td, uap->signum, 0, 1)); 1358 case 0: /* signal own process group */ 1359 return (killpg1(td, uap->signum, 0, 0)); 1360 default: /* negative explicit process group */ 1361 return (killpg1(td, uap->signum, -uap->pid, 0)); 1362 } 1363 /* NOTREACHED */ 1364 } 1365 1366 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 1367 #ifndef _SYS_SYSPROTO_H_ 1368 struct okillpg_args { 1369 int pgid; 1370 int signum; 1371 }; 1372 #endif 1373 /* 1374 * MPSAFE 1375 */ 1376 /* ARGSUSED */ 1377 int 1378 okillpg(td, uap) 1379 struct thread *td; 1380 register struct okillpg_args *uap; 1381 { 1382 1383 if ((u_int)uap->signum > _SIG_MAXSIG) 1384 return (EINVAL); 1385 return (killpg1(td, uap->signum, uap->pgid, 0)); 1386 } 1387 #endif /* COMPAT_43 || COMPAT_SUNOS */ 1388 1389 /* 1390 * Send a signal to a process group. 1391 */ 1392 void 1393 gsignal(pgid, sig) 1394 int pgid, sig; 1395 { 1396 struct pgrp *pgrp; 1397 1398 if (pgid != 0) { 1399 sx_slock(&proctree_lock); 1400 pgrp = pgfind(pgid); 1401 sx_sunlock(&proctree_lock); 1402 if (pgrp != NULL) { 1403 pgsignal(pgrp, sig, 0); 1404 PGRP_UNLOCK(pgrp); 1405 } 1406 } 1407 } 1408 1409 /* 1410 * Send a signal to a process group. 
 * If checktty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(pgrp, sig, checkctty)
	struct pgrp *pgrp;
	int sig, checkctty;
{
	register struct proc *p;

	if (pgrp) {
		/* Caller must hold the pgrp lock (see gsignal()). */
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 *
 * MPSAFE
 */
void
trapsignal(struct thread *td, int sig, u_long code)
{
	struct sigacts *ps;
	struct proc *p;

	p = td->td_proc;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/*
	 * Fast path: not traced, the signal is caught, and not masked
	 * by this thread -- invoke the handler directly from here.
	 */
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(td->td_sigmask, sig)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
		    &td->td_sigmask, code);
		/*
		 * Block the handler's catchmask, and the signal itself
		 * unless SA_NODEFER was requested for it.
		 */
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
	} else {
		/* Slow path: post the signal for normal delivery. */
		mtx_unlock(&ps->ps_mtx);
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;		/* XXX to verify code */
		tdsignal(td, sig);
	}
	PROC_UNLOCK(p);
}

/*
 * Pick the thread in process p that should receive signal "sig".
 */
static struct thread *
sigtd(struct proc *p, int sig, int prop)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If we know the signal is bound for a specific thread then we
	 * assume that we are in that threads context.  This is the case
	 * for SIGXCPU, SIGILL, etc.  Otherwise someone did a kill() from
	 * userland and the real thread doesn't actually matter.
	 */
	if ((prop & SA_PROC) != 0 && curthread->td_proc == p)
		return (curthread);

	/*
	 * We should search for the first thread that is blocked in
	 * sigsuspend with this signal unmasked.
	 */

	/* XXX */

	/*
	 * Find the first thread in the proc that doesn't have this signal
	 * masked.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		if (!SIGISMEMBER(td->td_sigmask, sig))
			return (td);

	return (FIRST_THREAD_IN_PROC(p));
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
1528 * 1529 * MPSAFE 1530 */ 1531 void 1532 psignal(struct proc *p, int sig) 1533 { 1534 struct thread *td; 1535 int prop; 1536 1537 PROC_LOCK_ASSERT(p, MA_OWNED); 1538 prop = sigprop(sig); 1539 1540 /* 1541 * Find a thread to deliver the signal to. 1542 */ 1543 td = sigtd(p, sig, prop); 1544 1545 tdsignal(td, sig); 1546 } 1547 1548 /* 1549 * MPSAFE 1550 */ 1551 void 1552 tdsignal(struct thread *td, int sig) 1553 { 1554 struct proc *p; 1555 register sig_t action; 1556 sigset_t *siglist; 1557 struct thread *td0; 1558 register int prop; 1559 struct sigacts *ps; 1560 1561 KASSERT(_SIG_VALID(sig), 1562 ("tdsignal(): invalid signal %d\n", sig)); 1563 1564 p = td->td_proc; 1565 ps = p->p_sigacts; 1566 1567 PROC_LOCK_ASSERT(p, MA_OWNED); 1568 KNOTE(&p->p_klist, NOTE_SIGNAL | sig); 1569 1570 prop = sigprop(sig); 1571 1572 /* 1573 * If this thread is blocking this signal then we'll leave it in the 1574 * proc so that we can find it in the first thread that unblocks it. 1575 */ 1576 if (SIGISMEMBER(td->td_sigmask, sig)) 1577 siglist = &p->p_siglist; 1578 else 1579 siglist = &td->td_siglist; 1580 1581 /* 1582 * If proc is traced, always give parent a chance; 1583 * if signal event is tracked by procfs, give *that* 1584 * a chance, as well. 1585 */ 1586 if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) { 1587 action = SIG_DFL; 1588 } else { 1589 /* 1590 * If the signal is being ignored, 1591 * then we forget about it immediately. 1592 * (Note: we don't set SIGCONT in ps_sigignore, 1593 * and if it is set to SIG_IGN, 1594 * action will be SIG_DFL here.) 
1595 */ 1596 mtx_lock(&ps->ps_mtx); 1597 if (SIGISMEMBER(ps->ps_sigignore, sig) || 1598 (p->p_flag & P_WEXIT)) { 1599 mtx_unlock(&ps->ps_mtx); 1600 return; 1601 } 1602 if (SIGISMEMBER(td->td_sigmask, sig)) 1603 action = SIG_HOLD; 1604 else if (SIGISMEMBER(ps->ps_sigcatch, sig)) 1605 action = SIG_CATCH; 1606 else 1607 action = SIG_DFL; 1608 mtx_unlock(&ps->ps_mtx); 1609 } 1610 1611 if (prop & SA_CONT) { 1612 SIG_STOPSIGMASK(p->p_siglist); 1613 /* 1614 * XXX Should investigate leaving STOP and CONT sigs only in 1615 * the proc's siglist. 1616 */ 1617 FOREACH_THREAD_IN_PROC(p, td0) 1618 SIG_STOPSIGMASK(td0->td_siglist); 1619 } 1620 1621 if (prop & SA_STOP) { 1622 /* 1623 * If sending a tty stop signal to a member of an orphaned 1624 * process group, discard the signal here if the action 1625 * is default; don't stop the process below if sleeping, 1626 * and don't clear any pending SIGCONT. 1627 */ 1628 if ((prop & SA_TTYSTOP) && 1629 (p->p_pgrp->pg_jobc == 0) && 1630 (action == SIG_DFL)) 1631 return; 1632 SIG_CONTSIGMASK(p->p_siglist); 1633 FOREACH_THREAD_IN_PROC(p, td0) 1634 SIG_CONTSIGMASK(td0->td_siglist); 1635 p->p_flag &= ~P_CONTINUED; 1636 } 1637 SIGADDSET(*siglist, sig); 1638 signotify(td); /* uses schedlock */ 1639 /* 1640 * Defer further processing for signals which are held, 1641 * except that stopped processes must be continued by SIGCONT. 1642 */ 1643 if (action == SIG_HOLD && 1644 !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG))) 1645 return; 1646 /* 1647 * Some signals have a process-wide effect and a per-thread 1648 * component. Most processing occurs when the process next 1649 * tries to cross the user boundary, however there are some 1650 * times when processing needs to be done immediatly, such as 1651 * waking up threads so that they can cross the user boundary. 1652 * We try do the per-process part here. 1653 */ 1654 if (P_SHOULDSTOP(p)) { 1655 /* 1656 * The process is in stopped mode. 
All the threads should be 1657 * either winding down or already on the suspended queue. 1658 */ 1659 if (p->p_flag & P_TRACED) { 1660 /* 1661 * The traced process is already stopped, 1662 * so no further action is necessary. 1663 * No signal can restart us. 1664 */ 1665 goto out; 1666 } 1667 1668 if (sig == SIGKILL) { 1669 /* 1670 * SIGKILL sets process running. 1671 * It will die elsewhere. 1672 * All threads must be restarted. 1673 */ 1674 p->p_flag &= ~P_STOPPED; 1675 goto runfast; 1676 } 1677 1678 if (prop & SA_CONT) { 1679 /* 1680 * If SIGCONT is default (or ignored), we continue the 1681 * process but don't leave the signal in siglist as 1682 * it has no further action. If SIGCONT is held, we 1683 * continue the process and leave the signal in 1684 * siglist. If the process catches SIGCONT, let it 1685 * handle the signal itself. If it isn't waiting on 1686 * an event, it goes back to run state. 1687 * Otherwise, process goes back to sleep state. 1688 */ 1689 p->p_flag &= ~P_STOPPED_SIG; 1690 p->p_flag |= P_CONTINUED; 1691 if (action == SIG_DFL) { 1692 SIGDELSET(*siglist, sig); 1693 } else if (action == SIG_CATCH) { 1694 /* 1695 * The process wants to catch it so it needs 1696 * to run at least one thread, but which one? 1697 * It would seem that the answer would be to 1698 * run an upcall in the next KSE to run, and 1699 * deliver the signal that way. In a NON KSE 1700 * process, we need to make sure that the 1701 * single thread is runnable asap. 1702 * XXXKSE for now however, make them all run. 1703 */ 1704 goto runfast; 1705 } 1706 /* 1707 * The signal is not ignored or caught. 1708 */ 1709 mtx_lock_spin(&sched_lock); 1710 thread_unsuspend(p); 1711 mtx_unlock_spin(&sched_lock); 1712 goto out; 1713 } 1714 1715 if (prop & SA_STOP) { 1716 /* 1717 * Already stopped, don't need to stop again 1718 * (If we did the shell could get confused). 1719 * Just make sure the signal STOP bit set. 
1720 */ 1721 p->p_flag |= P_STOPPED_SIG; 1722 SIGDELSET(*siglist, sig); 1723 goto out; 1724 } 1725 1726 /* 1727 * All other kinds of signals: 1728 * If a thread is sleeping interruptibly, simulate a 1729 * wakeup so that when it is continued it will be made 1730 * runnable and can look at the signal. However, don't make 1731 * the PROCESS runnable, leave it stopped. 1732 * It may run a bit until it hits a thread_suspend_check(). 1733 */ 1734 mtx_lock_spin(&sched_lock); 1735 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) { 1736 if (td->td_flags & TDF_CVWAITQ) 1737 cv_abort(td); 1738 else 1739 abortsleep(td); 1740 } 1741 mtx_unlock_spin(&sched_lock); 1742 goto out; 1743 /* 1744 * XXXKSE What about threads that are waiting on mutexes? 1745 * Shouldn't they abort too? 1746 * No, hopefully mutexes are short lived.. They'll 1747 * eventually hit thread_suspend_check(). 1748 */ 1749 } else if (p->p_state == PRS_NORMAL) { 1750 if ((p->p_flag & P_TRACED) || (action != SIG_DFL) || 1751 !(prop & SA_STOP)) { 1752 mtx_lock_spin(&sched_lock); 1753 tdsigwakeup(td, sig, action); 1754 mtx_unlock_spin(&sched_lock); 1755 goto out; 1756 } 1757 if (prop & SA_STOP) { 1758 if (p->p_flag & P_PPWAIT) 1759 goto out; 1760 p->p_flag |= P_STOPPED_SIG; 1761 p->p_xstat = sig; 1762 mtx_lock_spin(&sched_lock); 1763 FOREACH_THREAD_IN_PROC(p, td0) { 1764 if (TD_IS_SLEEPING(td0) && 1765 (td->td_flags & TDF_SINTR)) 1766 thread_suspend_one(td0); 1767 } 1768 thread_stopped(p); 1769 if (p->p_numthreads == p->p_suspcount) { 1770 SIGDELSET(p->p_siglist, p->p_xstat); 1771 FOREACH_THREAD_IN_PROC(p, td0) 1772 SIGDELSET(td0->td_siglist, p->p_xstat); 1773 } 1774 mtx_unlock_spin(&sched_lock); 1775 goto out; 1776 } 1777 else 1778 goto runfast; 1779 /* NOTREACHED */ 1780 } else { 1781 /* Not in "NORMAL" state. discard the signal. */ 1782 SIGDELSET(*siglist, sig); 1783 goto out; 1784 } 1785 1786 /* 1787 * The process is not stopped so we need to apply the signal to all the 1788 * running threads. 
1789 */ 1790 1791 runfast: 1792 mtx_lock_spin(&sched_lock); 1793 tdsigwakeup(td, sig, action); 1794 thread_unsuspend(p); 1795 mtx_unlock_spin(&sched_lock); 1796 out: 1797 /* If we jump here, sched_lock should not be owned. */ 1798 mtx_assert(&sched_lock, MA_NOTOWNED); 1799 } 1800 1801 /* 1802 * The force of a signal has been directed against a single 1803 * thread. We need to see what we can do about knocking it 1804 * out of any sleep it may be in etc. 1805 */ 1806 static void 1807 tdsigwakeup(struct thread *td, int sig, sig_t action) 1808 { 1809 struct proc *p = td->td_proc; 1810 register int prop; 1811 1812 PROC_LOCK_ASSERT(p, MA_OWNED); 1813 mtx_assert(&sched_lock, MA_OWNED); 1814 prop = sigprop(sig); 1815 /* 1816 * Bring the priority of a thread up if we want it to get 1817 * killed in this lifetime. 1818 */ 1819 if ((action == SIG_DFL) && (prop & SA_KILL)) { 1820 if (td->td_priority > PUSER) { 1821 td->td_priority = PUSER; 1822 } 1823 } 1824 if (TD_IS_SLEEPING(td)) { 1825 /* 1826 * If thread is sleeping uninterruptibly 1827 * we can't interrupt the sleep... the signal will 1828 * be noticed when the process returns through 1829 * trap() or syscall(). 1830 */ 1831 if ((td->td_flags & TDF_SINTR) == 0) { 1832 return; 1833 } 1834 /* 1835 * Process is sleeping and traced. Make it runnable 1836 * so it can discover the signal in issignal() and stop 1837 * for its parent. 1838 */ 1839 if (p->p_flag & P_TRACED) { 1840 p->p_flag &= ~P_STOPPED_TRACE; 1841 } else { 1842 1843 /* 1844 * If SIGCONT is default (or ignored) and process is 1845 * asleep, we are finished; the process should not 1846 * be awakened. 1847 */ 1848 if ((prop & SA_CONT) && action == SIG_DFL) { 1849 SIGDELSET(p->p_siglist, sig); 1850 /* 1851 * It may be on either list in this state. 1852 * Remove from both for now. 1853 */ 1854 SIGDELSET(td->td_siglist, sig); 1855 return; 1856 } 1857 1858 /* 1859 * Raise priority to at least PUSER. 
1860 */ 1861 if (td->td_priority > PUSER) { 1862 td->td_priority = PUSER; 1863 } 1864 } 1865 if (td->td_flags & TDF_CVWAITQ) 1866 cv_abort(td); 1867 else 1868 abortsleep(td); 1869 } 1870 #ifdef SMP 1871 else { 1872 /* 1873 * Other states do nothing with the signal immediatly, 1874 * other than kicking ourselves if we are running. 1875 * It will either never be noticed, or noticed very soon. 1876 */ 1877 if (TD_IS_RUNNING(td) && td != curthread) { 1878 forward_signal(td); 1879 } 1880 } 1881 #endif 1882 } 1883 1884 /* 1885 * If the current process has received a signal (should be caught or cause 1886 * termination, should interrupt current syscall), return the signal number. 1887 * Stop signals with default action are processed immediately, then cleared; 1888 * they aren't returned. This is checked after each entry to the system for 1889 * a syscall or trap (though this can usually be done without calling issignal 1890 * by checking the pending signal masks in cursig.) The normal call 1891 * sequence is 1892 * 1893 * while (sig = cursig(curthread)) 1894 * postsig(sig); 1895 */ 1896 static int 1897 issignal(td) 1898 struct thread *td; 1899 { 1900 struct proc *p; 1901 struct sigacts *ps; 1902 sigset_t sigpending; 1903 register int sig, prop; 1904 1905 p = td->td_proc; 1906 ps = p->p_sigacts; 1907 mtx_assert(&ps->ps_mtx, MA_OWNED); 1908 PROC_LOCK_ASSERT(p, MA_OWNED); 1909 for (;;) { 1910 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG); 1911 1912 sigpending = td->td_siglist; 1913 SIGSETNAND(sigpending, td->td_sigmask); 1914 1915 if (p->p_flag & P_PPWAIT) 1916 SIG_STOPSIGMASK(sigpending); 1917 if (SIGISEMPTY(sigpending)) /* no signal to send */ 1918 return (0); 1919 sig = sig_ffs(&sigpending); 1920 1921 _STOPEVENT(p, S_SIG, sig); 1922 1923 /* 1924 * We should see pending but ignored signals 1925 * only if P_TRACED was on when they were posted. 
1926 */ 1927 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) { 1928 SIGDELSET(td->td_siglist, sig); 1929 continue; 1930 } 1931 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) { 1932 /* 1933 * If traced, always stop. 1934 */ 1935 mtx_unlock(&ps->ps_mtx); 1936 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 1937 &p->p_mtx.mtx_object, "Stopping for traced signal"); 1938 p->p_xstat = sig; 1939 PROC_LOCK(p->p_pptr); 1940 psignal(p->p_pptr, SIGCHLD); 1941 PROC_UNLOCK(p->p_pptr); 1942 mtx_lock_spin(&sched_lock); 1943 stop(p); /* uses schedlock too eventually */ 1944 thread_suspend_one(td); 1945 PROC_UNLOCK(p); 1946 DROP_GIANT(); 1947 p->p_stats->p_ru.ru_nivcsw++; 1948 mi_switch(); 1949 mtx_unlock_spin(&sched_lock); 1950 PICKUP_GIANT(); 1951 PROC_LOCK(p); 1952 mtx_lock(&ps->ps_mtx); 1953 1954 /* 1955 * If parent wants us to take the signal, 1956 * then it will leave it in p->p_xstat; 1957 * otherwise we just look for signals again. 1958 */ 1959 SIGDELSET(td->td_siglist, sig); /* clear old signal */ 1960 sig = p->p_xstat; 1961 if (sig == 0) 1962 continue; 1963 1964 /* 1965 * If the traced bit got turned off, go back up 1966 * to the top to rescan signals. This ensures 1967 * that p_sig* and p_sigact are consistent. 1968 */ 1969 if ((p->p_flag & P_TRACED) == 0) 1970 continue; 1971 1972 /* 1973 * Put the new signal into td_siglist. If the 1974 * signal is being masked, look for other signals. 1975 */ 1976 SIGADDSET(td->td_siglist, sig); 1977 if (SIGISMEMBER(td->td_sigmask, sig)) 1978 continue; 1979 signotify(td); 1980 } 1981 1982 prop = sigprop(sig); 1983 1984 /* 1985 * Decide whether the signal should be returned. 1986 * Return the signal's number, or fall through 1987 * to clear it from the pending mask. 1988 */ 1989 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { 1990 1991 case (intptr_t)SIG_DFL: 1992 /* 1993 * Don't take default actions on system processes. 
1994 */ 1995 if (p->p_pid <= 1) { 1996 #ifdef DIAGNOSTIC 1997 /* 1998 * Are you sure you want to ignore SIGSEGV 1999 * in init? XXX 2000 */ 2001 printf("Process (pid %lu) got signal %d\n", 2002 (u_long)p->p_pid, sig); 2003 #endif 2004 break; /* == ignore */ 2005 } 2006 /* 2007 * If there is a pending stop signal to process 2008 * with default action, stop here, 2009 * then clear the signal. However, 2010 * if process is member of an orphaned 2011 * process group, ignore tty stop signals. 2012 */ 2013 if (prop & SA_STOP) { 2014 if (p->p_flag & P_TRACED || 2015 (p->p_pgrp->pg_jobc == 0 && 2016 prop & SA_TTYSTOP)) 2017 break; /* == ignore */ 2018 mtx_unlock(&ps->ps_mtx); 2019 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 2020 &p->p_mtx.mtx_object, "Catching SIGSTOP"); 2021 p->p_flag |= P_STOPPED_SIG; 2022 p->p_xstat = sig; 2023 mtx_lock_spin(&sched_lock); 2024 thread_stopped(p); 2025 thread_suspend_one(td); 2026 PROC_UNLOCK(p); 2027 DROP_GIANT(); 2028 p->p_stats->p_ru.ru_nivcsw++; 2029 mi_switch(); 2030 mtx_unlock_spin(&sched_lock); 2031 PICKUP_GIANT(); 2032 PROC_LOCK(p); 2033 mtx_lock(&ps->ps_mtx); 2034 break; 2035 } else if (prop & SA_IGNORE) { 2036 /* 2037 * Except for SIGCONT, shouldn't get here. 2038 * Default action is to ignore; drop it. 2039 */ 2040 break; /* == ignore */ 2041 } else 2042 return (sig); 2043 /*NOTREACHED*/ 2044 2045 case (intptr_t)SIG_IGN: 2046 /* 2047 * Masking above should prevent us ever trying 2048 * to take action on an ignored signal other 2049 * than SIGCONT, unless process is traced. 2050 */ 2051 if ((prop & SA_CONT) == 0 && 2052 (p->p_flag & P_TRACED) == 0) 2053 printf("issignal\n"); 2054 break; /* == ignore */ 2055 2056 default: 2057 /* 2058 * This signal has an action, let 2059 * postsig() process it. 2060 */ 2061 return (sig); 2062 } 2063 SIGDELSET(td->td_siglist, sig); /* take the signal! */ 2064 } 2065 /* NOTREACHED */ 2066 } 2067 2068 /* 2069 * Put the argument process into the stopped state and notify the parent 2070 * via wakeup. 
Signals are handled elsewhere. The process must not be 2071 * on the run queue. Must be called with the proc p locked and the scheduler 2072 * lock held. 2073 */ 2074 static void 2075 stop(struct proc *p) 2076 { 2077 2078 PROC_LOCK_ASSERT(p, MA_OWNED); 2079 p->p_flag |= P_STOPPED_SIG; 2080 p->p_flag &= ~P_WAITED; 2081 wakeup(p->p_pptr); 2082 } 2083 2084 /* 2085 * MPSAFE 2086 */ 2087 void 2088 thread_stopped(struct proc *p) 2089 { 2090 struct proc *p1 = curthread->td_proc; 2091 struct sigacts *ps; 2092 int n; 2093 2094 PROC_LOCK_ASSERT(p, MA_OWNED); 2095 mtx_assert(&sched_lock, MA_OWNED); 2096 n = p->p_suspcount; 2097 if (p == p1) 2098 n++; 2099 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) { 2100 mtx_unlock_spin(&sched_lock); 2101 stop(p); 2102 PROC_LOCK(p->p_pptr); 2103 ps = p->p_pptr->p_sigacts; 2104 mtx_lock(&ps->ps_mtx); 2105 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) { 2106 mtx_unlock(&ps->ps_mtx); 2107 psignal(p->p_pptr, SIGCHLD); 2108 } else 2109 mtx_unlock(&ps->ps_mtx); 2110 PROC_UNLOCK(p->p_pptr); 2111 mtx_lock_spin(&sched_lock); 2112 } 2113 } 2114 2115 /* 2116 * Take the action for the specified signal 2117 * from the current set of pending signals. 2118 */ 2119 void 2120 postsig(sig) 2121 register int sig; 2122 { 2123 struct thread *td = curthread; 2124 register struct proc *p = td->td_proc; 2125 struct sigacts *ps; 2126 sig_t action; 2127 sigset_t returnmask; 2128 int code; 2129 2130 KASSERT(sig != 0, ("postsig")); 2131 2132 PROC_LOCK_ASSERT(p, MA_OWNED); 2133 ps = p->p_sigacts; 2134 mtx_assert(&ps->ps_mtx, MA_OWNED); 2135 SIGDELSET(td->td_siglist, sig); 2136 action = ps->ps_sigact[_SIG_IDX(sig)]; 2137 #ifdef KTRACE 2138 if (KTRPOINT(td, KTR_PSIG)) 2139 ktrpsig(sig, action, td->td_flags & TDF_OLDMASK ? 2140 &td->td_oldsigmask : &td->td_sigmask, 0); 2141 #endif 2142 _STOPEVENT(p, S_SIG, sig); 2143 2144 if (action == SIG_DFL) { 2145 /* 2146 * Default action, where the default is to kill 2147 * the process. (Other cases were ignored above.) 
2148 */ 2149 mtx_unlock(&ps->ps_mtx); 2150 sigexit(td, sig); 2151 /* NOTREACHED */ 2152 } else { 2153 /* 2154 * If we get here, the signal must be caught. 2155 */ 2156 KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig), 2157 ("postsig action")); 2158 /* 2159 * Set the new mask value and also defer further 2160 * occurrences of this signal. 2161 * 2162 * Special case: user has done a sigsuspend. Here the 2163 * current mask is not of interest, but rather the 2164 * mask from before the sigsuspend is what we want 2165 * restored after the signal processing is completed. 2166 */ 2167 if (td->td_flags & TDF_OLDMASK) { 2168 returnmask = td->td_oldsigmask; 2169 mtx_lock_spin(&sched_lock); 2170 td->td_flags &= ~TDF_OLDMASK; 2171 mtx_unlock_spin(&sched_lock); 2172 } else 2173 returnmask = td->td_sigmask; 2174 2175 SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); 2176 if (!SIGISMEMBER(ps->ps_signodefer, sig)) 2177 SIGADDSET(td->td_sigmask, sig); 2178 2179 if (SIGISMEMBER(ps->ps_sigreset, sig)) { 2180 /* 2181 * See kern_sigaction() for origin of this code. 2182 */ 2183 SIGDELSET(ps->ps_sigcatch, sig); 2184 if (sig != SIGCONT && 2185 sigprop(sig) & SA_IGNORE) 2186 SIGADDSET(ps->ps_sigignore, sig); 2187 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 2188 } 2189 p->p_stats->p_ru.ru_nsignals++; 2190 if (p->p_sig != sig) { 2191 code = 0; 2192 } else { 2193 code = p->p_code; 2194 p->p_code = 0; 2195 p->p_sig = 0; 2196 } 2197 if (p->p_flag & P_THREADED) 2198 thread_signal_add(curthread, sig); 2199 else 2200 (*p->p_sysent->sv_sendsig)(action, sig, 2201 &returnmask, code); 2202 } 2203 } 2204 2205 /* 2206 * Kill the current process for stated reason. 2207 */ 2208 void 2209 killproc(p, why) 2210 struct proc *p; 2211 char *why; 2212 { 2213 2214 PROC_LOCK_ASSERT(p, MA_OWNED); 2215 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", 2216 p, p->p_pid, p->p_comm); 2217 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm, 2218 p->p_ucred ? 
p->p_ucred->cr_uid : -1, why); 2219 psignal(p, SIGKILL); 2220 } 2221 2222 /* 2223 * Force the current process to exit with the specified signal, dumping core 2224 * if appropriate. We bypass the normal tests for masked and caught signals, 2225 * allowing unrecoverable failures to terminate the process without changing 2226 * signal state. Mark the accounting record with the signal termination. 2227 * If dumping core, save the signal number for the debugger. Calls exit and 2228 * does not return. 2229 * 2230 * MPSAFE 2231 */ 2232 void 2233 sigexit(td, sig) 2234 struct thread *td; 2235 int sig; 2236 { 2237 struct proc *p = td->td_proc; 2238 2239 PROC_LOCK_ASSERT(p, MA_OWNED); 2240 p->p_acflag |= AXSIG; 2241 if (sigprop(sig) & SA_CORE) { 2242 p->p_sig = sig; 2243 /* 2244 * Log signals which would cause core dumps 2245 * (Log as LOG_INFO to appease those who don't want 2246 * these messages.) 2247 * XXX : Todo, as well as euid, write out ruid too 2248 */ 2249 PROC_UNLOCK(p); 2250 if (!mtx_owned(&Giant)) 2251 mtx_lock(&Giant); 2252 if (coredump(td) == 0) 2253 sig |= WCOREFLAG; 2254 if (kern_logsigexit) 2255 log(LOG_INFO, 2256 "pid %d (%s), uid %d: exited on signal %d%s\n", 2257 p->p_pid, p->p_comm, 2258 td->td_ucred ? td->td_ucred->cr_uid : -1, 2259 sig &~ WCOREFLAG, 2260 sig & WCOREFLAG ? " (core dumped)" : ""); 2261 } else { 2262 PROC_UNLOCK(p); 2263 if (!mtx_owned(&Giant)) 2264 mtx_lock(&Giant); 2265 } 2266 exit1(td, W_EXITCODE(0, sig)); 2267 /* NOTREACHED */ 2268 } 2269 2270 static char corefilename[MAXPATHLEN+1] = {"%N.core"}; 2271 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename, 2272 sizeof(corefilename), "process corefile name format string"); 2273 2274 /* 2275 * expand_name(name, uid, pid) 2276 * Expand the name described in corefilename, using name, uid, and pid. 
2277 * corefilename is a printf-like string, with three format specifiers: 2278 * %N name of process ("name") 2279 * %P process id (pid) 2280 * %U user id (uid) 2281 * For example, "%N.core" is the default; they can be disabled completely 2282 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P". 2283 * This is controlled by the sysctl variable kern.corefile (see above). 2284 */ 2285 2286 static char * 2287 expand_name(name, uid, pid) 2288 const char *name; 2289 uid_t uid; 2290 pid_t pid; 2291 { 2292 const char *format, *appendstr; 2293 char *temp; 2294 char buf[11]; /* Buffer for pid/uid -- max 4B */ 2295 size_t i, l, n; 2296 2297 format = corefilename; 2298 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO); 2299 if (temp == NULL) 2300 return (NULL); 2301 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) { 2302 switch (format[i]) { 2303 case '%': /* Format character */ 2304 i++; 2305 switch (format[i]) { 2306 case '%': 2307 appendstr = "%"; 2308 break; 2309 case 'N': /* process name */ 2310 appendstr = name; 2311 break; 2312 case 'P': /* process id */ 2313 sprintf(buf, "%u", pid); 2314 appendstr = buf; 2315 break; 2316 case 'U': /* user id */ 2317 sprintf(buf, "%u", uid); 2318 appendstr = buf; 2319 break; 2320 default: 2321 appendstr = ""; 2322 log(LOG_ERR, 2323 "Unknown format character %c in `%s'\n", 2324 format[i], format); 2325 } 2326 l = strlen(appendstr); 2327 if ((n + l) >= MAXPATHLEN) 2328 goto toolong; 2329 memcpy(temp + n, appendstr, l); 2330 n += l; 2331 break; 2332 default: 2333 temp[n++] = format[i]; 2334 } 2335 } 2336 if (format[i] != '\0') 2337 goto toolong; 2338 return (temp); 2339 toolong: 2340 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n", 2341 (long)pid, name, (u_long)uid); 2342 free(temp, M_TEMP); 2343 return (NULL); 2344 } 2345 2346 /* 2347 * Dump a process' core. 
 * The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 */

static int
coredump(struct thread *td)
{
	struct proc *p = td->td_proc;
	register struct vnode *vp;
	register struct ucred *cred = td->td_ucred;
	struct flock lf;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1, flags;
	struct mount *mp;
	char *name;			/* name of corefile */
	off_t limit;

	PROC_LOCK(p);
	_STOPEVENT(p, S_CORE, 0);

	/*
	 * Policy: no dump if dumps are disabled globally, or the process
	 * is set-id and sugid dumps are disabled.
	 * NOTE(review): EFAULT is an odd errno for a policy denial --
	 * historical choice, kept as-is.
	 */
	if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
		PROC_UNLOCK(p);
		return (EFAULT);
	}

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
	 */
	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
	if (limit == 0) {
		PROC_UNLOCK(p);
		return 0;
	}
	PROC_UNLOCK(p);

restart:
	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
	if (name == NULL)
		return (EINVAL);
	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
	flags = O_CREAT | FWRITE | O_NOFOLLOW;
	error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
	free(name, M_TEMP);
	if (error)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
		VOP_UNLOCK(vp, 0, td);
		error = EFAULT;
		goto out2;
	}

	VOP_UNLOCK(vp, 0, td);
	/* Take an advisory write lock over the whole file. */
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
	if (error)
		goto out2;

	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		/*
		 * Filesystem writes are suspended: release everything,
		 * wait for the suspension to end, and start over.
		 */
		lf.l_type = F_UNLCK;
		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
			return (error);
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}

	/* Truncate any existing core file. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, td);
	VOP_UNLOCK(vp, 0, td);
	PROC_LOCK(p);
	p->p_acflag |= ACORE;
	PROC_UNLOCK(p);

	/* Hand off to the ABI-specific core dump routine, if any. */
	error = p->p_sysent->sv_coredump ?
	  p->p_sysent->sv_coredump(td, vp, limit) :
	  ENOSYS;

	lf.l_type = F_UNLCK;
	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
	vn_finished_write(mp);
out2:
	error1 = vn_close(vp, FWRITE, cred, td);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
2458 */ 2459 #ifndef _SYS_SYSPROTO_H_ 2460 struct nosys_args { 2461 int dummy; 2462 }; 2463 #endif 2464 /* 2465 * MPSAFE 2466 */ 2467 /* ARGSUSED */ 2468 int 2469 nosys(td, args) 2470 struct thread *td; 2471 struct nosys_args *args; 2472 { 2473 struct proc *p = td->td_proc; 2474 2475 PROC_LOCK(p); 2476 psignal(p, SIGSYS); 2477 PROC_UNLOCK(p); 2478 return (ENOSYS); 2479 } 2480 2481 /* 2482 * Send a SIGIO or SIGURG signal to a process or process group using 2483 * stored credentials rather than those of the current process. 2484 */ 2485 void 2486 pgsigio(sigiop, sig, checkctty) 2487 struct sigio **sigiop; 2488 int sig, checkctty; 2489 { 2490 struct sigio *sigio; 2491 2492 SIGIO_LOCK(); 2493 sigio = *sigiop; 2494 if (sigio == NULL) { 2495 SIGIO_UNLOCK(); 2496 return; 2497 } 2498 if (sigio->sio_pgid > 0) { 2499 PROC_LOCK(sigio->sio_proc); 2500 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred)) 2501 psignal(sigio->sio_proc, sig); 2502 PROC_UNLOCK(sigio->sio_proc); 2503 } else if (sigio->sio_pgid < 0) { 2504 struct proc *p; 2505 2506 PGRP_LOCK(sigio->sio_pgrp); 2507 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) { 2508 PROC_LOCK(p); 2509 if (CANSIGIO(sigio->sio_ucred, p->p_ucred) && 2510 (checkctty == 0 || (p->p_flag & P_CONTROLT))) 2511 psignal(p, sig); 2512 PROC_UNLOCK(p); 2513 } 2514 PGRP_UNLOCK(sigio->sio_pgrp); 2515 } 2516 SIGIO_UNLOCK(); 2517 } 2518 2519 static int 2520 filt_sigattach(struct knote *kn) 2521 { 2522 struct proc *p = curproc; 2523 2524 kn->kn_ptr.p_proc = p; 2525 kn->kn_flags |= EV_CLEAR; /* automatically set */ 2526 2527 PROC_LOCK(p); 2528 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext); 2529 PROC_UNLOCK(p); 2530 2531 return (0); 2532 } 2533 2534 static void 2535 filt_sigdetach(struct knote *kn) 2536 { 2537 struct proc *p = kn->kn_ptr.p_proc; 2538 2539 PROC_LOCK(p); 2540 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext); 2541 PROC_UNLOCK(p); 2542 } 2543 2544 /* 2545 * signal knotes are shared with proc knotes, so we apply a mask to 2546 
* the hint in order to differentiate them from process hints. This 2547 * could be avoided by using a signal-specific knote list, but probably 2548 * isn't worth the trouble. 2549 */ 2550 static int 2551 filt_signal(struct knote *kn, long hint) 2552 { 2553 2554 if (hint & NOTE_SIGNAL) { 2555 hint &= ~NOTE_SIGNAL; 2556 2557 if (kn->kn_id == hint) 2558 kn->kn_data++; 2559 } 2560 return (kn->kn_data != 0); 2561 } 2562 2563 struct sigacts * 2564 sigacts_alloc(void) 2565 { 2566 struct sigacts *ps; 2567 2568 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO); 2569 ps->ps_refcnt = 1; 2570 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF); 2571 return (ps); 2572 } 2573 2574 void 2575 sigacts_free(struct sigacts *ps) 2576 { 2577 2578 mtx_lock(&ps->ps_mtx); 2579 ps->ps_refcnt--; 2580 if (ps->ps_refcnt == 0) { 2581 mtx_destroy(&ps->ps_mtx); 2582 free(ps, M_SUBPROC); 2583 } else 2584 mtx_unlock(&ps->ps_mtx); 2585 } 2586 2587 struct sigacts * 2588 sigacts_hold(struct sigacts *ps) 2589 { 2590 mtx_lock(&ps->ps_mtx); 2591 ps->ps_refcnt++; 2592 mtx_unlock(&ps->ps_mtx); 2593 return (ps); 2594 } 2595 2596 void 2597 sigacts_copy(struct sigacts *dest, struct sigacts *src) 2598 { 2599 2600 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest")); 2601 mtx_lock(&src->ps_mtx); 2602 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt)); 2603 mtx_unlock(&src->ps_mtx); 2604 } 2605 2606 int 2607 sigacts_shared(struct sigacts *ps) 2608 { 2609 int shared; 2610 2611 mtx_lock(&ps->ps_mtx); 2612 shared = ps->ps_refcnt > 1; 2613 mtx_unlock(&ps->ps_mtx); 2614 return (shared); 2615 } 2616