1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 39 * $FreeBSD$ 40 */ 41 42 #include "opt_compat.h" 43 #include "opt_ktrace.h" 44 45 #include <sys/param.h> 46 #include <sys/kernel.h> 47 #include <sys/sysproto.h> 48 #include <sys/systm.h> 49 #include <sys/signalvar.h> 50 #include <sys/namei.h> 51 #include <sys/vnode.h> 52 #include <sys/event.h> 53 #include <sys/proc.h> 54 #include <sys/pioctl.h> 55 #include <sys/acct.h> 56 #include <sys/fcntl.h> 57 #include <sys/condvar.h> 58 #include <sys/lock.h> 59 #include <sys/mutex.h> 60 #include <sys/wait.h> 61 #include <sys/ktr.h> 62 #include <sys/ktrace.h> 63 #include <sys/resourcevar.h> 64 #include <sys/smp.h> 65 #include <sys/stat.h> 66 #include <sys/sx.h> 67 #include <sys/syslog.h> 68 #include <sys/sysent.h> 69 #include <sys/sysctl.h> 70 #include <sys/malloc.h> 71 #include <sys/unistd.h> 72 73 #include <machine/cpu.h> 74 75 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. 
*/ 76 77 static int coredump(struct thread *); 78 static int do_sigaction(struct proc *p, int sig, struct sigaction *act, 79 struct sigaction *oact, int old); 80 static int do_sigprocmask(struct proc *p, int how, sigset_t *set, 81 sigset_t *oset, int old); 82 static char *expand_name(const char *, uid_t, pid_t); 83 static int killpg1(struct thread *td, int sig, int pgid, int all); 84 static int sig_ffs(sigset_t *set); 85 static int sigprop(int sig); 86 static void stop(struct proc *); 87 88 static int filt_sigattach(struct knote *kn); 89 static void filt_sigdetach(struct knote *kn); 90 static int filt_signal(struct knote *kn, long hint); 91 92 struct filterops sig_filtops = 93 { 0, filt_sigattach, filt_sigdetach, filt_signal }; 94 95 static int kern_logsigexit = 1; 96 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW, 97 &kern_logsigexit, 0, 98 "Log processes quitting on abnormal signals to syslog(3)"); 99 100 /* 101 * Policy -- Can ucred cr1 send SIGIO to process cr2? 102 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG 103 * in the right situations. 104 */ 105 #define CANSIGIO(cr1, cr2) \ 106 ((cr1)->cr_uid == 0 || \ 107 (cr1)->cr_ruid == (cr2)->cr_ruid || \ 108 (cr1)->cr_uid == (cr2)->cr_ruid || \ 109 (cr1)->cr_ruid == (cr2)->cr_uid || \ 110 (cr1)->cr_uid == (cr2)->cr_uid) 111 112 int sugid_coredump; 113 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW, 114 &sugid_coredump, 0, "Enable coredumping set user/group ID processes"); 115 116 static int do_coredump = 1; 117 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW, 118 &do_coredump, 0, "Enable/Disable coredumps"); 119 120 /* 121 * Signal properties and actions. 122 * The array below categorizes the signals and their default actions 123 * according to the following properties: 124 */ 125 #define SA_KILL 0x01 /* terminates process by default */ 126 #define SA_CORE 0x02 /* ditto and coredumps */ 127 #define SA_STOP 0x04 /* suspend process */ 128 #define SA_TTYSTOP 0x08 /* ditto, from tty */ 129 #define SA_IGNORE 0x10 /* ignore by default */ 130 #define SA_CONT 0x20 /* continue if suspended */ 131 #define SA_CANTMASK 0x40 /* non-maskable, catchable */ 132 133 static int sigproptbl[NSIG] = { 134 SA_KILL, /* SIGHUP */ 135 SA_KILL, /* SIGINT */ 136 SA_KILL|SA_CORE, /* SIGQUIT */ 137 SA_KILL|SA_CORE, /* SIGILL */ 138 SA_KILL|SA_CORE, /* SIGTRAP */ 139 SA_KILL|SA_CORE, /* SIGABRT */ 140 SA_KILL|SA_CORE, /* SIGEMT */ 141 SA_KILL|SA_CORE, /* SIGFPE */ 142 SA_KILL, /* SIGKILL */ 143 SA_KILL|SA_CORE, /* SIGBUS */ 144 SA_KILL|SA_CORE, /* SIGSEGV */ 145 SA_KILL|SA_CORE, /* SIGSYS */ 146 SA_KILL, /* SIGPIPE */ 147 SA_KILL, /* SIGALRM */ 148 SA_KILL, /* SIGTERM */ 149 SA_IGNORE, /* SIGURG */ 150 SA_STOP, /* SIGSTOP */ 151 SA_STOP|SA_TTYSTOP, /* SIGTSTP */ 152 SA_IGNORE|SA_CONT, /* SIGCONT */ 153 SA_IGNORE, /* SIGCHLD */ 154 SA_STOP|SA_TTYSTOP, /* SIGTTIN */ 155 SA_STOP|SA_TTYSTOP, /* SIGTTOU */ 156 SA_IGNORE, /* SIGIO */ 157 SA_KILL, /* SIGXCPU */ 158 SA_KILL, /* SIGXFSZ */ 159 SA_KILL, /* SIGVTALRM */ 160 SA_KILL, /* SIGPROF */ 161 SA_IGNORE, /* SIGWINCH */ 162 SA_IGNORE, /* SIGINFO */ 163 SA_KILL, /* SIGUSR1 */ 164 SA_KILL, /* SIGUSR2 */ 165 }; 166 167 /* 168 * Determine signal that should be delivered to process p, the current 169 * process, 0 if none. If there is a pending stop signal with default 170 * action, the process stops in issignal(). 171 * 172 * MP SAFE. 
173 */ 174 int 175 CURSIG(struct proc *p) 176 { 177 178 PROC_LOCK_ASSERT(p, MA_OWNED); 179 mtx_assert(&sched_lock, MA_NOTOWNED); 180 return (SIGPENDING(p) ? issignal(p) : 0); 181 } 182 183 /* 184 * Arrange for ast() to handle unmasked pending signals on return to user 185 * mode. This must be called whenever a signal is added to p_siglist or 186 * unmasked in p_sigmask. 187 */ 188 void 189 signotify(struct proc *p) 190 { 191 192 PROC_LOCK_ASSERT(p, MA_OWNED); 193 mtx_lock_spin(&sched_lock); 194 if (SIGPENDING(p)) { 195 p->p_sflag |= PS_NEEDSIGCHK; 196 p->p_kse.ke_flags |= KEF_ASTPENDING; /* XXXKSE */ 197 } 198 mtx_unlock_spin(&sched_lock); 199 } 200 201 static __inline int 202 sigprop(int sig) 203 { 204 205 if (sig > 0 && sig < NSIG) 206 return (sigproptbl[_SIG_IDX(sig)]); 207 return (0); 208 } 209 210 static __inline int 211 sig_ffs(sigset_t *set) 212 { 213 int i; 214 215 for (i = 0; i < _SIG_WORDS; i++) 216 if (set->__bits[i]) 217 return (ffs(set->__bits[i]) + (i * 32)); 218 return (0); 219 } 220 221 /* 222 * do_sigaction 223 * sigaction 224 * osigaction 225 */ 226 static int 227 do_sigaction(p, sig, act, oact, old) 228 struct proc *p; 229 register int sig; 230 struct sigaction *act, *oact; 231 int old; 232 { 233 register struct sigacts *ps; 234 235 if (!_SIG_VALID(sig)) 236 return (EINVAL); 237 238 PROC_LOCK(p); 239 ps = p->p_sigacts; 240 if (oact) { 241 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)]; 242 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)]; 243 oact->sa_flags = 0; 244 if (SIGISMEMBER(ps->ps_sigonstack, sig)) 245 oact->sa_flags |= SA_ONSTACK; 246 if (!SIGISMEMBER(ps->ps_sigintr, sig)) 247 oact->sa_flags |= SA_RESTART; 248 if (SIGISMEMBER(ps->ps_sigreset, sig)) 249 oact->sa_flags |= SA_RESETHAND; 250 if (SIGISMEMBER(ps->ps_signodefer, sig)) 251 oact->sa_flags |= SA_NODEFER; 252 if (SIGISMEMBER(ps->ps_siginfo, sig)) 253 oact->sa_flags |= SA_SIGINFO; 254 if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDSTOP) 255 oact->sa_flags |= SA_NOCLDSTOP; 256 if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDWAIT) 257 oact->sa_flags |= SA_NOCLDWAIT; 258 } 259 if (act) { 260 if ((sig == SIGKILL || sig == SIGSTOP) && 261 act->sa_handler != SIG_DFL) { 262 PROC_UNLOCK(p); 263 return (EINVAL); 264 } 265 266 /* 267 * Change setting atomically. 
268 */ 269 270 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask; 271 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]); 272 if (act->sa_flags & SA_SIGINFO) { 273 ps->ps_sigact[_SIG_IDX(sig)] = 274 (__sighandler_t *)act->sa_sigaction; 275 SIGADDSET(ps->ps_siginfo, sig); 276 } else { 277 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler; 278 SIGDELSET(ps->ps_siginfo, sig); 279 } 280 if (!(act->sa_flags & SA_RESTART)) 281 SIGADDSET(ps->ps_sigintr, sig); 282 else 283 SIGDELSET(ps->ps_sigintr, sig); 284 if (act->sa_flags & SA_ONSTACK) 285 SIGADDSET(ps->ps_sigonstack, sig); 286 else 287 SIGDELSET(ps->ps_sigonstack, sig); 288 if (act->sa_flags & SA_RESETHAND) 289 SIGADDSET(ps->ps_sigreset, sig); 290 else 291 SIGDELSET(ps->ps_sigreset, sig); 292 if (act->sa_flags & SA_NODEFER) 293 SIGADDSET(ps->ps_signodefer, sig); 294 else 295 SIGDELSET(ps->ps_signodefer, sig); 296 #ifdef COMPAT_SUNOS 297 if (act->sa_flags & SA_USERTRAMP) 298 SIGADDSET(ps->ps_usertramp, sig); 299 else 300 SIGDELSET(ps->ps_usertramp, sig); 301 #endif 302 if (sig == SIGCHLD) { 303 if (act->sa_flags & SA_NOCLDSTOP) 304 p->p_procsig->ps_flag |= PS_NOCLDSTOP; 305 else 306 p->p_procsig->ps_flag &= ~PS_NOCLDSTOP; 307 if (act->sa_flags & SA_NOCLDWAIT) { 308 /* 309 * Paranoia: since SA_NOCLDWAIT is implemented 310 * by reparenting the dying child to PID 1 (and 311 * trust it to reap the zombie), PID 1 itself 312 * is forbidden to set SA_NOCLDWAIT. 313 */ 314 if (p->p_pid == 1) 315 p->p_procsig->ps_flag &= ~PS_NOCLDWAIT; 316 else 317 p->p_procsig->ps_flag |= PS_NOCLDWAIT; 318 } else 319 p->p_procsig->ps_flag &= ~PS_NOCLDWAIT; 320 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) 321 p->p_procsig->ps_flag |= PS_CLDSIGIGN; 322 else 323 p->p_procsig->ps_flag &= ~PS_CLDSIGIGN; 324 } 325 /* 326 * Set bit in p_sigignore for signals that are set to SIG_IGN, 327 * and for signals set to SIG_DFL where the default is to 328 * ignore. However, don't put SIGCONT in p_sigignore, as we 329 * have to restart the process. 330 */ 331 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 332 (sigprop(sig) & SA_IGNORE && 333 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) { 334 /* never to be seen again */ 335 SIGDELSET(p->p_siglist, sig); 336 if (sig != SIGCONT) 337 /* easier in psignal */ 338 SIGADDSET(p->p_sigignore, sig); 339 SIGDELSET(p->p_sigcatch, sig); 340 } else { 341 SIGDELSET(p->p_sigignore, sig); 342 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL) 343 SIGDELSET(p->p_sigcatch, sig); 344 else 345 SIGADDSET(p->p_sigcatch, sig); 346 } 347 #ifdef COMPAT_43 348 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 349 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || !old) 350 SIGDELSET(ps->ps_osigset, sig); 351 else 352 SIGADDSET(ps->ps_osigset, sig); 353 #endif 354 } 355 PROC_UNLOCK(p); 356 return (0); 357 } 358 359 #ifndef _SYS_SYSPROTO_H_ 360 struct sigaction_args { 361 int sig; 362 struct sigaction *act; 363 struct sigaction *oact; 364 }; 365 #endif 366 /* 367 * MPSAFE 368 */ 369 /* ARGSUSED */ 370 int 371 sigaction(td, uap) 372 struct thread *td; 373 register struct sigaction_args *uap; 374 { 375 struct proc *p = td->td_proc; 376 struct sigaction act, oact; 377 register struct sigaction *actp, *oactp; 378 int error; 379 380 mtx_lock(&Giant); 381 382 actp = (uap->act != NULL) ? &act : NULL; 383 oactp = (uap->oact != NULL) ? 
&oact : NULL; 384 if (actp) { 385 error = copyin(uap->act, actp, sizeof(act)); 386 if (error) 387 goto done2; 388 } 389 error = do_sigaction(p, uap->sig, actp, oactp, 0); 390 if (oactp && !error) { 391 error = copyout(oactp, uap->oact, sizeof(oact)); 392 } 393 done2: 394 mtx_unlock(&Giant); 395 return (error); 396 } 397 398 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 399 #ifndef _SYS_SYSPROTO_H_ 400 struct osigaction_args { 401 int signum; 402 struct osigaction *nsa; 403 struct osigaction *osa; 404 }; 405 #endif 406 /* 407 * MPSAFE 408 */ 409 /* ARGSUSED */ 410 int 411 osigaction(td, uap) 412 struct thread *td; 413 register struct osigaction_args *uap; 414 { 415 struct proc *p = td->td_proc; 416 struct osigaction sa; 417 struct sigaction nsa, osa; 418 register struct sigaction *nsap, *osap; 419 int error; 420 421 if (uap->signum <= 0 || uap->signum >= ONSIG) 422 return (EINVAL); 423 424 nsap = (uap->nsa != NULL) ? &nsa : NULL; 425 osap = (uap->osa != NULL) ? &osa : NULL; 426 427 mtx_lock(&Giant); 428 429 if (nsap) { 430 error = copyin(uap->nsa, &sa, sizeof(sa)); 431 if (error) 432 goto done2; 433 nsap->sa_handler = sa.sa_handler; 434 nsap->sa_flags = sa.sa_flags; 435 OSIG2SIG(sa.sa_mask, nsap->sa_mask); 436 } 437 error = do_sigaction(p, uap->signum, nsap, osap, 1); 438 if (osap && !error) { 439 sa.sa_handler = osap->sa_handler; 440 sa.sa_flags = osap->sa_flags; 441 SIG2OSIG(osap->sa_mask, sa.sa_mask); 442 error = copyout(&sa, uap->osa, sizeof(sa)); 443 } 444 done2: 445 mtx_unlock(&Giant); 446 return (error); 447 } 448 #endif /* COMPAT_43 */ 449 450 /* 451 * Initialize signal state for process 0; 452 * set to ignore signals that are ignored by default. 453 */ 454 void 455 siginit(p) 456 struct proc *p; 457 { 458 register int i; 459 460 PROC_LOCK(p); 461 for (i = 1; i <= NSIG; i++) 462 if (sigprop(i) & SA_IGNORE && i != SIGCONT) 463 SIGADDSET(p->p_sigignore, i); 464 PROC_UNLOCK(p); 465 } 466 467 /* 468 * Reset signals for an exec of the specified process. 469 */ 470 void 471 execsigs(p) 472 register struct proc *p; 473 { 474 register struct sigacts *ps; 475 register int sig; 476 477 /* 478 * Reset caught signals. Held signals remain held 479 * through p_sigmask (unless they were caught, 480 * and are now ignored by default). 481 */ 482 PROC_LOCK_ASSERT(p, MA_OWNED); 483 ps = p->p_sigacts; 484 while (SIGNOTEMPTY(p->p_sigcatch)) { 485 sig = sig_ffs(&p->p_sigcatch); 486 SIGDELSET(p->p_sigcatch, sig); 487 if (sigprop(sig) & SA_IGNORE) { 488 if (sig != SIGCONT) 489 SIGADDSET(p->p_sigignore, sig); 490 SIGDELSET(p->p_siglist, sig); 491 } 492 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 493 } 494 /* 495 * Reset stack state to the user stack. 496 * Clear set of signals caught on the signal stack. 497 */ 498 p->p_sigstk.ss_flags = SS_DISABLE; 499 p->p_sigstk.ss_size = 0; 500 p->p_sigstk.ss_sp = 0; 501 p->p_flag &= ~P_ALTSTACK; 502 /* 503 * Reset no zombies if child dies flag as Solaris does. 504 */ 505 p->p_procsig->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN); 506 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) 507 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL; 508 } 509 510 /* 511 * do_sigprocmask() 512 * 513 * Manipulate signal mask. 
514 */ 515 static int 516 do_sigprocmask(p, how, set, oset, old) 517 struct proc *p; 518 int how; 519 sigset_t *set, *oset; 520 int old; 521 { 522 int error; 523 524 PROC_LOCK(p); 525 if (oset != NULL) 526 *oset = p->p_sigmask; 527 528 error = 0; 529 if (set != NULL) { 530 switch (how) { 531 case SIG_BLOCK: 532 SIG_CANTMASK(*set); 533 SIGSETOR(p->p_sigmask, *set); 534 break; 535 case SIG_UNBLOCK: 536 SIGSETNAND(p->p_sigmask, *set); 537 signotify(p); 538 break; 539 case SIG_SETMASK: 540 SIG_CANTMASK(*set); 541 if (old) 542 SIGSETLO(p->p_sigmask, *set); 543 else 544 p->p_sigmask = *set; 545 signotify(p); 546 break; 547 default: 548 error = EINVAL; 549 break; 550 } 551 } 552 PROC_UNLOCK(p); 553 return (error); 554 } 555 556 /* 557 * sigprocmask() - MP SAFE (XXXKSE not under KSE it isn't) 558 */ 559 560 #ifndef _SYS_SYSPROTO_H_ 561 struct sigprocmask_args { 562 int how; 563 const sigset_t *set; 564 sigset_t *oset; 565 }; 566 #endif 567 int 568 sigprocmask(td, uap) 569 register struct thread *td; 570 struct sigprocmask_args *uap; 571 { 572 struct proc *p = td->td_proc; 573 sigset_t set, oset; 574 sigset_t *setp, *osetp; 575 int error; 576 577 setp = (uap->set != NULL) ? &set : NULL; 578 osetp = (uap->oset != NULL) ? &oset : NULL; 579 if (setp) { 580 error = copyin(uap->set, setp, sizeof(set)); 581 if (error) 582 return (error); 583 } 584 error = do_sigprocmask(p, uap->how, setp, osetp, 0); 585 if (osetp && !error) { 586 error = copyout(osetp, uap->oset, sizeof(oset)); 587 } 588 return (error); 589 } 590 591 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 592 /* 593 * osigprocmask() - MP SAFE 594 */ 595 #ifndef _SYS_SYSPROTO_H_ 596 struct osigprocmask_args { 597 int how; 598 osigset_t mask; 599 }; 600 #endif 601 int 602 osigprocmask(td, uap) 603 register struct thread *td; 604 struct osigprocmask_args *uap; 605 { 606 struct proc *p = td->td_proc; 607 sigset_t set, oset; 608 int error; 609 610 OSIG2SIG(uap->mask, set); 611 error = do_sigprocmask(p, uap->how, &set, &oset, 1); 612 SIG2OSIG(oset, td->td_retval[0]); 613 return (error); 614 } 615 #endif /* COMPAT_43 */ 616 617 #ifndef _SYS_SYSPROTO_H_ 618 struct sigpending_args { 619 sigset_t *set; 620 }; 621 #endif 622 /* 623 * MPSAFE 624 */ 625 /* ARGSUSED */ 626 int 627 sigpending(td, uap) 628 struct thread *td; 629 struct sigpending_args *uap; 630 { 631 struct proc *p = td->td_proc; 632 sigset_t siglist; 633 int error; 634 635 mtx_lock(&Giant); 636 PROC_LOCK(p); 637 siglist = p->p_siglist; 638 PROC_UNLOCK(p); 639 mtx_unlock(&Giant); 640 error = copyout(&siglist, uap->set, sizeof(sigset_t)); 641 return(error); 642 } 643 644 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 645 #ifndef _SYS_SYSPROTO_H_ 646 struct osigpending_args { 647 int dummy; 648 }; 649 #endif 650 /* 651 * MPSAFE 652 */ 653 /* ARGSUSED */ 654 int 655 osigpending(td, uap) 656 struct thread *td; 657 struct osigpending_args *uap; 658 { 659 struct proc *p = td->td_proc; 660 661 mtx_lock(&Giant); 662 PROC_LOCK(p); 663 SIG2OSIG(p->p_siglist, td->td_retval[0]); 664 PROC_UNLOCK(p); 665 mtx_unlock(&Giant); 666 return (0); 667 } 668 #endif /* COMPAT_43 */ 669 670 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 671 /* 672 * Generalized interface signal handler, 4.3-compatible. 
673 */ 674 #ifndef _SYS_SYSPROTO_H_ 675 struct osigvec_args { 676 int signum; 677 struct sigvec *nsv; 678 struct sigvec *osv; 679 }; 680 #endif 681 /* 682 * MPSAFE 683 */ 684 /* ARGSUSED */ 685 int 686 osigvec(td, uap) 687 struct thread *td; 688 register struct osigvec_args *uap; 689 { 690 struct proc *p = td->td_proc; 691 struct sigvec vec; 692 struct sigaction nsa, osa; 693 register struct sigaction *nsap, *osap; 694 int error; 695 696 if (uap->signum <= 0 || uap->signum >= ONSIG) 697 return (EINVAL); 698 nsap = (uap->nsv != NULL) ? &nsa : NULL; 699 osap = (uap->osv != NULL) ? &osa : NULL; 700 if (nsap) { 701 error = copyin(uap->nsv, &vec, sizeof(vec)); 702 if (error) 703 return (error); 704 nsap->sa_handler = vec.sv_handler; 705 OSIG2SIG(vec.sv_mask, nsap->sa_mask); 706 nsap->sa_flags = vec.sv_flags; 707 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */ 708 #ifdef COMPAT_SUNOS 709 nsap->sa_flags |= SA_USERTRAMP; 710 #endif 711 } 712 mtx_lock(&Giant); 713 error = do_sigaction(p, uap->signum, nsap, osap, 1); 714 mtx_unlock(&Giant); 715 if (osap && !error) { 716 vec.sv_handler = osap->sa_handler; 717 SIG2OSIG(osap->sa_mask, vec.sv_mask); 718 vec.sv_flags = osap->sa_flags; 719 vec.sv_flags &= ~SA_NOCLDWAIT; 720 vec.sv_flags ^= SA_RESTART; 721 #ifdef COMPAT_SUNOS 722 vec.sv_flags &= ~SA_NOCLDSTOP; 723 #endif 724 error = copyout(&vec, uap->osv, sizeof(vec)); 725 } 726 return (error); 727 } 728 729 #ifndef _SYS_SYSPROTO_H_ 730 struct osigblock_args { 731 int mask; 732 }; 733 #endif 734 /* 735 * MPSAFE 736 */ 737 int 738 osigblock(td, uap) 739 register struct thread *td; 740 struct osigblock_args *uap; 741 { 742 struct proc *p = td->td_proc; 743 sigset_t set; 744 745 OSIG2SIG(uap->mask, set); 746 SIG_CANTMASK(set); 747 mtx_lock(&Giant); 748 PROC_LOCK(p); 749 SIG2OSIG(p->p_sigmask, td->td_retval[0]); 750 SIGSETOR(p->p_sigmask, set); 751 PROC_UNLOCK(p); 752 mtx_unlock(&Giant); 753 return (0); 754 } 755 756 #ifndef _SYS_SYSPROTO_H_ 757 struct osigsetmask_args { 758 int mask; 759 }; 760 #endif 761 /* 762 * MPSAFE 763 */ 764 int 765 osigsetmask(td, uap) 766 struct thread *td; 767 struct osigsetmask_args *uap; 768 { 769 struct proc *p = td->td_proc; 770 sigset_t set; 771 772 OSIG2SIG(uap->mask, set); 773 SIG_CANTMASK(set); 774 mtx_lock(&Giant); 775 PROC_LOCK(p); 776 SIG2OSIG(p->p_sigmask, td->td_retval[0]); 777 SIGSETLO(p->p_sigmask, set); 778 signotify(p); 779 PROC_UNLOCK(p); 780 mtx_unlock(&Giant); 781 return (0); 782 } 783 #endif /* COMPAT_43 || COMPAT_SUNOS */ 784 785 /* 786 * Suspend process until signal, providing mask to be set 787 * in the meantime. Note nonstandard calling convention: 788 * libc stub passes mask, not pointer, to save a copyin. 789 ***** XXXKSE this doesn't make sense under KSE. 790 ***** Do we suspend the thread or all threads in the process? 791 ***** How do we suspend threads running NOW on another processor? 792 */ 793 #ifndef _SYS_SYSPROTO_H_ 794 struct sigsuspend_args { 795 const sigset_t *sigmask; 796 }; 797 #endif 798 /* 799 * MPSAFE 800 */ 801 /* ARGSUSED */ 802 int 803 sigsuspend(td, uap) 804 struct thread *td; 805 struct sigsuspend_args *uap; 806 { 807 struct proc *p = td->td_proc; 808 sigset_t mask; 809 register struct sigacts *ps; 810 int error; 811 812 error = copyin(uap->sigmask, &mask, sizeof(mask)); 813 if (error) 814 return (error); 815 816 /* 817 * When returning from sigsuspend, we want 818 * the old mask to be restored after the 819 * signal handler has finished. Thus, we 820 * save it here and mark the sigacts structure 821 * to indicate this. 
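	 *
	 * (Cross-reference, added for clarity: postsig() below checks
	 * P_OLDMASK and, when it is set, uses p_oldsigmask rather than
	 * the temporary mask installed here as the mask to restore, then
	 * clears the flag again.)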
822 */ 823 mtx_lock(&Giant); 824 PROC_LOCK(p); 825 ps = p->p_sigacts; 826 p->p_oldsigmask = p->p_sigmask; 827 p->p_flag |= P_OLDMASK; 828 829 SIG_CANTMASK(mask); 830 p->p_sigmask = mask; 831 signotify(p); 832 while (msleep((caddr_t) ps, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0) 833 /* void */; 834 PROC_UNLOCK(p); 835 mtx_unlock(&Giant); 836 /* always return EINTR rather than ERESTART... */ 837 return (EINTR); 838 } 839 840 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 841 #ifndef _SYS_SYSPROTO_H_ 842 struct osigsuspend_args { 843 osigset_t mask; 844 }; 845 #endif 846 /* 847 * MPSAFE 848 */ 849 /* ARGSUSED */ 850 int 851 osigsuspend(td, uap) 852 struct thread *td; 853 struct osigsuspend_args *uap; 854 { 855 struct proc *p = td->td_proc; 856 sigset_t mask; 857 register struct sigacts *ps; 858 859 mtx_lock(&Giant); 860 PROC_LOCK(p); 861 ps = p->p_sigacts; 862 p->p_oldsigmask = p->p_sigmask; 863 p->p_flag |= P_OLDMASK; 864 OSIG2SIG(uap->mask, mask); 865 SIG_CANTMASK(mask); 866 SIGSETLO(p->p_sigmask, mask); 867 signotify(p); 868 while (msleep((caddr_t) ps, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0) 869 /* void */; 870 PROC_UNLOCK(p); 871 mtx_unlock(&Giant); 872 /* always return EINTR rather than ERESTART... */ 873 return (EINTR); 874 } 875 #endif /* COMPAT_43 */ 876 877 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 878 #ifndef _SYS_SYSPROTO_H_ 879 struct osigstack_args { 880 struct sigstack *nss; 881 struct sigstack *oss; 882 }; 883 #endif 884 /* 885 * MPSAFE 886 */ 887 /* ARGSUSED */ 888 int 889 osigstack(td, uap) 890 struct thread *td; 891 register struct osigstack_args *uap; 892 { 893 struct proc *p = td->td_proc; 894 struct sigstack ss; 895 int error = 0; 896 897 mtx_lock(&Giant); 898 899 if (uap->oss != NULL) { 900 PROC_LOCK(p); 901 ss.ss_sp = p->p_sigstk.ss_sp; 902 ss.ss_onstack = sigonstack(cpu_getstack(td)); 903 PROC_UNLOCK(p); 904 error = copyout(&ss, uap->oss, sizeof(struct sigstack)); 905 if (error) 906 goto done2; 907 } 908 909 if (uap->nss != NULL) { 910 if ((error = copyin(uap->nss, &ss, sizeof(ss))) != 0) 911 goto done2; 912 PROC_LOCK(p); 913 p->p_sigstk.ss_sp = ss.ss_sp; 914 p->p_sigstk.ss_size = 0; 915 p->p_sigstk.ss_flags |= ss.ss_onstack & SS_ONSTACK; 916 p->p_flag |= P_ALTSTACK; 917 PROC_UNLOCK(p); 918 } 919 done2: 920 mtx_unlock(&Giant); 921 return (error); 922 } 923 #endif /* COMPAT_43 || COMPAT_SUNOS */ 924 925 #ifndef _SYS_SYSPROTO_H_ 926 struct sigaltstack_args { 927 stack_t *ss; 928 stack_t *oss; 929 }; 930 #endif 931 /* 932 * MPSAFE 933 */ 934 /* ARGSUSED */ 935 int 936 sigaltstack(td, uap) 937 struct thread *td; 938 register struct sigaltstack_args *uap; 939 { 940 struct proc *p = td->td_proc; 941 stack_t ss; 942 int oonstack; 943 int error = 0; 944 945 mtx_lock(&Giant); 946 947 oonstack = sigonstack(cpu_getstack(td)); 948 949 if (uap->oss != NULL) { 950 PROC_LOCK(p); 951 ss = p->p_sigstk; 952 ss.ss_flags = (p->p_flag & P_ALTSTACK) 953 ? ((oonstack) ? 
SS_ONSTACK : 0) : SS_DISABLE; 954 PROC_UNLOCK(p); 955 if ((error = copyout(&ss, uap->oss, sizeof(stack_t))) != 0) 956 goto done2; 957 } 958 959 if (uap->ss != NULL) { 960 if (oonstack) { 961 error = EPERM; 962 goto done2; 963 } 964 if ((error = copyin(uap->ss, &ss, sizeof(ss))) != 0) 965 goto done2; 966 if ((ss.ss_flags & ~SS_DISABLE) != 0) { 967 error = EINVAL; 968 goto done2; 969 } 970 if (!(ss.ss_flags & SS_DISABLE)) { 971 if (ss.ss_size < p->p_sysent->sv_minsigstksz) { 972 error = ENOMEM; 973 goto done2; 974 } 975 PROC_LOCK(p); 976 p->p_sigstk = ss; 977 p->p_flag |= P_ALTSTACK; 978 PROC_UNLOCK(p); 979 } else { 980 PROC_LOCK(p); 981 p->p_flag &= ~P_ALTSTACK; 982 PROC_UNLOCK(p); 983 } 984 } 985 done2: 986 mtx_unlock(&Giant); 987 return (error); 988 } 989 990 /* 991 * Common code for kill process group/broadcast kill. 992 * cp is calling process. 993 */ 994 int 995 killpg1(td, sig, pgid, all) 996 register struct thread *td; 997 int sig, pgid, all; 998 { 999 register struct proc *p; 1000 struct pgrp *pgrp; 1001 int nfound = 0; 1002 1003 if (all) { 1004 /* 1005 * broadcast 1006 */ 1007 sx_slock(&allproc_lock); 1008 LIST_FOREACH(p, &allproc, p_list) { 1009 PROC_LOCK(p); 1010 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || 1011 p == td->td_proc) { 1012 PROC_UNLOCK(p); 1013 continue; 1014 } 1015 if (p_cansignal(td->td_proc, p, sig) == 0) { 1016 nfound++; 1017 if (sig) 1018 psignal(p, sig); 1019 } 1020 PROC_UNLOCK(p); 1021 } 1022 sx_sunlock(&allproc_lock); 1023 } else { 1024 sx_slock(&proctree_lock); 1025 if (pgid == 0) { 1026 /* 1027 * zero pgid means send to my process group. 1028 */ 1029 pgrp = td->td_proc->p_pgrp; 1030 PGRP_LOCK(pgrp); 1031 } else { 1032 pgrp = pgfind(pgid); 1033 if (pgrp == NULL) { 1034 sx_sunlock(&proctree_lock); 1035 return (ESRCH); 1036 } 1037 } 1038 sx_sunlock(&proctree_lock); 1039 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 1040 PROC_LOCK(p); 1041 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) { 1042 PROC_UNLOCK(p); 1043 continue; 1044 } 1045 if (p->p_stat == SZOMB) { 1046 PROC_UNLOCK(p); 1047 continue; 1048 } 1049 if (p_cansignal(td->td_proc, p, sig) == 0) { 1050 nfound++; 1051 if (sig) 1052 psignal(p, sig); 1053 } 1054 PROC_UNLOCK(p); 1055 } 1056 PGRP_UNLOCK(pgrp); 1057 } 1058 return (nfound ? 
0 : ESRCH); 1059 } 1060 1061 #ifndef _SYS_SYSPROTO_H_ 1062 struct kill_args { 1063 int pid; 1064 int signum; 1065 }; 1066 #endif 1067 /* 1068 * MPSAFE 1069 */ 1070 /* ARGSUSED */ 1071 int 1072 kill(td, uap) 1073 register struct thread *td; 1074 register struct kill_args *uap; 1075 { 1076 register struct proc *p; 1077 int error = 0; 1078 1079 if ((u_int)uap->signum > _SIG_MAXSIG) 1080 return (EINVAL); 1081 1082 mtx_lock(&Giant); 1083 if (uap->pid > 0) { 1084 /* kill single process */ 1085 if ((p = pfind(uap->pid)) == NULL) { 1086 error = ESRCH; 1087 } else if (p_cansignal(td->td_proc, p, uap->signum)) { 1088 PROC_UNLOCK(p); 1089 error = EPERM; 1090 } else { 1091 if (uap->signum) 1092 psignal(p, uap->signum); 1093 PROC_UNLOCK(p); 1094 error = 0; 1095 } 1096 } else { 1097 switch (uap->pid) { 1098 case -1: /* broadcast signal */ 1099 error = killpg1(td, uap->signum, 0, 1); 1100 break; 1101 case 0: /* signal own process group */ 1102 error = killpg1(td, uap->signum, 0, 0); 1103 break; 1104 default: /* negative explicit process group */ 1105 error = killpg1(td, uap->signum, -uap->pid, 0); 1106 break; 1107 } 1108 } 1109 mtx_unlock(&Giant); 1110 return(error); 1111 } 1112 1113 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 1114 #ifndef _SYS_SYSPROTO_H_ 1115 struct okillpg_args { 1116 int pgid; 1117 int signum; 1118 }; 1119 #endif 1120 /* 1121 * MPSAFE 1122 */ 1123 /* ARGSUSED */ 1124 int 1125 okillpg(td, uap) 1126 struct thread *td; 1127 register struct okillpg_args *uap; 1128 { 1129 int error; 1130 1131 if ((u_int)uap->signum > _SIG_MAXSIG) 1132 return (EINVAL); 1133 mtx_lock(&Giant); 1134 error = killpg1(td, uap->signum, uap->pgid, 0); 1135 mtx_unlock(&Giant); 1136 return (error); 1137 } 1138 #endif /* COMPAT_43 || COMPAT_SUNOS */ 1139 1140 /* 1141 * Send a signal to a process group. 1142 */ 1143 void 1144 gsignal(pgid, sig) 1145 int pgid, sig; 1146 { 1147 struct pgrp *pgrp; 1148 1149 if (pgid != 0) { 1150 sx_slock(&proctree_lock); 1151 pgrp = pgfind(pgid); 1152 sx_sunlock(&proctree_lock); 1153 if (pgrp != NULL) { 1154 pgsignal(pgrp, sig, 0); 1155 PGRP_UNLOCK(pgrp); 1156 } 1157 } 1158 } 1159 1160 /* 1161 * Send a signal to a process group. If checktty is 1, 1162 * limit to members which have a controlling terminal. 1163 */ 1164 void 1165 pgsignal(pgrp, sig, checkctty) 1166 struct pgrp *pgrp; 1167 int sig, checkctty; 1168 { 1169 register struct proc *p; 1170 1171 if (pgrp) { 1172 PGRP_LOCK_ASSERT(pgrp, MA_OWNED); 1173 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 1174 PROC_LOCK(p); 1175 if (checkctty == 0 || p->p_flag & P_CONTROLT) 1176 psignal(p, sig); 1177 PROC_UNLOCK(p); 1178 } 1179 } 1180 } 1181 1182 /* 1183 * Send a signal caused by a trap to the current process. 1184 * If it will be caught immediately, deliver it with correct code. 1185 * Otherwise, post it normally. 
1186 * 1187 * MPSAFE 1188 */ 1189 void 1190 trapsignal(p, sig, code) 1191 struct proc *p; 1192 register int sig; 1193 u_long code; 1194 { 1195 register struct sigacts *ps = p->p_sigacts; 1196 1197 mtx_lock(&Giant); 1198 PROC_LOCK(p); 1199 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) && 1200 !SIGISMEMBER(p->p_sigmask, sig)) { 1201 p->p_stats->p_ru.ru_nsignals++; 1202 #ifdef KTRACE 1203 if (KTRPOINT(p, KTR_PSIG)) 1204 ktrpsig(p->p_tracep, sig, ps->ps_sigact[_SIG_IDX(sig)], 1205 &p->p_sigmask, code); 1206 #endif 1207 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig, 1208 &p->p_sigmask, code); 1209 SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); 1210 if (!SIGISMEMBER(ps->ps_signodefer, sig)) 1211 SIGADDSET(p->p_sigmask, sig); 1212 if (SIGISMEMBER(ps->ps_sigreset, sig)) { 1213 /* 1214 * See do_sigaction() for origin of this code. 1215 */ 1216 SIGDELSET(p->p_sigcatch, sig); 1217 if (sig != SIGCONT && 1218 sigprop(sig) & SA_IGNORE) 1219 SIGADDSET(p->p_sigignore, sig); 1220 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 1221 } 1222 } else { 1223 p->p_code = code; /* XXX for core dump/debugger */ 1224 p->p_sig = sig; /* XXX to verify code */ 1225 psignal(p, sig); 1226 } 1227 PROC_UNLOCK(p); 1228 mtx_unlock(&Giant); 1229 } 1230 1231 /* 1232 * Send the signal to the process. If the signal has an action, the action 1233 * is usually performed by the target process rather than the caller; we add 1234 * the signal to the set of pending signals for the process. 1235 * 1236 * Exceptions: 1237 * o When a stop signal is sent to a sleeping process that takes the 1238 * default action, the process is stopped without awakening it. 1239 * o SIGCONT restarts stopped processes (or puts them back to sleep) 1240 * regardless of the signal action (eg, blocked or ignored). 1241 * 1242 * Other ignored signals are discarded immediately. 1243 */ 1244 void 1245 psignal(p, sig) 1246 register struct proc *p; 1247 register int sig; 1248 { 1249 register int prop; 1250 register sig_t action; 1251 struct thread *td; 1252 #ifdef SMP 1253 struct ksegrp *kg; 1254 #endif 1255 1256 KASSERT(_SIG_VALID(sig), 1257 ("psignal(): invalid signal %d\n", sig)); 1258 1259 PROC_LOCK_ASSERT(p, MA_OWNED); 1260 KNOTE(&p->p_klist, NOTE_SIGNAL | sig); 1261 1262 prop = sigprop(sig); 1263 1264 /* 1265 * If proc is traced, always give parent a chance; 1266 * if signal event is tracked by procfs, give *that* 1267 * a chance, as well. 1268 */ 1269 if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) { 1270 action = SIG_DFL; 1271 } else { 1272 /* 1273 * If the signal is being ignored, 1274 * then we forget about it immediately. 1275 * (Note: we don't set SIGCONT in p_sigignore, 1276 * and if it is set to SIG_IGN, 1277 * action will be SIG_DFL here.) 1278 */ 1279 if (SIGISMEMBER(p->p_sigignore, sig) || (p->p_flag & P_WEXIT)) 1280 return; 1281 if (SIGISMEMBER(p->p_sigmask, sig)) 1282 action = SIG_HOLD; 1283 else if (SIGISMEMBER(p->p_sigcatch, sig)) 1284 action = SIG_CATCH; 1285 else 1286 action = SIG_DFL; 1287 } 1288 1289 /* 1290 * bring the priority of a process up if we want it to get 1291 * killed in this lifetime. 1292 * XXXKSE think if a better way to do this. 1293 * 1294 * What we need to do is see if there is a thread that will 1295 * be able to accept the signal. e.g. 1296 * FOREACH_THREAD_IN_PROC() { 1297 * if runnable, we're done 1298 * else pick one at random. 1299 * } 1300 */ 1301 /* XXXKSE 1302 * For now there is one thread per proc. 1303 * Effectively select one sucker thread.. 
1304 */ 1305 td = FIRST_THREAD_IN_PROC(p); 1306 mtx_lock_spin(&sched_lock); 1307 if ((p->p_ksegrp.kg_nice > NZERO) && (action == SIG_DFL) && 1308 (prop & SA_KILL) && ((p->p_flag & P_TRACED) == 0)) 1309 p->p_ksegrp.kg_nice = NZERO; /* XXXKSE */ 1310 mtx_unlock_spin(&sched_lock); 1311 1312 if (prop & SA_CONT) 1313 SIG_STOPSIGMASK(p->p_siglist); 1314 1315 if (prop & SA_STOP) { 1316 /* 1317 * If sending a tty stop signal to a member of an orphaned 1318 * process group, discard the signal here if the action 1319 * is default; don't stop the process below if sleeping, 1320 * and don't clear any pending SIGCONT. 1321 */ 1322 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 && 1323 action == SIG_DFL) 1324 return; 1325 SIG_CONTSIGMASK(p->p_siglist); 1326 } 1327 SIGADDSET(p->p_siglist, sig); 1328 mtx_lock_spin(&sched_lock); 1329 signotify(p); 1330 1331 /* 1332 * Defer further processing for signals which are held, 1333 * except that stopped processes must be continued by SIGCONT. 1334 */ 1335 if (action == SIG_HOLD && (!(prop & SA_CONT) || p->p_stat != SSTOP)) { 1336 mtx_unlock_spin(&sched_lock); 1337 return; 1338 } 1339 1340 switch (p->p_stat) { 1341 1342 case SSLEEP: 1343 /* 1344 * If process is sleeping uninterruptibly 1345 * we can't interrupt the sleep... the signal will 1346 * be noticed when the process returns through 1347 * trap() or syscall(). 1348 */ 1349 if ((td->td_flags & TDF_SINTR) == 0) 1350 goto out; 1351 /* 1352 * Process is sleeping and traced... make it runnable 1353 * so it can discover the signal in issignal() and stop 1354 * for the parent. 1355 */ 1356 if (p->p_flag & P_TRACED) 1357 goto run; 1358 /* 1359 * If SIGCONT is default (or ignored) and process is 1360 * asleep, we are finished; the process should not 1361 * be awakened. 1362 */ 1363 if ((prop & SA_CONT) && action == SIG_DFL) { 1364 SIGDELSET(p->p_siglist, sig); 1365 goto out; 1366 } 1367 /* 1368 * When a sleeping process receives a stop 1369 * signal, process immediately if possible. 1370 * All other (caught or default) signals 1371 * cause the process to run. 1372 */ 1373 if (prop & SA_STOP) { 1374 if (action != SIG_DFL) 1375 goto runfast; 1376 /* 1377 * If a child holding parent blocked, 1378 * stopping could cause deadlock. 1379 */ 1380 if (p->p_flag & P_PPWAIT) 1381 goto out; 1382 mtx_unlock_spin(&sched_lock); 1383 SIGDELSET(p->p_siglist, sig); 1384 p->p_xstat = sig; 1385 PROC_LOCK(p->p_pptr); 1386 if ((p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP) == 0) 1387 psignal(p->p_pptr, SIGCHLD); 1388 PROC_UNLOCK(p->p_pptr); 1389 mtx_lock_spin(&sched_lock); 1390 stop(p); 1391 goto out; 1392 } else 1393 goto runfast; 1394 /* NOTREACHED */ 1395 1396 case SSTOP: 1397 /* 1398 * If traced process is already stopped, 1399 * then no further action is necessary. 1400 */ 1401 if (p->p_flag & P_TRACED) 1402 goto out; 1403 1404 /* 1405 * Kill signal always sets processes running. 1406 */ 1407 if (sig == SIGKILL) 1408 goto runfast; 1409 1410 if (prop & SA_CONT) { 1411 /* 1412 * If SIGCONT is default (or ignored), we continue the 1413 * process but don't leave the signal in p_siglist, as 1414 * it has no further action. If SIGCONT is held, we 1415 * continue the process and leave the signal in 1416 * p_siglist. If the process catches SIGCONT, let it 1417 * handle the signal itself. If it isn't waiting on 1418 * an event, then it goes back to run state. 1419 * Otherwise, process goes back to sleep state. 
1420 */ 1421 if (action == SIG_DFL) 1422 SIGDELSET(p->p_siglist, sig); 1423 if (action == SIG_CATCH) 1424 goto runfast; 1425 /* 1426 * XXXKSE 1427 * do this for each thread. 1428 */ 1429 if (p->p_flag & P_KSES) { 1430 mtx_assert(&sched_lock, 1431 MA_OWNED | MA_NOTRECURSED); 1432 FOREACH_THREAD_IN_PROC(p, td) { 1433 if (td->td_wchan == NULL) { 1434 setrunnable(td); /* XXXKSE */ 1435 } else { 1436 /* mark it as sleeping */ 1437 } 1438 } 1439 } else { 1440 if (td->td_wchan == NULL) 1441 goto run; 1442 p->p_stat = SSLEEP; 1443 } 1444 goto out; 1445 } 1446 1447 if (prop & SA_STOP) { 1448 /* 1449 * Already stopped, don't need to stop again. 1450 * (If we did the shell could get confused.) 1451 */ 1452 SIGDELSET(p->p_siglist, sig); 1453 goto out; 1454 } 1455 1456 /* 1457 * If process is sleeping interruptibly, then simulate a 1458 * wakeup so that when it is continued, it will be made 1459 * runnable and can look at the signal. But don't make 1460 * the process runnable, leave it stopped. 1461 * XXXKSE should we wake ALL blocked threads? 1462 */ 1463 if (p->p_flag & P_KSES) { 1464 FOREACH_THREAD_IN_PROC(p, td) { 1465 if (td->td_wchan && (td->td_flags & TDF_SINTR)){ 1466 if (td->td_flags & TDF_CVWAITQ) 1467 cv_waitq_remove(td); 1468 else 1469 unsleep(td); /* XXXKSE */ 1470 } 1471 } 1472 } else { 1473 if (td->td_wchan && td->td_flags & TDF_SINTR) { 1474 if (td->td_flags & TDF_CVWAITQ) 1475 cv_waitq_remove(td); 1476 else 1477 unsleep(td); /* XXXKSE */ 1478 } 1479 } 1480 goto out; 1481 1482 default: 1483 /* 1484 * SRUN, SIDL, SZOMB do nothing with the signal, 1485 * other than kicking ourselves if we are running. 1486 * It will either never be noticed, or noticed very soon. 1487 */ 1488 if (p->p_stat == SRUN) { 1489 #ifdef SMP 1490 struct kse *ke; 1491 struct thread *td = curthread; 1492 /* we should only deliver to one thread.. but which one? */ 1493 FOREACH_KSEGRP_IN_PROC(p, kg) { 1494 FOREACH_KSE_IN_GROUP(kg, ke) { 1495 if (ke->ke_thread == td) { 1496 continue; 1497 } 1498 forward_signal(ke->ke_thread); 1499 } 1500 } 1501 #endif 1502 } 1503 goto out; 1504 } 1505 /*NOTREACHED*/ 1506 1507 runfast: 1508 /* 1509 * Raise priority to at least PUSER. 1510 * XXXKSE Should we make them all run fast? 1511 * Maybe just one would be enough? 1512 */ 1513 1514 if (FIRST_THREAD_IN_PROC(p)->td_priority > PUSER) { 1515 FIRST_THREAD_IN_PROC(p)->td_priority = PUSER; 1516 } 1517 run: 1518 /* If we jump here, sched_lock has to be owned. */ 1519 mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); 1520 setrunnable(td); /* XXXKSE */ 1521 out: 1522 mtx_unlock_spin(&sched_lock); 1523 1524 /* Once we get here, sched_lock should not be owned. */ 1525 mtx_assert(&sched_lock, MA_NOTOWNED); 1526 } 1527 1528 /* 1529 * If the current process has received a signal (should be caught or cause 1530 * termination, should interrupt current syscall), return the signal number. 1531 * Stop signals with default action are processed immediately, then cleared; 1532 * they aren't returned. This is checked after each entry to the system for 1533 * a syscall or trap (though this can usually be done without calling issignal 1534 * by checking the pending signal masks in the CURSIG macro.) 
The normal call 1535 * sequence is 1536 * 1537 * while (sig = CURSIG(curproc)) 1538 * postsig(sig); 1539 */ 1540 int 1541 issignal(p) 1542 register struct proc *p; 1543 { 1544 sigset_t mask; 1545 register int sig, prop; 1546 1547 PROC_LOCK_ASSERT(p, MA_OWNED); 1548 for (;;) { 1549 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG); 1550 1551 mask = p->p_siglist; 1552 SIGSETNAND(mask, p->p_sigmask); 1553 if (p->p_flag & P_PPWAIT) 1554 SIG_STOPSIGMASK(mask); 1555 if (!SIGNOTEMPTY(mask)) /* no signal to send */ 1556 return (0); 1557 sig = sig_ffs(&mask); 1558 prop = sigprop(sig); 1559 1560 _STOPEVENT(p, S_SIG, sig); 1561 1562 /* 1563 * We should see pending but ignored signals 1564 * only if P_TRACED was on when they were posted. 1565 */ 1566 if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) { 1567 SIGDELSET(p->p_siglist, sig); 1568 continue; 1569 } 1570 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) { 1571 /* 1572 * If traced, always stop, and stay 1573 * stopped until released by the parent. 1574 */ 1575 p->p_xstat = sig; 1576 PROC_LOCK(p->p_pptr); 1577 psignal(p->p_pptr, SIGCHLD); 1578 PROC_UNLOCK(p->p_pptr); 1579 do { 1580 mtx_lock_spin(&sched_lock); 1581 stop(p); 1582 PROC_UNLOCK(p); 1583 DROP_GIANT(); 1584 p->p_stats->p_ru.ru_nivcsw++; 1585 mi_switch(); 1586 mtx_unlock_spin(&sched_lock); 1587 PICKUP_GIANT(); 1588 PROC_LOCK(p); 1589 } while (!trace_req(p) 1590 && p->p_flag & P_TRACED); 1591 1592 /* 1593 * If the traced bit got turned off, go back up 1594 * to the top to rescan signals. This ensures 1595 * that p_sig* and ps_sigact are consistent. 1596 */ 1597 if ((p->p_flag & P_TRACED) == 0) 1598 continue; 1599 1600 /* 1601 * If parent wants us to take the signal, 1602 * then it will leave it in p->p_xstat; 1603 * otherwise we just look for signals again. 1604 */ 1605 SIGDELSET(p->p_siglist, sig); /* clear old signal */ 1606 sig = p->p_xstat; 1607 if (sig == 0) 1608 continue; 1609 1610 /* 1611 * Put the new signal into p_siglist. If the 1612 * signal is being masked, look for other signals. 1613 */ 1614 SIGADDSET(p->p_siglist, sig); 1615 if (SIGISMEMBER(p->p_sigmask, sig)) 1616 continue; 1617 } 1618 1619 /* 1620 * Decide whether the signal should be returned. 1621 * Return the signal's number, or fall through 1622 * to clear it from the pending mask. 1623 */ 1624 switch ((int)(intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { 1625 1626 case (int)SIG_DFL: 1627 /* 1628 * Don't take default actions on system processes. 1629 */ 1630 if (p->p_pid <= 1) { 1631 #ifdef DIAGNOSTIC 1632 /* 1633 * Are you sure you want to ignore SIGSEGV 1634 * in init? XXX 1635 */ 1636 printf("Process (pid %lu) got signal %d\n", 1637 (u_long)p->p_pid, sig); 1638 #endif 1639 break; /* == ignore */ 1640 } 1641 /* 1642 * If there is a pending stop signal to process 1643 * with default action, stop here, 1644 * then clear the signal. However, 1645 * if process is member of an orphaned 1646 * process group, ignore tty stop signals. 
1647 */ 1648 if (prop & SA_STOP) { 1649 if (p->p_flag & P_TRACED || 1650 (p->p_pgrp->pg_jobc == 0 && 1651 prop & SA_TTYSTOP)) 1652 break; /* == ignore */ 1653 p->p_xstat = sig; 1654 PROC_LOCK(p->p_pptr); 1655 if ((p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP) == 0) 1656 psignal(p->p_pptr, SIGCHLD); 1657 PROC_UNLOCK(p->p_pptr); 1658 mtx_lock_spin(&sched_lock); 1659 stop(p); 1660 PROC_UNLOCK(p); 1661 DROP_GIANT(); 1662 p->p_stats->p_ru.ru_nivcsw++; 1663 mi_switch(); 1664 mtx_unlock_spin(&sched_lock); 1665 PICKUP_GIANT(); 1666 PROC_LOCK(p); 1667 break; 1668 } else if (prop & SA_IGNORE) { 1669 /* 1670 * Except for SIGCONT, shouldn't get here. 1671 * Default action is to ignore; drop it. 1672 */ 1673 break; /* == ignore */ 1674 } else 1675 return (sig); 1676 /*NOTREACHED*/ 1677 1678 case (int)SIG_IGN: 1679 /* 1680 * Masking above should prevent us ever trying 1681 * to take action on an ignored signal other 1682 * than SIGCONT, unless process is traced. 1683 */ 1684 if ((prop & SA_CONT) == 0 && 1685 (p->p_flag & P_TRACED) == 0) 1686 printf("issignal\n"); 1687 break; /* == ignore */ 1688 1689 default: 1690 /* 1691 * This signal has an action, let 1692 * postsig() process it. 1693 */ 1694 return (sig); 1695 } 1696 SIGDELSET(p->p_siglist, sig); /* take the signal! */ 1697 } 1698 /* NOTREACHED */ 1699 } 1700 1701 /* 1702 * Put the argument process into the stopped state and notify the parent 1703 * via wakeup. Signals are handled elsewhere. The process must not be 1704 * on the run queue. Must be called with the proc p locked and the scheduler 1705 * lock held. 1706 */ 1707 static void 1708 stop(p) 1709 register struct proc *p; 1710 { 1711 1712 PROC_LOCK_ASSERT(p, MA_OWNED); 1713 mtx_assert(&sched_lock, MA_OWNED); 1714 p->p_stat = SSTOP; 1715 p->p_flag &= ~P_WAITED; 1716 wakeup((caddr_t)p->p_pptr); 1717 } 1718 1719 /* 1720 * Take the action for the specified signal 1721 * from the current set of pending signals. 1722 */ 1723 void 1724 postsig(sig) 1725 register int sig; 1726 { 1727 struct thread *td = curthread; 1728 register struct proc *p = td->td_proc; 1729 struct sigacts *ps; 1730 sig_t action; 1731 sigset_t returnmask; 1732 int code; 1733 1734 KASSERT(sig != 0, ("postsig")); 1735 1736 PROC_LOCK_ASSERT(p, MA_OWNED); 1737 ps = p->p_sigacts; 1738 SIGDELSET(p->p_siglist, sig); 1739 action = ps->ps_sigact[_SIG_IDX(sig)]; 1740 #ifdef KTRACE 1741 if (KTRPOINT(p, KTR_PSIG)) 1742 ktrpsig(p->p_tracep, sig, action, p->p_flag & P_OLDMASK ? 1743 &p->p_oldsigmask : &p->p_sigmask, 0); 1744 #endif 1745 _STOPEVENT(p, S_SIG, sig); 1746 1747 if (action == SIG_DFL) { 1748 /* 1749 * Default action, where the default is to kill 1750 * the process. (Other cases were ignored above.) 1751 */ 1752 sigexit(td, sig); 1753 /* NOTREACHED */ 1754 } else { 1755 /* 1756 * If we get here, the signal must be caught. 1757 */ 1758 KASSERT(action != SIG_IGN && !SIGISMEMBER(p->p_sigmask, sig), 1759 ("postsig action")); 1760 /* 1761 * Set the new mask value and also defer further 1762 * occurrences of this signal. 1763 * 1764 * Special case: user has done a sigsuspend. Here the 1765 * current mask is not of interest, but rather the 1766 * mask from before the sigsuspend is what we want 1767 * restored after the signal processing is completed. 
		 */
		if (p->p_flag & P_OLDMASK) {
			returnmask = p->p_oldsigmask;
			p->p_flag &= ~P_OLDMASK;
		} else
			returnmask = p->p_sigmask;

		SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(p->p_sigmask, sig);

		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See do_sigaction() for origin of this code.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		p->p_stats->p_ru.ru_nsignals++;
		if (p->p_sig != sig) {
			code = 0;
		} else {
			code = p->p_code;
			p->p_code = 0;
			p->p_sig = 0;
		}
		(*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
	}
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(p, why)
	struct proc *p;
	char *why;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
	    p, p->p_pid, p->p_comm);
	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid,
	    p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(td, sig)
	struct thread *td;
	int sig;
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_acflag |= AXSIG;
	if (sigprop(sig) & SA_CORE) {
		p->p_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 */
		PROC_UNLOCK(p);
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
		if (coredump(td) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    td->td_ucred ? td->td_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	} else {
		PROC_UNLOCK(p);
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
	}
	exit1(td, W_EXITCODE(0, sig));
	/* NOTREACHED */
}

static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
    sizeof(corefilename), "process corefile name format string");

/*
 * expand_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; they can be disabled completely
 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
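 *
 * Illustrative example (editor's addition, hypothetical values): with
 * kern.corefile set to "/cores/%U/%N-%P", a process named "vi" with
 * pid 12345 running under uid 1001 would dump to "/cores/1001/vi-12345",
 * while the default "%N.core" yields simply "vi.core" in the current
 * directory.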
 */

static char *
expand_name(name, uid, pid)
	const char *name;
	uid_t uid;
	pid_t pid;
{
	char *temp;
	char buf[11];		/* Buffer for pid/uid -- max 4B */
	int i, n;
	char *format = corefilename;
	size_t namelen;

	temp = malloc(MAXPATHLEN + 1, M_TEMP, M_NOWAIT);
	if (temp == NULL)
		return NULL;
	namelen = strlen(name);
	for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
		int l;
		switch (format[i]) {
		case '%':	/* Format character */
			i++;
			switch (format[i]) {
			case '%':
				temp[n++] = '%';
				break;
			case 'N':	/* process name */
				if ((n + namelen) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					free(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, name, namelen);
				n += namelen;
				break;
			case 'P':	/* process id */
				l = sprintf(buf, "%u", pid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					free(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, buf, l);
				n += l;
				break;
			case 'U':	/* user id */
				l = sprintf(buf, "%u", uid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					free(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, buf, l);
				n += l;
				break;
			default:
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			break;
		default:
			temp[n++] = format[i];
		}
	}
	temp[n] = '\0';
	return temp;
}

/*
 * Dump a process' core.  The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 *
 * XXX: VOP_GETATTR() here requires holding the vnode lock.
 */

static int
coredump(struct thread *td)
{
	struct proc *p = td->td_proc;
	register struct vnode *vp;
	register struct ucred *cred = td->td_ucred;
	struct flock lf;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1, flags;
	struct mount *mp;
	char *name;			/* name of corefile */
	off_t limit;

	PROC_LOCK(p);
	_STOPEVENT(p, S_CORE, 0);

	if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
		PROC_UNLOCK(p);
		return (EFAULT);
	}

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
	 */
	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
	if (limit == 0) {
		PROC_UNLOCK(p);
		return 0;
	}
	PROC_UNLOCK(p);

restart:
	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
	if (name == NULL)
		return (EINVAL);
	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
	flags = O_CREAT | FWRITE | O_NOFOLLOW;
	error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
	free(name, M_TEMP);
	if (error)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	VOP_UNLOCK(vp, 0, td);
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
	if (error)
		goto out2;

	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		lf.l_type = F_UNLCK;
		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
			return (error);
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
		error = EFAULT;
		goto out1;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, td);
	VOP_UNLOCK(vp, 0, td);
	PROC_LOCK(p);
	p->p_acflag |= ACORE;
	PROC_UNLOCK(p);

	error = p->p_sysent->sv_coredump ?
	    p->p_sysent->sv_coredump(td, vp, limit) :
	    ENOSYS;

out1:
	lf.l_type = F_UNLCK;
	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
	vn_finished_write(mp);
out2:
	error1 = vn_close(vp, FWRITE, cred, td);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
 */
#ifndef _SYS_SYSPROTO_H_
struct nosys_args {
	int	dummy;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
nosys(td, args)
	struct thread *td;
	struct nosys_args *args;
{
	struct proc *p = td->td_proc;

	mtx_lock(&Giant);
	PROC_LOCK(p);
	psignal(p, SIGSYS);
	PROC_UNLOCK(p);
	mtx_unlock(&Giant);
	return (ENOSYS);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using
 * stored credentials rather than those of the current process.
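 * (Note, added for clarity: the stored credentials are the ones recorded
 * in the struct sigio when the owner was registered, typically via
 * fcntl(F_SETOWN) or the FIOSETOWN/SIOCSPGRP ioctls; CANSIGIO() above
 * compares them against each candidate target's credentials.)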
 */
void
pgsigio(sigiop, sig, checkctty)
	struct sigio **sigiop;
	int sig, checkctty;
{
	struct sigio *sigio;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	if (sigio->sio_pgid > 0) {
		PROC_LOCK(sigio->sio_proc);
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	PROC_LOCK(p);
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	PROC_UNLOCK(p);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	PROC_LOCK(p);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	PROC_UNLOCK(p);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}
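
/*
 * Illustrative userland sketch (editor's addition, not part of this file):
 * the filter above backs EVFILT_SIGNAL, so a process can watch for a
 * signal with something like
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ENABLE, 0, 0, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Each psignal() of that signal calls KNOTE() with NOTE_SIGNAL | sig,
 * which bumps kn_data in filt_signal(); because filt_sigattach() sets
 * EV_CLEAR, the count returned in kev.data is the number of deliveries
 * since the knote was last reported.
 */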