xref: /freebsd/sys/kern/kern_sig.c (revision 71fe318b852b8dfb3e799cb12ef184750f7f8eac)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
39  * $FreeBSD$
40  */
41 
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44 
45 #include <sys/param.h>
46 #include <sys/kernel.h>
47 #include <sys/sysproto.h>
48 #include <sys/systm.h>
49 #include <sys/signalvar.h>
50 #include <sys/namei.h>
51 #include <sys/vnode.h>
52 #include <sys/event.h>
53 #include <sys/proc.h>
54 #include <sys/pioctl.h>
55 #include <sys/acct.h>
56 #include <sys/fcntl.h>
57 #include <sys/condvar.h>
58 #include <sys/lock.h>
59 #include <sys/mutex.h>
60 #include <sys/wait.h>
61 #include <sys/ktr.h>
62 #include <sys/ktrace.h>
63 #include <sys/resourcevar.h>
64 #include <sys/smp.h>
65 #include <sys/stat.h>
66 #include <sys/sx.h>
67 #include <sys/syscallsubr.h>
68 #include <sys/syslog.h>
69 #include <sys/sysent.h>
70 #include <sys/sysctl.h>
71 #include <sys/malloc.h>
72 #include <sys/unistd.h>
73 
74 #include <machine/cpu.h>
75 
76 #if !defined(COMPAT_FREEBSD4) && !defined(NO_COMPAT_FREEBSD4)
77 #error "You *really* want COMPAT_FREEBSD4 on -current for a while"
78 #endif
79 #if defined (__alpha__) && !defined(COMPAT_43)
80 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
81 #endif
82 
83 #define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
84 
85 static int	coredump(struct thread *);
86 static int	do_sigprocmask(struct proc *p, int how, sigset_t *set,
87 			sigset_t *oset, int old);
88 static char	*expand_name(const char *, uid_t, pid_t);
89 static int	killpg1(struct thread *td, int sig, int pgid, int all);
90 static int	sig_ffs(sigset_t *set);
91 static int	sigprop(int sig);
92 static void	stop(struct proc *);
93 static void	tdsignal(struct thread *td, int sig, sig_t action);
94 static int	filt_sigattach(struct knote *kn);
95 static void	filt_sigdetach(struct knote *kn);
96 static int	filt_signal(struct knote *kn, long hint);
97 
98 struct filterops sig_filtops =
99 	{ 0, filt_sigattach, filt_sigdetach, filt_signal };
100 
101 static int	kern_logsigexit = 1;
102 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
103     &kern_logsigexit, 0,
104     "Log processes quitting on abnormal signals to syslog(3)");
105 
106 /*
107  * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
108  * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
109  * in the right situations.
110  */
111 #define CANSIGIO(cr1, cr2) \
112 	((cr1)->cr_uid == 0 || \
113 	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
114 	    (cr1)->cr_uid == (cr2)->cr_ruid || \
115 	    (cr1)->cr_ruid == (cr2)->cr_uid || \
116 	    (cr1)->cr_uid == (cr2)->cr_uid)
117 
118 int sugid_coredump;
119 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
120     &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
121 
122 static int	do_coredump = 1;
123 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
124 	&do_coredump, 0, "Enable/Disable coredumps");
125 
126 /*
127  * Signal properties and actions.
128  * The array below categorizes the signals and their default actions
129  * according to the following properties:
130  */
131 #define	SA_KILL		0x01		/* terminates process by default */
132 #define	SA_CORE		0x02		/* ditto and coredumps */
133 #define	SA_STOP		0x04		/* suspend process */
134 #define	SA_TTYSTOP	0x08		/* ditto, from tty */
135 #define	SA_IGNORE	0x10		/* ignore by default */
136 #define	SA_CONT		0x20		/* continue if suspended */
137 #define	SA_CANTMASK	0x40		/* non-maskable, catchable */
138 
139 static int sigproptbl[NSIG] = {
140         SA_KILL,                /* SIGHUP */
141         SA_KILL,                /* SIGINT */
142         SA_KILL|SA_CORE,        /* SIGQUIT */
143         SA_KILL|SA_CORE,        /* SIGILL */
144         SA_KILL|SA_CORE,        /* SIGTRAP */
145         SA_KILL|SA_CORE,        /* SIGABRT */
146         SA_KILL|SA_CORE,        /* SIGEMT */
147         SA_KILL|SA_CORE,        /* SIGFPE */
148         SA_KILL,                /* SIGKILL */
149         SA_KILL|SA_CORE,        /* SIGBUS */
150         SA_KILL|SA_CORE,        /* SIGSEGV */
151         SA_KILL|SA_CORE,        /* SIGSYS */
152         SA_KILL,                /* SIGPIPE */
153         SA_KILL,                /* SIGALRM */
154         SA_KILL,                /* SIGTERM */
155         SA_IGNORE,              /* SIGURG */
156         SA_STOP,                /* SIGSTOP */
157         SA_STOP|SA_TTYSTOP,     /* SIGTSTP */
158         SA_IGNORE|SA_CONT,      /* SIGCONT */
159         SA_IGNORE,              /* SIGCHLD */
160         SA_STOP|SA_TTYSTOP,     /* SIGTTIN */
161         SA_STOP|SA_TTYSTOP,     /* SIGTTOU */
162         SA_IGNORE,              /* SIGIO */
163         SA_KILL,                /* SIGXCPU */
164         SA_KILL,                /* SIGXFSZ */
165         SA_KILL,                /* SIGVTALRM */
166         SA_KILL,                /* SIGPROF */
167         SA_IGNORE,              /* SIGWINCH  */
168         SA_IGNORE,              /* SIGINFO */
169         SA_KILL,                /* SIGUSR1 */
170         SA_KILL,                /* SIGUSR2 */
171 };
172 
173 /*
174  * Determine signal that should be delivered to process p, the current
175  * process, 0 if none.  If there is a pending stop signal with default
176  * action, the process stops in issignal().
177  * XXXKSE   the check for a pending stop is not done under KSE
178  *
179  * MP SAFE.
180  */
181 int
182 cursig(struct thread *td)
183 {
184 	struct proc *p = td->td_proc;
185 
186 	PROC_LOCK_ASSERT(p, MA_OWNED);
187 	mtx_assert(&sched_lock, MA_NOTOWNED);
188 	return (SIGPENDING(p) ? issignal(td) : 0);
189 }
190 
191 /*
192  * Arrange for ast() to handle unmasked pending signals on return to user
193  * mode.  This must be called whenever a signal is added to p_siglist or
194  * unmasked in p_sigmask.
195  */
196 void
197 signotify(struct proc *p)
198 {
199 	struct kse *ke;
200 	struct ksegrp *kg;
201 
202 	PROC_LOCK_ASSERT(p, MA_OWNED);
203 	mtx_lock_spin(&sched_lock);
204 	if (SIGPENDING(p)) {
205 		p->p_sflag |= PS_NEEDSIGCHK;
206 		/* XXXKSE for now punish all KSEs */
207 		FOREACH_KSEGRP_IN_PROC(p, kg) {
208 			FOREACH_KSE_IN_GROUP(kg, ke) {
209 				ke->ke_flags |= KEF_ASTPENDING;
210 			}
211 		}
212 	}
213 	mtx_unlock_spin(&sched_lock);
214 }
215 
216 static __inline int
217 sigprop(int sig)
218 {
219 
220 	if (sig > 0 && sig < NSIG)
221 		return (sigproptbl[_SIG_IDX(sig)]);
222 	return (0);
223 }
224 
225 static __inline int
226 sig_ffs(sigset_t *set)
227 {
228 	int i;
229 
230 	for (i = 0; i < _SIG_WORDS; i++)
231 		if (set->__bits[i])
232 			return (ffs(set->__bits[i]) + (i * 32));
233 	return (0);
234 }
235 
236 /*
237  * kern_sigaction
238  * sigaction
239  * freebsd4_sigaction
240  * osigaction
241  */
242 int
243 kern_sigaction(td, sig, act, oact, flags)
244 	struct thread *td;
245 	register int sig;
246 	struct sigaction *act, *oact;
247 	int flags;
248 {
249 	register struct sigacts *ps;
250 	struct proc *p = td->td_proc;
251 
252 	if (!_SIG_VALID(sig))
253 		return (EINVAL);
254 
255 	PROC_LOCK(p);
256 	ps = p->p_sigacts;
257 	if (oact) {
258 		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
259 		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
260 		oact->sa_flags = 0;
261 		if (SIGISMEMBER(ps->ps_sigonstack, sig))
262 			oact->sa_flags |= SA_ONSTACK;
263 		if (!SIGISMEMBER(ps->ps_sigintr, sig))
264 			oact->sa_flags |= SA_RESTART;
265 		if (SIGISMEMBER(ps->ps_sigreset, sig))
266 			oact->sa_flags |= SA_RESETHAND;
267 		if (SIGISMEMBER(ps->ps_signodefer, sig))
268 			oact->sa_flags |= SA_NODEFER;
269 		if (SIGISMEMBER(ps->ps_siginfo, sig))
270 			oact->sa_flags |= SA_SIGINFO;
271 		if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDSTOP)
272 			oact->sa_flags |= SA_NOCLDSTOP;
273 		if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDWAIT)
274 			oact->sa_flags |= SA_NOCLDWAIT;
275 	}
276 	if (act) {
277 		if ((sig == SIGKILL || sig == SIGSTOP) &&
278 		    act->sa_handler != SIG_DFL) {
279 			PROC_UNLOCK(p);
280 			return (EINVAL);
281 		}
282 
283 		/*
284 		 * Change setting atomically.
285 		 */
286 
287 		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
288 		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
289 		if (act->sa_flags & SA_SIGINFO) {
290 			ps->ps_sigact[_SIG_IDX(sig)] =
291 			    (__sighandler_t *)act->sa_sigaction;
292 			SIGADDSET(ps->ps_siginfo, sig);
293 		} else {
294 			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
295 			SIGDELSET(ps->ps_siginfo, sig);
296 		}
297 		if (!(act->sa_flags & SA_RESTART))
298 			SIGADDSET(ps->ps_sigintr, sig);
299 		else
300 			SIGDELSET(ps->ps_sigintr, sig);
301 		if (act->sa_flags & SA_ONSTACK)
302 			SIGADDSET(ps->ps_sigonstack, sig);
303 		else
304 			SIGDELSET(ps->ps_sigonstack, sig);
305 		if (act->sa_flags & SA_RESETHAND)
306 			SIGADDSET(ps->ps_sigreset, sig);
307 		else
308 			SIGDELSET(ps->ps_sigreset, sig);
309 		if (act->sa_flags & SA_NODEFER)
310 			SIGADDSET(ps->ps_signodefer, sig);
311 		else
312 			SIGDELSET(ps->ps_signodefer, sig);
313 #ifdef COMPAT_SUNOS
314 		if (act->sa_flags & SA_USERTRAMP)
315 			SIGADDSET(ps->ps_usertramp, sig);
316 		else
317 			SIGDELSET(ps->ps_usertramp, sig);
318 #endif
319 		if (sig == SIGCHLD) {
320 			if (act->sa_flags & SA_NOCLDSTOP)
321 				p->p_procsig->ps_flag |= PS_NOCLDSTOP;
322 			else
323 				p->p_procsig->ps_flag &= ~PS_NOCLDSTOP;
324 			if (act->sa_flags & SA_NOCLDWAIT) {
325 				/*
326 				 * Paranoia: since SA_NOCLDWAIT is implemented
327 				 * by reparenting the dying child to PID 1 (and
328 				 * trust it to reap the zombie), PID 1 itself
329 				 * is forbidden to set SA_NOCLDWAIT.
330 				 */
331 				if (p->p_pid == 1)
332 					p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
333 				else
334 					p->p_procsig->ps_flag |= PS_NOCLDWAIT;
335 			} else
336 				p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
337 			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
338 				p->p_procsig->ps_flag |= PS_CLDSIGIGN;
339 			else
340 				p->p_procsig->ps_flag &= ~PS_CLDSIGIGN;
341 		}
342 		/*
343 		 * Set bit in p_sigignore for signals that are set to SIG_IGN,
344 		 * and for signals set to SIG_DFL where the default is to
345 		 * ignore. However, don't put SIGCONT in p_sigignore, as we
346 		 * have to restart the process.
347 		 */
348 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
349 		    (sigprop(sig) & SA_IGNORE &&
350 		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
351 			/* never to be seen again */
352 			SIGDELSET(p->p_siglist, sig);
353 			if (sig != SIGCONT)
354 				/* easier in psignal */
355 				SIGADDSET(p->p_sigignore, sig);
356 			SIGDELSET(p->p_sigcatch, sig);
357 		} else {
358 			SIGDELSET(p->p_sigignore, sig);
359 			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
360 				SIGDELSET(p->p_sigcatch, sig);
361 			else
362 				SIGADDSET(p->p_sigcatch, sig);
363 		}
364 #ifdef COMPAT_FREEBSD4
365 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
366 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
367 		    (flags & KSA_FREEBSD4) == 0)
368 			SIGDELSET(ps->ps_freebsd4, sig);
369 		else
370 			SIGADDSET(ps->ps_freebsd4, sig);
371 #endif
372 #ifdef COMPAT_43
373 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
374 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
375 		    (flags & KSA_OSIGSET) == 0)
376 			SIGDELSET(ps->ps_osigset, sig);
377 		else
378 			SIGADDSET(ps->ps_osigset, sig);
379 #endif
380 	}
381 	PROC_UNLOCK(p);
382 	return (0);
383 }
384 
385 #ifndef _SYS_SYSPROTO_H_
386 struct sigaction_args {
387 	int	sig;
388 	struct	sigaction *act;
389 	struct	sigaction *oact;
390 };
391 #endif
392 /*
393  * MPSAFE
394  */
395 /* ARGSUSED */
396 int
397 sigaction(td, uap)
398 	struct thread *td;
399 	register struct sigaction_args *uap;
400 {
401 	struct sigaction act, oact;
402 	register struct sigaction *actp, *oactp;
403 	int error;
404 
405 	mtx_lock(&Giant);
406 
407 	actp = (uap->act != NULL) ? &act : NULL;
408 	oactp = (uap->oact != NULL) ? &oact : NULL;
409 	if (actp) {
410 		error = copyin(uap->act, actp, sizeof(act));
411 		if (error)
412 			goto done2;
413 	}
414 	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
415 	if (oactp && !error) {
416 		error = copyout(oactp, uap->oact, sizeof(oact));
417 	}
418 done2:
419 	mtx_unlock(&Giant);
420 	return (error);
421 }
422 
423 #ifdef COMPAT_FREEBSD4
424 #ifndef _SYS_SYSPROTO_H_
425 struct freebsd4_sigaction_args {
426 	int	sig;
427 	struct	sigaction *act;
428 	struct	sigaction *oact;
429 };
430 #endif
431 /*
432  * MPSAFE
433  */
434 /* ARGSUSED */
435 int
436 freebsd4_sigaction(td, uap)
437 	struct thread *td;
438 	register struct freebsd4_sigaction_args *uap;
439 {
440 	struct sigaction act, oact;
441 	register struct sigaction *actp, *oactp;
442 	int error;
443 
444 	mtx_lock(&Giant);
445 
446 	actp = (uap->act != NULL) ? &act : NULL;
447 	oactp = (uap->oact != NULL) ? &oact : NULL;
448 	if (actp) {
449 		error = copyin(uap->act, actp, sizeof(act));
450 		if (error)
451 			goto done2;
452 	}
453 	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
454 	if (oactp && !error) {
455 		error = copyout(oactp, uap->oact, sizeof(oact));
456 	}
457 done2:
458 	mtx_unlock(&Giant);
459 	return (error);
460 }
461 #endif	/* COMPAT_FREEBSD4 */
462 
463 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
464 #ifndef _SYS_SYSPROTO_H_
465 struct osigaction_args {
466 	int	signum;
467 	struct	osigaction *nsa;
468 	struct	osigaction *osa;
469 };
470 #endif
471 /*
472  * MPSAFE
473  */
474 /* ARGSUSED */
475 int
476 osigaction(td, uap)
477 	struct thread *td;
478 	register struct osigaction_args *uap;
479 {
480 	struct osigaction sa;
481 	struct sigaction nsa, osa;
482 	register struct sigaction *nsap, *osap;
483 	int error;
484 
485 	if (uap->signum <= 0 || uap->signum >= ONSIG)
486 		return (EINVAL);
487 
488 	nsap = (uap->nsa != NULL) ? &nsa : NULL;
489 	osap = (uap->osa != NULL) ? &osa : NULL;
490 
491 	mtx_lock(&Giant);
492 
493 	if (nsap) {
494 		error = copyin(uap->nsa, &sa, sizeof(sa));
495 		if (error)
496 			goto done2;
497 		nsap->sa_handler = sa.sa_handler;
498 		nsap->sa_flags = sa.sa_flags;
499 		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
500 	}
501 	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
502 	if (osap && !error) {
503 		sa.sa_handler = osap->sa_handler;
504 		sa.sa_flags = osap->sa_flags;
505 		SIG2OSIG(osap->sa_mask, sa.sa_mask);
506 		error = copyout(&sa, uap->osa, sizeof(sa));
507 	}
508 done2:
509 	mtx_unlock(&Giant);
510 	return (error);
511 }
512 
513 #if !defined(__i386__) && !defined(__alpha__)
514 /* Avoid replicating the same stub everywhere */
515 int
516 osigreturn(td, uap)
517 	struct thread *td;
518 	struct osigreturn_args *uap;
519 {
520 
521 	return (nosys(td, (struct nosys_args *)uap));
522 }
523 #endif
524 #endif /* COMPAT_43 */
525 
526 /*
527  * Initialize signal state for process 0;
528  * set to ignore signals that are ignored by default.
529  */
530 void
531 siginit(p)
532 	struct proc *p;
533 {
534 	register int i;
535 
536 	PROC_LOCK(p);
537 	for (i = 1; i <= NSIG; i++)
538 		if (sigprop(i) & SA_IGNORE && i != SIGCONT)
539 			SIGADDSET(p->p_sigignore, i);
540 	PROC_UNLOCK(p);
541 }
542 
543 /*
544  * Reset signals for an exec of the specified process.
545  */
546 void
547 execsigs(p)
548 	register struct proc *p;
549 {
550 	register struct sigacts *ps;
551 	register int sig;
552 
553 	/*
554 	 * Reset caught signals.  Held signals remain held
555 	 * through p_sigmask (unless they were caught,
556 	 * and are now ignored by default).
557 	 */
558 	PROC_LOCK_ASSERT(p, MA_OWNED);
559 	ps = p->p_sigacts;
560 	while (SIGNOTEMPTY(p->p_sigcatch)) {
561 		sig = sig_ffs(&p->p_sigcatch);
562 		SIGDELSET(p->p_sigcatch, sig);
563 		if (sigprop(sig) & SA_IGNORE) {
564 			if (sig != SIGCONT)
565 				SIGADDSET(p->p_sigignore, sig);
566 			SIGDELSET(p->p_siglist, sig);
567 		}
568 		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
569 	}
570 	/*
571 	 * Reset stack state to the user stack.
572 	 * Clear set of signals caught on the signal stack.
573 	 */
574 	p->p_sigstk.ss_flags = SS_DISABLE;
575 	p->p_sigstk.ss_size = 0;
576 	p->p_sigstk.ss_sp = 0;
577 	p->p_flag &= ~P_ALTSTACK;
578 	/*
579 	 * Reset the "no zombies if child dies" flag, as Solaris does.
580 	 */
581 	p->p_procsig->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
582 	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
583 		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
584 }
585 
586 /*
587  * do_sigprocmask()
588  *
589  *	Manipulate signal mask.
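 *	A sketch of the semantics implemented below: SIG_BLOCK ORs *set into
 *	p_sigmask, SIG_UNBLOCK clears those bits, and SIG_SETMASK replaces the
 *	mask outright (only the low 32 signals when `old' is set, for the
 *	compat osigprocmask() caller).  SIGKILL and SIGSTOP can never be
 *	blocked; SIG_CANTMASK strips them from the new mask.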
590  */
591 static int
592 do_sigprocmask(p, how, set, oset, old)
593 	struct proc *p;
594 	int how;
595 	sigset_t *set, *oset;
596 	int old;
597 {
598 	int error;
599 
600 	PROC_LOCK(p);
601 	if (oset != NULL)
602 		*oset = p->p_sigmask;
603 
604 	error = 0;
605 	if (set != NULL) {
606 		switch (how) {
607 		case SIG_BLOCK:
608 			SIG_CANTMASK(*set);
609 			SIGSETOR(p->p_sigmask, *set);
610 			break;
611 		case SIG_UNBLOCK:
612 			SIGSETNAND(p->p_sigmask, *set);
613 			signotify(p);
614 			break;
615 		case SIG_SETMASK:
616 			SIG_CANTMASK(*set);
617 			if (old)
618 				SIGSETLO(p->p_sigmask, *set);
619 			else
620 				p->p_sigmask = *set;
621 			signotify(p);
622 			break;
623 		default:
624 			error = EINVAL;
625 			break;
626 		}
627 	}
628 	PROC_UNLOCK(p);
629 	return (error);
630 }
631 
632 /*
633  * sigprocmask() - MP SAFE (XXXKSE not under KSE it isn't)
634  */
635 
636 #ifndef _SYS_SYSPROTO_H_
637 struct sigprocmask_args {
638 	int	how;
639 	const sigset_t *set;
640 	sigset_t *oset;
641 };
642 #endif
643 int
644 sigprocmask(td, uap)
645 	register struct thread *td;
646 	struct sigprocmask_args *uap;
647 {
648 	struct proc *p = td->td_proc;
649 	sigset_t set, oset;
650 	sigset_t *setp, *osetp;
651 	int error;
652 
653 	setp = (uap->set != NULL) ? &set : NULL;
654 	osetp = (uap->oset != NULL) ? &oset : NULL;
655 	if (setp) {
656 		error = copyin(uap->set, setp, sizeof(set));
657 		if (error)
658 			return (error);
659 	}
660 	error = do_sigprocmask(p, uap->how, setp, osetp, 0);
661 	if (osetp && !error) {
662 		error = copyout(osetp, uap->oset, sizeof(oset));
663 	}
664 	return (error);
665 }
666 
667 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
668 /*
669  * osigprocmask() - MP SAFE
670  */
671 #ifndef _SYS_SYSPROTO_H_
672 struct osigprocmask_args {
673 	int	how;
674 	osigset_t mask;
675 };
676 #endif
677 int
678 osigprocmask(td, uap)
679 	register struct thread *td;
680 	struct osigprocmask_args *uap;
681 {
682 	struct proc *p = td->td_proc;
683 	sigset_t set, oset;
684 	int error;
685 
686 	OSIG2SIG(uap->mask, set);
687 	error = do_sigprocmask(p, uap->how, &set, &oset, 1);
688 	SIG2OSIG(oset, td->td_retval[0]);
689 	return (error);
690 }
691 #endif /* COMPAT_43 */
692 
693 #ifndef _SYS_SYSPROTO_H_
694 struct sigpending_args {
695 	sigset_t	*set;
696 };
697 #endif
698 /*
699  * MPSAFE
700  */
701 /* ARGSUSED */
702 int
703 sigpending(td, uap)
704 	struct thread *td;
705 	struct sigpending_args *uap;
706 {
707 	struct proc *p = td->td_proc;
708 	sigset_t siglist;
709 	int error;
710 
711 	mtx_lock(&Giant);
712 	PROC_LOCK(p);
713 	siglist = p->p_siglist;
714 	PROC_UNLOCK(p);
715 	mtx_unlock(&Giant);
716 	error = copyout(&siglist, uap->set, sizeof(sigset_t));
717 	return(error);
718 }
719 
720 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
721 #ifndef _SYS_SYSPROTO_H_
722 struct osigpending_args {
723 	int	dummy;
724 };
725 #endif
726 /*
727  * MPSAFE
728  */
729 /* ARGSUSED */
730 int
731 osigpending(td, uap)
732 	struct thread *td;
733 	struct osigpending_args *uap;
734 {
735 	struct proc *p = td->td_proc;
736 
737 	mtx_lock(&Giant);
738 	PROC_LOCK(p);
739 	SIG2OSIG(p->p_siglist, td->td_retval[0]);
740 	PROC_UNLOCK(p);
741 	mtx_unlock(&Giant);
742 	return (0);
743 }
744 #endif /* COMPAT_43 */
745 
746 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
747 /*
748  * Generalized interface signal handler, 4.3-compatible.
749  */
750 #ifndef _SYS_SYSPROTO_H_
751 struct osigvec_args {
752 	int	signum;
753 	struct	sigvec *nsv;
754 	struct	sigvec *osv;
755 };
756 #endif
757 /*
758  * MPSAFE
759  */
760 /* ARGSUSED */
761 int
762 osigvec(td, uap)
763 	struct thread *td;
764 	register struct osigvec_args *uap;
765 {
766 	struct sigvec vec;
767 	struct sigaction nsa, osa;
768 	register struct sigaction *nsap, *osap;
769 	int error;
770 
771 	if (uap->signum <= 0 || uap->signum >= ONSIG)
772 		return (EINVAL);
773 	nsap = (uap->nsv != NULL) ? &nsa : NULL;
774 	osap = (uap->osv != NULL) ? &osa : NULL;
775 	if (nsap) {
776 		error = copyin(uap->nsv, &vec, sizeof(vec));
777 		if (error)
778 			return (error);
779 		nsap->sa_handler = vec.sv_handler;
780 		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
781 		nsap->sa_flags = vec.sv_flags;
782 		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
783 #ifdef COMPAT_SUNOS
784 		nsap->sa_flags |= SA_USERTRAMP;
785 #endif
786 	}
787 	mtx_lock(&Giant);
788 	error = kern_sigaction(td, uap->signum, nsap, osap, 1);
789 	mtx_unlock(&Giant);
790 	if (osap && !error) {
791 		vec.sv_handler = osap->sa_handler;
792 		SIG2OSIG(osap->sa_mask, vec.sv_mask);
793 		vec.sv_flags = osap->sa_flags;
794 		vec.sv_flags &= ~SA_NOCLDWAIT;
795 		vec.sv_flags ^= SA_RESTART;
796 #ifdef COMPAT_SUNOS
797 		vec.sv_flags &= ~SA_NOCLDSTOP;
798 #endif
799 		error = copyout(&vec, uap->osv, sizeof(vec));
800 	}
801 	return (error);
802 }
803 
804 #ifndef _SYS_SYSPROTO_H_
805 struct osigblock_args {
806 	int	mask;
807 };
808 #endif
809 /*
810  * MPSAFE
811  */
812 int
813 osigblock(td, uap)
814 	register struct thread *td;
815 	struct osigblock_args *uap;
816 {
817 	struct proc *p = td->td_proc;
818 	sigset_t set;
819 
820 	OSIG2SIG(uap->mask, set);
821 	SIG_CANTMASK(set);
822 	mtx_lock(&Giant);
823 	PROC_LOCK(p);
824 	SIG2OSIG(p->p_sigmask, td->td_retval[0]);
825 	SIGSETOR(p->p_sigmask, set);
826 	PROC_UNLOCK(p);
827 	mtx_unlock(&Giant);
828 	return (0);
829 }
830 
831 #ifndef _SYS_SYSPROTO_H_
832 struct osigsetmask_args {
833 	int	mask;
834 };
835 #endif
836 /*
837  * MPSAFE
838  */
839 int
840 osigsetmask(td, uap)
841 	struct thread *td;
842 	struct osigsetmask_args *uap;
843 {
844 	struct proc *p = td->td_proc;
845 	sigset_t set;
846 
847 	OSIG2SIG(uap->mask, set);
848 	SIG_CANTMASK(set);
849 	mtx_lock(&Giant);
850 	PROC_LOCK(p);
851 	SIG2OSIG(p->p_sigmask, td->td_retval[0]);
852 	SIGSETLO(p->p_sigmask, set);
853 	signotify(p);
854 	PROC_UNLOCK(p);
855 	mtx_unlock(&Giant);
856 	return (0);
857 }
858 #endif /* COMPAT_43 || COMPAT_SUNOS */
859 
860 /*
861  * Suspend process until signal, providing mask to be set
862  * in the meantime.  Note the nonstandard calling convention of the compat
863  * osigsuspend() below: it passes the mask by value, not by pointer, to save a copyin.
864  ***** XXXKSE this doesn't make sense under KSE.
865  ***** Do we suspend the thread or all threads in the process?
866  ***** How do we suspend threads running NOW on another processor?
867  */
868 #ifndef _SYS_SYSPROTO_H_
869 struct sigsuspend_args {
870 	const sigset_t *sigmask;
871 };
872 #endif
873 /*
874  * MPSAFE
875  */
876 /* ARGSUSED */
877 int
878 sigsuspend(td, uap)
879 	struct thread *td;
880 	struct sigsuspend_args *uap;
881 {
882 	sigset_t mask;
883 	int error;
884 
885 	error = copyin(uap->sigmask, &mask, sizeof(mask));
886 	if (error)
887 		return (error);
888 	return (kern_sigsuspend(td, mask));
889 }
890 
891 int
892 kern_sigsuspend(struct thread *td, sigset_t mask)
893 {
894 	struct proc *p = td->td_proc;
895 	register struct sigacts *ps;
896 
897 	/*
898 	 * When returning from sigsuspend, we want
899 	 * the old mask to be restored after the
900 	 * signal handler has finished.  Thus, we
901 	 * save it here and mark the process (P_OLDMASK)
902 	 * to indicate this.
903 	 */
904 	mtx_lock(&Giant);
905 	PROC_LOCK(p);
906 	ps = p->p_sigacts;
907 	p->p_oldsigmask = p->p_sigmask;
908 	p->p_flag |= P_OLDMASK;
909 
910 	SIG_CANTMASK(mask);
911 	p->p_sigmask = mask;
912 	signotify(p);
913 	while (msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
914 		/* void */;
915 	PROC_UNLOCK(p);
916 	mtx_unlock(&Giant);
917 	/* always return EINTR rather than ERESTART... */
918 	return (EINTR);
919 }
920 
921 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
922 #ifndef _SYS_SYSPROTO_H_
923 struct osigsuspend_args {
924 	osigset_t mask;
925 };
926 #endif
927 /*
928  * MPSAFE
929  */
930 /* ARGSUSED */
931 int
932 osigsuspend(td, uap)
933 	struct thread *td;
934 	struct osigsuspend_args *uap;
935 {
936 	struct proc *p = td->td_proc;
937 	sigset_t mask;
938 	register struct sigacts *ps;
939 
940 	mtx_lock(&Giant);
941 	PROC_LOCK(p);
942 	ps = p->p_sigacts;
943 	p->p_oldsigmask = p->p_sigmask;
944 	p->p_flag |= P_OLDMASK;
945 	OSIG2SIG(uap->mask, mask);
946 	SIG_CANTMASK(mask);
947 	SIGSETLO(p->p_sigmask, mask);
948 	signotify(p);
949 	while (msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
950 		/* void */;
951 	PROC_UNLOCK(p);
952 	mtx_unlock(&Giant);
953 	/* always return EINTR rather than ERESTART... */
954 	return (EINTR);
955 }
956 #endif /* COMPAT_43 */
957 
958 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
959 #ifndef _SYS_SYSPROTO_H_
960 struct osigstack_args {
961 	struct	sigstack *nss;
962 	struct	sigstack *oss;
963 };
964 #endif
965 /*
966  * MPSAFE
967  */
968 /* ARGSUSED */
969 int
970 osigstack(td, uap)
971 	struct thread *td;
972 	register struct osigstack_args *uap;
973 {
974 	struct proc *p = td->td_proc;
975 	struct sigstack ss;
976 	int error = 0;
977 
978 	mtx_lock(&Giant);
979 
980 	if (uap->oss != NULL) {
981 		PROC_LOCK(p);
982 		ss.ss_sp = p->p_sigstk.ss_sp;
983 		ss.ss_onstack = sigonstack(cpu_getstack(td));
984 		PROC_UNLOCK(p);
985 		error = copyout(&ss, uap->oss, sizeof(struct sigstack));
986 		if (error)
987 			goto done2;
988 	}
989 
990 	if (uap->nss != NULL) {
991 		if ((error = copyin(uap->nss, &ss, sizeof(ss))) != 0)
992 			goto done2;
993 		PROC_LOCK(p);
994 		p->p_sigstk.ss_sp = ss.ss_sp;
995 		p->p_sigstk.ss_size = 0;
996 		p->p_sigstk.ss_flags |= ss.ss_onstack & SS_ONSTACK;
997 		p->p_flag |= P_ALTSTACK;
998 		PROC_UNLOCK(p);
999 	}
1000 done2:
1001 	mtx_unlock(&Giant);
1002 	return (error);
1003 }
1004 #endif /* COMPAT_43 || COMPAT_SUNOS */
1005 
1006 #ifndef _SYS_SYSPROTO_H_
1007 struct sigaltstack_args {
1008 	stack_t	*ss;
1009 	stack_t	*oss;
1010 };
1011 #endif
1012 /*
1013  * MPSAFE
1014  */
1015 /* ARGSUSED */
1016 int
1017 sigaltstack(td, uap)
1018 	struct thread *td;
1019 	register struct sigaltstack_args *uap;
1020 {
1021 	stack_t ss, oss;
1022 	int error;
1023 
1024 	if (uap->ss != NULL) {
1025 		error = copyin(uap->ss, &ss, sizeof(ss));
1026 		if (error)
1027 			return (error);
1028 	}
1029 	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1030 	    (uap->oss != NULL) ? &oss : NULL);
1031 	if (error)
1032 		return (error);
1033 	if (uap->oss != NULL)
1034 		error = copyout(&oss, uap->oss, sizeof(stack_t));
1035 	return (error);
1036 }
1037 
1038 int
1039 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1040 {
1041 	struct proc *p = td->td_proc;
1042 	int oonstack;
1043 	int error = 0;
1044 
1045 	mtx_lock(&Giant);
1046 
1047 	oonstack = sigonstack(cpu_getstack(td));
1048 
1049 	if (oss != NULL) {
1050 		PROC_LOCK(p);
1051 		*oss = p->p_sigstk;
1052 		oss->ss_flags = (p->p_flag & P_ALTSTACK)
1053 		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1054 		PROC_UNLOCK(p);
1055 	}
1056 
1057 	if (ss != NULL) {
1058 		if (oonstack) {
1059 			error = EPERM;
1060 			goto done2;
1061 		}
1062 		if ((ss->ss_flags & ~SS_DISABLE) != 0) {
1063 			error = EINVAL;
1064 			goto done2;
1065 		}
1066 		if (!(ss->ss_flags & SS_DISABLE)) {
1067 			if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
1068 				error = ENOMEM;
1069 				goto done2;
1070 			}
1071 			PROC_LOCK(p);
1072 			p->p_sigstk = *ss;
1073 			p->p_flag |= P_ALTSTACK;
1074 			PROC_UNLOCK(p);
1075 		} else {
1076 			PROC_LOCK(p);
1077 			p->p_flag &= ~P_ALTSTACK;
1078 			PROC_UNLOCK(p);
1079 		}
1080 	}
1081 done2:
1082 	mtx_unlock(&Giant);
1083 	return (error);
1084 }
1085 
1086 /*
1087  * Common code for kill process group/broadcast kill.
1088  * td is the calling thread.
1089  */
1090 static int
1091 killpg1(td, sig, pgid, all)
1092 	register struct thread *td;
1093 	int sig, pgid, all;
1094 {
1095 	register struct proc *p;
1096 	struct pgrp *pgrp;
1097 	int nfound = 0;
1098 
1099 	if (all) {
1100 		/*
1101 		 * broadcast
1102 		 */
1103 		sx_slock(&allproc_lock);
1104 		LIST_FOREACH(p, &allproc, p_list) {
1105 			PROC_LOCK(p);
1106 			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1107 			    p == td->td_proc) {
1108 				PROC_UNLOCK(p);
1109 				continue;
1110 			}
1111 			if (p_cansignal(td, p, sig) == 0) {
1112 				nfound++;
1113 				if (sig)
1114 					psignal(p, sig);
1115 			}
1116 			PROC_UNLOCK(p);
1117 		}
1118 		sx_sunlock(&allproc_lock);
1119 	} else {
1120 		sx_slock(&proctree_lock);
1121 		if (pgid == 0) {
1122 			/*
1123 			 * zero pgid means send to my process group.
1124 			 */
1125 			pgrp = td->td_proc->p_pgrp;
1126 			PGRP_LOCK(pgrp);
1127 		} else {
1128 			pgrp = pgfind(pgid);
1129 			if (pgrp == NULL) {
1130 				sx_sunlock(&proctree_lock);
1131 				return (ESRCH);
1132 			}
1133 		}
1134 		sx_sunlock(&proctree_lock);
1135 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1136 			PROC_LOCK(p);
1137 			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
1138 				PROC_UNLOCK(p);
1139 				continue;
1140 			}
1141 			if (p->p_state == PRS_ZOMBIE) {
1142 				PROC_UNLOCK(p);
1143 				continue;
1144 			}
1145 			if (p_cansignal(td, p, sig) == 0) {
1146 				nfound++;
1147 				if (sig)
1148 					psignal(p, sig);
1149 			}
1150 			PROC_UNLOCK(p);
1151 		}
1152 		PGRP_UNLOCK(pgrp);
1153 	}
1154 	return (nfound ? 0 : ESRCH);
1155 }
1156 
1157 #ifndef _SYS_SYSPROTO_H_
1158 struct kill_args {
1159 	int	pid;
1160 	int	signum;
1161 };
1162 #endif
1163 /*
1164  * MPSAFE
1165  */
1166 /* ARGSUSED */
1167 int
1168 kill(td, uap)
1169 	register struct thread *td;
1170 	register struct kill_args *uap;
1171 {
1172 	register struct proc *p;
1173 	int error = 0;
1174 
1175 	if ((u_int)uap->signum > _SIG_MAXSIG)
1176 		return (EINVAL);
1177 
1178 	mtx_lock(&Giant);
1179 	if (uap->pid > 0) {
1180 		/* kill single process */
1181 		if ((p = pfind(uap->pid)) == NULL) {
1182 			error = ESRCH;
1183 		} else if ((error = p_cansignal(td, p, uap->signum)) != 0) {
1184 			PROC_UNLOCK(p);
1185 		} else {
1186 			if (uap->signum)
1187 				psignal(p, uap->signum);
1188 			PROC_UNLOCK(p);
1189 			error = 0;
1190 		}
1191 	} else {
1192 		switch (uap->pid) {
1193 		case -1:		/* broadcast signal */
1194 			error = killpg1(td, uap->signum, 0, 1);
1195 			break;
1196 		case 0:			/* signal own process group */
1197 			error = killpg1(td, uap->signum, 0, 0);
1198 			break;
1199 		default:		/* negative explicit process group */
1200 			error = killpg1(td, uap->signum, -uap->pid, 0);
1201 			break;
1202 		}
1203 	}
1204 	mtx_unlock(&Giant);
1205 	return(error);
1206 }
1207 
1208 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1209 #ifndef _SYS_SYSPROTO_H_
1210 struct okillpg_args {
1211 	int	pgid;
1212 	int	signum;
1213 };
1214 #endif
1215 /*
1216  * MPSAFE
1217  */
1218 /* ARGSUSED */
1219 int
1220 okillpg(td, uap)
1221 	struct thread *td;
1222 	register struct okillpg_args *uap;
1223 {
1224 	int error;
1225 
1226 	if ((u_int)uap->signum > _SIG_MAXSIG)
1227 		return (EINVAL);
1228 	mtx_lock(&Giant);
1229 	error = killpg1(td, uap->signum, uap->pgid, 0);
1230 	mtx_unlock(&Giant);
1231 	return (error);
1232 }
1233 #endif /* COMPAT_43 || COMPAT_SUNOS */
1234 
1235 /*
1236  * Send a signal to a process group.
1237  */
1238 void
1239 gsignal(pgid, sig)
1240 	int pgid, sig;
1241 {
1242 	struct pgrp *pgrp;
1243 
1244 	if (pgid != 0) {
1245 		sx_slock(&proctree_lock);
1246 		pgrp = pgfind(pgid);
1247 		sx_sunlock(&proctree_lock);
1248 		if (pgrp != NULL) {
1249 			pgsignal(pgrp, sig, 0);
1250 			PGRP_UNLOCK(pgrp);
1251 		}
1252 	}
1253 }
1254 
1255 /*
1256  * Send a signal to a process group.  If checkctty is 1,
1257  * limit to members which have a controlling terminal.
1258  */
1259 void
1260 pgsignal(pgrp, sig, checkctty)
1261 	struct pgrp *pgrp;
1262 	int sig, checkctty;
1263 {
1264 	register struct proc *p;
1265 
1266 	if (pgrp) {
1267 		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1268 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1269 			PROC_LOCK(p);
1270 			if (checkctty == 0 || p->p_flag & P_CONTROLT)
1271 				psignal(p, sig);
1272 			PROC_UNLOCK(p);
1273 		}
1274 	}
1275 }
1276 
1277 /*
1278  * Send a signal caused by a trap to the current process.
1279  * If it will be caught immediately, deliver it with correct code.
1280  * Otherwise, post it normally.
1281  *
1282  * MPSAFE
1283  */
1284 void
1285 trapsignal(p, sig, code)
1286 	struct proc *p;
1287 	register int sig;
1288 	u_long code;
1289 {
1290 	register struct sigacts *ps = p->p_sigacts;
1291 
1292 	PROC_LOCK(p);
1293 	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
1294 	    !SIGISMEMBER(p->p_sigmask, sig)) {
1295 		p->p_stats->p_ru.ru_nsignals++;
1296 #ifdef KTRACE
1297 		if (KTRPOINT(curthread, KTR_PSIG))
1298 			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1299 			    &p->p_sigmask, code);
1300 #endif
1301 		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
1302 						&p->p_sigmask, code);
1303 		SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1304 		if (!SIGISMEMBER(ps->ps_signodefer, sig))
1305 			SIGADDSET(p->p_sigmask, sig);
1306 		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1307 			/*
1308 			 * See kern_sigaction() for origin of this code.
1309 			 */
1310 			SIGDELSET(p->p_sigcatch, sig);
1311 			if (sig != SIGCONT &&
1312 			    sigprop(sig) & SA_IGNORE)
1313 				SIGADDSET(p->p_sigignore, sig);
1314 			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1315 		}
1316 	} else {
1317 		p->p_code = code;	/* XXX for core dump/debugger */
1318 		p->p_sig = sig;		/* XXX to verify code */
1319 		psignal(p, sig);
1320 	}
1321 	PROC_UNLOCK(p);
1322 }
1323 
1324 /*
1325  * Send the signal to the process.  If the signal has an action, the action
1326  * is usually performed by the target process rather than the caller; we add
1327  * the signal to the set of pending signals for the process.
1328  *
1329  * Exceptions:
1330  *   o When a stop signal is sent to a sleeping process that takes the
1331  *     default action, the process is stopped without awakening it.
1332  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
1333  *     regardless of the signal action (eg, blocked or ignored).
1334  *
1335  * Other ignored signals are discarded immediately.
1336  */
1337 void
1338 psignal(p, sig)
1339 	register struct proc *p;
1340 	register int sig;
1341 {
1342 	register sig_t action;
1343 	struct thread *td;
1344 	register int prop;
1345 
1346 
1347 	KASSERT(_SIG_VALID(sig),
1348 	    ("psignal(): invalid signal %d\n", sig));
1349 
1350 	PROC_LOCK_ASSERT(p, MA_OWNED);
1351 	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
1352 
1353 	prop = sigprop(sig);
1354 	/*
1355 	 * If proc is traced, always give parent a chance;
1356 	 * if signal event is tracked by procfs, give *that*
1357 	 * a chance, as well.
1358 	 */
1359 	if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
1360 		action = SIG_DFL;
1361 	} else {
1362 		/*
1363 		 * If the signal is being ignored,
1364 		 * then we forget about it immediately.
1365 		 * (Note: we don't set SIGCONT in p_sigignore,
1366 		 * and if it is set to SIG_IGN,
1367 		 * action will be SIG_DFL here.)
1368 		 */
1369 		if (SIGISMEMBER(p->p_sigignore, sig) || (p->p_flag & P_WEXIT))
1370 			return;
1371 		if (SIGISMEMBER(p->p_sigmask, sig))
1372 			action = SIG_HOLD;
1373 		else if (SIGISMEMBER(p->p_sigcatch, sig))
1374 			action = SIG_CATCH;
1375 		else
1376 			action = SIG_DFL;
1377 	}
1378 
1379 	if (prop & SA_CONT)
1380 		SIG_STOPSIGMASK(p->p_siglist);
1381 
1382 	if (prop & SA_STOP) {
1383 		/*
1384 		 * If sending a tty stop signal to a member of an orphaned
1385 		 * process group, discard the signal here if the action
1386 		 * is default; don't stop the process below if sleeping,
1387 		 * and don't clear any pending SIGCONT.
1388 		 */
1389 		if ((prop & SA_TTYSTOP) &&
1390 		    (p->p_pgrp->pg_jobc == 0) &&
1391 		    (action == SIG_DFL))
1392 		        return;
1393 		SIG_CONTSIGMASK(p->p_siglist);
1394 		p->p_flag &= ~P_CONTINUED;
1395 	}
1396 	SIGADDSET(p->p_siglist, sig);
1397 	signotify(p);			/* uses schedlock */
1398 
1399 	/*
1400 	 * Some signals have a process-wide effect and a per-thread
1401 	 * component.  Most processing occurs when the process next
1402 	 * tries to cross the user boundary, however there are some
1403 	 * times when processing needs to be done immediately, such as
1404 	 * waking up threads so that they can cross the user boundary.
1405 	 * We try to do the per-process part here.
1406 	 */
1407 	if (P_SHOULDSTOP(p)) {
1408 		/*
1409 		 * The process is in stopped mode. All the threads should be
1410 		 * either winding down or already on the suspended queue.
1411 		 */
1412 		if (p->p_flag & P_TRACED) {
1413 			/*
1414 			 * The traced process is already stopped,
1415 			 * so no further action is necessary.
1416 			 * No signal can restart us.
1417 			 */
1418 			goto out;
1419 		}
1420 
1421 		if (sig == SIGKILL) {
1422 			/*
1423 			 * SIGKILL sets process running.
1424 			 * It will die elsewhere.
1425 			 * All threads must be restarted.
1426 			 */
1427 			p->p_flag &= ~P_STOPPED;
1428 			goto runfast;
1429 		}
1430 
1431 		if (prop & SA_CONT) {
1432 			/*
1433 			 * If SIGCONT is default (or ignored), we continue the
1434 			 * process but don't leave the signal in p_siglist as
1435 			 * it has no further action.  If SIGCONT is held, we
1436 			 * continue the process and leave the signal in
1437 			 * p_siglist.  If the process catches SIGCONT, let it
1438 			 * handle the signal itself.  If it isn't waiting on
1439 			 * an event, it goes back to run state.
1440 			 * Otherwise, process goes back to sleep state.
1441 			 */
1442 			p->p_flag &= ~P_STOPPED_SIG;
1443 			p->p_flag |= P_CONTINUED;
1444 			if (action == SIG_DFL) {
1445 				SIGDELSET(p->p_siglist, sig);
1446 			} else if (action == SIG_CATCH) {
1447 				/*
1448 				 * The process wants to catch it so it needs
1449 				 * to run at least one thread, but which one?
1450 				 * It would seem that the answer would be to
1451 				 * run an upcall in the next KSE to run, and
1452 				 * deliver the signal that way. In a NON KSE
1453 				 * process, we need to make sure that the
1454 				 * single thread is runnable asap.
1455 				 * XXXKSE for now however, make them all run.
1456 				 */
1457 				goto runfast;
1458 			}
1459 			/*
1460 			 * The signal is not ignored or caught.
1461 			 */
1462 			mtx_lock_spin(&sched_lock);
1463 			thread_unsuspend(p);
1464 			mtx_unlock_spin(&sched_lock);
1465 			goto out;
1466 		}
1467 
1468 		if (prop & SA_STOP) {
1469 			/*
1470 			 * Already stopped, don't need to stop again
1471 			 * (If we did, the shell could get confused).
1472 			 * Just make sure the signal STOP bit is set.
1473 			 */
1474 			p->p_flag |= P_STOPPED_SIG;
1475 			SIGDELSET(p->p_siglist, sig);
1476 			goto out;
1477 		}
1478 
1479 		/*
1480 		 * All other kinds of signals:
1481 		 * If a thread is sleeping interruptibly, simulate a
1482 		 * wakeup so that when it is continued it will be made
1483 		 * runnable and can look at the signal.  However, don't make
1484 		 * the PROCESS runnable, leave it stopped.
1485 		 * It may run a bit until it hits a thread_suspend_check().
1486 		 */
1487 		mtx_lock_spin(&sched_lock);
1488 		FOREACH_THREAD_IN_PROC(p, td) {
1489 			if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
1490 				if (td->td_flags & TDF_CVWAITQ)
1491 					cv_abort(td);
1492 				else
1493 					abortsleep(td);
1494 			}
1495 		}
1496 		mtx_unlock_spin(&sched_lock);
1497 		goto out;
1498 		/*
1499 		 * XXXKSE  What about threads that are waiting on mutexes?
1500 		 * Shouldn't they abort too?
1501 		 * No, hopefully mutexes are short lived.. They'll
1502 		 * eventually hit thread_suspend_check().
1503 		 */
1504 	}  else if (p->p_state == PRS_NORMAL) {
1505 		if (prop & SA_CONT) {
1506 			/*
1507 			 * Already active, don't need to start again.
1508 			 */
1509 			SIGDELSET(p->p_siglist, sig);
1510 			goto out;
1511 		}
1512 		if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
1513 			!(prop & SA_STOP)) {
1514 			mtx_lock_spin(&sched_lock);
1515 			FOREACH_THREAD_IN_PROC(p, td)
1516 				tdsignal(td, sig, action);
1517 			mtx_unlock_spin(&sched_lock);
1518 			goto out;
1519 		}
1520 		if (prop & SA_STOP) {
1521 			if (p->p_flag & P_PPWAIT)
1522 				goto out;
1523 			mtx_lock_spin(&sched_lock);
1524 			FOREACH_THREAD_IN_PROC(p, td) {
1525 				if (TD_IS_SLEEPING(td) &&
1526 					(td->td_flags & TDF_SINTR))
1527 					thread_suspend_one(td);
1528 			}
1529 			if (p->p_suspcount == p->p_numthreads) {
1530 				mtx_unlock_spin(&sched_lock);
1531 				stop(p);
1532 				p->p_xstat = sig;
1533 				SIGDELSET(p->p_siglist, sig);
1534 				PROC_LOCK(p->p_pptr);
1535 				if ((p->p_pptr->p_procsig->ps_flag &
1536 					PS_NOCLDSTOP) == 0) {
1537 					psignal(p->p_pptr, SIGCHLD);
1538 				}
1539 				PROC_UNLOCK(p->p_pptr);
1540 			} else {
1541 				mtx_unlock_spin(&sched_lock);
1542 			}
1543 			goto out;
1544 		}
1545 		else
1546 			goto runfast;
1547 		/* NOTREACHED */
1548 	} else {
1549 		/* Not in "NORMAL" state.  Discard the signal. */
1550 		SIGDELSET(p->p_siglist, sig);
1551 		goto out;
1552 	}
1553 
1554 	/*
1555 	 * The process is not stopped so we need to apply the signal to all the
1556 	 * running threads.
1557 	 */
1558 
1559 runfast:
1560 	mtx_lock_spin(&sched_lock);
1561 	FOREACH_THREAD_IN_PROC(p, td)
1562 		tdsignal(td, sig, action);
1563 	thread_unsuspend(p);
1564 	mtx_unlock_spin(&sched_lock);
1565 out:
1566 	/* If we jump here, sched_lock should not be owned. */
1567 	mtx_assert(&sched_lock, MA_NOTOWNED);
1568 }
1569 
1570 /*
1571  * The force of a signal has been directed against a single
1572  * thread. We need to see what we can do about knocking it
1573  * out of any sleep it may be in, etc.
1574  */
1575 static void
1576 tdsignal(struct thread *td, int sig, sig_t action)
1577 {
1578 	struct proc *p = td->td_proc;
1579 	register int prop;
1580 
1581 	mtx_assert(&sched_lock, MA_OWNED);
1582 	prop = sigprop(sig);
1583 	/*
1584 	 * Bring the priority of a thread up if we want it to get
1585 	 * killed in this lifetime.
1586 	 */
1587 	if ((action == SIG_DFL) && (prop & SA_KILL)) {
1588 		if (td->td_priority > PUSER) {
1589 			td->td_priority = PUSER;
1590 		}
1591 	}
1592 
1593 	/*
1594 	 * Defer further processing for signals which are held,
1595 	 * except that stopped processes must be continued by SIGCONT.
1596 	 */
1597 	if (action == SIG_HOLD) {
1598 		return;
1599 	}
1600 	if (TD_IS_SLEEPING(td)) {
1601 		/*
1602 		 * If thread is sleeping uninterruptibly
1603 		 * we can't interrupt the sleep... the signal will
1604 		 * be noticed when the process returns through
1605 		 * trap() or syscall().
1606 		 */
1607 		if ((td->td_flags & TDF_SINTR) == 0) {
1608 			return;
1609 		}
1610 		/*
1611 		 * Process is sleeping and traced.  Make it runnable
1612 		 * so it can discover the signal in issignal() and stop
1613 		 * for its parent.
1614 		 */
1615 		if (p->p_flag & P_TRACED) {
1616 			p->p_flag &= ~P_STOPPED_TRACE;
1617 		} else {
1618 
1619 			/*
1620 			 * If SIGCONT is default (or ignored) and process is
1621 			 * asleep, we are finished; the process should not
1622 			 * be awakened.
1623 			 */
1624 			if ((prop & SA_CONT) && action == SIG_DFL) {
1625 				SIGDELSET(p->p_siglist, sig);
1626 				return;
1627 			}
1628 
1629 			/*
1630 			 * Raise priority to at least PUSER.
1631 			 */
1632 			if (td->td_priority > PUSER) {
1633 				td->td_priority = PUSER;
1634 			}
1635 		}
1636 		if (td->td_flags & TDF_CVWAITQ)
1637 			cv_abort(td);
1638 		else
1639 			abortsleep(td);
1640 	}
1641 #ifdef SMP
1642 	  else {
1643 		/*
1644 		 * Other states do nothing with the signal immediately,
1645 		 * other than kicking ourselves if we are running.
1646 		 * It will either never be noticed, or noticed very soon.
1647 		 */
1648 		if (TD_IS_RUNNING(td) && td != curthread) {
1649 			forward_signal(td);
1650 		}
1651 	  }
1652 #endif
1653 }
1654 
1655 /*
1656  * If the current process has received a signal (should be caught or cause
1657  * termination, should interrupt current syscall), return the signal number.
1658  * Stop signals with default action are processed immediately, then cleared;
1659  * they aren't returned.  This is checked after each entry to the system for
1660  * a syscall or trap (though this can usually be done without calling issignal
1661  * by checking the pending signal masks in cursig.) The normal call
1662  * sequence is
1663  *
1664  *	while (sig = cursig(curthread))
1665  *		postsig(sig);
1666  */
1667 int
1668 issignal(td)
1669 	struct thread *td;
1670 {
1671 	struct proc *p;
1672 	sigset_t mask;
1673 	register int sig, prop;
1674 
1675 	p = td->td_proc;
1676 	PROC_LOCK_ASSERT(p, MA_OWNED);
1677 	WITNESS_SLEEP(1, &p->p_mtx.mtx_object);
1678 	for (;;) {
1679 		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
1680 
1681 		mask = p->p_siglist;
1682 		SIGSETNAND(mask, p->p_sigmask);
1683 		if (p->p_flag & P_PPWAIT)
1684 			SIG_STOPSIGMASK(mask);
1685 		if (SIGISEMPTY(mask))		/* no signal to send */
1686 			return (0);
1687 		sig = sig_ffs(&mask);
1688 		prop = sigprop(sig);
1689 
1690 		_STOPEVENT(p, S_SIG, sig);
1691 
1692 		/*
1693 		 * We should see pending but ignored signals
1694 		 * only if P_TRACED was on when they were posted.
1695 		 */
1696 		if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
1697 			SIGDELSET(p->p_siglist, sig);
1698 			continue;
1699 		}
1700 		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
1701 			/*
1702 			 * If traced, always stop.
1703 			 */
1704 			p->p_xstat = sig;
1705 			PROC_LOCK(p->p_pptr);
1706 			psignal(p->p_pptr, SIGCHLD);
1707 			PROC_UNLOCK(p->p_pptr);
1708 			mtx_lock_spin(&sched_lock);
1709 			stop(p);	/* uses schedlock too eventually */
1710 			thread_suspend_one(td);
1711 			PROC_UNLOCK(p);
1712 			DROP_GIANT();
1713 			p->p_stats->p_ru.ru_nivcsw++;
1714 			mi_switch();
1715 			mtx_unlock_spin(&sched_lock);
1716 			PICKUP_GIANT();
1717 			PROC_LOCK(p);
1718 
1719 			/*
1720 			 * If the traced bit got turned off, go back up
1721 			 * to the top to rescan signals.  This ensures
1722 			 * that p_sig* and ps_sigact are consistent.
1723 			 */
1724 			if ((p->p_flag & P_TRACED) == 0)
1725 				continue;
1726 
1727 			/*
1728 			 * If parent wants us to take the signal,
1729 			 * then it will leave it in p->p_xstat;
1730 			 * otherwise we just look for signals again.
1731 			 */
1732 			SIGDELSET(p->p_siglist, sig);	/* clear old signal */
1733 			sig = p->p_xstat;
1734 			if (sig == 0)
1735 				continue;
1736 
1737 			/*
1738 			 * Put the new signal into p_siglist.  If the
1739 			 * signal is being masked, look for other signals.
1740 			 */
1741 			SIGADDSET(p->p_siglist, sig);
1742 			if (SIGISMEMBER(p->p_sigmask, sig))
1743 				continue;
1744 			signotify(p);
1745 		}
1746 
1747 		/*
1748 		 * Decide whether the signal should be returned.
1749 		 * Return the signal's number, or fall through
1750 		 * to clear it from the pending mask.
1751 		 */
1752 		switch ((int)(intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
1753 
1754 		case (int)SIG_DFL:
1755 			/*
1756 			 * Don't take default actions on system processes.
1757 			 */
1758 			if (p->p_pid <= 1) {
1759 #ifdef DIAGNOSTIC
1760 				/*
1761 				 * Are you sure you want to ignore SIGSEGV
1762 				 * in init? XXX
1763 				 */
1764 				printf("Process (pid %lu) got signal %d\n",
1765 					(u_long)p->p_pid, sig);
1766 #endif
1767 				break;		/* == ignore */
1768 			}
1769 			/*
1770 			 * If there is a pending stop signal to process
1771 			 * with default action, stop here,
1772 			 * then clear the signal.  However,
1773 			 * if process is member of an orphaned
1774 			 * process group, ignore tty stop signals.
1775 			 */
1776 			if (prop & SA_STOP) {
1777 				if (p->p_flag & P_TRACED ||
1778 		    		    (p->p_pgrp->pg_jobc == 0 &&
1779 				     prop & SA_TTYSTOP))
1780 					break;	/* == ignore */
1781 				p->p_xstat = sig;
1782 				mtx_lock_spin(&sched_lock);
1783 				if (p->p_suspcount+1 == p->p_numthreads) {
1784 					mtx_unlock_spin(&sched_lock);
1785 					PROC_LOCK(p->p_pptr);
1786 					if ((p->p_pptr->p_procsig->ps_flag &
1787 				    		PS_NOCLDSTOP) == 0) {
1788 						psignal(p->p_pptr, SIGCHLD);
1789 					}
1790 					PROC_UNLOCK(p->p_pptr);
1791 					mtx_lock_spin(&sched_lock);
1792 				}
1793 				stop(p);
1794 				thread_suspend_one(td);
1795 				PROC_UNLOCK(p);
1796 				DROP_GIANT();
1797 				p->p_stats->p_ru.ru_nivcsw++;
1798 				mi_switch();
1799 				mtx_unlock_spin(&sched_lock);
1800 				PICKUP_GIANT();
1801 				PROC_LOCK(p);
1802 				break;
1803 			} else if (prop & SA_IGNORE) {
1804 				/*
1805 				 * Except for SIGCONT, shouldn't get here.
1806 				 * Default action is to ignore; drop it.
1807 				 */
1808 				break;		/* == ignore */
1809 			} else
1810 				return (sig);
1811 			/*NOTREACHED*/
1812 
1813 		case (int)SIG_IGN:
1814 			/*
1815 			 * Masking above should prevent us ever trying
1816 			 * to take action on an ignored signal other
1817 			 * than SIGCONT, unless process is traced.
1818 			 */
1819 			if ((prop & SA_CONT) == 0 &&
1820 			    (p->p_flag & P_TRACED) == 0)
1821 				printf("issignal\n");
1822 			break;		/* == ignore */
1823 
1824 		default:
1825 			/*
1826 			 * This signal has an action, let
1827 			 * postsig() process it.
1828 			 */
1829 			return (sig);
1830 		}
1831 		SIGDELSET(p->p_siglist, sig);		/* take the signal! */
1832 	}
1833 	/* NOTREACHED */
1834 }
1835 
1836 /*
1837  * Put the argument process into the stopped state and notify the parent
1838  * via wakeup.  Signals are handled elsewhere.  The process must not be
1839  * on the run queue.  Must be called with the proc p locked and the scheduler
1840  * lock held.
1841  */
1842 static void
1843 stop(p)
1844 	register struct proc *p;
1845 {
1846 
1847 	PROC_LOCK_ASSERT(p, MA_OWNED);
1848 	p->p_flag |= P_STOPPED_SIG;
1849 	p->p_flag &= ~P_WAITED;
1850 	wakeup(p->p_pptr);
1851 }
1852 
1853 /*
1854  * Take the action for the specified signal
1855  * from the current set of pending signals.
1856  */
1857 void
1858 postsig(sig)
1859 	register int sig;
1860 {
1861 	struct thread *td = curthread;
1862 	register struct proc *p = td->td_proc;
1863 	struct sigacts *ps;
1864 	sig_t action;
1865 	sigset_t returnmask;
1866 	int code;
1867 
1868 	KASSERT(sig != 0, ("postsig"));
1869 
1870 	PROC_LOCK_ASSERT(p, MA_OWNED);
1871 	ps = p->p_sigacts;
1872 	SIGDELSET(p->p_siglist, sig);
1873 	action = ps->ps_sigact[_SIG_IDX(sig)];
1874 #ifdef KTRACE
1875 	if (KTRPOINT(td, KTR_PSIG))
1876 		ktrpsig(sig, action, p->p_flag & P_OLDMASK ?
1877 		    &p->p_oldsigmask : &p->p_sigmask, 0);
1878 #endif
1879 	_STOPEVENT(p, S_SIG, sig);
1880 
1881 	if (action == SIG_DFL) {
1882 		/*
1883 		 * Default action, where the default is to kill
1884 		 * the process.  (Other cases were ignored above.)
1885 		 */
1886 		sigexit(td, sig);
1887 		/* NOTREACHED */
1888 	} else {
1889 		/*
1890 		 * If we get here, the signal must be caught.
1891 		 */
1892 		KASSERT(action != SIG_IGN && !SIGISMEMBER(p->p_sigmask, sig),
1893 		    ("postsig action"));
1894 		/*
1895 		 * Set the new mask value and also defer further
1896 		 * occurrences of this signal.
1897 		 *
1898 		 * Special case: user has done a sigsuspend.  Here the
1899 		 * current mask is not of interest, but rather the
1900 		 * mask from before the sigsuspend is what we want
1901 		 * restored after the signal processing is completed.
1902 		 */
1903 		if (p->p_flag & P_OLDMASK) {
1904 			returnmask = p->p_oldsigmask;
1905 			p->p_flag &= ~P_OLDMASK;
1906 		} else
1907 			returnmask = p->p_sigmask;
1908 
1909 		SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1910 		if (!SIGISMEMBER(ps->ps_signodefer, sig))
1911 			SIGADDSET(p->p_sigmask, sig);
1912 
1913 		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1914 			/*
1915 			 * See kern_sigaction() for origin of this code.
1916 			 */
1917 			SIGDELSET(p->p_sigcatch, sig);
1918 			if (sig != SIGCONT &&
1919 			    sigprop(sig) & SA_IGNORE)
1920 				SIGADDSET(p->p_sigignore, sig);
1921 			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1922 		}
1923 		p->p_stats->p_ru.ru_nsignals++;
1924 		if (p->p_sig != sig) {
1925 			code = 0;
1926 		} else {
1927 			code = p->p_code;
1928 			p->p_code = 0;
1929 			p->p_sig = 0;
1930 		}
1931 		if (p->p_flag & P_KSES)
1932 			if (signal_upcall(p, sig))
1933 				return;
1934 		(*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
1935 	}
1936 }
1937 
1938 /*
1939  * Kill the specified process for the stated reason.
1940  */
1941 void
1942 killproc(p, why)
1943 	struct proc *p;
1944 	char *why;
1945 {
1946 
1947 	PROC_LOCK_ASSERT(p, MA_OWNED);
1948 	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
1949 		p, p->p_pid, p->p_comm);
1950 	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
1951 		p->p_ucred ? p->p_ucred->cr_uid : -1, why);
1952 	psignal(p, SIGKILL);
1953 }
1954 
1955 /*
1956  * Force the current process to exit with the specified signal, dumping core
1957  * if appropriate.  We bypass the normal tests for masked and caught signals,
1958  * allowing unrecoverable failures to terminate the process without changing
1959  * signal state.  Mark the accounting record with the signal termination.
1960  * If dumping core, save the signal number for the debugger.  Calls exit and
1961  * does not return.
1962  */
1963 void
1964 sigexit(td, sig)
1965 	struct thread *td;
1966 	int sig;
1967 {
1968 	struct proc *p = td->td_proc;
1969 
1970 	PROC_LOCK_ASSERT(p, MA_OWNED);
1971 	p->p_acflag |= AXSIG;
1972 	if (sigprop(sig) & SA_CORE) {
1973 		p->p_sig = sig;
1974 		/*
1975 		 * Log signals which would cause core dumps
1976 		 * (Log as LOG_INFO to appease those who don't want
1977 		 * these messages.)
1978 		 * XXX: TODO: write out the ruid as well as the euid
1979 		 */
1980 		PROC_UNLOCK(p);
1981 		if (!mtx_owned(&Giant))
1982 			mtx_lock(&Giant);
1983 		if (coredump(td) == 0)
1984 			sig |= WCOREFLAG;
1985 		if (kern_logsigexit)
1986 			log(LOG_INFO,
1987 			    "pid %d (%s), uid %d: exited on signal %d%s\n",
1988 			    p->p_pid, p->p_comm,
1989 			    td->td_ucred ? td->td_ucred->cr_uid : -1,
1990 			    sig &~ WCOREFLAG,
1991 			    sig & WCOREFLAG ? " (core dumped)" : "");
1992 	} else {
1993 		PROC_UNLOCK(p);
1994 		if (!mtx_owned(&Giant))
1995 			mtx_lock(&Giant);
1996 	}
1997 	exit1(td, W_EXITCODE(0, sig));
1998 	/* NOTREACHED */
1999 }
2000 
2001 static char corefilename[MAXPATHLEN+1] = {"%N.core"};
2002 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
2003 	      sizeof(corefilename), "process corefile name format string");
2004 
2005 /*
2006  * expand_name(name, uid, pid)
2007  * Expand the name described in corefilename, using name, uid, and pid.
2008  * corefilename is a printf-like string, with three format specifiers:
2009  *	%N	name of process ("name")
2010  *	%P	process id (pid)
2011  *	%U	user id (uid)
2012  * For example, "%N.core" is the default; core dumps can be disabled entirely
2013  * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2014  * This is controlled by the sysctl variable kern.corefile (see above).
2015  */
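/*
 * Illustrative expansion (the values here are hypothetical, not from this
 * file): with the sysctl set to, say,
 *	kern.corefile="/cores/%U/%N-%P"
 * a core dump of pid 1234 of process "sh" running as uid 1001 is written to
 * "/cores/1001/sh-1234".  An unrecognized specifier is logged and expands to
 * the empty string (see the default case in expand_name() below).
 */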
2016 
2017 static char *
2018 expand_name(name, uid, pid)
2019 	const char *name;
2020 	uid_t uid;
2021 	pid_t pid;
2022 {
2023 	const char *format, *appendstr;
2024 	char *temp;
2025 	char buf[11];		/* Buffer for pid/uid -- 10 digits + NUL */
2026 	size_t i, l, n;
2027 
2028 	format = corefilename;
2029 	temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2030 	if (temp == NULL)
2031 		return (NULL);
2032 	for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2033 		switch (format[i]) {
2034 		case '%':	/* Format character */
2035 			i++;
2036 			switch (format[i]) {
2037 			case '%':
2038 				appendstr = "%";
2039 				break;
2040 			case 'N':	/* process name */
2041 				appendstr = name;
2042 				break;
2043 			case 'P':	/* process id */
2044 				sprintf(buf, "%u", pid);
2045 				appendstr = buf;
2046 				break;
2047 			case 'U':	/* user id */
2048 				sprintf(buf, "%u", uid);
2049 				appendstr = buf;
2050 				break;
2051 			default:
2052 				appendstr = "";
2053 				log(LOG_ERR,
2054 				    "Unknown format character %c in `%s'\n",
2055 				    format[i], format);
2056 			}
2057 			l = strlen(appendstr);
2058 			if ((n + l) >= MAXPATHLEN)
2059 				goto toolong;
2060 			memcpy(temp + n, appendstr, l);
2061 			n += l;
2062 			break;
2063 		default:
2064 			temp[n++] = format[i];
2065 		}
2066 	}
2067 	if (format[i] != '\0')
2068 		goto toolong;
2069 	return (temp);
2070 toolong:
2071 	log(LOG_ERR, "pid %ld (%s), uid %lu: corename is too long\n",
2072 	    (long)pid, name, (u_long)uid);
2073 	free(temp, M_TEMP);
2074 	return (NULL);
2075 }
2076 
2077 /*
2078  * Dump a process' core.  The main routine does some
2079  * policy checking, and creates the name of the coredump;
2080  * then it passes on a vnode and a size limit to the process-specific
2081  * coredump routine if there is one; if there _is not_ one, it returns
2082  * ENOSYS; otherwise it returns the error from the process-specific routine.
2083  */
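/*
 * Orientation note (an assumption about callers, not code in this file): the
 * process-specific routine comes from the ABI's sysentvec; the ELF image
 * activator (sys/kern/imgact_elf.c) installs one, while an ABI that leaves
 * sv_coredump NULL gets the ENOSYS result described above.
 */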
2084 
2085 static int
2086 coredump(struct thread *td)
2087 {
2088 	struct proc *p = td->td_proc;
2089 	register struct vnode *vp;
2090 	register struct ucred *cred = td->td_ucred;
2091 	struct flock lf;
2092 	struct nameidata nd;
2093 	struct vattr vattr;
2094 	int error, error1, flags;
2095 	struct mount *mp;
2096 	char *name;			/* name of corefile */
2097 	off_t limit;
2098 
2099 	PROC_LOCK(p);
2100 	_STOPEVENT(p, S_CORE, 0);
2101 
2102 	if ((sugid_coredump == 0 && (p->p_flag & P_SUGID)) || do_coredump == 0) {
2103 		PROC_UNLOCK(p);
2104 		return (EFAULT);
2105 	}
2106 
2107 	/*
2108 	 * Note that the bulk of limit checking is done after
2109 	 * the corefile is created.  The exception is if the limit
2110 	 * for corefiles is 0, in which case we don't bother
2111 	 * creating the corefile at all.  This layout means that
2112 	 * a corefile larger than the limit is truncated rather
2113 	 * than simply not created.
2114 	 */
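	/*
	 * For reference (userland usage, not part of this function): the
	 * limit consulted below is RLIMIT_CORE, typically set from a shell
	 * with "ulimit -c" (sh) or "limit coredumpsize" (csh), or directly
	 * with setrlimit(RLIMIT_CORE, ...).
	 */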
2115 	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
2116 	if (limit == 0) {
2117 		PROC_UNLOCK(p);
2118 		return (0);
2119 	}
2120 	PROC_UNLOCK(p);
2121 
2122 restart:
2123 	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
2124 	if (name == NULL)
2125 		return (EINVAL);
2126 	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2127 	flags = O_CREAT | FWRITE | O_NOFOLLOW;
2128 	error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
2129 	free(name, M_TEMP);
2130 	if (error)
2131 		return (error);
2132 	NDFREE(&nd, NDF_ONLY_PNBUF);
2133 	vp = nd.ni_vp;
2134 
2135 	/* Don't dump to non-regular files or files with links. */
2136 	if (vp->v_type != VREG ||
2137 	    VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2138 		VOP_UNLOCK(vp, 0, td);
2139 		error = EFAULT;
2140 		goto out2;
2141 	}
2142 
2143 	VOP_UNLOCK(vp, 0, td);
2144 	lf.l_whence = SEEK_SET;
2145 	lf.l_start = 0;
2146 	lf.l_len = 0;
2147 	lf.l_type = F_WRLCK;
2148 	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
2149 	if (error)
2150 		goto out2;
2151 
2152 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2153 		lf.l_type = F_UNLCK;
2154 		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2155 		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2156 			return (error);
2157 		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
2158 			return (error);
2159 		goto restart;
2160 	}
2161 
2162 	VATTR_NULL(&vattr);
2163 	vattr.va_size = 0;
2164 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2165 	VOP_LEASE(vp, td, cred, LEASE_WRITE);
2166 	VOP_SETATTR(vp, &vattr, cred, td);
2167 	VOP_UNLOCK(vp, 0, td);
2168 	PROC_LOCK(p);
2169 	p->p_acflag |= ACORE;
2170 	PROC_UNLOCK(p);
2171 
2172 	error = p->p_sysent->sv_coredump ?
2173 	  p->p_sysent->sv_coredump(td, vp, limit) :
2174 	  ENOSYS;
2175 
2176 	lf.l_type = F_UNLCK;
2177 	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2178 	vn_finished_write(mp);
2179 out2:
2180 	error1 = vn_close(vp, FWRITE, cred, td);
2181 	if (error == 0)
2182 		error = error1;
2183 	return (error);
2184 }
2185 
2186 /*
2187  * Nonexistent system call; signal the process (it may want to handle it).
2188  * Flag error in case process won't see signal immediately (blocked or ignored).
2189  */
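/*
 * A hypothetical userland illustration: invoking an unimplemented system
 * call number, e.g. syscall(100000), ends up here; the process is sent
 * SIGSYS, and if it blocks or ignores that signal it still sees the
 * ENOSYS return value below.
 */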
2190 #ifndef _SYS_SYSPROTO_H_
2191 struct nosys_args {
2192 	int	dummy;
2193 };
2194 #endif
2195 /*
2196  * MPSAFE
2197  */
2198 /* ARGSUSED */
2199 int
2200 nosys(td, args)
2201 	struct thread *td;
2202 	struct nosys_args *args;
2203 {
2204 	struct proc *p = td->td_proc;
2205 
2206 	mtx_lock(&Giant);
2207 	PROC_LOCK(p);
2208 	psignal(p, SIGSYS);
2209 	PROC_UNLOCK(p);
2210 	mtx_unlock(&Giant);
2211 	return (ENOSYS);
2212 }
2213 
2214 /*
2215  * Send a SIGIO or SIGURG signal to a process or process group using
2216  * stored credentials rather than those of the current process.
2217  */
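/*
 * Typical caller, for orientation (an assumption about usage elsewhere in
 * the tree, not code in this file): the socket layer delivers async-I/O
 * notification as pgsigio(&so->so_sigio, SIGIO, 0) to whichever process or
 * process group was registered with fcntl(F_SETOWN) and O_ASYNC.
 */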
2218 void
2219 pgsigio(sigiop, sig, checkctty)
2220 	struct sigio **sigiop;
2221 	int sig, checkctty;
2222 {
2223 	struct sigio *sigio;
2224 
2225 	SIGIO_LOCK();
2226 	sigio = *sigiop;
2227 	if (sigio == NULL) {
2228 		SIGIO_UNLOCK();
2229 		return;
2230 	}
2231 	if (sigio->sio_pgid > 0) {
2232 		PROC_LOCK(sigio->sio_proc);
2233 		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
2234 			psignal(sigio->sio_proc, sig);
2235 		PROC_UNLOCK(sigio->sio_proc);
2236 	} else if (sigio->sio_pgid < 0) {
2237 		struct proc *p;
2238 
2239 		PGRP_LOCK(sigio->sio_pgrp);
2240 		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
2241 			PROC_LOCK(p);
2242 			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
2243 			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
2244 				psignal(p, sig);
2245 			PROC_UNLOCK(p);
2246 		}
2247 		PGRP_UNLOCK(sigio->sio_pgrp);
2248 	}
2249 	SIGIO_UNLOCK();
2250 }
2251 
2252 static int
2253 filt_sigattach(struct knote *kn)
2254 {
2255 	struct proc *p = curproc;
2256 
2257 	kn->kn_ptr.p_proc = p;
2258 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
2259 
2260 	PROC_LOCK(p);
2261 	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2262 	PROC_UNLOCK(p);
2263 
2264 	return (0);
2265 }
2266 
2267 static void
2268 filt_sigdetach(struct knote *kn)
2269 {
2270 	struct proc *p = kn->kn_ptr.p_proc;
2271 
2272 	PROC_LOCK(p);
2273 	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2274 	PROC_UNLOCK(p);
2275 }
2276 
2277 /*
2278  * Signal knotes are shared with proc knotes, so we apply a mask to
2279  * the hint in order to differentiate them from process hints.  This
2280  * could be avoided by using a signal-specific knote list, but probably
2281  * isn't worth the trouble.
2282  */
2283 static int
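/*
 * Userland usage sketch (assumed-typical example, not from this file): a
 * process can watch for its own signals with EVFILT_SIGNAL, e.g.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Each posting of SIGUSR1 then increments kn_data in filt_signal() below,
 * and the event fires even if the signal itself is ignored.
 */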
2284 filt_signal(struct knote *kn, long hint)
2285 {
2286 
2287 	if (hint & NOTE_SIGNAL) {
2288 		hint &= ~NOTE_SIGNAL;
2289 
2290 		if (kn->kn_id == hint)
2291 			kn->kn_data++;
2292 	}
2293 	return (kn->kn_data != 0);
2294 }
2295