xref: /freebsd/sys/kern/kern_sig.c (revision a3e8fd0b7f663db7eafff527d5c3ca3bcfa8a537)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
39  * $FreeBSD$
40  */
41 
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44 
45 #include <sys/param.h>
46 #include <sys/kernel.h>
47 #include <sys/sysproto.h>
48 #include <sys/systm.h>
49 #include <sys/signalvar.h>
50 #include <sys/namei.h>
51 #include <sys/vnode.h>
52 #include <sys/event.h>
53 #include <sys/proc.h>
54 #include <sys/pioctl.h>
55 #include <sys/acct.h>
56 #include <sys/fcntl.h>
57 #include <sys/condvar.h>
58 #include <sys/lock.h>
59 #include <sys/mutex.h>
60 #include <sys/wait.h>
61 #include <sys/ktr.h>
62 #include <sys/ktrace.h>
63 #include <sys/resourcevar.h>
64 #include <sys/smp.h>
65 #include <sys/stat.h>
66 #include <sys/sx.h>
67 #include <sys/syscallsubr.h>
68 #include <sys/syslog.h>
69 #include <sys/sysent.h>
70 #include <sys/sysctl.h>
71 #include <sys/malloc.h>
72 #include <sys/unistd.h>
73 
74 #include <machine/cpu.h>
75 
76 #define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
77 
78 static int	coredump(struct thread *);
79 static int	do_sigprocmask(struct proc *p, int how, sigset_t *set,
80 			sigset_t *oset, int old);
81 static char	*expand_name(const char *, uid_t, pid_t);
82 static int	killpg1(struct thread *td, int sig, int pgid, int all);
83 static int	sig_ffs(sigset_t *set);
84 static int	sigprop(int sig);
85 static void	stop(struct proc *);
86 static void	tdsignal(struct thread *td, int sig, sig_t action);
87 static int	filt_sigattach(struct knote *kn);
88 static void	filt_sigdetach(struct knote *kn);
89 static int	filt_signal(struct knote *kn, long hint);
90 
91 struct filterops sig_filtops =
92 	{ 0, filt_sigattach, filt_sigdetach, filt_signal };
93 
94 static int	kern_logsigexit = 1;
95 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
96     &kern_logsigexit, 0,
97     "Log processes quitting on abnormal signals to syslog(3)");
98 
99 /*
100  * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
101  * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
102  * in the right situations.
103  */
104 #define CANSIGIO(cr1, cr2) \
105 	((cr1)->cr_uid == 0 || \
106 	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
107 	    (cr1)->cr_uid == (cr2)->cr_ruid || \
108 	    (cr1)->cr_ruid == (cr2)->cr_uid || \
109 	    (cr1)->cr_uid == (cr2)->cr_uid)
110 
111 int sugid_coredump;
112 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
113     &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
114 
115 static int	do_coredump = 1;
116 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
117 	&do_coredump, 0, "Enable/Disable coredumps");
118 
119 /*
120  * Signal properties and actions.
121  * The array below categorizes the signals and their default actions
122  * according to the following properties:
123  */
124 #define	SA_KILL		0x01		/* terminates process by default */
125 #define	SA_CORE		0x02		/* ditto and coredumps */
126 #define	SA_STOP		0x04		/* suspend process */
127 #define	SA_TTYSTOP	0x08		/* ditto, from tty */
128 #define	SA_IGNORE	0x10		/* ignore by default */
129 #define	SA_CONT		0x20		/* continue if suspended */
130 #define	SA_CANTMASK	0x40		/* non-maskable, catchable */
131 
132 static int sigproptbl[NSIG] = {
133         SA_KILL,                /* SIGHUP */
134         SA_KILL,                /* SIGINT */
135         SA_KILL|SA_CORE,        /* SIGQUIT */
136         SA_KILL|SA_CORE,        /* SIGILL */
137         SA_KILL|SA_CORE,        /* SIGTRAP */
138         SA_KILL|SA_CORE,        /* SIGABRT */
139         SA_KILL|SA_CORE,        /* SIGEMT */
140         SA_KILL|SA_CORE,        /* SIGFPE */
141         SA_KILL,                /* SIGKILL */
142         SA_KILL|SA_CORE,        /* SIGBUS */
143         SA_KILL|SA_CORE,        /* SIGSEGV */
144         SA_KILL|SA_CORE,        /* SIGSYS */
145         SA_KILL,                /* SIGPIPE */
146         SA_KILL,                /* SIGALRM */
147         SA_KILL,                /* SIGTERM */
148         SA_IGNORE,              /* SIGURG */
149         SA_STOP,                /* SIGSTOP */
150         SA_STOP|SA_TTYSTOP,     /* SIGTSTP */
151         SA_IGNORE|SA_CONT,      /* SIGCONT */
152         SA_IGNORE,              /* SIGCHLD */
153         SA_STOP|SA_TTYSTOP,     /* SIGTTIN */
154         SA_STOP|SA_TTYSTOP,     /* SIGTTOU */
155         SA_IGNORE,              /* SIGIO */
156         SA_KILL,                /* SIGXCPU */
157         SA_KILL,                /* SIGXFSZ */
158         SA_KILL,                /* SIGVTALRM */
159         SA_KILL,                /* SIGPROF */
160         SA_IGNORE,              /* SIGWINCH  */
161         SA_IGNORE,              /* SIGINFO */
162         SA_KILL,                /* SIGUSR1 */
163         SA_KILL,                /* SIGUSR2 */
164 };
165 
166 /*
167  * Determine the signal that should be delivered to process p, the current
168  * process, or 0 if none.  If there is a pending stop signal with default
169  * action, the process stops in issignal().
170  * XXXKSE   the check for a pending stop is not done under KSE
171  *
172  * MP SAFE.
173  */
174 int
175 cursig(struct thread *td)
176 {
177 	struct proc *p = td->td_proc;
178 
179 	PROC_LOCK_ASSERT(p, MA_OWNED);
180 	mtx_assert(&sched_lock, MA_NOTOWNED);
181 	return (SIGPENDING(p) ? issignal(td) : 0);
182 }
183 
184 /*
185  * Arrange for ast() to handle unmasked pending signals on return to user
186  * mode.  This must be called whenever a signal is added to p_siglist or
187  * unmasked in p_sigmask.
188  */
189 void
190 signotify(struct proc *p)
191 {
192 	struct kse *ke;
193 	struct ksegrp *kg;
194 
195 	PROC_LOCK_ASSERT(p, MA_OWNED);
196 	mtx_lock_spin(&sched_lock);
197 	if (SIGPENDING(p)) {
198 		p->p_sflag |= PS_NEEDSIGCHK;
199 		/* XXXKSE for now punish all KSEs */
200 		FOREACH_KSEGRP_IN_PROC(p, kg) {
201 			FOREACH_KSE_IN_GROUP(kg, ke) {
202 				ke->ke_flags |= KEF_ASTPENDING;
203 			}
204 		}
205 	}
206 	mtx_unlock_spin(&sched_lock);
207 }
208 
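/*
 * Return the SA_* property flags for signal sig from sigproptbl, or 0
 * for a signal number outside the table's range.
 */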
209 static __inline int
210 sigprop(int sig)
211 {
212 
213 	if (sig > 0 && sig < NSIG)
214 		return (sigproptbl[_SIG_IDX(sig)]);
215 	return (0);
216 }
217 
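/*
 * Return the lowest signal number set in *set (1-based), or 0 if the
 * set is empty.
 */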
218 static __inline int
219 sig_ffs(sigset_t *set)
220 {
221 	int i;
222 
223 	for (i = 0; i < _SIG_WORDS; i++)
224 		if (set->__bits[i])
225 			return (ffs(set->__bits[i]) + (i * 32));
226 	return (0);
227 }
228 
229 /*
230  * kern_sigaction
231  * sigaction
232  * osigaction
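 *
 * Common code for the sigaction() family: if oact is non-NULL, return the
 * current action for sig in *oact; if act is non-NULL, install it as the
 * new action.  The old flag marks calls from the old (COMPAT_43) entry
 * points so that the ps_osigset bookkeeping is applied.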
233  */
234 int
235 kern_sigaction(td, sig, act, oact, old)
236 	struct thread *td;
237 	register int sig;
238 	struct sigaction *act, *oact;
239 	int old;
240 {
241 	register struct sigacts *ps;
242 	struct proc *p = td->td_proc;
243 
244 	if (!_SIG_VALID(sig))
245 		return (EINVAL);
246 
247 	PROC_LOCK(p);
248 	ps = p->p_sigacts;
249 	if (oact) {
250 		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
251 		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
252 		oact->sa_flags = 0;
253 		if (SIGISMEMBER(ps->ps_sigonstack, sig))
254 			oact->sa_flags |= SA_ONSTACK;
255 		if (!SIGISMEMBER(ps->ps_sigintr, sig))
256 			oact->sa_flags |= SA_RESTART;
257 		if (SIGISMEMBER(ps->ps_sigreset, sig))
258 			oact->sa_flags |= SA_RESETHAND;
259 		if (SIGISMEMBER(ps->ps_signodefer, sig))
260 			oact->sa_flags |= SA_NODEFER;
261 		if (SIGISMEMBER(ps->ps_siginfo, sig))
262 			oact->sa_flags |= SA_SIGINFO;
263 		if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDSTOP)
264 			oact->sa_flags |= SA_NOCLDSTOP;
265 		if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDWAIT)
266 			oact->sa_flags |= SA_NOCLDWAIT;
267 	}
268 	if (act) {
269 		if ((sig == SIGKILL || sig == SIGSTOP) &&
270 		    act->sa_handler != SIG_DFL) {
271 			PROC_UNLOCK(p);
272 			return (EINVAL);
273 		}
274 
275 		/*
276 		 * Change setting atomically.
277 		 */
278 
279 		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
280 		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
281 		if (act->sa_flags & SA_SIGINFO) {
282 			ps->ps_sigact[_SIG_IDX(sig)] =
283 			    (__sighandler_t *)act->sa_sigaction;
284 			SIGADDSET(ps->ps_siginfo, sig);
285 		} else {
286 			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
287 			SIGDELSET(ps->ps_siginfo, sig);
288 		}
289 		if (!(act->sa_flags & SA_RESTART))
290 			SIGADDSET(ps->ps_sigintr, sig);
291 		else
292 			SIGDELSET(ps->ps_sigintr, sig);
293 		if (act->sa_flags & SA_ONSTACK)
294 			SIGADDSET(ps->ps_sigonstack, sig);
295 		else
296 			SIGDELSET(ps->ps_sigonstack, sig);
297 		if (act->sa_flags & SA_RESETHAND)
298 			SIGADDSET(ps->ps_sigreset, sig);
299 		else
300 			SIGDELSET(ps->ps_sigreset, sig);
301 		if (act->sa_flags & SA_NODEFER)
302 			SIGADDSET(ps->ps_signodefer, sig);
303 		else
304 			SIGDELSET(ps->ps_signodefer, sig);
305 #ifdef COMPAT_SUNOS
306 		if (act->sa_flags & SA_USERTRAMP)
307 			SIGADDSET(ps->ps_usertramp, sig);
308 		else
309 			SIGDELSET(ps->ps_usertramp, sig);
310 #endif
311 		if (sig == SIGCHLD) {
312 			if (act->sa_flags & SA_NOCLDSTOP)
313 				p->p_procsig->ps_flag |= PS_NOCLDSTOP;
314 			else
315 				p->p_procsig->ps_flag &= ~PS_NOCLDSTOP;
316 			if (act->sa_flags & SA_NOCLDWAIT) {
317 				/*
318 				 * Paranoia: since SA_NOCLDWAIT is implemented
319 				 * by reparenting the dying child to PID 1 (and
320 				 * trusting it to reap the zombie), PID 1 itself
321 				 * is forbidden to set SA_NOCLDWAIT.
322 				 */
323 				if (p->p_pid == 1)
324 					p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
325 				else
326 					p->p_procsig->ps_flag |= PS_NOCLDWAIT;
327 			} else
328 				p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
329 			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
330 				p->p_procsig->ps_flag |= PS_CLDSIGIGN;
331 			else
332 				p->p_procsig->ps_flag &= ~PS_CLDSIGIGN;
333 		}
334 		/*
335 		 * Set bit in p_sigignore for signals that are set to SIG_IGN,
336 		 * and for signals set to SIG_DFL where the default is to
337 		 * ignore. However, don't put SIGCONT in p_sigignore, as we
338 		 * have to restart the process.
339 		 */
340 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
341 		    (sigprop(sig) & SA_IGNORE &&
342 		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
343 			/* never to be seen again */
344 			SIGDELSET(p->p_siglist, sig);
345 			if (sig != SIGCONT)
346 				/* easier in psignal */
347 				SIGADDSET(p->p_sigignore, sig);
348 			SIGDELSET(p->p_sigcatch, sig);
349 		} else {
350 			SIGDELSET(p->p_sigignore, sig);
351 			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
352 				SIGDELSET(p->p_sigcatch, sig);
353 			else
354 				SIGADDSET(p->p_sigcatch, sig);
355 		}
356 #ifdef COMPAT_43
357 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
358 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || !old)
359 			SIGDELSET(ps->ps_osigset, sig);
360 		else
361 			SIGADDSET(ps->ps_osigset, sig);
362 #endif
363 	}
364 	PROC_UNLOCK(p);
365 	return (0);
366 }
367 
368 #ifndef _SYS_SYSPROTO_H_
369 struct sigaction_args {
370 	int	sig;
371 	struct	sigaction *act;
372 	struct	sigaction *oact;
373 };
374 #endif
375 /*
376  * MPSAFE
377  */
378 /* ARGSUSED */
379 int
380 sigaction(td, uap)
381 	struct thread *td;
382 	register struct sigaction_args *uap;
383 {
384 	struct sigaction act, oact;
385 	register struct sigaction *actp, *oactp;
386 	int error;
387 
388 	mtx_lock(&Giant);
389 
390 	actp = (uap->act != NULL) ? &act : NULL;
391 	oactp = (uap->oact != NULL) ? &oact : NULL;
392 	if (actp) {
393 		error = copyin(uap->act, actp, sizeof(act));
394 		if (error)
395 			goto done2;
396 	}
397 	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
398 	if (oactp && !error) {
399 		error = copyout(oactp, uap->oact, sizeof(oact));
400 	}
401 done2:
402 	mtx_unlock(&Giant);
403 	return (error);
404 }
405 
406 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
407 #ifndef _SYS_SYSPROTO_H_
408 struct osigaction_args {
409 	int	signum;
410 	struct	osigaction *nsa;
411 	struct	osigaction *osa;
412 };
413 #endif
414 /*
415  * MPSAFE
416  */
417 /* ARGSUSED */
418 int
419 osigaction(td, uap)
420 	struct thread *td;
421 	register struct osigaction_args *uap;
422 {
423 	struct osigaction sa;
424 	struct sigaction nsa, osa;
425 	register struct sigaction *nsap, *osap;
426 	int error;
427 
428 	if (uap->signum <= 0 || uap->signum >= ONSIG)
429 		return (EINVAL);
430 
431 	nsap = (uap->nsa != NULL) ? &nsa : NULL;
432 	osap = (uap->osa != NULL) ? &osa : NULL;
433 
434 	mtx_lock(&Giant);
435 
436 	if (nsap) {
437 		error = copyin(uap->nsa, &sa, sizeof(sa));
438 		if (error)
439 			goto done2;
440 		nsap->sa_handler = sa.sa_handler;
441 		nsap->sa_flags = sa.sa_flags;
442 		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
443 	}
444 	error = kern_sigaction(td, uap->signum, nsap, osap, 1);
445 	if (osap && !error) {
446 		sa.sa_handler = osap->sa_handler;
447 		sa.sa_flags = osap->sa_flags;
448 		SIG2OSIG(osap->sa_mask, sa.sa_mask);
449 		error = copyout(&sa, uap->osa, sizeof(sa));
450 	}
451 done2:
452 	mtx_unlock(&Giant);
453 	return (error);
454 }
455 #endif /* COMPAT_43 */
456 
457 /*
458  * Initialize signal state for process 0;
459  * set to ignore signals that are ignored by default.
460  */
461 void
462 siginit(p)
463 	struct proc *p;
464 {
465 	register int i;
466 
467 	PROC_LOCK(p);
468 	for (i = 1; i <= NSIG; i++)
469 		if (sigprop(i) & SA_IGNORE && i != SIGCONT)
470 			SIGADDSET(p->p_sigignore, i);
471 	PROC_UNLOCK(p);
472 }
473 
474 /*
475  * Reset signals for an exec of the specified process.
476  */
477 void
478 execsigs(p)
479 	register struct proc *p;
480 {
481 	register struct sigacts *ps;
482 	register int sig;
483 
484 	/*
485 	 * Reset caught signals.  Held signals remain held
486 	 * through p_sigmask (unless they were caught,
487 	 * and are now ignored by default).
488 	 */
489 	PROC_LOCK_ASSERT(p, MA_OWNED);
490 	ps = p->p_sigacts;
491 	while (SIGNOTEMPTY(p->p_sigcatch)) {
492 		sig = sig_ffs(&p->p_sigcatch);
493 		SIGDELSET(p->p_sigcatch, sig);
494 		if (sigprop(sig) & SA_IGNORE) {
495 			if (sig != SIGCONT)
496 				SIGADDSET(p->p_sigignore, sig);
497 			SIGDELSET(p->p_siglist, sig);
498 		}
499 		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
500 	}
501 	/*
502 	 * Reset stack state to the user stack.
503 	 * Clear set of signals caught on the signal stack.
504 	 */
505 	p->p_sigstk.ss_flags = SS_DISABLE;
506 	p->p_sigstk.ss_size = 0;
507 	p->p_sigstk.ss_sp = 0;
508 	p->p_flag &= ~P_ALTSTACK;
509 	/*
510 	 * Reset no zombies if child dies flag as Solaris does.
511 	 * Reset the "no zombies if child dies" flag, as Solaris does.
512 	p->p_procsig->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
513 	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
514 		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
515 }
516 
517 /*
518  * do_sigprocmask()
519  *
520  *	Manipulate signal mask.
521  */
522 static int
523 do_sigprocmask(p, how, set, oset, old)
524 	struct proc *p;
525 	int how;
526 	sigset_t *set, *oset;
527 	int old;
528 {
529 	int error;
530 
531 	PROC_LOCK(p);
532 	if (oset != NULL)
533 		*oset = p->p_sigmask;
534 
535 	error = 0;
536 	if (set != NULL) {
537 		switch (how) {
538 		case SIG_BLOCK:
539 			SIG_CANTMASK(*set);
540 			SIGSETOR(p->p_sigmask, *set);
541 			break;
542 		case SIG_UNBLOCK:
543 			SIGSETNAND(p->p_sigmask, *set);
544 			signotify(p);
545 			break;
546 		case SIG_SETMASK:
547 			SIG_CANTMASK(*set);
548 			if (old)
549 				SIGSETLO(p->p_sigmask, *set);
550 			else
551 				p->p_sigmask = *set;
552 			signotify(p);
553 			break;
554 		default:
555 			error = EINVAL;
556 			break;
557 		}
558 	}
559 	PROC_UNLOCK(p);
560 	return (error);
561 }
562 
563 /*
564  * sigprocmask() - MP SAFE (XXXKSE not under KSE it isn't)
565  */
566 
567 #ifndef _SYS_SYSPROTO_H_
568 struct sigprocmask_args {
569 	int	how;
570 	const sigset_t *set;
571 	sigset_t *oset;
572 };
573 #endif
574 int
575 sigprocmask(td, uap)
576 	register struct thread *td;
577 	struct sigprocmask_args *uap;
578 {
579 	struct proc *p = td->td_proc;
580 	sigset_t set, oset;
581 	sigset_t *setp, *osetp;
582 	int error;
583 
584 	setp = (uap->set != NULL) ? &set : NULL;
585 	osetp = (uap->oset != NULL) ? &oset : NULL;
586 	if (setp) {
587 		error = copyin(uap->set, setp, sizeof(set));
588 		if (error)
589 			return (error);
590 	}
591 	error = do_sigprocmask(p, uap->how, setp, osetp, 0);
592 	if (osetp && !error) {
593 		error = copyout(osetp, uap->oset, sizeof(oset));
594 	}
595 	return (error);
596 }
597 
598 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
599 /*
600  * osigprocmask() - MP SAFE
601  */
602 #ifndef _SYS_SYSPROTO_H_
603 struct osigprocmask_args {
604 	int	how;
605 	osigset_t mask;
606 };
607 #endif
608 int
609 osigprocmask(td, uap)
610 	register struct thread *td;
611 	struct osigprocmask_args *uap;
612 {
613 	struct proc *p = td->td_proc;
614 	sigset_t set, oset;
615 	int error;
616 
617 	OSIG2SIG(uap->mask, set);
618 	error = do_sigprocmask(p, uap->how, &set, &oset, 1);
619 	SIG2OSIG(oset, td->td_retval[0]);
620 	return (error);
621 }
622 #endif /* COMPAT_43 */
623 
624 #ifndef _SYS_SYSPROTO_H_
625 struct sigpending_args {
626 	sigset_t	*set;
627 };
628 #endif
629 /*
630  * MPSAFE
631  */
632 /* ARGSUSED */
633 int
634 sigpending(td, uap)
635 	struct thread *td;
636 	struct sigpending_args *uap;
637 {
638 	struct proc *p = td->td_proc;
639 	sigset_t siglist;
640 	int error;
641 
642 	mtx_lock(&Giant);
643 	PROC_LOCK(p);
644 	siglist = p->p_siglist;
645 	PROC_UNLOCK(p);
646 	mtx_unlock(&Giant);
647 	error = copyout(&siglist, uap->set, sizeof(sigset_t));
648 	return(error);
649 }
650 
651 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
652 #ifndef _SYS_SYSPROTO_H_
653 struct osigpending_args {
654 	int	dummy;
655 };
656 #endif
657 /*
658  * MPSAFE
659  */
660 /* ARGSUSED */
661 int
662 osigpending(td, uap)
663 	struct thread *td;
664 	struct osigpending_args *uap;
665 {
666 	struct proc *p = td->td_proc;
667 
668 	mtx_lock(&Giant);
669 	PROC_LOCK(p);
670 	SIG2OSIG(p->p_siglist, td->td_retval[0]);
671 	PROC_UNLOCK(p);
672 	mtx_unlock(&Giant);
673 	return (0);
674 }
675 #endif /* COMPAT_43 */
676 
677 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
678 /*
679  * Generalized interface signal handler, 4.3-compatible.
680  */
681 #ifndef _SYS_SYSPROTO_H_
682 struct osigvec_args {
683 	int	signum;
684 	struct	sigvec *nsv;
685 	struct	sigvec *osv;
686 };
687 #endif
688 /*
689  * MPSAFE
690  */
691 /* ARGSUSED */
692 int
693 osigvec(td, uap)
694 	struct thread *td;
695 	register struct osigvec_args *uap;
696 {
697 	struct sigvec vec;
698 	struct sigaction nsa, osa;
699 	register struct sigaction *nsap, *osap;
700 	int error;
701 
702 	if (uap->signum <= 0 || uap->signum >= ONSIG)
703 		return (EINVAL);
704 	nsap = (uap->nsv != NULL) ? &nsa : NULL;
705 	osap = (uap->osv != NULL) ? &osa : NULL;
706 	if (nsap) {
707 		error = copyin(uap->nsv, &vec, sizeof(vec));
708 		if (error)
709 			return (error);
710 		nsap->sa_handler = vec.sv_handler;
711 		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
712 		nsap->sa_flags = vec.sv_flags;
713 		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
714 #ifdef COMPAT_SUNOS
715 		nsap->sa_flags |= SA_USERTRAMP;
716 #endif
717 	}
718 	mtx_lock(&Giant);
719 	error = kern_sigaction(td, uap->signum, nsap, osap, 1);
720 	mtx_unlock(&Giant);
721 	if (osap && !error) {
722 		vec.sv_handler = osap->sa_handler;
723 		SIG2OSIG(osap->sa_mask, vec.sv_mask);
724 		vec.sv_flags = osap->sa_flags;
725 		vec.sv_flags &= ~SA_NOCLDWAIT;
726 		vec.sv_flags ^= SA_RESTART;
727 #ifdef COMPAT_SUNOS
728 		vec.sv_flags &= ~SA_NOCLDSTOP;
729 #endif
730 		error = copyout(&vec, uap->osv, sizeof(vec));
731 	}
732 	return (error);
733 }
734 
735 #ifndef _SYS_SYSPROTO_H_
736 struct osigblock_args {
737 	int	mask;
738 };
739 #endif
740 /*
741  * MPSAFE
742  */
743 int
744 osigblock(td, uap)
745 	register struct thread *td;
746 	struct osigblock_args *uap;
747 {
748 	struct proc *p = td->td_proc;
749 	sigset_t set;
750 
751 	OSIG2SIG(uap->mask, set);
752 	SIG_CANTMASK(set);
753 	mtx_lock(&Giant);
754 	PROC_LOCK(p);
755 	SIG2OSIG(p->p_sigmask, td->td_retval[0]);
756 	SIGSETOR(p->p_sigmask, set);
757 	PROC_UNLOCK(p);
758 	mtx_unlock(&Giant);
759 	return (0);
760 }
761 
762 #ifndef _SYS_SYSPROTO_H_
763 struct osigsetmask_args {
764 	int	mask;
765 };
766 #endif
767 /*
768  * MPSAFE
769  */
770 int
771 osigsetmask(td, uap)
772 	struct thread *td;
773 	struct osigsetmask_args *uap;
774 {
775 	struct proc *p = td->td_proc;
776 	sigset_t set;
777 
778 	OSIG2SIG(uap->mask, set);
779 	SIG_CANTMASK(set);
780 	mtx_lock(&Giant);
781 	PROC_LOCK(p);
782 	SIG2OSIG(p->p_sigmask, td->td_retval[0]);
783 	SIGSETLO(p->p_sigmask, set);
784 	signotify(p);
785 	PROC_UNLOCK(p);
786 	mtx_unlock(&Giant);
787 	return (0);
788 }
789 #endif /* COMPAT_43 || COMPAT_SUNOS */
790 
791 /*
792  * Suspend process until signal, providing mask to be set
793  * in the meantime.  Note nonstandard calling convention:
794  * libc stub passes mask, not pointer, to save a copyin.
795  ***** XXXKSE this doesn't make sense under KSE.
796  ***** Do we suspend the thread or all threads in the process?
797  ***** How do we suspend threads running NOW on another processor?
798  */
799 #ifndef _SYS_SYSPROTO_H_
800 struct sigsuspend_args {
801 	const sigset_t *sigmask;
802 };
803 #endif
804 /*
805  * MPSAFE
806  */
807 /* ARGSUSED */
808 int
809 sigsuspend(td, uap)
810 	struct thread *td;
811 	struct sigsuspend_args *uap;
812 {
813 	sigset_t mask;
814 	int error;
815 
816 	error = copyin(uap->sigmask, &mask, sizeof(mask));
817 	if (error)
818 		return (error);
819 	return (kern_sigsuspend(td, mask));
820 }
821 
822 int
823 kern_sigsuspend(struct thread *td, sigset_t mask)
824 {
825 	struct proc *p = td->td_proc;
826 	register struct sigacts *ps;
827 
828 	/*
829 	 * When returning from sigsuspend, we want
830 	 * the old mask to be restored after the
831 	 * signal handler has finished.  Thus, we
832 	 * save it here and mark the sigacts structure
833 	 * to indicate this.
834 	 */
835 	mtx_lock(&Giant);
836 	PROC_LOCK(p);
837 	ps = p->p_sigacts;
838 	p->p_oldsigmask = p->p_sigmask;
839 	p->p_flag |= P_OLDMASK;
840 
841 	SIG_CANTMASK(mask);
842 	p->p_sigmask = mask;
843 	signotify(p);
844 	while (msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
845 		/* void */;
846 	PROC_UNLOCK(p);
847 	mtx_unlock(&Giant);
848 	/* always return EINTR rather than ERESTART... */
849 	return (EINTR);
850 }
851 
852 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
853 #ifndef _SYS_SYSPROTO_H_
854 struct osigsuspend_args {
855 	osigset_t mask;
856 };
857 #endif
858 /*
859  * MPSAFE
860  */
861 /* ARGSUSED */
862 int
863 osigsuspend(td, uap)
864 	struct thread *td;
865 	struct osigsuspend_args *uap;
866 {
867 	struct proc *p = td->td_proc;
868 	sigset_t mask;
869 	register struct sigacts *ps;
870 
871 	mtx_lock(&Giant);
872 	PROC_LOCK(p);
873 	ps = p->p_sigacts;
874 	p->p_oldsigmask = p->p_sigmask;
875 	p->p_flag |= P_OLDMASK;
876 	OSIG2SIG(uap->mask, mask);
877 	SIG_CANTMASK(mask);
878 	SIGSETLO(p->p_sigmask, mask);
879 	signotify(p);
880 	while (msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
881 		/* void */;
882 	PROC_UNLOCK(p);
883 	mtx_unlock(&Giant);
884 	/* always return EINTR rather than ERESTART... */
885 	return (EINTR);
886 }
887 #endif /* COMPAT_43 */
888 
889 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
890 #ifndef _SYS_SYSPROTO_H_
891 struct osigstack_args {
892 	struct	sigstack *nss;
893 	struct	sigstack *oss;
894 };
895 #endif
896 /*
897  * MPSAFE
898  */
899 /* ARGSUSED */
900 int
901 osigstack(td, uap)
902 	struct thread *td;
903 	register struct osigstack_args *uap;
904 {
905 	struct proc *p = td->td_proc;
906 	struct sigstack ss;
907 	int error = 0;
908 
909 	mtx_lock(&Giant);
910 
911 	if (uap->oss != NULL) {
912 		PROC_LOCK(p);
913 		ss.ss_sp = p->p_sigstk.ss_sp;
914 		ss.ss_onstack = sigonstack(cpu_getstack(td));
915 		PROC_UNLOCK(p);
916 		error = copyout(&ss, uap->oss, sizeof(struct sigstack));
917 		if (error)
918 			goto done2;
919 	}
920 
921 	if (uap->nss != NULL) {
922 		if ((error = copyin(uap->nss, &ss, sizeof(ss))) != 0)
923 			goto done2;
924 		PROC_LOCK(p);
925 		p->p_sigstk.ss_sp = ss.ss_sp;
926 		p->p_sigstk.ss_size = 0;
927 		p->p_sigstk.ss_flags |= ss.ss_onstack & SS_ONSTACK;
928 		p->p_flag |= P_ALTSTACK;
929 		PROC_UNLOCK(p);
930 	}
931 done2:
932 	mtx_unlock(&Giant);
933 	return (error);
934 }
935 #endif /* COMPAT_43 || COMPAT_SUNOS */
936 
937 #ifndef _SYS_SYSPROTO_H_
938 struct sigaltstack_args {
939 	stack_t	*ss;
940 	stack_t	*oss;
941 };
942 #endif
943 /*
944  * MPSAFE
945  */
946 /* ARGSUSED */
947 int
948 sigaltstack(td, uap)
949 	struct thread *td;
950 	register struct sigaltstack_args *uap;
951 {
952 	stack_t ss, oss;
953 	int error;
954 
955 	if (uap->ss != NULL) {
956 		error = copyin(uap->ss, &ss, sizeof(ss));
957 		if (error)
958 			return (error);
959 	}
960 	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
961 	    (uap->oss != NULL) ? &oss : NULL);
962 	if (error)
963 		return (error);
964 	if (uap->oss != NULL)
965 		error = copyout(&oss, uap->oss, sizeof(stack_t));
966 	return (error);
967 }
968 
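/*
 * Common code for sigaltstack(): if oss is non-NULL, report the current
 * alternate stack; if ss is non-NULL, install it.  Changing the stack is
 * refused while running on it (EPERM), with unknown flags (EINVAL), or
 * with a size below the ABI's minimum (ENOMEM).
 */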
969 int
970 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
971 {
972 	struct proc *p = td->td_proc;
973 	int oonstack;
974 	int error = 0;
975 
976 	mtx_lock(&Giant);
977 
978 	oonstack = sigonstack(cpu_getstack(td));
979 
980 	if (oss != NULL) {
981 		PROC_LOCK(p);
982 		*oss = p->p_sigstk;
983 		oss->ss_flags = (p->p_flag & P_ALTSTACK)
984 		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
985 		PROC_UNLOCK(p);
986 	}
987 
988 	if (ss != NULL) {
989 		if (oonstack) {
990 			error = EPERM;
991 			goto done2;
992 		}
993 		if ((ss->ss_flags & ~SS_DISABLE) != 0) {
994 			error = EINVAL;
995 			goto done2;
996 		}
997 		if (!(ss->ss_flags & SS_DISABLE)) {
998 			if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
999 				error = ENOMEM;
1000 				goto done2;
1001 			}
1002 			PROC_LOCK(p);
1003 			p->p_sigstk = *ss;
1004 			p->p_flag |= P_ALTSTACK;
1005 			PROC_UNLOCK(p);
1006 		} else {
1007 			PROC_LOCK(p);
1008 			p->p_flag &= ~P_ALTSTACK;
1009 			PROC_UNLOCK(p);
1010 		}
1011 	}
1012 done2:
1013 	mtx_unlock(&Giant);
1014 	return (error);
1015 }
1016 
1017 /*
1018  * Common code for kill process group/broadcast kill.
1019  * cp is calling process.
1020  */
1021 static int
1022 killpg1(td, sig, pgid, all)
1023 	register struct thread *td;
1024 	int sig, pgid, all;
1025 {
1026 	register struct proc *p;
1027 	struct pgrp *pgrp;
1028 	int nfound = 0;
1029 
1030 	if (all) {
1031 		/*
1032 		 * broadcast
1033 		 */
1034 		sx_slock(&allproc_lock);
1035 		LIST_FOREACH(p, &allproc, p_list) {
1036 			PROC_LOCK(p);
1037 			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1038 			    p == td->td_proc) {
1039 				PROC_UNLOCK(p);
1040 				continue;
1041 			}
1042 			if (p_cansignal(td, p, sig) == 0) {
1043 				nfound++;
1044 				if (sig)
1045 					psignal(p, sig);
1046 			}
1047 			PROC_UNLOCK(p);
1048 		}
1049 		sx_sunlock(&allproc_lock);
1050 	} else {
1051 		sx_slock(&proctree_lock);
1052 		if (pgid == 0) {
1053 			/*
1054 			 * zero pgid means send to my process group.
1055 			 */
1056 			pgrp = td->td_proc->p_pgrp;
1057 			PGRP_LOCK(pgrp);
1058 		} else {
1059 			pgrp = pgfind(pgid);
1060 			if (pgrp == NULL) {
1061 				sx_sunlock(&proctree_lock);
1062 				return (ESRCH);
1063 			}
1064 		}
1065 		sx_sunlock(&proctree_lock);
1066 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1067 			PROC_LOCK(p);
1068 			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
1069 				PROC_UNLOCK(p);
1070 				continue;
1071 			}
1072 			if (p->p_state == PRS_ZOMBIE) {
1073 				PROC_UNLOCK(p);
1074 				continue;
1075 			}
1076 			if (p_cansignal(td, p, sig) == 0) {
1077 				nfound++;
1078 				if (sig)
1079 					psignal(p, sig);
1080 			}
1081 			PROC_UNLOCK(p);
1082 		}
1083 		PGRP_UNLOCK(pgrp);
1084 	}
1085 	return (nfound ? 0 : ESRCH);
1086 }
1087 
1088 #ifndef _SYS_SYSPROTO_H_
1089 struct kill_args {
1090 	int	pid;
1091 	int	signum;
1092 };
1093 #endif
1094 /*
1095  * MPSAFE
1096  */
1097 /* ARGSUSED */
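/*
 * kill(): send signal signum to the process or group named by pid:
 * pid > 0 names a single process, pid == 0 the caller's process group,
 * pid == -1 broadcasts to all signallable processes, and pid < -1 names
 * the process group -pid.
 */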
1098 int
1099 kill(td, uap)
1100 	register struct thread *td;
1101 	register struct kill_args *uap;
1102 {
1103 	register struct proc *p;
1104 	int error = 0;
1105 
1106 	if ((u_int)uap->signum > _SIG_MAXSIG)
1107 		return (EINVAL);
1108 
1109 	mtx_lock(&Giant);
1110 	if (uap->pid > 0) {
1111 		/* kill single process */
1112 		if ((p = pfind(uap->pid)) == NULL) {
1113 			error = ESRCH;
1114 		} else if ((error = p_cansignal(td, p, uap->signum)) != 0) {
1115 			PROC_UNLOCK(p);
1116 		} else {
1117 			if (uap->signum)
1118 				psignal(p, uap->signum);
1119 			PROC_UNLOCK(p);
1120 			error = 0;
1121 		}
1122 	} else {
1123 		switch (uap->pid) {
1124 		case -1:		/* broadcast signal */
1125 			error = killpg1(td, uap->signum, 0, 1);
1126 			break;
1127 		case 0:			/* signal own process group */
1128 			error = killpg1(td, uap->signum, 0, 0);
1129 			break;
1130 		default:		/* negative explicit process group */
1131 			error = killpg1(td, uap->signum, -uap->pid, 0);
1132 			break;
1133 		}
1134 	}
1135 	mtx_unlock(&Giant);
1136 	return(error);
1137 }
1138 
1139 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1140 #ifndef _SYS_SYSPROTO_H_
1141 struct okillpg_args {
1142 	int	pgid;
1143 	int	signum;
1144 };
1145 #endif
1146 /*
1147  * MPSAFE
1148  */
1149 /* ARGSUSED */
1150 int
1151 okillpg(td, uap)
1152 	struct thread *td;
1153 	register struct okillpg_args *uap;
1154 {
1155 	int error;
1156 
1157 	if ((u_int)uap->signum > _SIG_MAXSIG)
1158 		return (EINVAL);
1159 	mtx_lock(&Giant);
1160 	error = killpg1(td, uap->signum, uap->pgid, 0);
1161 	mtx_unlock(&Giant);
1162 	return (error);
1163 }
1164 #endif /* COMPAT_43 || COMPAT_SUNOS */
1165 
1166 /*
1167  * Send a signal to a process group.
1168  */
1169 void
1170 gsignal(pgid, sig)
1171 	int pgid, sig;
1172 {
1173 	struct pgrp *pgrp;
1174 
1175 	if (pgid != 0) {
1176 		sx_slock(&proctree_lock);
1177 		pgrp = pgfind(pgid);
1178 		sx_sunlock(&proctree_lock);
1179 		if (pgrp != NULL) {
1180 			pgsignal(pgrp, sig, 0);
1181 			PGRP_UNLOCK(pgrp);
1182 		}
1183 	}
1184 }
1185 
1186 /*
1187  * Send a signal to a process group.  If checktty is 1,
1188  * limit to members which have a controlling terminal.
1189  */
1190 void
1191 pgsignal(pgrp, sig, checkctty)
1192 	struct pgrp *pgrp;
1193 	int sig, checkctty;
1194 {
1195 	register struct proc *p;
1196 
1197 	if (pgrp) {
1198 		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1199 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1200 			PROC_LOCK(p);
1201 			if (checkctty == 0 || p->p_flag & P_CONTROLT)
1202 				psignal(p, sig);
1203 			PROC_UNLOCK(p);
1204 		}
1205 	}
1206 }
1207 
1208 /*
1209  * Send a signal caused by a trap to the current process.
1210  * If it will be caught immediately, deliver it with correct code.
1211  * Otherwise, post it normally.
1212  *
1213  * MPSAFE
1214  */
1215 void
1216 trapsignal(p, sig, code)
1217 	struct proc *p;
1218 	register int sig;
1219 	u_long code;
1220 {
1221 	register struct sigacts *ps = p->p_sigacts;
1222 
1223 	PROC_LOCK(p);
1224 	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
1225 	    !SIGISMEMBER(p->p_sigmask, sig)) {
1226 		p->p_stats->p_ru.ru_nsignals++;
1227 #ifdef KTRACE
1228 		if (KTRPOINT(curthread, KTR_PSIG))
1229 			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1230 			    &p->p_sigmask, code);
1231 #endif
1232 		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
1233 						&p->p_sigmask, code);
1234 		SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1235 		if (!SIGISMEMBER(ps->ps_signodefer, sig))
1236 			SIGADDSET(p->p_sigmask, sig);
1237 		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1238 			/*
1239 			 * See kern_sigaction() for origin of this code.
1240 			 */
1241 			SIGDELSET(p->p_sigcatch, sig);
1242 			if (sig != SIGCONT &&
1243 			    sigprop(sig) & SA_IGNORE)
1244 				SIGADDSET(p->p_sigignore, sig);
1245 			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1246 		}
1247 	} else {
1248 		p->p_code = code;	/* XXX for core dump/debugger */
1249 		p->p_sig = sig;		/* XXX to verify code */
1250 		psignal(p, sig);
1251 	}
1252 	PROC_UNLOCK(p);
1253 }
1254 
1255 /*
1256  * Send the signal to the process.  If the signal has an action, the action
1257  * is usually performed by the target process rather than the caller; we add
1258  * the signal to the set of pending signals for the process.
1259  *
1260  * Exceptions:
1261  *   o When a stop signal is sent to a sleeping process that takes the
1262  *     default action, the process is stopped without awakening it.
1263  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
1264  *     regardless of the signal action (eg, blocked or ignored).
1265  *
1266  * Other ignored signals are discarded immediately.
1267  */
1268 void
1269 psignal(p, sig)
1270 	register struct proc *p;
1271 	register int sig;
1272 {
1273 	register sig_t action;
1274 	struct thread *td;
1275 	register int prop;
1276 
1277 
1278 	KASSERT(_SIG_VALID(sig),
1279 	    ("psignal(): invalid signal %d\n", sig));
1280 
1281 	PROC_LOCK_ASSERT(p, MA_OWNED);
1282 	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
1283 
1284 	prop = sigprop(sig);
1285 	/*
1286 	 * If proc is traced, always give parent a chance;
1287 	 * if signal event is tracked by procfs, give *that*
1288 	 * a chance, as well.
1289 	 */
1290 	if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
1291 		action = SIG_DFL;
1292 	} else {
1293 		/*
1294 		 * If the signal is being ignored,
1295 		 * then we forget about it immediately.
1296 		 * (Note: we don't set SIGCONT in p_sigignore,
1297 		 * and if it is set to SIG_IGN,
1298 		 * action will be SIG_DFL here.)
1299 		 */
1300 		if (SIGISMEMBER(p->p_sigignore, sig) || (p->p_flag & P_WEXIT))
1301 			return;
1302 		if (SIGISMEMBER(p->p_sigmask, sig))
1303 			action = SIG_HOLD;
1304 		else if (SIGISMEMBER(p->p_sigcatch, sig))
1305 			action = SIG_CATCH;
1306 		else
1307 			action = SIG_DFL;
1308 	}
1309 
1310 	if (prop & SA_CONT)
1311 		SIG_STOPSIGMASK(p->p_siglist);
1312 
1313 	if (prop & SA_STOP) {
1314 		/*
1315 		 * If sending a tty stop signal to a member of an orphaned
1316 		 * process group, discard the signal here if the action
1317 		 * is default; don't stop the process below if sleeping,
1318 		 * and don't clear any pending SIGCONT.
1319 		 */
1320 		if ((prop & SA_TTYSTOP) &&
1321 		    (p->p_pgrp->pg_jobc == 0) &&
1322 		    (action == SIG_DFL))
1323 		        return;
1324 		SIG_CONTSIGMASK(p->p_siglist);
1325 		p->p_flag &= ~P_CONTINUED;
1326 	}
1327 	SIGADDSET(p->p_siglist, sig);
1328 	signotify(p);			/* uses schedlock */
1329 
1330 	/*
1331 	 * Some signals have a process-wide effect and a per-thread
1332 	 * component.  Most processing occurs when the process next
1333 	 * tries to cross the user boundary; however, there are some
1334 	 * times when processing needs to be done immediately, such as
1335 	 * waking up threads so that they can cross the user boundary.
1336 	 * We try to do the per-process part here.
1337 	 */
1338 	if (P_SHOULDSTOP(p)) {
1339 		/*
1340 		 * The process is in stopped mode. All the threads should be
1341 		 * either winding down or already on the suspended queue.
1342 		 */
1343 		if (p->p_flag & P_TRACED) {
1344 			/*
1345 			 * The traced process is already stopped,
1346 			 * so no further action is necessary.
1347 			 * No signal can restart us.
1348 			 */
1349 			goto out;
1350 		}
1351 
1352 		if (sig == SIGKILL) {
1353 			/*
1354 			 * SIGKILL sets process running.
1355 			 * It will die elsewhere.
1356 			 * All threads must be restarted.
1357 			 */
1358 			p->p_flag &= ~P_STOPPED;
1359 			goto runfast;
1360 		}
1361 
1362 		if (prop & SA_CONT) {
1363 			/*
1364 			 * If SIGCONT is default (or ignored), we continue the
1365 			 * process but don't leave the signal in p_siglist as
1366 			 * it has no further action.  If SIGCONT is held, we
1367 			 * continue the process and leave the signal in
1368 			 * p_siglist.  If the process catches SIGCONT, let it
1369 			 * handle the signal itself.  If it isn't waiting on
1370 			 * an event, it goes back to run state.
1371 			 * Otherwise, process goes back to sleep state.
1372 			 */
1373 			p->p_flag &= ~P_STOPPED_SIG;
1374 			p->p_flag |= P_CONTINUED;
1375 			if (action == SIG_DFL) {
1376 				SIGDELSET(p->p_siglist, sig);
1377 			} else if (action == SIG_CATCH) {
1378 				/*
1379 				 * The process wants to catch it so it needs
1380 				 * to run at least one thread, but which one?
1381 				 * It would seem that the answer would be to
1382 				 * run an upcall in the next KSE to run, and
1383 				 * deliver the signal that way. In a NON KSE
1384 				 * process, we need to make sure that the
1385 				 * single thread is runnable asap.
1386 				 * XXXKSE for now however, make them all run.
1387 				 */
1388 				goto runfast;
1389 			}
1390 			/*
1391 			 * The signal is not ignored or caught.
1392 			 */
1393 			mtx_lock_spin(&sched_lock);
1394 			thread_unsuspend(p);
1395 			mtx_unlock_spin(&sched_lock);
1396 			goto out;
1397 		}
1398 
1399 		if (prop & SA_STOP) {
1400 			/*
1401 			 * Already stopped, don't need to stop again
1402 			 * (If we did, the shell could get confused).
1403 			 * Just make sure the signal STOP bit is set.
1404 			 */
1405 			p->p_flag |= P_STOPPED_SIG;
1406 			SIGDELSET(p->p_siglist, sig);
1407 			goto out;
1408 		}
1409 
1410 		/*
1411 		 * All other kinds of signals:
1412 		 * If a thread is sleeping interruptibly, simulate a
1413 		 * wakeup so that when it is continued it will be made
1414 		 * runnable and can look at the signal.  However, don't make
1415 		 * the PROCESS runnable, leave it stopped.
1416 		 * It may run a bit until it hits a thread_suspend_check().
1417 		 */
1418 		mtx_lock_spin(&sched_lock);
1419 		FOREACH_THREAD_IN_PROC(p, td) {
1420 			if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
1421 				if (td->td_flags & TDF_CVWAITQ)
1422 					cv_abort(td);
1423 				else
1424 					abortsleep(td);
1425 			}
1426 		}
1427 		mtx_unlock_spin(&sched_lock);
1428 		goto out;
1429 		/*
1430 		 * XXXKSE  What about threads that are waiting on mutexes?
1431 		 * Shouldn't they abort too?
1432 		 * No, hopefully mutexes are short-lived.  They'll
1433 		 * eventually hit thread_suspend_check().
1434 		 */
1435 	}  else if (p->p_state == PRS_NORMAL) {
1436 		if (prop & SA_CONT) {
1437 			/*
1438 			 * Already active, don't need to start again.
1439 			 */
1440 			SIGDELSET(p->p_siglist, sig);
1441 			goto out;
1442 		}
1443 		if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
1444 			!(prop & SA_STOP)) {
1445 			mtx_lock_spin(&sched_lock);
1446 			FOREACH_THREAD_IN_PROC(p, td)
1447 				tdsignal(td, sig, action);
1448 			mtx_unlock_spin(&sched_lock);
1449 			goto out;
1450 		}
1451 		if (prop & SA_STOP) {
1452 			if (p->p_flag & P_PPWAIT)
1453 				goto out;
1454 			mtx_lock_spin(&sched_lock);
1455 			FOREACH_THREAD_IN_PROC(p, td) {
1456 				if (TD_IS_SLEEPING(td) &&
1457 					(td->td_flags & TDF_SINTR))
1458 					thread_suspend_one(td);
1459 			}
1460 			if (p->p_suspcount == p->p_numthreads) {
1461 				mtx_unlock_spin(&sched_lock);
1462 				stop(p);
1463 				p->p_xstat = sig;
1464 				SIGDELSET(p->p_siglist, sig);
1465 				PROC_LOCK(p->p_pptr);
1466 				if ((p->p_pptr->p_procsig->ps_flag &
1467 					PS_NOCLDSTOP) == 0) {
1468 					psignal(p->p_pptr, SIGCHLD);
1469 				}
1470 				PROC_UNLOCK(p->p_pptr);
1471 			} else {
1472 				mtx_unlock_spin(&sched_lock);
1473 			}
1474 			goto out;
1475 		}
1476 		else
1477 			goto runfast;
1478 		/* NOTREACHED */
1479 	} else {
1480 		/* Not in "NORMAL" state.  Discard the signal. */
1481 		SIGDELSET(p->p_siglist, sig);
1482 		goto out;
1483 	}
1484 
1485 	/*
1486 	 * The process is not stopped so we need to apply the signal to all the
1487 	 * running threads.
1488 	 */
1489 
1490 runfast:
1491 	mtx_lock_spin(&sched_lock);
1492 	FOREACH_THREAD_IN_PROC(p, td)
1493 		tdsignal(td, sig, action);
1494 	thread_unsuspend(p);
1495 	mtx_unlock_spin(&sched_lock);
1496 out:
1497 	/* If we jump here, sched_lock should not be owned. */
1498 	mtx_assert(&sched_lock, MA_NOTOWNED);
1499 }
1500 
1501 /*
1502  * The force of a signal has been directed against a single
1503  * thread. We need to see what we can do about knocking it
1504  * out of any sleep it may be in etc.
1505  */
1506 static void
1507 tdsignal(struct thread *td, int sig, sig_t action)
1508 {
1509 	struct proc *p = td->td_proc;
1510 	register int prop;
1511 
1512 	mtx_assert(&sched_lock, MA_OWNED);
1513 	prop = sigprop(sig);
1514 	/*
1515 	 * Bring the priority of a thread up if we want it to get
1516 	 * killed in this lifetime.
1517 	 */
1518 	if ((action == SIG_DFL) && (prop & SA_KILL)) {
1519 		if (td->td_priority > PUSER) {
1520 			td->td_priority = PUSER;
1521 		}
1522 	}
1523 
1524 	/*
1525 	 * Defer further processing for signals which are held,
1526 	 * except that stopped processes must be continued by SIGCONT.
1527 	 */
1528 	if (action == SIG_HOLD) {
1529 		return;
1530 	}
1531 	if (TD_IS_SLEEPING(td)) {
1532 		/*
1533 		 * If thread is sleeping uninterruptibly
1534 		 * we can't interrupt the sleep... the signal will
1535 		 * be noticed when the process returns through
1536 		 * trap() or syscall().
1537 		 */
1538 		if ((td->td_flags & TDF_SINTR) == 0) {
1539 			return;
1540 		}
1541 		/*
1542 		 * Process is sleeping and traced.  Make it runnable
1543 		 * so it can discover the signal in issignal() and stop
1544 		 * for its parent.
1545 		 */
1546 		if (p->p_flag & P_TRACED) {
1547 			p->p_flag &= ~P_STOPPED_TRACE;
1548 		} else {
1549 
1550 			/*
1551 			 * If SIGCONT is default (or ignored) and process is
1552 			 * asleep, we are finished; the process should not
1553 			 * be awakened.
1554 			 */
1555 			if ((prop & SA_CONT) && action == SIG_DFL) {
1556 				SIGDELSET(p->p_siglist, sig);
1557 				return;
1558 			}
1559 
1560 			/*
1561 			 * Raise priority to at least PUSER.
1562 			 */
1563 			if (td->td_priority > PUSER) {
1564 				td->td_priority = PUSER;
1565 			}
1566 		}
1567 		if (td->td_flags & TDF_CVWAITQ)
1568 			cv_abort(td);
1569 		else
1570 			abortsleep(td);
1571 	}
1572 #ifdef SMP
1573 	  else {
1574 		/*
1575 		 * Other states do nothing with the signal immediately,
1576 		 * other than kicking ourselves if we are running.
1577 		 * It will either never be noticed, or noticed very soon.
1578 		 */
1579 		if (TD_IS_RUNNING(td) && td != curthread) {
1580 			forward_signal(td);
1581 		}
1582 	  }
1583 #endif
1584 }
1585 
1586 /*
1587  * If the current process has received a signal (should be caught or cause
1588  * termination, should interrupt current syscall), return the signal number.
1589  * Stop signals with default action are processed immediately, then cleared;
1590  * they aren't returned.  This is checked after each entry to the system for
1591  * a syscall or trap (though this can usually be done without calling issignal
1592  * by checking the pending signal masks in cursig.) The normal call
1593  * sequence is
1594  *
1595  *	while (sig = cursig(curthread))
1596  *		postsig(sig);
1597  */
1598 int
1599 issignal(td)
1600 	struct thread *td;
1601 {
1602 	struct proc *p;
1603 	sigset_t mask;
1604 	register int sig, prop;
1605 
1606 	p = td->td_proc;
1607 	PROC_LOCK_ASSERT(p, MA_OWNED);
1608 	WITNESS_SLEEP(1, &p->p_mtx.mtx_object);
1609 	for (;;) {
1610 		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
1611 
1612 		mask = p->p_siglist;
1613 		SIGSETNAND(mask, p->p_sigmask);
1614 		if (p->p_flag & P_PPWAIT)
1615 			SIG_STOPSIGMASK(mask);
1616 		if (SIGISEMPTY(mask))		/* no signal to send */
1617 			return (0);
1618 		sig = sig_ffs(&mask);
1619 		prop = sigprop(sig);
1620 
1621 		_STOPEVENT(p, S_SIG, sig);
1622 
1623 		/*
1624 		 * We should see pending but ignored signals
1625 		 * only if P_TRACED was on when they were posted.
1626 		 */
1627 		if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
1628 			SIGDELSET(p->p_siglist, sig);
1629 			continue;
1630 		}
1631 		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
1632 			/*
1633 			 * If traced, always stop.
1634 			 */
1635 			p->p_xstat = sig;
1636 			PROC_LOCK(p->p_pptr);
1637 			psignal(p->p_pptr, SIGCHLD);
1638 			PROC_UNLOCK(p->p_pptr);
1639 			mtx_lock_spin(&sched_lock);
1640 			stop(p);	/* uses schedlock too eventually */
1641 			thread_suspend_one(td);
1642 			PROC_UNLOCK(p);
1643 			DROP_GIANT();
1644 			p->p_stats->p_ru.ru_nivcsw++;
1645 			mi_switch();
1646 			mtx_unlock_spin(&sched_lock);
1647 			PICKUP_GIANT();
1648 			PROC_LOCK(p);
1649 
1650 			/*
1651 			 * If the traced bit got turned off, go back up
1652 			 * to the top to rescan signals.  This ensures
1653 			 * that p_sig* and ps_sigact are consistent.
1654 			 */
1655 			if ((p->p_flag & P_TRACED) == 0)
1656 				continue;
1657 
1658 			/*
1659 			 * If parent wants us to take the signal,
1660 			 * then it will leave it in p->p_xstat;
1661 			 * otherwise we just look for signals again.
1662 			 */
1663 			SIGDELSET(p->p_siglist, sig);	/* clear old signal */
1664 			sig = p->p_xstat;
1665 			if (sig == 0)
1666 				continue;
1667 
1668 			/*
1669 			 * Put the new signal into p_siglist.  If the
1670 			 * signal is being masked, look for other signals.
1671 			 */
1672 			SIGADDSET(p->p_siglist, sig);
1673 			if (SIGISMEMBER(p->p_sigmask, sig))
1674 				continue;
1675 			signotify(p);
1676 		}
1677 
1678 		/*
1679 		 * Decide whether the signal should be returned.
1680 		 * Return the signal's number, or fall through
1681 		 * to clear it from the pending mask.
1682 		 */
1683 		switch ((int)(intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
1684 
1685 		case (int)SIG_DFL:
1686 			/*
1687 			 * Don't take default actions on system processes.
1688 			 */
1689 			if (p->p_pid <= 1) {
1690 #ifdef DIAGNOSTIC
1691 				/*
1692 				 * Are you sure you want to ignore SIGSEGV
1693 				 * in init? XXX
1694 				 */
1695 				printf("Process (pid %lu) got signal %d\n",
1696 					(u_long)p->p_pid, sig);
1697 #endif
1698 				break;		/* == ignore */
1699 			}
1700 			/*
1701 			 * If there is a pending stop signal to process
1702 			 * with default action, stop here,
1703 			 * then clear the signal.  However,
1704 			 * if process is member of an orphaned
1705 			 * process group, ignore tty stop signals.
1706 			 */
1707 			if (prop & SA_STOP) {
1708 				if (p->p_flag & P_TRACED ||
1709 		    		    (p->p_pgrp->pg_jobc == 0 &&
1710 				     prop & SA_TTYSTOP))
1711 					break;	/* == ignore */
1712 				p->p_xstat = sig;
1713 				mtx_lock_spin(&sched_lock);
1714 				if (p->p_suspcount+1 == p->p_numthreads) {
1715 					mtx_unlock_spin(&sched_lock);
1716 					PROC_LOCK(p->p_pptr);
1717 					if ((p->p_pptr->p_procsig->ps_flag &
1718 				    		PS_NOCLDSTOP) == 0) {
1719 						psignal(p->p_pptr, SIGCHLD);
1720 					}
1721 					PROC_UNLOCK(p->p_pptr);
1722 					mtx_lock_spin(&sched_lock);
1723 				}
1724 				stop(p);
1725 				thread_suspend_one(td);
1726 				PROC_UNLOCK(p);
1727 				DROP_GIANT();
1728 				p->p_stats->p_ru.ru_nivcsw++;
1729 				mi_switch();
1730 				mtx_unlock_spin(&sched_lock);
1731 				PICKUP_GIANT();
1732 				PROC_LOCK(p);
1733 				break;
1734 			} else if (prop & SA_IGNORE) {
1735 				/*
1736 				 * Except for SIGCONT, shouldn't get here.
1737 				 * Default action is to ignore; drop it.
1738 				 */
1739 				break;		/* == ignore */
1740 			} else
1741 				return (sig);
1742 			/*NOTREACHED*/
1743 
1744 		case (int)SIG_IGN:
1745 			/*
1746 			 * Masking above should prevent us ever trying
1747 			 * to take action on an ignored signal other
1748 			 * than SIGCONT, unless process is traced.
1749 			 */
1750 			if ((prop & SA_CONT) == 0 &&
1751 			    (p->p_flag & P_TRACED) == 0)
1752 				printf("issignal\n");
1753 			break;		/* == ignore */
1754 
1755 		default:
1756 			/*
1757 			 * This signal has an action, let
1758 			 * postsig() process it.
1759 			 */
1760 			return (sig);
1761 		}
1762 		SIGDELSET(p->p_siglist, sig);		/* take the signal! */
1763 	}
1764 	/* NOTREACHED */
1765 }
1766 
1767 /*
1768  * Put the argument process into the stopped state and notify the parent
1769  * via wakeup.  Signals are handled elsewhere.  The process must not be
1770  * on the run queue.  Must be called with the proc p locked and the scheduler
1771  * lock held.
1772  */
1773 static void
1774 stop(p)
1775 	register struct proc *p;
1776 {
1777 
1778 	PROC_LOCK_ASSERT(p, MA_OWNED);
1779 	p->p_flag |= P_STOPPED_SIG;
1780 	p->p_flag &= ~P_WAITED;
1781 	wakeup(p->p_pptr);
1782 }
1783 
1784 /*
1785  * Take the action for the specified signal
1786  * from the current set of pending signals.
1787  */
1788 void
1789 postsig(sig)
1790 	register int sig;
1791 {
1792 	struct thread *td = curthread;
1793 	register struct proc *p = td->td_proc;
1794 	struct sigacts *ps;
1795 	sig_t action;
1796 	sigset_t returnmask;
1797 	int code;
1798 
1799 	KASSERT(sig != 0, ("postsig"));
1800 
1801 	PROC_LOCK_ASSERT(p, MA_OWNED);
1802 	ps = p->p_sigacts;
1803 	SIGDELSET(p->p_siglist, sig);
1804 	action = ps->ps_sigact[_SIG_IDX(sig)];
1805 #ifdef KTRACE
1806 	if (KTRPOINT(td, KTR_PSIG))
1807 		ktrpsig(sig, action, p->p_flag & P_OLDMASK ?
1808 		    &p->p_oldsigmask : &p->p_sigmask, 0);
1809 #endif
1810 	_STOPEVENT(p, S_SIG, sig);
1811 
1812 	if (action == SIG_DFL) {
1813 		/*
1814 		 * Default action, where the default is to kill
1815 		 * the process.  (Other cases were ignored above.)
1816 		 */
1817 		sigexit(td, sig);
1818 		/* NOTREACHED */
1819 	} else {
1820 		/*
1821 		 * If we get here, the signal must be caught.
1822 		 */
1823 		KASSERT(action != SIG_IGN && !SIGISMEMBER(p->p_sigmask, sig),
1824 		    ("postsig action"));
1825 		/*
1826 		 * Set the new mask value and also defer further
1827 		 * occurrences of this signal.
1828 		 *
1829 		 * Special case: user has done a sigsuspend.  Here the
1830 		 * current mask is not of interest, but rather the
1831 		 * mask from before the sigsuspend is what we want
1832 		 * restored after the signal processing is completed.
1833 		 */
1834 		if (p->p_flag & P_OLDMASK) {
1835 			returnmask = p->p_oldsigmask;
1836 			p->p_flag &= ~P_OLDMASK;
1837 		} else
1838 			returnmask = p->p_sigmask;
1839 
1840 		SIGSETOR(p->p_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1841 		if (!SIGISMEMBER(ps->ps_signodefer, sig))
1842 			SIGADDSET(p->p_sigmask, sig);
1843 
1844 		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1845 			/*
1846 			 * See kern_sigaction() for origin of this code.
1847 			 */
1848 			SIGDELSET(p->p_sigcatch, sig);
1849 			if (sig != SIGCONT &&
1850 			    sigprop(sig) & SA_IGNORE)
1851 				SIGADDSET(p->p_sigignore, sig);
1852 			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1853 		}
1854 		p->p_stats->p_ru.ru_nsignals++;
1855 		if (p->p_sig != sig) {
1856 			code = 0;
1857 		} else {
1858 			code = p->p_code;
1859 			p->p_code = 0;
1860 			p->p_sig = 0;
1861 		}
1862 		if (p->p_flag & P_KSES)
1863 			if (signal_upcall(p, sig))
1864 				return;
1865 		(*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
1866 	}
1867 }
1868 
1869 /*
1870  * Kill the specified process for the stated reason.
1871  */
1872 void
1873 killproc(p, why)
1874 	struct proc *p;
1875 	char *why;
1876 {
1877 
1878 	PROC_LOCK_ASSERT(p, MA_OWNED);
1879 	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
1880 		p, p->p_pid, p->p_comm);
1881 	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
1882 		p->p_ucred ? p->p_ucred->cr_uid : -1, why);
1883 	psignal(p, SIGKILL);
1884 }
1885 
1886 /*
1887  * Force the current process to exit with the specified signal, dumping core
1888  * if appropriate.  We bypass the normal tests for masked and caught signals,
1889  * allowing unrecoverable failures to terminate the process without changing
1890  * signal state.  Mark the accounting record with the signal termination.
1891  * If dumping core, save the signal number for the debugger.  Calls exit and
1892  * does not return.
1893  */
1894 void
1895 sigexit(td, sig)
1896 	struct thread *td;
1897 	int sig;
1898 {
1899 	struct proc *p = td->td_proc;
1900 
1901 	PROC_LOCK_ASSERT(p, MA_OWNED);
1902 	p->p_acflag |= AXSIG;
1903 	if (sigprop(sig) & SA_CORE) {
1904 		p->p_sig = sig;
1905 		/*
1906 		 * Log signals which would cause core dumps
1907 		 * (Log as LOG_INFO to appease those who don't want
1908 		 * these messages.)
1909 		 * XXX : Todo, as well as euid, write out ruid too
1910 		 */
1911 		PROC_UNLOCK(p);
1912 		if (!mtx_owned(&Giant))
1913 			mtx_lock(&Giant);
1914 		if (coredump(td) == 0)
1915 			sig |= WCOREFLAG;
1916 		if (kern_logsigexit)
1917 			log(LOG_INFO,
1918 			    "pid %d (%s), uid %d: exited on signal %d%s\n",
1919 			    p->p_pid, p->p_comm,
1920 			    td->td_ucred ? td->td_ucred->cr_uid : -1,
1921 			    sig &~ WCOREFLAG,
1922 			    sig & WCOREFLAG ? " (core dumped)" : "");
1923 	} else {
1924 		PROC_UNLOCK(p);
1925 		if (!mtx_owned(&Giant))
1926 			mtx_lock(&Giant);
1927 	}
1928 	exit1(td, W_EXITCODE(0, sig));
1929 	/* NOTREACHED */
1930 }
1931 
1932 static char corefilename[MAXPATHLEN+1] = {"%N.core"};
1933 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
1934 	      sizeof(corefilename), "process corefile name format string");
1935 
1936 /*
1937  * expand_name(name, uid, pid)
1938  * Expand the name described in corefilename, using name, uid, and pid.
1939  * corefilename is a printf-like string, with three format specifiers:
1940  *	%N	name of process ("name")
1941  *	%P	process id (pid)
1942  *	%U	user id (uid)
1943  * For example, "%N.core" is the default; they can be disabled completely
1944  * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1945  * This is controlled by the sysctl variable kern.corefile (see above).
1946  */
1947 
1948 static char *
1949 expand_name(name, uid, pid)
1950 	const char *name;
1951 	uid_t uid;
1952 	pid_t pid;
1953 {
1954 	const char *format, *appendstr;
1955 	char *temp;
1956 	char buf[11];		/* Buffer for pid/uid -- max 4B */
1957 	size_t i, l, n;
1958 
1959 	format = corefilename;
1960 	temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
1961 	if (temp == NULL)
1962 		return (NULL);
1963 	for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
1964 		switch (format[i]) {
1965 		case '%':	/* Format character */
1966 			i++;
1967 			switch (format[i]) {
1968 			case '%':
1969 				appendstr = "%";
1970 				break;
1971 			case 'N':	/* process name */
1972 				appendstr = name;
1973 				break;
1974 			case 'P':	/* process id */
1975 				sprintf(buf, "%u", pid);
1976 				appendstr = buf;
1977 				break;
1978 			case 'U':	/* user id */
1979 				sprintf(buf, "%u", uid);
1980 				appendstr = buf;
1981 				break;
1982 			default:
1983 				appendstr = "";
1984 			  	log(LOG_ERR,
1985 				    "Unknown format character %c in `%s'\n",
1986 				    format[i], format);
1987 			}
1988 			l = strlen(appendstr);
1989 			if ((n + l) >= MAXPATHLEN)
1990 				goto toolong;
1991 			memcpy(temp + n, appendstr, l);
1992 			n += l;
1993 			break;
1994 		default:
1995 			temp[n++] = format[i];
1996 		}
1997 	}
1998 	if (format[i] != '\0')
1999 		goto toolong;
2000 	return (temp);
2001 toolong:
2002 	log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2003 	    (long)pid, name, (u_long)uid);
2004 	free(temp, M_TEMP);
2005 	return (NULL);
2006 }
2007 
2008 /*
2009  * Dump a process' core.  The main routine does some
2010  * policy checking, and creates the name of the coredump;
2011  * then it passes on a vnode and a size limit to the process-specific
2012  * coredump routine if there is one; if there _is not_ one, it returns
2013  * ENOSYS; otherwise it returns the error from the process-specific routine.
2014  */
2015 
2016 static int
2017 coredump(struct thread *td)
2018 {
2019 	struct proc *p = td->td_proc;
2020 	register struct vnode *vp;
2021 	register struct ucred *cred = td->td_ucred;
2022 	struct flock lf;
2023 	struct nameidata nd;
2024 	struct vattr vattr;
2025 	int error, error1, flags;
2026 	struct mount *mp;
2027 	char *name;			/* name of corefile */
2028 	off_t limit;
2029 
2030 	PROC_LOCK(p);
2031 	_STOPEVENT(p, S_CORE, 0);
2032 
2033 	if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
2034 		PROC_UNLOCK(p);
2035 		return (EFAULT);
2036 	}
2037 
2038 	/*
2039 	 * Note that the bulk of limit checking is done after
2040 	 * the corefile is created.  The exception is if the limit
2041 	 * for corefiles is 0, in which case we don't bother
2042 	 * creating the corefile at all.  This layout means that
2043 	 * a corefile larger than the limit is truncated rather than
2044 	 * left uncreated.
2045 	 */
2046 	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
2047 	if (limit == 0) {
2048 		PROC_UNLOCK(p);
2049 		return 0;
2050 	}
2051 	PROC_UNLOCK(p);
2052 
2053 restart:
2054 	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
2055 	if (name == NULL)
2056 		return (EINVAL);
2057 	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2058 	flags = O_CREAT | FWRITE | O_NOFOLLOW;
2059 	error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
2060 	free(name, M_TEMP);
2061 	if (error)
2062 		return (error);
2063 	NDFREE(&nd, NDF_ONLY_PNBUF);
2064 	vp = nd.ni_vp;
2065 
2066 	/* Don't dump to non-regular files or files with links. */
2067 	if (vp->v_type != VREG ||
2068 	    VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2069 		VOP_UNLOCK(vp, 0, td);
2070 		error = EFAULT;
2071 		goto out2;
2072 	}
2073 
2074 	VOP_UNLOCK(vp, 0, td);
2075 	lf.l_whence = SEEK_SET;
2076 	lf.l_start = 0;
2077 	lf.l_len = 0;
2078 	lf.l_type = F_WRLCK;
2079 	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
2080 	if (error)
2081 		goto out2;
2082 
2083 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2084 		lf.l_type = F_UNLCK;
2085 		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2086 		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2087 			return (error);
2088 		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
2089 			return (error);
2090 		goto restart;
2091 	}
2092 
2093 	VATTR_NULL(&vattr);
2094 	vattr.va_size = 0;
2095 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2096 	VOP_LEASE(vp, td, cred, LEASE_WRITE);
2097 	VOP_SETATTR(vp, &vattr, cred, td);
2098 	VOP_UNLOCK(vp, 0, td);
2099 	PROC_LOCK(p);
2100 	p->p_acflag |= ACORE;
2101 	PROC_UNLOCK(p);
2102 
2103 	error = p->p_sysent->sv_coredump ?
2104 	  p->p_sysent->sv_coredump(td, vp, limit) :
2105 	  ENOSYS;
2106 
2107 	lf.l_type = F_UNLCK;
2108 	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2109 	vn_finished_write(mp);
2110 out2:
2111 	error1 = vn_close(vp, FWRITE, cred, td);
2112 	if (error == 0)
2113 		error = error1;
2114 	return (error);
2115 }
2116 
2117 /*
2118  * Nonexistent system call-- signal process (may want to handle it).
2119  * Flag error in case process won't see signal immediately (blocked or ignored).
2120  */
2121 #ifndef _SYS_SYSPROTO_H_
2122 struct nosys_args {
2123 	int	dummy;
2124 };
2125 #endif
2126 /*
2127  * MPSAFE
2128  */
2129 /* ARGSUSED */
2130 int
2131 nosys(td, args)
2132 	struct thread *td;
2133 	struct nosys_args *args;
2134 {
2135 	struct proc *p = td->td_proc;
2136 
2137 	mtx_lock(&Giant);
2138 	PROC_LOCK(p);
2139 	psignal(p, SIGSYS);
2140 	PROC_UNLOCK(p);
2141 	mtx_unlock(&Giant);
2142 	return (ENOSYS);
2143 }
2144 
2145 /*
2146  * Send a SIGIO or SIGURG signal to a process or process group using
2147  * stored credentials rather than those of the current process.
2148  */
2149 void
2150 pgsigio(sigiop, sig, checkctty)
2151 	struct sigio **sigiop;
2152 	int sig, checkctty;
2153 {
2154 	struct sigio *sigio;
2155 
2156 	SIGIO_LOCK();
2157 	sigio = *sigiop;
2158 	if (sigio == NULL) {
2159 		SIGIO_UNLOCK();
2160 		return;
2161 	}
2162 	if (sigio->sio_pgid > 0) {
2163 		PROC_LOCK(sigio->sio_proc);
2164 		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
2165 			psignal(sigio->sio_proc, sig);
2166 		PROC_UNLOCK(sigio->sio_proc);
2167 	} else if (sigio->sio_pgid < 0) {
2168 		struct proc *p;
2169 
2170 		PGRP_LOCK(sigio->sio_pgrp);
2171 		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
2172 			PROC_LOCK(p);
2173 			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
2174 			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
2175 				psignal(p, sig);
2176 			PROC_UNLOCK(p);
2177 		}
2178 		PGRP_UNLOCK(sigio->sio_pgrp);
2179 	}
2180 	SIGIO_UNLOCK();
2181 }
2182 
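/*
 * Attach a signal knote to the current process: hook it onto p_klist
 * (shared with process knotes) and mark it EV_CLEAR so the event count
 * resets once the event has been retrieved.
 */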
2183 static int
2184 filt_sigattach(struct knote *kn)
2185 {
2186 	struct proc *p = curproc;
2187 
2188 	kn->kn_ptr.p_proc = p;
2189 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
2190 
2191 	PROC_LOCK(p);
2192 	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2193 	PROC_UNLOCK(p);
2194 
2195 	return (0);
2196 }
2197 
2198 static void
2199 filt_sigdetach(struct knote *kn)
2200 {
2201 	struct proc *p = kn->kn_ptr.p_proc;
2202 
2203 	PROC_LOCK(p);
2204 	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2205 	PROC_UNLOCK(p);
2206 }
2207 
2208 /*
2209  * signal knotes are shared with proc knotes, so we apply a mask to
2210  * the hint in order to differentiate them from process hints.  This
2211  * could be avoided by using a signal-specific knote list, but probably
2212  * isn't worth the trouble.
2213  */
2214 static int
2215 filt_signal(struct knote *kn, long hint)
2216 {
2217 
2218 	if (hint & NOTE_SIGNAL) {
2219 		hint &= ~NOTE_SIGNAL;
2220 
2221 		if (kn->kn_id == hint)
2222 			kn->kn_data++;
2223 	}
2224 	return (kn->kn_data != 0);
2225 }
2226