1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1989, 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include "opt_ktrace.h"
43 
44 #include <sys/param.h>
45 #include <sys/ctype.h>
46 #include <sys/systm.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/acct.h>
50 #include <sys/capsicum.h>
51 #include <sys/compressor.h>
52 #include <sys/condvar.h>
53 #include <sys/devctl.h>
54 #include <sys/event.h>
55 #include <sys/fcntl.h>
56 #include <sys/imgact.h>
57 #include <sys/kernel.h>
58 #include <sys/ktr.h>
59 #include <sys/ktrace.h>
60 #include <sys/limits.h>
61 #include <sys/lock.h>
62 #include <sys/malloc.h>
63 #include <sys/mutex.h>
64 #include <sys/refcount.h>
65 #include <sys/namei.h>
66 #include <sys/proc.h>
67 #include <sys/procdesc.h>
68 #include <sys/ptrace.h>
69 #include <sys/posix4.h>
70 #include <sys/racct.h>
71 #include <sys/resourcevar.h>
72 #include <sys/sdt.h>
73 #include <sys/sbuf.h>
74 #include <sys/sleepqueue.h>
75 #include <sys/smp.h>
76 #include <sys/stat.h>
77 #include <sys/sx.h>
78 #include <sys/syscallsubr.h>
79 #include <sys/sysctl.h>
80 #include <sys/sysent.h>
81 #include <sys/syslog.h>
82 #include <sys/sysproto.h>
83 #include <sys/timers.h>
84 #include <sys/unistd.h>
85 #include <sys/wait.h>
86 #include <vm/vm.h>
87 #include <vm/vm_extern.h>
88 #include <vm/uma.h>
89 
90 #include <sys/jail.h>
91 
92 #include <machine/cpu.h>
93 
94 #include <security/audit/audit.h>
95 
96 #define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
97 
98 SDT_PROVIDER_DECLARE(proc);
99 SDT_PROBE_DEFINE3(proc, , , signal__send,
100     "struct thread *", "struct proc *", "int");
101 SDT_PROBE_DEFINE2(proc, , , signal__clear,
102     "int", "ksiginfo_t *");
103 SDT_PROBE_DEFINE3(proc, , , signal__discard,
104     "struct thread *", "struct proc *", "int");
105 
106 static int	coredump(struct thread *);
107 static int	killpg1(struct thread *td, int sig, int pgid, int all,
108 		    ksiginfo_t *ksi);
109 static int	issignal(struct thread *td);
110 static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
111 static int	sigprop(int sig);
112 static void	tdsigwakeup(struct thread *, int, sig_t, int);
113 static int	sig_suspend_threads(struct thread *, struct proc *, int);
114 static int	filt_sigattach(struct knote *kn);
115 static void	filt_sigdetach(struct knote *kn);
116 static int	filt_signal(struct knote *kn, long hint);
117 static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
118 static void	sigqueue_start(void);
119 
120 static uma_zone_t	ksiginfo_zone = NULL;
121 struct filterops sig_filtops = {
122 	.f_isfd = 0,
123 	.f_attach = filt_sigattach,
124 	.f_detach = filt_sigdetach,
125 	.f_event = filt_signal,
126 };
127 
128 static int	kern_logsigexit = 1;
129 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
130     &kern_logsigexit, 0,
131     "Log processes quitting on abnormal signals to syslog(3)");
132 
133 static int	kern_forcesigexit = 1;
134 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
135     &kern_forcesigexit, 0, "Force trap signal to be handled");
136 
137 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
138     "POSIX real time signal");
139 
140 static int	max_pending_per_proc = 128;
141 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
142     &max_pending_per_proc, 0, "Max pending signals per proc");
143 
144 static int	preallocate_siginfo = 1024;
145 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
146     &preallocate_siginfo, 0, "Preallocated signal memory size");
147 
148 static int	signal_overflow = 0;
149 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
150     &signal_overflow, 0, "Number of signals that overflowed");
151 
152 static int	signal_alloc_fail = 0;
153 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
154     &signal_alloc_fail, 0, "Number of signals that failed to be allocated");
155 
156 static int	kern_lognosys = 0;
157 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
158     "Log invalid syscalls");
159 
160 __read_frequently bool sigfastblock_fetch_always = false;
161 SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
162     &sigfastblock_fetch_always, 0,
163     "Fetch sigfastblock word on each syscall entry for proper "
164     "blocking semantic");
165 
166 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
167 
168 /*
169  * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
170  * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
171  * in the right situations.
172  */
173 #define CANSIGIO(cr1, cr2) \
174 	((cr1)->cr_uid == 0 || \
175 	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
176 	    (cr1)->cr_uid == (cr2)->cr_ruid || \
177 	    (cr1)->cr_ruid == (cr2)->cr_uid || \
178 	    (cr1)->cr_uid == (cr2)->cr_uid)
179 
180 static int	sugid_coredump;
181 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
182     &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
183 
184 static int	capmode_coredump;
185 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
186     &capmode_coredump, 0, "Allow processes in capability mode to dump core");
187 
188 static int	do_coredump = 1;
189 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
190 	&do_coredump, 0, "Enable/Disable coredumps");
191 
192 static int	set_core_nodump_flag = 0;
193 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
194 	0, "Enable setting the NODUMP flag on coredump files");
195 
196 static int	coredump_devctl = 0;
197 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
198 	0, "Generate a devctl notification when processes coredump");
199 
200 /*
201  * Signal properties and actions.
202  * The array below categorizes the signals and their default actions
203  * according to the following properties:
204  */
205 #define	SIGPROP_KILL		0x01	/* terminates process by default */
206 #define	SIGPROP_CORE		0x02	/* ditto and coredumps */
207 #define	SIGPROP_STOP		0x04	/* suspend process */
208 #define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
209 #define	SIGPROP_IGNORE		0x10	/* ignore by default */
210 #define	SIGPROP_CONT		0x20	/* continue if suspended */
211 #define	SIGPROP_CANTMASK	0x40	/* non-maskable, catchable */
212 
213 static int sigproptbl[NSIG] = {
214 	[SIGHUP] =	SIGPROP_KILL,
215 	[SIGINT] =	SIGPROP_KILL,
216 	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
217 	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
218 	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
219 	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
220 	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
221 	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
222 	[SIGKILL] =	SIGPROP_KILL,
223 	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
224 	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
225 	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
226 	[SIGPIPE] =	SIGPROP_KILL,
227 	[SIGALRM] =	SIGPROP_KILL,
228 	[SIGTERM] =	SIGPROP_KILL,
229 	[SIGURG] =	SIGPROP_IGNORE,
230 	[SIGSTOP] =	SIGPROP_STOP,
231 	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
232 	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
233 	[SIGCHLD] =	SIGPROP_IGNORE,
234 	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
235 	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
236 	[SIGIO] =	SIGPROP_IGNORE,
237 	[SIGXCPU] =	SIGPROP_KILL,
238 	[SIGXFSZ] =	SIGPROP_KILL,
239 	[SIGVTALRM] =	SIGPROP_KILL,
240 	[SIGPROF] =	SIGPROP_KILL,
241 	[SIGWINCH] =	SIGPROP_IGNORE,
242 	[SIGINFO] =	SIGPROP_IGNORE,
243 	[SIGUSR1] =	SIGPROP_KILL,
244 	[SIGUSR2] =	SIGPROP_KILL,
245 };
246 
247 sigset_t fastblock_mask;
248 
249 static void
250 sigqueue_start(void)
251 {
252 	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
253 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
254 	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
255 	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
256 	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
257 	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
258 	SIGFILLSET(fastblock_mask);
259 	SIG_CANTMASK(fastblock_mask);
260 }
261 
262 ksiginfo_t *
263 ksiginfo_alloc(int wait)
264 {
265 	int flags;
266 
267 	flags = M_ZERO;
268 	if (! wait)
269 		flags |= M_NOWAIT;
270 	if (ksiginfo_zone != NULL)
271 		return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
272 	return (NULL);
273 }
274 
275 void
276 ksiginfo_free(ksiginfo_t *ksi)
277 {
278 	uma_zfree(ksiginfo_zone, ksi);
279 }
280 
281 static __inline int
282 ksiginfo_tryfree(ksiginfo_t *ksi)
283 {
284 	if (!(ksi->ksi_flags & KSI_EXT)) {
285 		uma_zfree(ksiginfo_zone, ksi);
286 		return (1);
287 	}
288 	return (0);
289 }
290 
291 void
292 sigqueue_init(sigqueue_t *list, struct proc *p)
293 {
294 	SIGEMPTYSET(list->sq_signals);
295 	SIGEMPTYSET(list->sq_kill);
296 	SIGEMPTYSET(list->sq_ptrace);
297 	TAILQ_INIT(&list->sq_list);
298 	list->sq_proc = p;
299 	list->sq_flags = SQ_INIT;
300 }
301 
302 /*
303  * Get a signal's ksiginfo.
304  * Return:
305  *	0	-	signal not found
306  *	others	-	signal number
307  */
308 static int
309 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
310 {
311 	struct proc *p = sq->sq_proc;
312 	struct ksiginfo *ksi, *next;
313 	int count = 0;
314 
315 	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
316 
317 	if (!SIGISMEMBER(sq->sq_signals, signo))
318 		return (0);
319 
320 	if (SIGISMEMBER(sq->sq_ptrace, signo)) {
321 		count++;
322 		SIGDELSET(sq->sq_ptrace, signo);
323 		si->ksi_flags |= KSI_PTRACE;
324 	}
325 	if (SIGISMEMBER(sq->sq_kill, signo)) {
326 		count++;
327 		if (count == 1)
328 			SIGDELSET(sq->sq_kill, signo);
329 	}
330 
331 	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
332 		if (ksi->ksi_signo == signo) {
333 			if (count == 0) {
334 				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
335 				ksi->ksi_sigq = NULL;
336 				ksiginfo_copy(ksi, si);
337 				if (ksiginfo_tryfree(ksi) && p != NULL)
338 					p->p_pendingcnt--;
339 			}
340 			if (++count > 1)
341 				break;
342 		}
343 	}
344 
345 	if (count <= 1)
346 		SIGDELSET(sq->sq_signals, signo);
347 	si->ksi_signo = signo;
348 	return (signo);
349 }
350 
351 void
352 sigqueue_take(ksiginfo_t *ksi)
353 {
354 	struct ksiginfo *kp;
355 	struct proc	*p;
356 	sigqueue_t	*sq;
357 
358 	if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
359 		return;
360 
361 	p = sq->sq_proc;
362 	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
363 	ksi->ksi_sigq = NULL;
364 	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
365 		p->p_pendingcnt--;
366 
367 	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
368 	     kp = TAILQ_NEXT(kp, ksi_link)) {
369 		if (kp->ksi_signo == ksi->ksi_signo)
370 			break;
371 	}
372 	if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
373 	    !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
374 		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
375 }
376 
377 static int
378 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
379 {
380 	struct proc *p = sq->sq_proc;
381 	struct ksiginfo *ksi;
382 	int ret = 0;
383 
384 	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
385 
386 	/*
387 	 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
388 	 * for these signals.
389 	 */
390 	if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
391 		SIGADDSET(sq->sq_kill, signo);
392 		goto out_set_bit;
393 	}
394 
395 	/* directly insert the ksi, don't copy it */
396 	if (si->ksi_flags & KSI_INS) {
397 		if (si->ksi_flags & KSI_HEAD)
398 			TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
399 		else
400 			TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
401 		si->ksi_sigq = sq;
402 		goto out_set_bit;
403 	}
404 
405 	if (__predict_false(ksiginfo_zone == NULL)) {
406 		SIGADDSET(sq->sq_kill, signo);
407 		goto out_set_bit;
408 	}
409 
410 	if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
411 		signal_overflow++;
412 		ret = EAGAIN;
413 	} else if ((ksi = ksiginfo_alloc(0)) == NULL) {
414 		signal_alloc_fail++;
415 		ret = EAGAIN;
416 	} else {
417 		if (p != NULL)
418 			p->p_pendingcnt++;
419 		ksiginfo_copy(si, ksi);
420 		ksi->ksi_signo = signo;
421 		if (si->ksi_flags & KSI_HEAD)
422 			TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
423 		else
424 			TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
425 		ksi->ksi_sigq = sq;
426 	}
427 
428 	if (ret != 0) {
429 		if ((si->ksi_flags & KSI_PTRACE) != 0) {
430 			SIGADDSET(sq->sq_ptrace, signo);
431 			ret = 0;
432 			goto out_set_bit;
433 		} else if ((si->ksi_flags & KSI_TRAP) != 0 ||
434 		    (si->ksi_flags & KSI_SIGQ) == 0) {
435 			SIGADDSET(sq->sq_kill, signo);
436 			ret = 0;
437 			goto out_set_bit;
438 		}
439 		return (ret);
440 	}
441 
442 out_set_bit:
443 	SIGADDSET(sq->sq_signals, signo);
444 	return (ret);
445 }
446 
447 void
448 sigqueue_flush(sigqueue_t *sq)
449 {
450 	struct proc *p = sq->sq_proc;
451 	ksiginfo_t *ksi;
452 
453 	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
454 
455 	if (p != NULL)
456 		PROC_LOCK_ASSERT(p, MA_OWNED);
457 
458 	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
459 		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
460 		ksi->ksi_sigq = NULL;
461 		if (ksiginfo_tryfree(ksi) && p != NULL)
462 			p->p_pendingcnt--;
463 	}
464 
465 	SIGEMPTYSET(sq->sq_signals);
466 	SIGEMPTYSET(sq->sq_kill);
467 	SIGEMPTYSET(sq->sq_ptrace);
468 }
469 
470 static void
471 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
472 {
473 	sigset_t tmp;
474 	struct proc *p1, *p2;
475 	ksiginfo_t *ksi, *next;
476 
477 	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
478 	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
479 	p1 = src->sq_proc;
480 	p2 = dst->sq_proc;
481 	/* Move siginfo to target list */
482 	TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
483 		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
484 			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
485 			if (p1 != NULL)
486 				p1->p_pendingcnt--;
487 			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
488 			ksi->ksi_sigq = dst;
489 			if (p2 != NULL)
490 				p2->p_pendingcnt++;
491 		}
492 	}
493 
494 	/* Move pending bits to target list */
495 	tmp = src->sq_kill;
496 	SIGSETAND(tmp, *set);
497 	SIGSETOR(dst->sq_kill, tmp);
498 	SIGSETNAND(src->sq_kill, tmp);
499 
500 	tmp = src->sq_ptrace;
501 	SIGSETAND(tmp, *set);
502 	SIGSETOR(dst->sq_ptrace, tmp);
503 	SIGSETNAND(src->sq_ptrace, tmp);
504 
505 	tmp = src->sq_signals;
506 	SIGSETAND(tmp, *set);
507 	SIGSETOR(dst->sq_signals, tmp);
508 	SIGSETNAND(src->sq_signals, tmp);
509 }
510 
511 #if 0
512 static void
513 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
514 {
515 	sigset_t set;
516 
517 	SIGEMPTYSET(set);
518 	SIGADDSET(set, signo);
519 	sigqueue_move_set(src, dst, &set);
520 }
521 #endif
522 
523 static void
524 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
525 {
526 	struct proc *p = sq->sq_proc;
527 	ksiginfo_t *ksi, *next;
528 
529 	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
530 
531 	/* Remove siginfo queue */
532 	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
533 		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
534 			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
535 			ksi->ksi_sigq = NULL;
536 			if (ksiginfo_tryfree(ksi) && p != NULL)
537 				p->p_pendingcnt--;
538 		}
539 	}
540 	SIGSETNAND(sq->sq_kill, *set);
541 	SIGSETNAND(sq->sq_ptrace, *set);
542 	SIGSETNAND(sq->sq_signals, *set);
543 }
544 
545 void
546 sigqueue_delete(sigqueue_t *sq, int signo)
547 {
548 	sigset_t set;
549 
550 	SIGEMPTYSET(set);
551 	SIGADDSET(set, signo);
552 	sigqueue_delete_set(sq, &set);
553 }
554 
555 /* Remove a set of signals for a process */
556 static void
557 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
558 {
559 	sigqueue_t worklist;
560 	struct thread *td0;
561 
562 	PROC_LOCK_ASSERT(p, MA_OWNED);
563 
564 	sigqueue_init(&worklist, NULL);
565 	sigqueue_move_set(&p->p_sigqueue, &worklist, set);
566 
567 	FOREACH_THREAD_IN_PROC(p, td0)
568 		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
569 
570 	sigqueue_flush(&worklist);
571 }
572 
573 void
574 sigqueue_delete_proc(struct proc *p, int signo)
575 {
576 	sigset_t set;
577 
578 	SIGEMPTYSET(set);
579 	SIGADDSET(set, signo);
580 	sigqueue_delete_set_proc(p, &set);
581 }
582 
583 static void
584 sigqueue_delete_stopmask_proc(struct proc *p)
585 {
586 	sigset_t set;
587 
588 	SIGEMPTYSET(set);
589 	SIGADDSET(set, SIGSTOP);
590 	SIGADDSET(set, SIGTSTP);
591 	SIGADDSET(set, SIGTTIN);
592 	SIGADDSET(set, SIGTTOU);
593 	sigqueue_delete_set_proc(p, &set);
594 }
595 
596 /*
597  * Determine the signal that should be delivered to thread td (the current
598  * thread), returning 0 if none.  If there is a pending stop signal with default
599  * action, the process stops in issignal().
600  */
601 int
602 cursig(struct thread *td)
603 {
604 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
605 	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
606 	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
607 	return (SIGPENDING(td) ? issignal(td) : 0);
608 }
609 
610 /*
611  * Arrange for ast() to handle unmasked pending signals on return to user
612  * mode.  This must be called whenever a signal is added to td_sigqueue or
613  * unmasked in td_sigmask.
614  */
615 void
616 signotify(struct thread *td)
617 {
618 
619 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
620 
621 	if (SIGPENDING(td)) {
622 		thread_lock(td);
623 		td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
624 		thread_unlock(td);
625 	}
626 }
627 
628 /*
629  * Returns 1 (true) if altstack is configured for the thread, and the
630  * passed stack bottom address falls into the altstack range.  Handles
631  * the COMPAT_43 special case where the alternate stack size is zero.
632  */
633 int
634 sigonstack(size_t sp)
635 {
636 	struct thread *td;
637 
638 	td = curthread;
639 	if ((td->td_pflags & TDP_ALTSTACK) == 0)
640 		return (0);
641 #if defined(COMPAT_43)
642 	if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
643 		return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
644 #endif
645 	return (sp >= (size_t)td->td_sigstk.ss_sp &&
646 	    sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
647 }
648 
649 static __inline int
650 sigprop(int sig)
651 {
652 
653 	if (sig > 0 && sig < nitems(sigproptbl))
654 		return (sigproptbl[sig]);
655 	return (0);
656 }
657 
658 int
659 sig_ffs(sigset_t *set)
660 {
661 	int i;
662 
663 	for (i = 0; i < _SIG_WORDS; i++)
664 		if (set->__bits[i])
665 			return (ffs(set->__bits[i]) + (i * 32));
666 	return (0);
667 }
668 
669 static bool
670 sigact_flag_test(const struct sigaction *act, int flag)
671 {
672 
673 	/*
674 	 * SA_SIGINFO is reset when signal disposition is set to
675 	 * ignore or default.  Other flags are kept according to user
676 	 * settings.
677 	 */
678 	return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
679 	    ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
680 	    (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
681 }
682 
683 /*
684  * kern_sigaction
685  * sigaction
686  * freebsd4_sigaction
687  * osigaction
688  */
689 int
690 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
691     struct sigaction *oact, int flags)
692 {
693 	struct sigacts *ps;
694 	struct proc *p = td->td_proc;
695 
696 	if (!_SIG_VALID(sig))
697 		return (EINVAL);
698 	if (act != NULL && act->sa_handler != SIG_DFL &&
699 	    act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
700 	    SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
701 	    SA_NOCLDWAIT | SA_SIGINFO)) != 0)
702 		return (EINVAL);
703 
704 	PROC_LOCK(p);
705 	ps = p->p_sigacts;
706 	mtx_lock(&ps->ps_mtx);
707 	if (oact) {
708 		memset(oact, 0, sizeof(*oact));
709 		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
710 		if (SIGISMEMBER(ps->ps_sigonstack, sig))
711 			oact->sa_flags |= SA_ONSTACK;
712 		if (!SIGISMEMBER(ps->ps_sigintr, sig))
713 			oact->sa_flags |= SA_RESTART;
714 		if (SIGISMEMBER(ps->ps_sigreset, sig))
715 			oact->sa_flags |= SA_RESETHAND;
716 		if (SIGISMEMBER(ps->ps_signodefer, sig))
717 			oact->sa_flags |= SA_NODEFER;
718 		if (SIGISMEMBER(ps->ps_siginfo, sig)) {
719 			oact->sa_flags |= SA_SIGINFO;
720 			oact->sa_sigaction =
721 			    (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
722 		} else
723 			oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
724 		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
725 			oact->sa_flags |= SA_NOCLDSTOP;
726 		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
727 			oact->sa_flags |= SA_NOCLDWAIT;
728 	}
729 	if (act) {
730 		if ((sig == SIGKILL || sig == SIGSTOP) &&
731 		    act->sa_handler != SIG_DFL) {
732 			mtx_unlock(&ps->ps_mtx);
733 			PROC_UNLOCK(p);
734 			return (EINVAL);
735 		}
736 
737 		/*
738 		 * Change setting atomically.
739 		 */
740 
741 		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
742 		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
743 		if (sigact_flag_test(act, SA_SIGINFO)) {
744 			ps->ps_sigact[_SIG_IDX(sig)] =
745 			    (__sighandler_t *)act->sa_sigaction;
746 			SIGADDSET(ps->ps_siginfo, sig);
747 		} else {
748 			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
749 			SIGDELSET(ps->ps_siginfo, sig);
750 		}
751 		if (!sigact_flag_test(act, SA_RESTART))
752 			SIGADDSET(ps->ps_sigintr, sig);
753 		else
754 			SIGDELSET(ps->ps_sigintr, sig);
755 		if (sigact_flag_test(act, SA_ONSTACK))
756 			SIGADDSET(ps->ps_sigonstack, sig);
757 		else
758 			SIGDELSET(ps->ps_sigonstack, sig);
759 		if (sigact_flag_test(act, SA_RESETHAND))
760 			SIGADDSET(ps->ps_sigreset, sig);
761 		else
762 			SIGDELSET(ps->ps_sigreset, sig);
763 		if (sigact_flag_test(act, SA_NODEFER))
764 			SIGADDSET(ps->ps_signodefer, sig);
765 		else
766 			SIGDELSET(ps->ps_signodefer, sig);
767 		if (sig == SIGCHLD) {
768 			if (act->sa_flags & SA_NOCLDSTOP)
769 				ps->ps_flag |= PS_NOCLDSTOP;
770 			else
771 				ps->ps_flag &= ~PS_NOCLDSTOP;
772 			if (act->sa_flags & SA_NOCLDWAIT) {
773 				/*
774 				 * Paranoia: since SA_NOCLDWAIT is implemented
775 				 * by reparenting the dying child to PID 1 (and
776 				 * trust it to reap the zombie), PID 1 itself
777 				 * is forbidden to set SA_NOCLDWAIT.
778 				 */
779 				if (p->p_pid == 1)
780 					ps->ps_flag &= ~PS_NOCLDWAIT;
781 				else
782 					ps->ps_flag |= PS_NOCLDWAIT;
783 			} else
784 				ps->ps_flag &= ~PS_NOCLDWAIT;
785 			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
786 				ps->ps_flag |= PS_CLDSIGIGN;
787 			else
788 				ps->ps_flag &= ~PS_CLDSIGIGN;
789 		}
790 		/*
791 		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
792 		 * and for signals set to SIG_DFL where the default is to
793 		 * ignore. However, don't put SIGCONT in ps_sigignore, as we
794 		 * have to restart the process.
795 		 */
796 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
797 		    (sigprop(sig) & SIGPROP_IGNORE &&
798 		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
799 			/* never to be seen again */
800 			sigqueue_delete_proc(p, sig);
801 			if (sig != SIGCONT)
802 				/* easier in psignal */
803 				SIGADDSET(ps->ps_sigignore, sig);
804 			SIGDELSET(ps->ps_sigcatch, sig);
805 		} else {
806 			SIGDELSET(ps->ps_sigignore, sig);
807 			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
808 				SIGDELSET(ps->ps_sigcatch, sig);
809 			else
810 				SIGADDSET(ps->ps_sigcatch, sig);
811 		}
812 #ifdef COMPAT_FREEBSD4
813 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
814 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
815 		    (flags & KSA_FREEBSD4) == 0)
816 			SIGDELSET(ps->ps_freebsd4, sig);
817 		else
818 			SIGADDSET(ps->ps_freebsd4, sig);
819 #endif
820 #ifdef COMPAT_43
821 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
822 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
823 		    (flags & KSA_OSIGSET) == 0)
824 			SIGDELSET(ps->ps_osigset, sig);
825 		else
826 			SIGADDSET(ps->ps_osigset, sig);
827 #endif
828 	}
829 	mtx_unlock(&ps->ps_mtx);
830 	PROC_UNLOCK(p);
831 	return (0);
832 }
833 
834 #ifndef _SYS_SYSPROTO_H_
835 struct sigaction_args {
836 	int	sig;
837 	struct	sigaction *act;
838 	struct	sigaction *oact;
839 };
840 #endif
841 int
842 sys_sigaction(struct thread *td, struct sigaction_args *uap)
843 {
844 	struct sigaction act, oact;
845 	struct sigaction *actp, *oactp;
846 	int error;
847 
848 	actp = (uap->act != NULL) ? &act : NULL;
849 	oactp = (uap->oact != NULL) ? &oact : NULL;
850 	if (actp) {
851 		error = copyin(uap->act, actp, sizeof(act));
852 		if (error)
853 			return (error);
854 	}
855 	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
856 	if (oactp && !error)
857 		error = copyout(oactp, uap->oact, sizeof(oact));
858 	return (error);
859 }
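
/*
 * Illustrative sketch (not part of the kernel source): the userspace
 * sigaction(2) call that sys_sigaction()/kern_sigaction() ultimately
 * service.  All names below are assumptions for the example only.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void
 *	handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// For SI_USER, si->si_pid / si->si_uid identify the sender.
 *	}
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);	// copied in and handled by kern_sigaction()
 */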
860 
861 #ifdef COMPAT_FREEBSD4
862 #ifndef _SYS_SYSPROTO_H_
863 struct freebsd4_sigaction_args {
864 	int	sig;
865 	struct	sigaction *act;
866 	struct	sigaction *oact;
867 };
868 #endif
869 int
870 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
871 {
872 	struct sigaction act, oact;
873 	struct sigaction *actp, *oactp;
874 	int error;
875 
876 	actp = (uap->act != NULL) ? &act : NULL;
877 	oactp = (uap->oact != NULL) ? &oact : NULL;
878 	if (actp) {
879 		error = copyin(uap->act, actp, sizeof(act));
880 		if (error)
881 			return (error);
882 	}
883 	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
884 	if (oactp && !error)
885 		error = copyout(oactp, uap->oact, sizeof(oact));
886 	return (error);
887 }
888 #endif	/* COMPAT_FREEBSD4 */
889 
890 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
891 #ifndef _SYS_SYSPROTO_H_
892 struct osigaction_args {
893 	int	signum;
894 	struct	osigaction *nsa;
895 	struct	osigaction *osa;
896 };
897 #endif
898 int
899 osigaction(struct thread *td, struct osigaction_args *uap)
900 {
901 	struct osigaction sa;
902 	struct sigaction nsa, osa;
903 	struct sigaction *nsap, *osap;
904 	int error;
905 
906 	if (uap->signum <= 0 || uap->signum >= ONSIG)
907 		return (EINVAL);
908 
909 	nsap = (uap->nsa != NULL) ? &nsa : NULL;
910 	osap = (uap->osa != NULL) ? &osa : NULL;
911 
912 	if (nsap) {
913 		error = copyin(uap->nsa, &sa, sizeof(sa));
914 		if (error)
915 			return (error);
916 		nsap->sa_handler = sa.sa_handler;
917 		nsap->sa_flags = sa.sa_flags;
918 		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
919 	}
920 	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
921 	if (osap && !error) {
922 		sa.sa_handler = osap->sa_handler;
923 		sa.sa_flags = osap->sa_flags;
924 		SIG2OSIG(osap->sa_mask, sa.sa_mask);
925 		error = copyout(&sa, uap->osa, sizeof(sa));
926 	}
927 	return (error);
928 }
929 
930 #if !defined(__i386__)
931 /* Avoid replicating the same stub everywhere */
932 int
933 osigreturn(struct thread *td, struct osigreturn_args *uap)
934 {
935 
936 	return (nosys(td, (struct nosys_args *)uap));
937 }
938 #endif
939 #endif /* COMPAT_43 */
940 
941 /*
942  * Initialize signal state for process 0;
943  * set to ignore signals that are ignored by default.
944  */
945 void
946 siginit(struct proc *p)
947 {
948 	int i;
949 	struct sigacts *ps;
950 
951 	PROC_LOCK(p);
952 	ps = p->p_sigacts;
953 	mtx_lock(&ps->ps_mtx);
954 	for (i = 1; i <= NSIG; i++) {
955 		if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
956 			SIGADDSET(ps->ps_sigignore, i);
957 		}
958 	}
959 	mtx_unlock(&ps->ps_mtx);
960 	PROC_UNLOCK(p);
961 }
962 
963 /*
964  * Reset specified signal to the default disposition.
965  */
966 static void
967 sigdflt(struct sigacts *ps, int sig)
968 {
969 
970 	mtx_assert(&ps->ps_mtx, MA_OWNED);
971 	SIGDELSET(ps->ps_sigcatch, sig);
972 	if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
973 		SIGADDSET(ps->ps_sigignore, sig);
974 	ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
975 	SIGDELSET(ps->ps_siginfo, sig);
976 }
977 
978 /*
979  * Reset signals for an exec of the specified process.
980  */
981 void
982 execsigs(struct proc *p)
983 {
984 	sigset_t osigignore;
985 	struct sigacts *ps;
986 	int sig;
987 	struct thread *td;
988 
989 	/*
990 	 * Reset caught signals.  Held signals remain held
991 	 * through td_sigmask (unless they were caught,
992 	 * and are now ignored by default).
993 	 */
994 	PROC_LOCK_ASSERT(p, MA_OWNED);
995 	ps = p->p_sigacts;
996 	mtx_lock(&ps->ps_mtx);
997 	sig_drop_caught(p);
998 
999 	/*
1000 	 * As CloudABI processes cannot modify signal handlers, fully
1001 	 * reset all signals to their default behavior. Do ignore
1002 	 * SIGPIPE, as it would otherwise be impossible to recover from
1003 	 * writes to broken pipes and sockets.
1004 	 */
1005 	if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
1006 		osigignore = ps->ps_sigignore;
1007 		while (SIGNOTEMPTY(osigignore)) {
1008 			sig = sig_ffs(&osigignore);
1009 			SIGDELSET(osigignore, sig);
1010 			if (sig != SIGPIPE)
1011 				sigdflt(ps, sig);
1012 		}
1013 		SIGADDSET(ps->ps_sigignore, SIGPIPE);
1014 	}
1015 
1016 	/*
1017 	 * Reset stack state to the user stack.
1018 	 * Clear set of signals caught on the signal stack.
1019 	 */
1020 	td = curthread;
1021 	MPASS(td->td_proc == p);
1022 	td->td_sigstk.ss_flags = SS_DISABLE;
1023 	td->td_sigstk.ss_size = 0;
1024 	td->td_sigstk.ss_sp = 0;
1025 	td->td_pflags &= ~TDP_ALTSTACK;
1026 	/*
1027 	 * Reset the "no zombies if child dies" flag, as Solaris does.
1028 	 */
1029 	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1030 	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1031 		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1032 	mtx_unlock(&ps->ps_mtx);
1033 }
1034 
1035 /*
1036  * kern_sigprocmask()
1037  *
1038  *	Manipulate signal mask.
1039  */
1040 int
1041 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1042     int flags)
1043 {
1044 	sigset_t new_block, oset1;
1045 	struct proc *p;
1046 	int error;
1047 
1048 	p = td->td_proc;
1049 	if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1050 		PROC_LOCK_ASSERT(p, MA_OWNED);
1051 	else
1052 		PROC_LOCK(p);
1053 	mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1054 	    ? MA_OWNED : MA_NOTOWNED);
1055 	if (oset != NULL)
1056 		*oset = td->td_sigmask;
1057 
1058 	error = 0;
1059 	if (set != NULL) {
1060 		switch (how) {
1061 		case SIG_BLOCK:
1062 			SIG_CANTMASK(*set);
1063 			oset1 = td->td_sigmask;
1064 			SIGSETOR(td->td_sigmask, *set);
1065 			new_block = td->td_sigmask;
1066 			SIGSETNAND(new_block, oset1);
1067 			break;
1068 		case SIG_UNBLOCK:
1069 			SIGSETNAND(td->td_sigmask, *set);
1070 			signotify(td);
1071 			goto out;
1072 		case SIG_SETMASK:
1073 			SIG_CANTMASK(*set);
1074 			oset1 = td->td_sigmask;
1075 			if (flags & SIGPROCMASK_OLD)
1076 				SIGSETLO(td->td_sigmask, *set);
1077 			else
1078 				td->td_sigmask = *set;
1079 			new_block = td->td_sigmask;
1080 			SIGSETNAND(new_block, oset1);
1081 			signotify(td);
1082 			break;
1083 		default:
1084 			error = EINVAL;
1085 			goto out;
1086 		}
1087 
1088 		/*
1089 		 * The new_block set contains signals that were not previously
1090 		 * blocked, but are blocked now.
1091 		 *
1092 		 * If we block any signal that was not previously blocked
1093 		 * for td and the process has that signal pending, try to schedule
1094 		 * signal delivery to some thread that does not block the
1095 		 * signal, possibly waking it up.
1096 		 */
1097 		if (p->p_numthreads != 1)
1098 			reschedule_signals(p, new_block, flags);
1099 	}
1100 
1101 out:
1102 	if (!(flags & SIGPROCMASK_PROC_LOCKED))
1103 		PROC_UNLOCK(p);
1104 	return (error);
1105 }
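
/*
 * Illustrative sketch (not part of the kernel source): typical userspace use
 * of sigprocmask(2), which reaches kern_sigprocmask() via sys_sigprocmask().
 * Blocking a signal only updates td_sigmask of the calling thread; a pending
 * process-wide signal may be rescheduled to another thread, as described
 * above.
 *
 *	#include <signal.h>
 *
 *	sigset_t set, oset;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oset);	// SIG_BLOCK case above
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &oset, NULL);	// restore the previous mask
 */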
1106 
1107 #ifndef _SYS_SYSPROTO_H_
1108 struct sigprocmask_args {
1109 	int	how;
1110 	const sigset_t *set;
1111 	sigset_t *oset;
1112 };
1113 #endif
1114 int
1115 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1116 {
1117 	sigset_t set, oset;
1118 	sigset_t *setp, *osetp;
1119 	int error;
1120 
1121 	setp = (uap->set != NULL) ? &set : NULL;
1122 	osetp = (uap->oset != NULL) ? &oset : NULL;
1123 	if (setp) {
1124 		error = copyin(uap->set, setp, sizeof(set));
1125 		if (error)
1126 			return (error);
1127 	}
1128 	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1129 	if (osetp && !error) {
1130 		error = copyout(osetp, uap->oset, sizeof(oset));
1131 	}
1132 	return (error);
1133 }
1134 
1135 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1136 #ifndef _SYS_SYSPROTO_H_
1137 struct osigprocmask_args {
1138 	int	how;
1139 	osigset_t mask;
1140 };
1141 #endif
1142 int
1143 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1144 {
1145 	sigset_t set, oset;
1146 	int error;
1147 
1148 	OSIG2SIG(uap->mask, set);
1149 	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1150 	SIG2OSIG(oset, td->td_retval[0]);
1151 	return (error);
1152 }
1153 #endif /* COMPAT_43 */
1154 
1155 int
1156 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1157 {
1158 	ksiginfo_t ksi;
1159 	sigset_t set;
1160 	int error;
1161 
1162 	error = copyin(uap->set, &set, sizeof(set));
1163 	if (error) {
1164 		td->td_retval[0] = error;
1165 		return (0);
1166 	}
1167 
1168 	error = kern_sigtimedwait(td, set, &ksi, NULL);
1169 	if (error) {
1170 		if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1171 			error = ERESTART;
1172 		if (error == ERESTART)
1173 			return (error);
1174 		td->td_retval[0] = error;
1175 		return (0);
1176 	}
1177 
1178 	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1179 	td->td_retval[0] = error;
1180 	return (0);
1181 }
1182 
1183 int
1184 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1185 {
1186 	struct timespec ts;
1187 	struct timespec *timeout;
1188 	sigset_t set;
1189 	ksiginfo_t ksi;
1190 	int error;
1191 
1192 	if (uap->timeout) {
1193 		error = copyin(uap->timeout, &ts, sizeof(ts));
1194 		if (error)
1195 			return (error);
1196 
1197 		timeout = &ts;
1198 	} else
1199 		timeout = NULL;
1200 
1201 	error = copyin(uap->set, &set, sizeof(set));
1202 	if (error)
1203 		return (error);
1204 
1205 	error = kern_sigtimedwait(td, set, &ksi, timeout);
1206 	if (error)
1207 		return (error);
1208 
1209 	if (uap->info)
1210 		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1211 
1212 	if (error == 0)
1213 		td->td_retval[0] = ksi.ksi_signo;
1214 	return (error);
1215 }
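
/*
 * Illustrative sketch (not part of the kernel source): synchronous signal
 * handling with sigtimedwait(2), which is backed by kern_sigtimedwait()
 * below.  The waited-for signals are normally blocked first so they stay
 * queued instead of being delivered asynchronously.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec to = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTERM);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &to) == SIGTERM) {
 *		// info.si_pid identifies the sender; -1/EAGAIN means timeout.
 *	}
 */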
1216 
1217 int
1218 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1219 {
1220 	ksiginfo_t ksi;
1221 	sigset_t set;
1222 	int error;
1223 
1224 	error = copyin(uap->set, &set, sizeof(set));
1225 	if (error)
1226 		return (error);
1227 
1228 	error = kern_sigtimedwait(td, set, &ksi, NULL);
1229 	if (error)
1230 		return (error);
1231 
1232 	if (uap->info)
1233 		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1234 
1235 	if (error == 0)
1236 		td->td_retval[0] = ksi.ksi_signo;
1237 	return (error);
1238 }
1239 
1240 static void
1241 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1242 {
1243 	struct thread *thr;
1244 
1245 	FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1246 		if (thr == td)
1247 			thr->td_si = *si;
1248 		else
1249 			thr->td_si.si_signo = 0;
1250 	}
1251 }
1252 
1253 int
1254 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1255 	struct timespec *timeout)
1256 {
1257 	struct sigacts *ps;
1258 	sigset_t saved_mask, new_block;
1259 	struct proc *p;
1260 	int error, sig, timo, timevalid = 0;
1261 	struct timespec rts, ets, ts;
1262 	struct timeval tv;
1263 	bool traced;
1264 
1265 	p = td->td_proc;
1266 	error = 0;
1267 	ets.tv_sec = 0;
1268 	ets.tv_nsec = 0;
1269 	traced = false;
1270 
1271 	/* Ensure the sigfastblock value is up to date. */
1272 	sigfastblock_fetch(td);
1273 
1274 	if (timeout != NULL) {
1275 		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1276 			timevalid = 1;
1277 			getnanouptime(&rts);
1278 			timespecadd(&rts, timeout, &ets);
1279 		}
1280 	}
1281 	ksiginfo_init(ksi);
1282 	/* Some signals cannot be waited for. */
1283 	SIG_CANTMASK(waitset);
1284 	ps = p->p_sigacts;
1285 	PROC_LOCK(p);
1286 	saved_mask = td->td_sigmask;
1287 	SIGSETNAND(td->td_sigmask, waitset);
1288 	for (;;) {
1289 		mtx_lock(&ps->ps_mtx);
1290 		sig = cursig(td);
1291 		mtx_unlock(&ps->ps_mtx);
1292 		KASSERT(sig >= 0, ("sig %d", sig));
1293 		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1294 			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1295 			    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1296 				error = 0;
1297 				break;
1298 			}
1299 		}
1300 
1301 		if (error != 0)
1302 			break;
1303 
1304 		/*
1305 		 * POSIX says this must be checked after looking for pending
1306 		 * signals.
1307 		 */
1308 		if (timeout != NULL) {
1309 			if (!timevalid) {
1310 				error = EINVAL;
1311 				break;
1312 			}
1313 			getnanouptime(&rts);
1314 			if (timespeccmp(&rts, &ets, >=)) {
1315 				error = EAGAIN;
1316 				break;
1317 			}
1318 			timespecsub(&ets, &rts, &ts);
1319 			TIMESPEC_TO_TIMEVAL(&tv, &ts);
1320 			timo = tvtohz(&tv);
1321 		} else {
1322 			timo = 0;
1323 		}
1324 
1325 		if (traced) {
1326 			error = EINTR;
1327 			break;
1328 		}
1329 
1330 		error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
1331 
1332 		if (timeout != NULL) {
1333 			if (error == ERESTART) {
1334 				/* The timeout cannot be restarted. */
1335 				error = EINTR;
1336 			} else if (error == EAGAIN) {
1337 				/* We will recalculate the timeout ourselves. */
1338 				error = 0;
1339 			}
1340 		}
1341 
1342 		/*
1343 		 * If PTRACE_SCE or PTRACE_SCX were set after
1344 		 * userspace entered the syscall, return spurious
1345 		 * EINTR after the wait is done.  Only do this as a last
1346 		 * resort after rechecking for possible queued signals
1347 		 * and expired timeouts.
1348 		 */
1349 		if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
1350 			traced = true;
1351 	}
1352 
1353 	new_block = saved_mask;
1354 	SIGSETNAND(new_block, td->td_sigmask);
1355 	td->td_sigmask = saved_mask;
1356 	/*
1357 	 * Fewer signals can now be delivered to us; reschedule signal
1358 	 * notification.
1359 	 */
1360 	if (p->p_numthreads != 1)
1361 		reschedule_signals(p, new_block, 0);
1362 
1363 	if (error == 0) {
1364 		SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1365 
1366 		if (ksi->ksi_code == SI_TIMER)
1367 			itimer_accept(p, ksi->ksi_timerid, ksi);
1368 
1369 #ifdef KTRACE
1370 		if (KTRPOINT(td, KTR_PSIG)) {
1371 			sig_t action;
1372 
1373 			mtx_lock(&ps->ps_mtx);
1374 			action = ps->ps_sigact[_SIG_IDX(sig)];
1375 			mtx_unlock(&ps->ps_mtx);
1376 			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1377 		}
1378 #endif
1379 		if (sig == SIGKILL) {
1380 			proc_td_siginfo_capture(td, &ksi->ksi_info);
1381 			sigexit(td, sig);
1382 		}
1383 	}
1384 	PROC_UNLOCK(p);
1385 	return (error);
1386 }
1387 
1388 #ifndef _SYS_SYSPROTO_H_
1389 struct sigpending_args {
1390 	sigset_t	*set;
1391 };
1392 #endif
1393 int
1394 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1395 {
1396 	struct proc *p = td->td_proc;
1397 	sigset_t pending;
1398 
1399 	PROC_LOCK(p);
1400 	pending = p->p_sigqueue.sq_signals;
1401 	SIGSETOR(pending, td->td_sigqueue.sq_signals);
1402 	PROC_UNLOCK(p);
1403 	return (copyout(&pending, uap->set, sizeof(sigset_t)));
1404 }
1405 
1406 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1407 #ifndef _SYS_SYSPROTO_H_
1408 struct osigpending_args {
1409 	int	dummy;
1410 };
1411 #endif
1412 int
1413 osigpending(struct thread *td, struct osigpending_args *uap)
1414 {
1415 	struct proc *p = td->td_proc;
1416 	sigset_t pending;
1417 
1418 	PROC_LOCK(p);
1419 	pending = p->p_sigqueue.sq_signals;
1420 	SIGSETOR(pending, td->td_sigqueue.sq_signals);
1421 	PROC_UNLOCK(p);
1422 	SIG2OSIG(pending, td->td_retval[0]);
1423 	return (0);
1424 }
1425 #endif /* COMPAT_43 */
1426 
1427 #if defined(COMPAT_43)
1428 /*
1429  * Generalized interface signal handler, 4.3-compatible.
1430  */
1431 #ifndef _SYS_SYSPROTO_H_
1432 struct osigvec_args {
1433 	int	signum;
1434 	struct	sigvec *nsv;
1435 	struct	sigvec *osv;
1436 };
1437 #endif
1438 /* ARGSUSED */
1439 int
1440 osigvec(struct thread *td, struct osigvec_args *uap)
1441 {
1442 	struct sigvec vec;
1443 	struct sigaction nsa, osa;
1444 	struct sigaction *nsap, *osap;
1445 	int error;
1446 
1447 	if (uap->signum <= 0 || uap->signum >= ONSIG)
1448 		return (EINVAL);
1449 	nsap = (uap->nsv != NULL) ? &nsa : NULL;
1450 	osap = (uap->osv != NULL) ? &osa : NULL;
1451 	if (nsap) {
1452 		error = copyin(uap->nsv, &vec, sizeof(vec));
1453 		if (error)
1454 			return (error);
1455 		nsap->sa_handler = vec.sv_handler;
1456 		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1457 		nsap->sa_flags = vec.sv_flags;
1458 		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
1459 	}
1460 	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1461 	if (osap && !error) {
1462 		vec.sv_handler = osap->sa_handler;
1463 		SIG2OSIG(osap->sa_mask, vec.sv_mask);
1464 		vec.sv_flags = osap->sa_flags;
1465 		vec.sv_flags &= ~SA_NOCLDWAIT;
1466 		vec.sv_flags ^= SA_RESTART;
1467 		error = copyout(&vec, uap->osv, sizeof(vec));
1468 	}
1469 	return (error);
1470 }
1471 
1472 #ifndef _SYS_SYSPROTO_H_
1473 struct osigblock_args {
1474 	int	mask;
1475 };
1476 #endif
1477 int
1478 osigblock(struct thread *td, struct osigblock_args *uap)
1479 {
1480 	sigset_t set, oset;
1481 
1482 	OSIG2SIG(uap->mask, set);
1483 	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1484 	SIG2OSIG(oset, td->td_retval[0]);
1485 	return (0);
1486 }
1487 
1488 #ifndef _SYS_SYSPROTO_H_
1489 struct osigsetmask_args {
1490 	int	mask;
1491 };
1492 #endif
1493 int
1494 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1495 {
1496 	sigset_t set, oset;
1497 
1498 	OSIG2SIG(uap->mask, set);
1499 	kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1500 	SIG2OSIG(oset, td->td_retval[0]);
1501 	return (0);
1502 }
1503 #endif /* COMPAT_43 */
1504 
1505 /*
1506  * Suspend calling thread until signal, providing mask to be set in the
1507  * meantime.
1508  */
1509 #ifndef _SYS_SYSPROTO_H_
1510 struct sigsuspend_args {
1511 	const sigset_t *sigmask;
1512 };
1513 #endif
1514 /* ARGSUSED */
1515 int
1516 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1517 {
1518 	sigset_t mask;
1519 	int error;
1520 
1521 	error = copyin(uap->sigmask, &mask, sizeof(mask));
1522 	if (error)
1523 		return (error);
1524 	return (kern_sigsuspend(td, mask));
1525 }
1526 
1527 int
1528 kern_sigsuspend(struct thread *td, sigset_t mask)
1529 {
1530 	struct proc *p = td->td_proc;
1531 	int has_sig, sig;
1532 
1533 	/* Ensure the sigfastblock value is up to date. */
1534 	sigfastblock_fetch(td);
1535 
1536 	/*
1537 	 * When returning from sigsuspend, we want
1538 	 * the old mask to be restored after the
1539 	 * signal handler has finished.  Thus, we
1540 	 * save it here and mark the sigacts structure
1541 	 * to indicate this.
1542 	 */
1543 	PROC_LOCK(p);
1544 	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1545 	    SIGPROCMASK_PROC_LOCKED);
1546 	td->td_pflags |= TDP_OLDMASK;
1547 
1548 	/*
1549 	 * Process signals now.  Otherwise, we can get a spurious wakeup
1550 	 * when a signal is entered into the process queue but delivered to
1551 	 * another thread, while sigsuspend should return only on signal
1552 	 * delivery to this thread.
1553 	 */
1554 	(p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1555 	for (has_sig = 0; !has_sig;) {
1556 		while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1557 			0) == 0)
1558 			/* void */;
1559 		thread_suspend_check(0);
1560 		mtx_lock(&p->p_sigacts->ps_mtx);
1561 		while ((sig = cursig(td)) != 0) {
1562 			KASSERT(sig >= 0, ("sig %d", sig));
1563 			has_sig += postsig(sig);
1564 		}
1565 		mtx_unlock(&p->p_sigacts->ps_mtx);
1566 
1567 		/*
1568 		 * If PTRACE_SCE or PTRACE_SCX were set after
1569 		 * userspace entered the syscall, return spurious
1570 		 * EINTR.
1571 		 */
1572 		if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
1573 			has_sig += 1;
1574 	}
1575 	PROC_UNLOCK(p);
1576 	td->td_errno = EINTR;
1577 	td->td_pflags |= TDP_NERRNO;
1578 	return (EJUSTRETURN);
1579 }
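
/*
 * Illustrative sketch (not part of the kernel source): the classic race-free
 * wait that sigsuspend(2) exists for.  The signal is kept blocked while the
 * predicate is tested, and sigsuspend() atomically installs the temporary
 * mask and sleeps, so a signal delivered between the test and the sleep is
 * not lost.  "got_sigusr1" is an assumed volatile sig_atomic_t flag set by
 * the handler.
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	while (!got_sigusr1)
 *		sigsuspend(&waitmask);		// returns -1 with errno EINTR
 *	sigprocmask(SIG_SETMASK, &waitmask, NULL);
 */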
1580 
1581 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1582 /*
1583  * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
1584  * convention: libc stub passes mask, not pointer, to save a copyin.
1585  */
1586 #ifndef _SYS_SYSPROTO_H_
1587 struct osigsuspend_args {
1588 	osigset_t mask;
1589 };
1590 #endif
1591 /* ARGSUSED */
1592 int
1593 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1594 {
1595 	sigset_t mask;
1596 
1597 	OSIG2SIG(uap->mask, mask);
1598 	return (kern_sigsuspend(td, mask));
1599 }
1600 #endif /* COMPAT_43 */
1601 
1602 #if defined(COMPAT_43)
1603 #ifndef _SYS_SYSPROTO_H_
1604 struct osigstack_args {
1605 	struct	sigstack *nss;
1606 	struct	sigstack *oss;
1607 };
1608 #endif
1609 /* ARGSUSED */
1610 int
1611 osigstack(struct thread *td, struct osigstack_args *uap)
1612 {
1613 	struct sigstack nss, oss;
1614 	int error = 0;
1615 
1616 	if (uap->nss != NULL) {
1617 		error = copyin(uap->nss, &nss, sizeof(nss));
1618 		if (error)
1619 			return (error);
1620 	}
1621 	oss.ss_sp = td->td_sigstk.ss_sp;
1622 	oss.ss_onstack = sigonstack(cpu_getstack(td));
1623 	if (uap->nss != NULL) {
1624 		td->td_sigstk.ss_sp = nss.ss_sp;
1625 		td->td_sigstk.ss_size = 0;
1626 		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1627 		td->td_pflags |= TDP_ALTSTACK;
1628 	}
1629 	if (uap->oss != NULL)
1630 		error = copyout(&oss, uap->oss, sizeof(oss));
1631 
1632 	return (error);
1633 }
1634 #endif /* COMPAT_43 */
1635 
1636 #ifndef _SYS_SYSPROTO_H_
1637 struct sigaltstack_args {
1638 	stack_t	*ss;
1639 	stack_t	*oss;
1640 };
1641 #endif
1642 /* ARGSUSED */
1643 int
1644 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1645 {
1646 	stack_t ss, oss;
1647 	int error;
1648 
1649 	if (uap->ss != NULL) {
1650 		error = copyin(uap->ss, &ss, sizeof(ss));
1651 		if (error)
1652 			return (error);
1653 	}
1654 	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1655 	    (uap->oss != NULL) ? &oss : NULL);
1656 	if (error)
1657 		return (error);
1658 	if (uap->oss != NULL)
1659 		error = copyout(&oss, uap->oss, sizeof(stack_t));
1660 	return (error);
1661 }
1662 
1663 int
1664 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1665 {
1666 	struct proc *p = td->td_proc;
1667 	int oonstack;
1668 
1669 	oonstack = sigonstack(cpu_getstack(td));
1670 
1671 	if (oss != NULL) {
1672 		*oss = td->td_sigstk;
1673 		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1674 		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1675 	}
1676 
1677 	if (ss != NULL) {
1678 		if (oonstack)
1679 			return (EPERM);
1680 		if ((ss->ss_flags & ~SS_DISABLE) != 0)
1681 			return (EINVAL);
1682 		if (!(ss->ss_flags & SS_DISABLE)) {
1683 			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1684 				return (ENOMEM);
1685 
1686 			td->td_sigstk = *ss;
1687 			td->td_pflags |= TDP_ALTSTACK;
1688 		} else {
1689 			td->td_pflags &= ~TDP_ALTSTACK;
1690 		}
1691 	}
1692 	return (0);
1693 }
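
/*
 * Illustrative sketch (not part of the kernel source): configuring an
 * alternate signal stack with sigaltstack(2) so a SIGSEGV handler can run
 * even when the normal stack is exhausted.  The handler must be installed
 * with SA_ONSTACK for the ps_sigonstack logic above to route it there.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss;
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);		// handled by kern_sigaltstack()
 */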
1694 
1695 struct killpg1_ctx {
1696 	struct thread *td;
1697 	ksiginfo_t *ksi;
1698 	int sig;
1699 	bool sent;
1700 	bool found;
1701 	int ret;
1702 };
1703 
1704 static void
1705 killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
1706 {
1707 	int err;
1708 
1709 	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1710 	    (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
1711 		return;
1712 	PROC_LOCK(p);
1713 	err = p_cansignal(arg->td, p, arg->sig);
1714 	if (err == 0 && arg->sig != 0)
1715 		pksignal(p, arg->sig, arg->ksi);
1716 	PROC_UNLOCK(p);
1717 	if (err != ESRCH)
1718 		arg->found = true;
1719 	if (err == 0)
1720 		arg->sent = true;
1721 	else if (arg->ret == 0 && err != ESRCH && err != EPERM)
1722 		arg->ret = err;
1723 }
1724 
1725 /*
1726  * Common code for kill process group/broadcast kill.
1727  * td is the calling thread.
1728  */
1729 static int
1730 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1731 {
1732 	struct proc *p;
1733 	struct pgrp *pgrp;
1734 	struct killpg1_ctx arg;
1735 
1736 	arg.td = td;
1737 	arg.ksi = ksi;
1738 	arg.sig = sig;
1739 	arg.sent = false;
1740 	arg.found = false;
1741 	arg.ret = 0;
1742 	if (all) {
1743 		/*
1744 		 * broadcast
1745 		 */
1746 		sx_slock(&allproc_lock);
1747 		FOREACH_PROC_IN_SYSTEM(p) {
1748 			killpg1_sendsig(p, true, &arg);
1749 		}
1750 		sx_sunlock(&allproc_lock);
1751 	} else {
1752 		sx_slock(&proctree_lock);
1753 		if (pgid == 0) {
1754 			/*
1755 			 * zero pgid means send to my process group.
1756 			 */
1757 			pgrp = td->td_proc->p_pgrp;
1758 			PGRP_LOCK(pgrp);
1759 		} else {
1760 			pgrp = pgfind(pgid);
1761 			if (pgrp == NULL) {
1762 				sx_sunlock(&proctree_lock);
1763 				return (ESRCH);
1764 			}
1765 		}
1766 		sx_sunlock(&proctree_lock);
1767 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1768 			killpg1_sendsig(p, false, &arg);
1769 		}
1770 		PGRP_UNLOCK(pgrp);
1771 	}
1772 	MPASS(arg.ret != 0 || arg.found || !arg.sent);
1773 	if (arg.ret == 0 && !arg.sent)
1774 		arg.ret = arg.found ? EPERM : ESRCH;
1775 	return (arg.ret);
1776 }
1777 
1778 #ifndef _SYS_SYSPROTO_H_
1779 struct kill_args {
1780 	int	pid;
1781 	int	signum;
1782 };
1783 #endif
1784 /* ARGSUSED */
1785 int
1786 sys_kill(struct thread *td, struct kill_args *uap)
1787 {
1788 
1789 	return (kern_kill(td, uap->pid, uap->signum));
1790 }
1791 
1792 int
1793 kern_kill(struct thread *td, pid_t pid, int signum)
1794 {
1795 	ksiginfo_t ksi;
1796 	struct proc *p;
1797 	int error;
1798 
1799 	/*
1800 	 * A process in capability mode can send signals only to itself.
1801 	 * The main rationale behind this is that abort(3) is implemented as
1802 	 * kill(getpid(), SIGABRT).
1803 	 */
1804 	if (IN_CAPABILITY_MODE(td) && pid != td->td_proc->p_pid)
1805 		return (ECAPMODE);
1806 
1807 	AUDIT_ARG_SIGNUM(signum);
1808 	AUDIT_ARG_PID(pid);
1809 	if ((u_int)signum > _SIG_MAXSIG)
1810 		return (EINVAL);
1811 
1812 	ksiginfo_init(&ksi);
1813 	ksi.ksi_signo = signum;
1814 	ksi.ksi_code = SI_USER;
1815 	ksi.ksi_pid = td->td_proc->p_pid;
1816 	ksi.ksi_uid = td->td_ucred->cr_ruid;
1817 
1818 	if (pid > 0) {
1819 		/* kill single process */
1820 		if ((p = pfind_any(pid)) == NULL)
1821 			return (ESRCH);
1822 		AUDIT_ARG_PROCESS(p);
1823 		error = p_cansignal(td, p, signum);
1824 		if (error == 0 && signum)
1825 			pksignal(p, signum, &ksi);
1826 		PROC_UNLOCK(p);
1827 		return (error);
1828 	}
1829 	switch (pid) {
1830 	case -1:		/* broadcast signal */
1831 		return (killpg1(td, signum, 0, 1, &ksi));
1832 	case 0:			/* signal own process group */
1833 		return (killpg1(td, signum, 0, 0, &ksi));
1834 	default:		/* negative explicit process group */
1835 		return (killpg1(td, signum, -pid, 0, &ksi));
1836 	}
1837 	/* NOTREACHED */
1838 }
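
/*
 * Illustrative sketch (not part of the kernel source): how the pid argument
 * of kill(2) maps onto the cases handled above; the pids are examples only.
 *
 *	kill(1234, SIGTERM);	// pid > 0: signal that single process
 *	kill(0, SIGTERM);	// signal the caller's own process group
 *	kill(-1234, SIGTERM);	// signal process group 1234
 *	kill(-1, SIGTERM);	// broadcast to all signalable processes
 */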
1839 
1840 int
1841 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1842 {
1843 	struct proc *p;
1844 	int error;
1845 
1846 	AUDIT_ARG_SIGNUM(uap->signum);
1847 	AUDIT_ARG_FD(uap->fd);
1848 	if ((u_int)uap->signum > _SIG_MAXSIG)
1849 		return (EINVAL);
1850 
1851 	error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1852 	if (error)
1853 		return (error);
1854 	AUDIT_ARG_PROCESS(p);
1855 	error = p_cansignal(td, p, uap->signum);
1856 	if (error == 0 && uap->signum)
1857 		kern_psignal(p, uap->signum);
1858 	PROC_UNLOCK(p);
1859 	return (error);
1860 }
1861 
1862 #if defined(COMPAT_43)
1863 #ifndef _SYS_SYSPROTO_H_
1864 struct okillpg_args {
1865 	int	pgid;
1866 	int	signum;
1867 };
1868 #endif
1869 /* ARGSUSED */
1870 int
1871 okillpg(struct thread *td, struct okillpg_args *uap)
1872 {
1873 	ksiginfo_t ksi;
1874 
1875 	AUDIT_ARG_SIGNUM(uap->signum);
1876 	AUDIT_ARG_PID(uap->pgid);
1877 	if ((u_int)uap->signum > _SIG_MAXSIG)
1878 		return (EINVAL);
1879 
1880 	ksiginfo_init(&ksi);
1881 	ksi.ksi_signo = uap->signum;
1882 	ksi.ksi_code = SI_USER;
1883 	ksi.ksi_pid = td->td_proc->p_pid;
1884 	ksi.ksi_uid = td->td_ucred->cr_ruid;
1885 	return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1886 }
1887 #endif /* COMPAT_43 */
1888 
1889 #ifndef _SYS_SYSPROTO_H_
1890 struct sigqueue_args {
1891 	pid_t pid;
1892 	int signum;
1893 	/* union sigval */ void *value;
1894 };
1895 #endif
1896 int
1897 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
1898 {
1899 	union sigval sv;
1900 
1901 	sv.sival_ptr = uap->value;
1902 
1903 	return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
1904 }
1905 
1906 int
1907 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
1908 {
1909 	ksiginfo_t ksi;
1910 	struct proc *p;
1911 	int error;
1912 
1913 	if ((u_int)signum > _SIG_MAXSIG)
1914 		return (EINVAL);
1915 
1916 	/*
1917 	 * The specification says sigqueue can only send a signal to
1918 	 * a single process.
1919 	 */
1920 	if (pid <= 0)
1921 		return (EINVAL);
1922 
1923 	if ((p = pfind_any(pid)) == NULL)
1924 		return (ESRCH);
1925 	error = p_cansignal(td, p, signum);
1926 	if (error == 0 && signum != 0) {
1927 		ksiginfo_init(&ksi);
1928 		ksi.ksi_flags = KSI_SIGQ;
1929 		ksi.ksi_signo = signum;
1930 		ksi.ksi_code = SI_QUEUE;
1931 		ksi.ksi_pid = td->td_proc->p_pid;
1932 		ksi.ksi_uid = td->td_ucred->cr_ruid;
1933 		ksi.ksi_value = *value;
1934 		error = pksignal(p, ksi.ksi_signo, &ksi);
1935 	}
1936 	PROC_UNLOCK(p);
1937 	return (error);
1938 }
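
/*
 * Illustrative sketch (not part of the kernel source): queueing a realtime
 * signal with an attached value from userspace via sigqueue(2).  The value
 * arrives in the receiver's siginfo_t as si_value when its handler was
 * installed with SA_SIGINFO.  "target_pid" is an assumed pid.
 *
 *	#include <signal.h>
 *
 *	union sigval sv;
 *	sv.sival_int = 42;
 *	sigqueue(target_pid, SIGRTMIN, sv);	// handled by kern_sigqueue()
 */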
1939 
1940 /*
1941  * Send a signal to a process group.
1942  */
1943 void
1944 gsignal(int pgid, int sig, ksiginfo_t *ksi)
1945 {
1946 	struct pgrp *pgrp;
1947 
1948 	if (pgid != 0) {
1949 		sx_slock(&proctree_lock);
1950 		pgrp = pgfind(pgid);
1951 		sx_sunlock(&proctree_lock);
1952 		if (pgrp != NULL) {
1953 			pgsignal(pgrp, sig, 0, ksi);
1954 			PGRP_UNLOCK(pgrp);
1955 		}
1956 	}
1957 }
1958 
1959 /*
1960  * Send a signal to a process group.  If checkctty is 1,
1961  * limit to members which have a controlling terminal.
1962  */
1963 void
1964 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
1965 {
1966 	struct proc *p;
1967 
1968 	if (pgrp) {
1969 		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1970 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1971 			PROC_LOCK(p);
1972 			if (p->p_state == PRS_NORMAL &&
1973 			    (checkctty == 0 || p->p_flag & P_CONTROLT))
1974 				pksignal(p, sig, ksi);
1975 			PROC_UNLOCK(p);
1976 		}
1977 	}
1978 }
1979 
1980 /*
1981  * Recalculate the signal mask and reset the signal disposition after
1982  * the usermode frame for delivery is formed.  Should be called after the
1983  * machine-specific routine, because sysent->sv_sendsig() needs the correct
1984  * ps_siginfo and signal mask.
1985  */
1986 static void
1987 postsig_done(int sig, struct thread *td, struct sigacts *ps)
1988 {
1989 	sigset_t mask;
1990 
1991 	mtx_assert(&ps->ps_mtx, MA_OWNED);
1992 	td->td_ru.ru_nsignals++;
1993 	mask = ps->ps_catchmask[_SIG_IDX(sig)];
1994 	if (!SIGISMEMBER(ps->ps_signodefer, sig))
1995 		SIGADDSET(mask, sig);
1996 	kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
1997 	    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
1998 	if (SIGISMEMBER(ps->ps_sigreset, sig))
1999 		sigdflt(ps, sig);
2000 }
2001 
2002 /*
2003  * Send a signal caused by a trap to the current thread.  If it will be
2004  * caught immediately, deliver it with correct code.  Otherwise, post it
2005  * normally.
2006  */
2007 void
2008 trapsignal(struct thread *td, ksiginfo_t *ksi)
2009 {
2010 	struct sigacts *ps;
2011 	struct proc *p;
2012 	sigset_t sigmask;
2013 	int code, sig;
2014 
2015 	p = td->td_proc;
2016 	sig = ksi->ksi_signo;
2017 	code = ksi->ksi_code;
2018 	KASSERT(_SIG_VALID(sig), ("invalid signal"));
2019 
2020 	sigfastblock_fetch(td);
2021 	PROC_LOCK(p);
2022 	ps = p->p_sigacts;
2023 	mtx_lock(&ps->ps_mtx);
2024 	sigmask = td->td_sigmask;
2025 	if (td->td_sigblock_val != 0)
2026 		SIGSETOR(sigmask, fastblock_mask);
2027 	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
2028 	    !SIGISMEMBER(sigmask, sig)) {
2029 #ifdef KTRACE
2030 		if (KTRPOINT(curthread, KTR_PSIG))
2031 			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
2032 			    &td->td_sigmask, code);
2033 #endif
2034 		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
2035 				ksi, &td->td_sigmask);
2036 		postsig_done(sig, td, ps);
2037 		mtx_unlock(&ps->ps_mtx);
2038 	} else {
2039 		/*
2040 		 * Avoid a possible infinite loop if the thread is
2041 		 * masking the signal or the process is ignoring the
2042 		 * signal.
2043 		 */
2044 		if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
2045 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
2046 			SIGDELSET(td->td_sigmask, sig);
2047 			SIGDELSET(ps->ps_sigcatch, sig);
2048 			SIGDELSET(ps->ps_sigignore, sig);
2049 			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2050 			td->td_pflags &= ~TDP_SIGFASTBLOCK;
2051 			td->td_sigblock_val = 0;
2052 		}
2053 		mtx_unlock(&ps->ps_mtx);
2054 		p->p_sig = sig;		/* XXX to verify code */
2055 		tdsendsignal(p, td, sig, ksi);
2056 	}
2057 	PROC_UNLOCK(p);
2058 }
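/*
 * Illustrative sketch: machine-dependent trap handlers are the usual
 * callers of trapsignal().  A typical (hypothetical) fault path fills in
 * the ksiginfo before the call, roughly:
 *
 *	ksiginfo_t ksi;
 *
 *	ksiginfo_init_trap(&ksi);
 *	ksi.ksi_signo = SIGSEGV;
 *	ksi.ksi_code = SEGV_MAPERR;
 *	ksi.ksi_addr = (void *)faulting_va;	(hypothetical address)
 *	trapsignal(td, &ksi);
 *
 * so that a catching process receives accurate siginfo for the fault.
 */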
2059 
2060 static struct thread *
2061 sigtd(struct proc *p, int sig, bool fast_sigblock)
2062 {
2063 	struct thread *td, *signal_td;
2064 
2065 	PROC_LOCK_ASSERT(p, MA_OWNED);
2066 	MPASS(!fast_sigblock || p == curproc);
2067 
2068 	/*
2069 	 * Check if current thread can handle the signal without
2070 	 * switching context to another thread.
2071 	 */
2072 	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) &&
2073 	    (!fast_sigblock || curthread->td_sigblock_val == 0))
2074 		return (curthread);
2075 	signal_td = NULL;
2076 	FOREACH_THREAD_IN_PROC(p, td) {
2077 		if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock ||
2078 		    td != curthread || td->td_sigblock_val == 0)) {
2079 			signal_td = td;
2080 			break;
2081 		}
2082 	}
2083 	if (signal_td == NULL)
2084 		signal_td = FIRST_THREAD_IN_PROC(p);
2085 	return (signal_td);
2086 }
2087 
2088 /*
2089  * Send the signal to the process.  If the signal has an action, the action
2090  * is usually performed by the target process rather than the caller; we add
2091  * the signal to the set of pending signals for the process.
2092  *
2093  * Exceptions:
2094  *   o When a stop signal is sent to a sleeping process that takes the
2095  *     default action, the process is stopped without awakening it.
2096  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
2097  *     regardless of the signal action (eg, blocked or ignored).
2098  *
2099  * Other ignored signals are discarded immediately.
2100  *
2101  * NB: This function may be entered from the debugger via the "kill" DDB
2102  * command.  There is little that can be done to mitigate the possibly messy
2103  * side effects of this unwise possibility.
2104  */
2105 void
2106 kern_psignal(struct proc *p, int sig)
2107 {
2108 	ksiginfo_t ksi;
2109 
2110 	ksiginfo_init(&ksi);
2111 	ksi.ksi_signo = sig;
2112 	ksi.ksi_code = SI_KERNEL;
2113 	(void) tdsendsignal(p, NULL, sig, &ksi);
2114 }
2115 
2116 int
2117 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2118 {
2119 
2120 	return (tdsendsignal(p, NULL, sig, ksi));
2121 }
2122 
2123 /* Utility function for finding a thread to send signal event to. */
2124 /* Utility function for finding a thread to send a signal event to. */
2125 int
2126 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
2127 	struct thread *td;
2128 
2129 	if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2130 		td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2131 		if (td == NULL)
2132 			return (ESRCH);
2133 		*ttd = td;
2134 	} else {
2135 		*ttd = NULL;
2136 		PROC_LOCK(p);
2137 	}
2138 	return (0);
2139 }
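/*
 * Illustrative sketch (userland side, for reference): a SIGEV_THREAD_ID
 * notification as resolved above is typically requested through
 * timer_create(2), roughly:
 *
 *	struct sigevent sev;
 *
 *	sev.sigev_notify = SIGEV_THREAD_ID;
 *	sev.sigev_signo = SIGALRM;
 *	sev.sigev_notify_thread_id = tid;	(hypothetical thread id)
 *	timer_create(CLOCK_MONOTONIC, &sev, &timerid);
 *
 * in which case the expiration signal is directed at the named thread
 * rather than at the process as a whole.
 */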
2140 
2141 void
2142 tdsignal(struct thread *td, int sig)
2143 {
2144 	ksiginfo_t ksi;
2145 
2146 	ksiginfo_init(&ksi);
2147 	ksi.ksi_signo = sig;
2148 	ksi.ksi_code = SI_KERNEL;
2149 	(void) tdsendsignal(td->td_proc, td, sig, &ksi);
2150 }
2151 
2152 void
2153 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2154 {
2155 
2156 	(void) tdsendsignal(td->td_proc, td, sig, ksi);
2157 }
2158 
2159 int
2160 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2161 {
2162 	sig_t action;
2163 	sigqueue_t *sigqueue;
2164 	int prop;
2165 	struct sigacts *ps;
2166 	int intrval;
2167 	int ret = 0;
2168 	int wakeup_swapper;
2169 
2170 	MPASS(td == NULL || p == td->td_proc);
2171 	PROC_LOCK_ASSERT(p, MA_OWNED);
2172 
2173 	if (!_SIG_VALID(sig))
2174 		panic("%s(): invalid signal %d", __func__, sig);
2175 
2176 	KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2177 
2178 	/*
2179 	 * IEEE Std 1003.1-2001: return success when killing a zombie.
2180 	 */
2181 	if (p->p_state == PRS_ZOMBIE) {
2182 		if (ksi && (ksi->ksi_flags & KSI_INS))
2183 			ksiginfo_tryfree(ksi);
2184 		return (ret);
2185 	}
2186 
2187 	ps = p->p_sigacts;
2188 	KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2189 	prop = sigprop(sig);
2190 
2191 	if (td == NULL) {
2192 		td = sigtd(p, sig, false);
2193 		sigqueue = &p->p_sigqueue;
2194 	} else
2195 		sigqueue = &td->td_sigqueue;
2196 
2197 	SDT_PROBE3(proc, , , signal__send, td, p, sig);
2198 
2199 	/*
2200 	 * If the signal is being ignored,
2201 	 * then we forget about it immediately.
2202 	 * (Note: we don't set SIGCONT in ps_sigignore,
2203 	 * and if it is set to SIG_IGN,
2204 	 * action will be SIG_DFL here.)
2205 	 */
2206 	mtx_lock(&ps->ps_mtx);
2207 	if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2208 		SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2209 
2210 		mtx_unlock(&ps->ps_mtx);
2211 		if (ksi && (ksi->ksi_flags & KSI_INS))
2212 			ksiginfo_tryfree(ksi);
2213 		return (ret);
2214 	}
2215 	if (SIGISMEMBER(td->td_sigmask, sig))
2216 		action = SIG_HOLD;
2217 	else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2218 		action = SIG_CATCH;
2219 	else
2220 		action = SIG_DFL;
2221 	if (SIGISMEMBER(ps->ps_sigintr, sig))
2222 		intrval = EINTR;
2223 	else
2224 		intrval = ERESTART;
2225 	mtx_unlock(&ps->ps_mtx);
2226 
2227 	if (prop & SIGPROP_CONT)
2228 		sigqueue_delete_stopmask_proc(p);
2229 	else if (prop & SIGPROP_STOP) {
2230 		/*
2231 		 * If sending a tty stop signal to a member of an orphaned
2232 		 * process group, discard the signal here if the action
2233 		 * is default; don't stop the process below if sleeping,
2234 		 * and don't clear any pending SIGCONT.
2235 		 */
2236 		if ((prop & SIGPROP_TTYSTOP) != 0 &&
2237 		    (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2238 		    action == SIG_DFL) {
2239 			if (ksi && (ksi->ksi_flags & KSI_INS))
2240 				ksiginfo_tryfree(ksi);
2241 			return (ret);
2242 		}
2243 		sigqueue_delete_proc(p, SIGCONT);
2244 		if (p->p_flag & P_CONTINUED) {
2245 			p->p_flag &= ~P_CONTINUED;
2246 			PROC_LOCK(p->p_pptr);
2247 			sigqueue_take(p->p_ksi);
2248 			PROC_UNLOCK(p->p_pptr);
2249 		}
2250 	}
2251 
2252 	ret = sigqueue_add(sigqueue, sig, ksi);
2253 	if (ret != 0)
2254 		return (ret);
2255 	signotify(td);
2256 	/*
2257 	 * Defer further processing for signals which are held,
2258 	 * except that stopped processes must be continued by SIGCONT.
2259 	 */
2260 	if (action == SIG_HOLD &&
2261 	    !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2262 		return (ret);
2263 
2264 	wakeup_swapper = 0;
2265 
2266 	/*
2267 	 * Some signals have a process-wide effect and a per-thread
2268 	 * component.  Most processing occurs when the process next
2269 	 * tries to cross the user boundary; however, there are some
2270 	 * times when processing needs to be done immediately, such as
2271 	 * waking up threads so that they can cross the user boundary.
2272 	 * We try to do the per-process part here.
2273 	 */
2274 	if (P_SHOULDSTOP(p)) {
2275 		KASSERT(!(p->p_flag & P_WEXIT),
2276 		    ("signal to stopped but exiting process"));
2277 		if (sig == SIGKILL) {
2278 			/*
2279 			 * If traced process is already stopped,
2280 			 * then no further action is necessary.
2281 			 */
2282 			if (p->p_flag & P_TRACED)
2283 				goto out;
2284 			/*
2285 			 * SIGKILL sets process running.
2286 			 * It will die elsewhere.
2287 			 * All threads must be restarted.
2288 			 */
2289 			p->p_flag &= ~P_STOPPED_SIG;
2290 			goto runfast;
2291 		}
2292 
2293 		if (prop & SIGPROP_CONT) {
2294 			/*
2295 			 * If traced process is already stopped,
2296 			 * then no further action is necessary.
2297 			 */
2298 			if (p->p_flag & P_TRACED)
2299 				goto out;
2300 			/*
2301 			 * If SIGCONT is default (or ignored), we continue the
2302 			 * process but don't leave the signal in sigqueue as
2303 			 * it has no further action.  If SIGCONT is held, we
2304 			 * continue the process and leave the signal in
2305 			 * sigqueue.  If the process catches SIGCONT, let it
2306 			 * handle the signal itself.  If it isn't waiting on
2307 			 * an event, it goes back to run state.
2308 			 * Otherwise, process goes back to sleep state.
2309 			 */
2310 			p->p_flag &= ~P_STOPPED_SIG;
2311 			PROC_SLOCK(p);
2312 			if (p->p_numthreads == p->p_suspcount) {
2313 				PROC_SUNLOCK(p);
2314 				p->p_flag |= P_CONTINUED;
2315 				p->p_xsig = SIGCONT;
2316 				PROC_LOCK(p->p_pptr);
2317 				childproc_continued(p);
2318 				PROC_UNLOCK(p->p_pptr);
2319 				PROC_SLOCK(p);
2320 			}
2321 			if (action == SIG_DFL) {
2322 				thread_unsuspend(p);
2323 				PROC_SUNLOCK(p);
2324 				sigqueue_delete(sigqueue, sig);
2325 				goto out_cont;
2326 			}
2327 			if (action == SIG_CATCH) {
2328 				/*
2329 				 * The process wants to catch it so it needs
2330 				 * to run at least one thread, but which one?
2331 				 */
2332 				PROC_SUNLOCK(p);
2333 				goto runfast;
2334 			}
2335 			/*
2336 			 * The signal is not ignored or caught.
2337 			 */
2338 			thread_unsuspend(p);
2339 			PROC_SUNLOCK(p);
2340 			goto out_cont;
2341 		}
2342 
2343 		if (prop & SIGPROP_STOP) {
2344 			/*
2345 			 * If traced process is already stopped,
2346 			 * then no further action is necessary.
2347 			 */
2348 			if (p->p_flag & P_TRACED)
2349 				goto out;
2350 			/*
2351 			 * Already stopped, don't need to stop again
2352 			 * (if we did, the shell could get confused).
2353 			 * Just make sure the signal STOP bit is set.
2354 			 */
2355 			p->p_flag |= P_STOPPED_SIG;
2356 			sigqueue_delete(sigqueue, sig);
2357 			goto out;
2358 		}
2359 
2360 		/*
2361 		 * All other kinds of signals:
2362 		 * If a thread is sleeping interruptibly, simulate a
2363 		 * wakeup so that when it is continued it will be made
2364 		 * runnable and can look at the signal.  However, don't make
2365 		 * the PROCESS runnable, leave it stopped.
2366 		 * It may run a bit until it hits a thread_suspend_check().
2367 		 */
2368 		PROC_SLOCK(p);
2369 		thread_lock(td);
2370 		if (TD_CAN_ABORT(td))
2371 			wakeup_swapper = sleepq_abort(td, intrval);
2372 		else
2373 			thread_unlock(td);
2374 		PROC_SUNLOCK(p);
2375 		goto out;
2376 		/*
2377 		 * Mutexes are short lived. Threads waiting on them will
2378 		 * hit thread_suspend_check() soon.
2379 		 */
2380 	} else if (p->p_state == PRS_NORMAL) {
2381 		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2382 			tdsigwakeup(td, sig, action, intrval);
2383 			goto out;
2384 		}
2385 
2386 		MPASS(action == SIG_DFL);
2387 
2388 		if (prop & SIGPROP_STOP) {
2389 			if (p->p_flag & (P_PPWAIT|P_WEXIT))
2390 				goto out;
2391 			p->p_flag |= P_STOPPED_SIG;
2392 			p->p_xsig = sig;
2393 			PROC_SLOCK(p);
2394 			wakeup_swapper = sig_suspend_threads(td, p, 1);
2395 			if (p->p_numthreads == p->p_suspcount) {
2396 				/*
2397 				 * Only a thread sending a signal to another
2398 				 * process can reach here.  If a thread is
2399 				 * sending a signal to its own process, it does
2400 				 * not suspend itself here, so p_numthreads
2401 				 * can never equal p_suspcount.
2402 				 */
2403 				thread_stopped(p);
2404 				PROC_SUNLOCK(p);
2405 				sigqueue_delete_proc(p, p->p_xsig);
2406 			} else
2407 				PROC_SUNLOCK(p);
2408 			goto out;
2409 		}
2410 	} else {
2411 		/* Not in "NORMAL" state. discard the signal. */
2412 		sigqueue_delete(sigqueue, sig);
2413 		goto out;
2414 	}
2415 
2416 	/*
2417 	 * The process is not stopped so we need to apply the signal to all the
2418 	 * running threads.
2419 	 */
2420 runfast:
2421 	tdsigwakeup(td, sig, action, intrval);
2422 	PROC_SLOCK(p);
2423 	thread_unsuspend(p);
2424 	PROC_SUNLOCK(p);
2425 out_cont:
2426 	itimer_proc_continue(p);
2427 	kqtimer_proc_continue(p);
2428 out:
2429 	/* If we jump here, proc slock should not be owned. */
2430 	PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2431 	if (wakeup_swapper)
2432 		kick_proc0();
2433 
2434 	return (ret);
2435 }
2436 
2437 /*
2438  * The force of a signal has been directed against a single
2439  * thread.  We need to see what we can do about knocking it
2440  * out of any sleep it may be in etc.
2441  */
2442 static void
2443 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2444 {
2445 	struct proc *p = td->td_proc;
2446 	int prop, wakeup_swapper;
2447 
2448 	PROC_LOCK_ASSERT(p, MA_OWNED);
2449 	prop = sigprop(sig);
2450 
2451 	PROC_SLOCK(p);
2452 	thread_lock(td);
2453 	/*
2454 	 * Bring the priority of a thread up if we want it to get
2455 	 * killed in this lifetime.  Be careful to avoid bumping the
2456 	 * priority of the idle thread, since we still allow signalling
2457 	 * kernel processes.
2458 	 */
2459 	if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2460 	    td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2461 		sched_prio(td, PUSER);
2462 	if (TD_ON_SLEEPQ(td)) {
2463 		/*
2464 		 * If thread is sleeping uninterruptibly
2465 		 * we can't interrupt the sleep... the signal will
2466 		 * be noticed when the process returns through
2467 		 * trap() or syscall().
2468 		 */
2469 		if ((td->td_flags & TDF_SINTR) == 0)
2470 			goto out;
2471 		/*
2472 		 * If SIGCONT is default (or ignored) and process is
2473 		 * asleep, we are finished; the process should not
2474 		 * be awakened.
2475 		 */
2476 		if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2477 			thread_unlock(td);
2478 			PROC_SUNLOCK(p);
2479 			sigqueue_delete(&p->p_sigqueue, sig);
2480 			/*
2481 			 * It may be on either list in this state.
2482 			 * Remove from both for now.
2483 			 */
2484 			sigqueue_delete(&td->td_sigqueue, sig);
2485 			return;
2486 		}
2487 
2488 		/*
2489 		 * Don't awaken a sleeping thread for SIGSTOP if the
2490 		 * STOP signal is deferred.
2491 		 */
2492 		if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2493 		    TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2494 			goto out;
2495 
2496 		/*
2497 		 * Give low priority threads a better chance to run.
2498 		 */
2499 		if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2500 			sched_prio(td, PUSER);
2501 
2502 		wakeup_swapper = sleepq_abort(td, intrval);
2503 		PROC_SUNLOCK(p);
2504 		if (wakeup_swapper)
2505 			kick_proc0();
2506 		return;
2507 	}
2508 
2509 	/*
2510 	 * Other states do nothing with the signal immediately,
2511 	 * other than kicking ourselves if we are running.
2512 	 * It will either never be noticed, or noticed very soon.
2513 	 */
2514 #ifdef SMP
2515 	if (TD_IS_RUNNING(td) && td != curthread)
2516 		forward_signal(td);
2517 #endif
2518 
2519 out:
2520 	PROC_SUNLOCK(p);
2521 	thread_unlock(td);
2522 }
2523 
2524 static int
2525 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2526 {
2527 	struct thread *td2;
2528 	int wakeup_swapper;
2529 
2530 	PROC_LOCK_ASSERT(p, MA_OWNED);
2531 	PROC_SLOCK_ASSERT(p, MA_OWNED);
2532 	MPASS(sending || td == curthread);
2533 
2534 	wakeup_swapper = 0;
2535 	FOREACH_THREAD_IN_PROC(p, td2) {
2536 		thread_lock(td2);
2537 		td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
2538 		if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2539 		    (td2->td_flags & TDF_SINTR)) {
2540 			if (td2->td_flags & TDF_SBDRY) {
2541 				/*
2542 				 * Once a thread is asleep with
2543 				 * TDF_SBDRY and without TDF_SERESTART
2544 				 * or TDF_SEINTR set, it should never
2545 				 * become suspended due to this check.
2546 				 */
2547 				KASSERT(!TD_IS_SUSPENDED(td2),
2548 				    ("thread with deferred stops suspended"));
2549 				if (TD_SBDRY_INTR(td2)) {
2550 					wakeup_swapper |= sleepq_abort(td2,
2551 					    TD_SBDRY_ERRNO(td2));
2552 					continue;
2553 				}
2554 			} else if (!TD_IS_SUSPENDED(td2))
2555 				thread_suspend_one(td2);
2556 		} else if (!TD_IS_SUSPENDED(td2)) {
2557 			if (sending || td != td2)
2558 				td2->td_flags |= TDF_ASTPENDING;
2559 #ifdef SMP
2560 			if (TD_IS_RUNNING(td2) && td2 != td)
2561 				forward_signal(td2);
2562 #endif
2563 		}
2564 		thread_unlock(td2);
2565 	}
2566 	return (wakeup_swapper);
2567 }
2568 
2569 /*
2570  * Stop the process for an event deemed interesting to the debugger. If si is
2571  * non-NULL, this is a signal exchange; the new signal requested by the
2572  * debugger will be returned for handling. If si is NULL, this is some other
2573  * type of interesting event. The debugger may request a signal be delivered in
2574  * that case as well, however it will be deferred until it can be handled.
2575  */
2576 int
2577 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2578 {
2579 	struct proc *p = td->td_proc;
2580 	struct thread *td2;
2581 	ksiginfo_t ksi;
2582 
2583 	PROC_LOCK_ASSERT(p, MA_OWNED);
2584 	KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2585 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2586 	    &p->p_mtx.lock_object, "Stopping for traced signal");
2587 
2588 	td->td_xsig = sig;
2589 
2590 	if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2591 		td->td_dbgflags |= TDB_XSIG;
2592 		CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2593 		    td->td_tid, p->p_pid, td->td_dbgflags, sig);
2594 		PROC_SLOCK(p);
2595 		while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2596 			if (P_KILLED(p)) {
2597 				/*
2598 				 * Ensure that, if we've been PT_KILLed, the
2599 				 * exit status reflects that. Another thread
2600 				 * may also be in ptracestop(), having just
2601 				 * received the SIGKILL, but this thread was
2602 				 * unsuspended first.
2603 				 */
2604 				td->td_dbgflags &= ~TDB_XSIG;
2605 				td->td_xsig = SIGKILL;
2606 				p->p_ptevents = 0;
2607 				break;
2608 			}
2609 			if (p->p_flag & P_SINGLE_EXIT &&
2610 			    !(td->td_dbgflags & TDB_EXIT)) {
2611 				/*
2612 				 * Ignore ptrace stops except for thread exit
2613 				 * events when the process exits.
2614 				 */
2615 				td->td_dbgflags &= ~TDB_XSIG;
2616 				PROC_SUNLOCK(p);
2617 				return (0);
2618 			}
2619 
2620 			/*
2621 			 * Make wait(2) work.  Ensure that right after the
2622 			 * attach, the thread which was decided to become the
2623 			 * leader of attach gets reported to the waiter.
2624 			 * Otherwise, just avoid overwriting another thread's
2625 			 * assignment to p_xthread.  If another thread has
2626 			 * already set p_xthread, the current thread will get
2627 			 * a chance to report itself upon the next iteration.
2628 			 */
2629 			if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2630 			    ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2631 			    p->p_xthread == NULL)) {
2632 				p->p_xsig = sig;
2633 				p->p_xthread = td;
2634 
2635 				/*
2636 				 * If we are on sleepqueue already,
2637 				 * let sleepqueue code decide if it
2638 				 * needs to go sleep after attach.
2639 				 */
2640 				if (td->td_wchan == NULL)
2641 					td->td_dbgflags &= ~TDB_FSTP;
2642 
2643 				p->p_flag2 &= ~P2_PTRACE_FSTP;
2644 				p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2645 				sig_suspend_threads(td, p, 0);
2646 			}
2647 			if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2648 				td->td_dbgflags &= ~TDB_STOPATFORK;
2649 			}
2650 stopme:
2651 			thread_suspend_switch(td, p);
2652 			if (p->p_xthread == td)
2653 				p->p_xthread = NULL;
2654 			if (!(p->p_flag & P_TRACED))
2655 				break;
2656 			if (td->td_dbgflags & TDB_SUSPEND) {
2657 				if (p->p_flag & P_SINGLE_EXIT)
2658 					break;
2659 				goto stopme;
2660 			}
2661 		}
2662 		PROC_SUNLOCK(p);
2663 	}
2664 
2665 	if (si != NULL && sig == td->td_xsig) {
2666 		/* Parent wants us to take the original signal unchanged. */
2667 		si->ksi_flags |= KSI_HEAD;
2668 		if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2669 			si->ksi_signo = 0;
2670 	} else if (td->td_xsig != 0) {
2671 		/*
2672 		 * If parent wants us to take a new signal, then it will leave
2673 		 * it in td->td_xsig; otherwise we just look for signals again.
2674 		 */
2675 		ksiginfo_init(&ksi);
2676 		ksi.ksi_signo = td->td_xsig;
2677 		ksi.ksi_flags |= KSI_PTRACE;
2678 		td2 = sigtd(p, td->td_xsig, false);
2679 		tdsendsignal(p, td2, td->td_xsig, &ksi);
2680 		if (td != td2)
2681 			return (0);
2682 	}
2683 
2684 	return (td->td_xsig);
2685 }
2686 
2687 static void
2688 reschedule_signals(struct proc *p, sigset_t block, int flags)
2689 {
2690 	struct sigacts *ps;
2691 	struct thread *td;
2692 	int sig;
2693 	bool fastblk, pslocked;
2694 
2695 	PROC_LOCK_ASSERT(p, MA_OWNED);
2696 	ps = p->p_sigacts;
2697 	pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2698 	mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2699 	if (SIGISEMPTY(p->p_siglist))
2700 		return;
2701 	SIGSETAND(block, p->p_siglist);
2702 	fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2703 	while ((sig = sig_ffs(&block)) != 0) {
2704 		SIGDELSET(block, sig);
2705 		td = sigtd(p, sig, fastblk);
2706 
2707 		/*
2708 		 * If sigtd() selected us even though sigfastblock is
2709 		 * blocking, do not activate the AST or wake us up, to
2710 		 * avoid a loop in the AST handler.
2711 		 */
2712 		if (fastblk && td == curthread)
2713 			continue;
2714 
2715 		signotify(td);
2716 		if (!pslocked)
2717 			mtx_lock(&ps->ps_mtx);
2718 		if (p->p_flag & P_TRACED ||
2719 		    (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2720 		    !SIGISMEMBER(td->td_sigmask, sig))) {
2721 			tdsigwakeup(td, sig, SIG_CATCH,
2722 			    (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2723 			    ERESTART));
2724 		}
2725 		if (!pslocked)
2726 			mtx_unlock(&ps->ps_mtx);
2727 	}
2728 }
2729 
2730 void
2731 tdsigcleanup(struct thread *td)
2732 {
2733 	struct proc *p;
2734 	sigset_t unblocked;
2735 
2736 	p = td->td_proc;
2737 	PROC_LOCK_ASSERT(p, MA_OWNED);
2738 
2739 	sigqueue_flush(&td->td_sigqueue);
2740 	if (p->p_numthreads == 1)
2741 		return;
2742 
2743 	/*
2744 	 * Since we cannot handle signals, notify signal post code
2745 	 * about this by filling the sigmask.
2746 	 *
2747 	 * Also, if needed, wake up thread(s) that do not block the
2748 	 * same signals as the exiting thread, since the thread might
2749 	 * have been selected for delivery and woken up.
2750 	 */
2751 	SIGFILLSET(unblocked);
2752 	SIGSETNAND(unblocked, td->td_sigmask);
2753 	SIGFILLSET(td->td_sigmask);
2754 	reschedule_signals(p, unblocked, 0);
2755 
2756 }
2757 
2758 static int
2759 sigdeferstop_curr_flags(int cflags)
2760 {
2761 
2762 	MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2763 	    (cflags & TDF_SBDRY) != 0);
2764 	return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2765 }
2766 
2767 /*
2768  * Defer the delivery of SIGSTOP for the current thread, according to
2769  * the requested mode.  Returns previous flags, which must be restored
2770  * by sigallowstop().
2771  *
2772  * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2773  * cleared by the current thread, which allow the lock-less read-only
2774  * accesses below.
2775  */
2776 int
2777 sigdeferstop_impl(int mode)
2778 {
2779 	struct thread *td;
2780 	int cflags, nflags;
2781 
2782 	td = curthread;
2783 	cflags = sigdeferstop_curr_flags(td->td_flags);
2784 	switch (mode) {
2785 	case SIGDEFERSTOP_NOP:
2786 		nflags = cflags;
2787 		break;
2788 	case SIGDEFERSTOP_OFF:
2789 		nflags = 0;
2790 		break;
2791 	case SIGDEFERSTOP_SILENT:
2792 		nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
2793 		break;
2794 	case SIGDEFERSTOP_EINTR:
2795 		nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
2796 		break;
2797 	case SIGDEFERSTOP_ERESTART:
2798 		nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
2799 		break;
2800 	default:
2801 		panic("sigdeferstop: invalid mode %x", mode);
2802 		break;
2803 	}
2804 	if (cflags == nflags)
2805 		return (SIGDEFERSTOP_VAL_NCHG);
2806 	thread_lock(td);
2807 	td->td_flags = (td->td_flags & ~cflags) | nflags;
2808 	thread_unlock(td);
2809 	return (cflags);
2810 }
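/*
 * Illustrative sketch: callers normally use the sigdeferstop() and
 * sigallowstop() wrappers (see sys/signalvar.h) to bracket a sleepable
 * operation that must not be interrupted by a stop, roughly:
 *
 *	int stop_prev;
 *
 *	stop_prev = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = sleepable_operation();	(hypothetical helper)
 *	sigallowstop(stop_prev);
 *
 * sigallowstop() must be passed the value returned by the matching
 * sigdeferstop() call.
 */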
2811 
2812 /*
2813  * Restores the STOP handling mode, typically permitting the delivery
2814  * of SIGSTOP for the current thread.  This does not immediately
2815  * suspend if a stop was posted.  Instead, the thread will suspend
2816  * either via ast() or a subsequent interruptible sleep.
2817  */
2818 void
2819 sigallowstop_impl(int prev)
2820 {
2821 	struct thread *td;
2822 	int cflags;
2823 
2824 	KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
2825 	KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
2826 	    ("sigallowstop: incorrect previous mode %x", prev));
2827 	td = curthread;
2828 	cflags = sigdeferstop_curr_flags(td->td_flags);
2829 	if (cflags != prev) {
2830 		thread_lock(td);
2831 		td->td_flags = (td->td_flags & ~cflags) | prev;
2832 		thread_unlock(td);
2833 	}
2834 }
2835 
2836 /*
2837  * If the current process has received a signal (one that should be caught or
2838  * cause termination, or that should interrupt the current syscall), return the signal number.
2839  * Stop signals with default action are processed immediately, then cleared;
2840  * they aren't returned.  This is checked after each entry to the system for
2841  * a syscall or trap (though this can usually be done without calling issignal
2842  * by checking the pending signal masks in cursig.) The normal call
2843  * sequence is
2844  *
2845  *	while (sig = cursig(curthread))
2846  *		postsig(sig);
2847  */
2848 static int
2849 issignal(struct thread *td)
2850 {
2851 	struct proc *p;
2852 	struct sigacts *ps;
2853 	struct sigqueue *queue;
2854 	sigset_t sigpending;
2855 	ksiginfo_t ksi;
2856 	int prop, sig;
2857 
2858 	p = td->td_proc;
2859 	ps = p->p_sigacts;
2860 	mtx_assert(&ps->ps_mtx, MA_OWNED);
2861 	PROC_LOCK_ASSERT(p, MA_OWNED);
2862 	for (;;) {
2863 		sigpending = td->td_sigqueue.sq_signals;
2864 		SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
2865 		SIGSETNAND(sigpending, td->td_sigmask);
2866 
2867 		if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
2868 		    (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2869 			SIG_STOPSIGMASK(sigpending);
2870 		if (SIGISEMPTY(sigpending))	/* no signal to send */
2871 			return (0);
2872 
2873 		/*
2874 		 * Do fast sigblock if requested by usermode.  Since
2875 		 * we do know that there was a signal pending at this
2876 		 * point, set FAST_SIGBLOCK_PEND as an indicator for
2877 		 * usermode to perform a dummy call to
2878 		 * FAST_SIGBLOCK_UNBLOCK, which causes immediate
2879 		 * delivery of the postponed pending signal.
2880 		 */
2881 		if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
2882 			if (td->td_sigblock_val != 0)
2883 				SIGSETNAND(sigpending, fastblock_mask);
2884 			if (SIGISEMPTY(sigpending)) {
2885 				td->td_pflags |= TDP_SIGFASTPENDING;
2886 				return (0);
2887 			}
2888 		}
2889 
2890 		if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
2891 		    (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
2892 		    SIGISMEMBER(sigpending, SIGSTOP)) {
2893 			/*
2894 			 * If debugger just attached, always consume
2895 			 * SIGSTOP from ptrace(PT_ATTACH) first, to
2896 			 * execute the debugger attach ritual in
2897 			 * order.
2898 			 */
2899 			sig = SIGSTOP;
2900 			td->td_dbgflags |= TDB_FSTP;
2901 		} else {
2902 			sig = sig_ffs(&sigpending);
2903 		}
2904 
2905 		/*
2906 		 * We should see pending but ignored signals
2907 		 * only if P_TRACED was on when they were posted.
2908 		 */
2909 		if (SIGISMEMBER(ps->ps_sigignore, sig) &&
2910 		    (p->p_flag & P_TRACED) == 0) {
2911 			sigqueue_delete(&td->td_sigqueue, sig);
2912 			sigqueue_delete(&p->p_sigqueue, sig);
2913 			continue;
2914 		}
2915 		if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
2916 			/*
2917 			 * If traced, always stop.
2918 			 * Remove old signal from queue before the stop.
2919 			 * XXX shrug off debugger, it causes siginfo to
2920 			 * be thrown away.
2921 			 */
2922 			queue = &td->td_sigqueue;
2923 			ksiginfo_init(&ksi);
2924 			if (sigqueue_get(queue, sig, &ksi) == 0) {
2925 				queue = &p->p_sigqueue;
2926 				sigqueue_get(queue, sig, &ksi);
2927 			}
2928 			td->td_si = ksi.ksi_info;
2929 
2930 			mtx_unlock(&ps->ps_mtx);
2931 			sig = ptracestop(td, sig, &ksi);
2932 			mtx_lock(&ps->ps_mtx);
2933 
2934 			td->td_si.si_signo = 0;
2935 
2936 			/*
2937 			 * Keep looking if the debugger discarded or
2938 			 * replaced the signal.
2939 			 */
2940 			if (sig == 0)
2941 				continue;
2942 
2943 			/*
2944 			 * If the signal became masked, re-queue it.
2945 			 */
2946 			if (SIGISMEMBER(td->td_sigmask, sig)) {
2947 				ksi.ksi_flags |= KSI_HEAD;
2948 				sigqueue_add(&p->p_sigqueue, sig, &ksi);
2949 				continue;
2950 			}
2951 
2952 			/*
2953 			 * If the traced bit got turned off, requeue
2954 			 * the signal and go back up to the top to
2955 			 * rescan signals.  This ensures that p_sig*
2956 			 * and p_sigact are consistent.
2957 			 */
2958 			if ((p->p_flag & P_TRACED) == 0) {
2959 				ksi.ksi_flags |= KSI_HEAD;
2960 				sigqueue_add(queue, sig, &ksi);
2961 				continue;
2962 			}
2963 		}
2964 
2965 		prop = sigprop(sig);
2966 
2967 		/*
2968 		 * Decide whether the signal should be returned.
2969 		 * Return the signal's number, or fall through
2970 		 * to clear it from the pending mask.
2971 		 */
2972 		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
2973 		case (intptr_t)SIG_DFL:
2974 			/*
2975 			 * Don't take default actions on system processes.
2976 			 */
2977 			if (p->p_pid <= 1) {
2978 #ifdef DIAGNOSTIC
2979 				/*
2980 				 * Are you sure you want to ignore SIGSEGV
2981 				 * in init? XXX
2982 				 */
2983 				printf("Process (pid %lu) got signal %d\n",
2984 					(u_long)p->p_pid, sig);
2985 #endif
2986 				break;		/* == ignore */
2987 			}
2988 			/*
2989 			 * If there is a pending stop signal to process with
2990 			 * default action, stop here, then clear the signal.
2991 			 * Traced or exiting processes should ignore stops.
2992 			 * Additionally, a member of an orphaned process group
2993 			 * should ignore tty stops.
2994 			 */
2995 			if (prop & SIGPROP_STOP) {
2996 				mtx_unlock(&ps->ps_mtx);
2997 				if ((p->p_flag & (P_TRACED | P_WEXIT |
2998 				    P_SINGLE_EXIT)) != 0 || ((p->p_pgrp->
2999 				    pg_flags & PGRP_ORPHANED) != 0 &&
3000 				    (prop & SIGPROP_TTYSTOP) != 0)) {
3001 					mtx_lock(&ps->ps_mtx);
3002 					break;	/* == ignore */
3003 				}
3004 				if (TD_SBDRY_INTR(td)) {
3005 					KASSERT((td->td_flags & TDF_SBDRY) != 0,
3006 					    ("lost TDF_SBDRY"));
3007 					mtx_lock(&ps->ps_mtx);
3008 					return (-1);
3009 				}
3010 				WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
3011 				    &p->p_mtx.lock_object, "Catching SIGSTOP");
3012 				sigqueue_delete(&td->td_sigqueue, sig);
3013 				sigqueue_delete(&p->p_sigqueue, sig);
3014 				p->p_flag |= P_STOPPED_SIG;
3015 				p->p_xsig = sig;
3016 				PROC_SLOCK(p);
3017 				sig_suspend_threads(td, p, 0);
3018 				thread_suspend_switch(td, p);
3019 				PROC_SUNLOCK(p);
3020 				mtx_lock(&ps->ps_mtx);
3021 				goto next;
3022 			} else if (prop & SIGPROP_IGNORE) {
3023 				/*
3024 				 * Except for SIGCONT, shouldn't get here.
3025 				 * Default action is to ignore; drop it.
3026 				 */
3027 				break;		/* == ignore */
3028 			} else
3029 				return (sig);
3030 			/*NOTREACHED*/
3031 
3032 		case (intptr_t)SIG_IGN:
3033 			/*
3034 			 * Masking above should prevent us ever trying
3035 			 * to take action on an ignored signal other
3036 			 * than SIGCONT, unless the process is traced.
3037 			 */
3038 			if ((prop & SIGPROP_CONT) == 0 &&
3039 			    (p->p_flag & P_TRACED) == 0)
3040 				printf("issignal\n");
3041 			break;		/* == ignore */
3042 
3043 		default:
3044 			/*
3045 			 * This signal has an action, let
3046 			 * postsig() process it.
3047 			 */
3048 			return (sig);
3049 		}
3050 		sigqueue_delete(&td->td_sigqueue, sig);	/* take the signal! */
3051 		sigqueue_delete(&p->p_sigqueue, sig);
3052 next:;
3053 	}
3054 	/* NOTREACHED */
3055 }
3056 
3057 void
3058 thread_stopped(struct proc *p)
3059 {
3060 	int n;
3061 
3062 	PROC_LOCK_ASSERT(p, MA_OWNED);
3063 	PROC_SLOCK_ASSERT(p, MA_OWNED);
3064 	n = p->p_suspcount;
3065 	if (p == curproc)
3066 		n++;
3067 	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3068 		PROC_SUNLOCK(p);
3069 		p->p_flag &= ~P_WAITED;
3070 		PROC_LOCK(p->p_pptr);
3071 		childproc_stopped(p, (p->p_flag & P_TRACED) ?
3072 			CLD_TRAPPED : CLD_STOPPED);
3073 		PROC_UNLOCK(p->p_pptr);
3074 		PROC_SLOCK(p);
3075 	}
3076 }
3077 
3078 /*
3079  * Take the action for the specified signal
3080  * from the current set of pending signals.
3081  */
3082 int
3083 postsig(int sig)
3084 {
3085 	struct thread *td;
3086 	struct proc *p;
3087 	struct sigacts *ps;
3088 	sig_t action;
3089 	ksiginfo_t ksi;
3090 	sigset_t returnmask;
3091 
3092 	KASSERT(sig != 0, ("postsig"));
3093 
3094 	td = curthread;
3095 	p = td->td_proc;
3096 	PROC_LOCK_ASSERT(p, MA_OWNED);
3097 	ps = p->p_sigacts;
3098 	mtx_assert(&ps->ps_mtx, MA_OWNED);
3099 	ksiginfo_init(&ksi);
3100 	if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3101 	    sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3102 		return (0);
3103 	ksi.ksi_signo = sig;
3104 	if (ksi.ksi_code == SI_TIMER)
3105 		itimer_accept(p, ksi.ksi_timerid, &ksi);
3106 	action = ps->ps_sigact[_SIG_IDX(sig)];
3107 #ifdef KTRACE
3108 	if (KTRPOINT(td, KTR_PSIG))
3109 		ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3110 		    &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3111 #endif
3112 
3113 	if (action == SIG_DFL) {
3114 		/*
3115 		 * Default action, where the default is to kill
3116 		 * the process.  (Other cases were ignored above.)
3117 		 */
3118 		mtx_unlock(&ps->ps_mtx);
3119 		proc_td_siginfo_capture(td, &ksi.ksi_info);
3120 		sigexit(td, sig);
3121 		/* NOTREACHED */
3122 	} else {
3123 		/*
3124 		 * If we get here, the signal must be caught.
3125 		 */
3126 		KASSERT(action != SIG_IGN, ("postsig action %p", action));
3127 		KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3128 		    ("postsig action: blocked sig %d", sig));
3129 
3130 		/*
3131 		 * Set the new mask value and also defer further
3132 		 * occurrences of this signal.
3133 		 *
3134 		 * Special case: user has done a sigsuspend.  Here the
3135 		 * current mask is not of interest, but rather the
3136 		 * mask from before the sigsuspend is what we want
3137 		 * restored after the signal processing is completed.
3138 		 */
3139 		if (td->td_pflags & TDP_OLDMASK) {
3140 			returnmask = td->td_oldsigmask;
3141 			td->td_pflags &= ~TDP_OLDMASK;
3142 		} else
3143 			returnmask = td->td_sigmask;
3144 
3145 		if (p->p_sig == sig) {
3146 			p->p_sig = 0;
3147 		}
3148 		(*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3149 		postsig_done(sig, td, ps);
3150 	}
3151 	return (1);
3152 }
3153 
3154 int
3155 sig_ast_checksusp(struct thread *td)
3156 {
3157 	struct proc *p;
3158 	int ret;
3159 
3160 	p = td->td_proc;
3161 	PROC_LOCK_ASSERT(p, MA_OWNED);
3162 
3163 	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
3164 		return (0);
3165 
3166 	ret = thread_suspend_check(1);
3167 	MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3168 	return (ret);
3169 }
3170 
3171 int
3172 sig_ast_needsigchk(struct thread *td)
3173 {
3174 	struct proc *p;
3175 	struct sigacts *ps;
3176 	int ret, sig;
3177 
3178 	p = td->td_proc;
3179 	PROC_LOCK_ASSERT(p, MA_OWNED);
3180 
3181 	if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
3182 		return (0);
3183 
3184 	ps = p->p_sigacts;
3185 	mtx_lock(&ps->ps_mtx);
3186 	sig = cursig(td);
3187 	if (sig == -1) {
3188 		mtx_unlock(&ps->ps_mtx);
3189 		KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3190 		KASSERT(TD_SBDRY_INTR(td),
3191 		    ("lost TDF_SERESTART or TDF_SEINTR"));
3192 		KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3193 		    (TDF_SEINTR | TDF_SERESTART),
3194 		    ("both TDF_SEINTR and TDF_SERESTART"));
3195 		ret = TD_SBDRY_ERRNO(td);
3196 	} else if (sig != 0) {
3197 		ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3198 		mtx_unlock(&ps->ps_mtx);
3199 	} else {
3200 		mtx_unlock(&ps->ps_mtx);
3201 		ret = 0;
3202 	}
3203 
3204 	/*
3205 	 * Do not go to sleep if this thread was the ptrace(2)
3206 	 * attach leader.  cursig() consumed SIGSTOP from PT_ATTACH,
3207 	 * but we usually act on the signal by interrupting sleep, and
3208 	 * should do that here as well.
3209 	 */
3210 	if ((td->td_dbgflags & TDB_FSTP) != 0) {
3211 		if (ret == 0)
3212 			ret = EINTR;
3213 		td->td_dbgflags &= ~TDB_FSTP;
3214 	}
3215 
3216 	return (ret);
3217 }
3218 
3219 int
3220 sig_intr(void)
3221 {
3222 	struct thread *td;
3223 	struct proc *p;
3224 	int ret;
3225 
3226 	td = curthread;
3227 	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
3228 		return (0);
3229 
3230 	p = td->td_proc;
3231 
3232 	PROC_LOCK(p);
3233 	ret = sig_ast_checksusp(td);
3234 	if (ret == 0)
3235 		ret = sig_ast_needsigchk(td);
3236 	PROC_UNLOCK(p);
3237 	return (ret);
3238 }
3239 
3240 void
3241 proc_wkilled(struct proc *p)
3242 {
3243 
3244 	PROC_LOCK_ASSERT(p, MA_OWNED);
3245 	if ((p->p_flag & P_WKILLED) == 0) {
3246 		p->p_flag |= P_WKILLED;
3247 		/*
3248 		 * Notify swapper that there is a process to swap in.
3249 		 * The notification is racy, at worst it would take 10
3250 		 * seconds for the swapper process to notice.
3251 		 */
3252 		if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3253 			wakeup(&proc0);
3254 	}
3255 }
3256 
3257 /*
3258  * Kill the current process for stated reason.
3259  */
3260 void
3261 killproc(struct proc *p, const char *why)
3262 {
3263 
3264 	PROC_LOCK_ASSERT(p, MA_OWNED);
3265 	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3266 	    p->p_comm);
3267 	log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3268 	    p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3269 	    p->p_ucred->cr_uid, why);
3270 	proc_wkilled(p);
3271 	kern_psignal(p, SIGKILL);
3272 }
3273 
3274 /*
3275  * Force the current process to exit with the specified signal, dumping core
3276  * if appropriate.  We bypass the normal tests for masked and caught signals,
3277  * allowing unrecoverable failures to terminate the process without changing
3278  * signal state.  Mark the accounting record with the signal termination.
3279  * If dumping core, save the signal number for the debugger.  Calls exit and
3280  * does not return.
3281  */
3282 void
3283 sigexit(struct thread *td, int sig)
3284 {
3285 	struct proc *p = td->td_proc;
3286 
3287 	PROC_LOCK_ASSERT(p, MA_OWNED);
3288 	p->p_acflag |= AXSIG;
3289 	/*
3290 	 * We must be single-threading to generate a core dump.  This
3291 	 * ensures that the registers in the core file are up-to-date.
3292 	 * Also, the ELF dump handler assumes that the thread list doesn't
3293 	 * change out from under it.
3294 	 *
3295 	 * XXX If another thread attempts to single-thread before us
3296 	 *     (e.g. via fork()), we won't get a dump at all.
3297 	 */
3298 	if ((sigprop(sig) & SIGPROP_CORE) &&
3299 	    thread_single(p, SINGLE_NO_EXIT) == 0) {
3300 		p->p_sig = sig;
3301 		/*
3302 		 * Log signals which would cause core dumps
3303 		 * (Log as LOG_INFO to appease those who don't want
3304 		 * these messages.)
3305 		 * XXX: TODO: in addition to the euid, write out the ruid too.
3306 		 * Note that coredump() drops the proc lock.
3307 		 */
3308 		if (coredump(td) == 0)
3309 			sig |= WCOREFLAG;
3310 		if (kern_logsigexit)
3311 			log(LOG_INFO,
3312 			    "pid %d (%s), jid %d, uid %d: exited on "
3313 			    "signal %d%s\n", p->p_pid, p->p_comm,
3314 			    p->p_ucred->cr_prison->pr_id,
3315 			    td->td_ucred->cr_uid,
3316 			    sig &~ WCOREFLAG,
3317 			    sig & WCOREFLAG ? " (core dumped)" : "");
3318 	} else
3319 		PROC_UNLOCK(p);
3320 	exit1(td, 0, sig);
3321 	/* NOTREACHED */
3322 }
3323 
3324 /*
3325  * Send queued SIGCHLD to parent when child process's state
3326  * is changed.
3327  */
3328 static void
3329 sigparent(struct proc *p, int reason, int status)
3330 {
3331 	PROC_LOCK_ASSERT(p, MA_OWNED);
3332 	PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3333 
3334 	if (p->p_ksi != NULL) {
3335 		p->p_ksi->ksi_signo  = SIGCHLD;
3336 		p->p_ksi->ksi_code   = reason;
3337 		p->p_ksi->ksi_status = status;
3338 		p->p_ksi->ksi_pid    = p->p_pid;
3339 		p->p_ksi->ksi_uid    = p->p_ucred->cr_ruid;
3340 		if (KSI_ONQ(p->p_ksi))
3341 			return;
3342 	}
3343 	pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3344 }
3345 
3346 static void
3347 childproc_jobstate(struct proc *p, int reason, int sig)
3348 {
3349 	struct sigacts *ps;
3350 
3351 	PROC_LOCK_ASSERT(p, MA_OWNED);
3352 	PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3353 
3354 	/*
3355 	 * Wake up the parent sleeping in kern_wait(), and also send
3356 	 * SIGCHLD to the parent, but SIGCHLD does not guarantee
3357 	 * that the parent will wake, because the parent may have
3358 	 * masked the signal.
3359 	 */
3360 	p->p_pptr->p_flag |= P_STATCHILD;
3361 	wakeup(p->p_pptr);
3362 
3363 	ps = p->p_pptr->p_sigacts;
3364 	mtx_lock(&ps->ps_mtx);
3365 	if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3366 		mtx_unlock(&ps->ps_mtx);
3367 		sigparent(p, reason, sig);
3368 	} else
3369 		mtx_unlock(&ps->ps_mtx);
3370 }
3371 
3372 void
3373 childproc_stopped(struct proc *p, int reason)
3374 {
3375 
3376 	childproc_jobstate(p, reason, p->p_xsig);
3377 }
3378 
3379 void
3380 childproc_continued(struct proc *p)
3381 {
3382 	childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3383 }
3384 
3385 void
3386 childproc_exited(struct proc *p)
3387 {
3388 	int reason, status;
3389 
3390 	if (WCOREDUMP(p->p_xsig)) {
3391 		reason = CLD_DUMPED;
3392 		status = WTERMSIG(p->p_xsig);
3393 	} else if (WIFSIGNALED(p->p_xsig)) {
3394 		reason = CLD_KILLED;
3395 		status = WTERMSIG(p->p_xsig);
3396 	} else {
3397 		reason = CLD_EXITED;
3398 		status = p->p_xexit;
3399 	}
3400 	/*
3401 	 * XXX avoid calling wakeup(p->p_pptr), the work is
3402 	 * done in exit1().
3403 	 */
3404 	sigparent(p, reason, status);
3405 }
3406 
3407 #define	MAX_NUM_CORE_FILES 100000
3408 #ifndef NUM_CORE_FILES
3409 #define	NUM_CORE_FILES 5
3410 #endif
3411 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3412 static int num_cores = NUM_CORE_FILES;
3413 
3414 static int
3415 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
3416 {
3417 	int error;
3418 	int new_val;
3419 
3420 	new_val = num_cores;
3421 	error = sysctl_handle_int(oidp, &new_val, 0, req);
3422 	if (error != 0 || req->newptr == NULL)
3423 		return (error);
3424 	if (new_val > MAX_NUM_CORE_FILES)
3425 		new_val = MAX_NUM_CORE_FILES;
3426 	if (new_val < 0)
3427 		new_val = 0;
3428 	num_cores = new_val;
3429 	return (0);
3430 }
3431 SYSCTL_PROC(_debug, OID_AUTO, ncores,
3432     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3433     sysctl_debug_num_cores_check, "I",
3434     "Maximum number of generated process corefiles while using index format");
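/*
 * Illustrative sketch: the limit above can be tuned at run time, e.g.
 *
 *	sysctl debug.ncores=10
 *
 * which lets corefile_open_last() below rotate through up to ten
 * "%I"-indexed core files before reusing the oldest one.
 */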
3435 
3436 #define	GZIP_SUFFIX	".gz"
3437 #define	ZSTD_SUFFIX	".zst"
3438 
3439 int compress_user_cores = 0;
3440 
3441 static int
3442 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3443 {
3444 	int error, val;
3445 
3446 	val = compress_user_cores;
3447 	error = sysctl_handle_int(oidp, &val, 0, req);
3448 	if (error != 0 || req->newptr == NULL)
3449 		return (error);
3450 	if (val != 0 && !compressor_avail(val))
3451 		return (EINVAL);
3452 	compress_user_cores = val;
3453 	return (error);
3454 }
3455 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores,
3456     CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3457     sysctl_compress_user_cores, "I",
3458     "Enable compression of user corefiles ("
3459     __XSTRING(COMPRESS_GZIP) " = gzip, "
3460     __XSTRING(COMPRESS_ZSTD) " = zstd)");
3461 
3462 int compress_user_cores_level = 6;
3463 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3464     &compress_user_cores_level, 0,
3465     "Corefile compression level");
3466 
3467 /*
3468  * Protect the access to corefilename[] by allproc_lock.
3469  */
3470 #define	corefilename_lock	allproc_lock
3471 
3472 static char corefilename[MAXPATHLEN] = {"%N.core"};
3473 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3474 
3475 static int
3476 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3477 {
3478 	int error;
3479 
3480 	sx_xlock(&corefilename_lock);
3481 	error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3482 	    req);
3483 	sx_xunlock(&corefilename_lock);
3484 
3485 	return (error);
3486 }
3487 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3488     CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3489     "Process corefile name format string");
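/*
 * Illustrative sketch: with, for example,
 *
 *	sysctl kern.corefile=/var/coredumps/%U/%N.%P.core
 *
 * a dump of pid 1234 named "myapp" running as uid 1001 (hypothetical
 * values) is written to "/var/coredumps/1001/myapp.1234.core", as
 * expanded by corefile_open() below.
 */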
3490 
3491 static void
3492 vnode_close_locked(struct thread *td, struct vnode *vp)
3493 {
3494 
3495 	VOP_UNLOCK(vp);
3496 	vn_close(vp, FWRITE, td->td_ucred, td);
3497 }
3498 
3499 /*
3500  * If the core format has a %I in it, then we need to check
3501  * for existing corefiles before defining a name.
3502  * To do this we iterate over 0..ncores-1 to find a
3503  * non-existing core file name to use.  If all core file names
3504  * are already in use, we choose the oldest one.
3505  */
3506 static int
3507 corefile_open_last(struct thread *td, char *name, int indexpos,
3508     int indexlen, int ncores, struct vnode **vpp)
3509 {
3510 	struct vnode *oldvp, *nextvp, *vp;
3511 	struct vattr vattr;
3512 	struct nameidata nd;
3513 	int error, i, flags, oflags, cmode;
3514 	char ch;
3515 	struct timespec lasttime;
3516 
3517 	nextvp = oldvp = NULL;
3518 	cmode = S_IRUSR | S_IWUSR;
3519 	oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3520 	    (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3521 
3522 	for (i = 0; i < ncores; i++) {
3523 		flags = O_CREAT | FWRITE | O_NOFOLLOW;
3524 
3525 		ch = name[indexpos + indexlen];
3526 		(void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3527 		    i);
3528 		name[indexpos + indexlen] = ch;
3529 
3530 		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3531 		error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3532 		    NULL);
3533 		if (error != 0)
3534 			break;
3535 
3536 		vp = nd.ni_vp;
3537 		NDFREE(&nd, NDF_ONLY_PNBUF);
3538 		if ((flags & O_CREAT) == O_CREAT) {
3539 			nextvp = vp;
3540 			break;
3541 		}
3542 
3543 		error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3544 		if (error != 0) {
3545 			vnode_close_locked(td, vp);
3546 			break;
3547 		}
3548 
3549 		if (oldvp == NULL ||
3550 		    lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3551 		    (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3552 		    lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3553 			if (oldvp != NULL)
3554 				vn_close(oldvp, FWRITE, td->td_ucred, td);
3555 			oldvp = vp;
3556 			VOP_UNLOCK(oldvp);
3557 			lasttime = vattr.va_mtime;
3558 		} else {
3559 			vnode_close_locked(td, vp);
3560 		}
3561 	}
3562 
3563 	if (oldvp != NULL) {
3564 		if (nextvp == NULL) {
3565 			if ((td->td_proc->p_flag & P_SUGID) != 0) {
3566 				error = EFAULT;
3567 				vn_close(oldvp, FWRITE, td->td_ucred, td);
3568 			} else {
3569 				nextvp = oldvp;
3570 				error = vn_lock(nextvp, LK_EXCLUSIVE);
3571 				if (error != 0) {
3572 					vn_close(nextvp, FWRITE, td->td_ucred,
3573 					    td);
3574 					nextvp = NULL;
3575 				}
3576 			}
3577 		} else {
3578 			vn_close(oldvp, FWRITE, td->td_ucred, td);
3579 		}
3580 	}
3581 	if (error != 0) {
3582 		if (nextvp != NULL)
3583 			vnode_close_locked(td, oldvp);
3584 	} else {
3585 		*vpp = nextvp;
3586 	}
3587 
3588 	return (error);
3589 }
3590 
3591 /*
3592  * corefile_open(comm, uid, pid, td, compress, signum, vpp, namep)
3593  * Expand the name described in corefilename, using name, uid, pid, and
3594  * signum, and open/create the core file.
3595  * corefilename is a printf-like string, with these format specifiers:
3596  *	%H	hostname		%N	name of process ("name")
3597  *	%I	autoincrementing index	%P	process id (pid)
3598  *	%S	signal number (signum)	%U	user id (uid)
3599  * For example, "%N.core" is the default; they can be disabled completely
3600  * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3601  * This is controlled by the sysctl variable kern.corefile (see above).
3602  */
3603 static int
3604 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3605     int compress, int signum, struct vnode **vpp, char **namep)
3606 {
3607 	struct sbuf sb;
3608 	struct nameidata nd;
3609 	const char *format;
3610 	char *hostname, *name;
3611 	int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3612 
3613 	hostname = NULL;
3614 	format = corefilename;
3615 	name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3616 	indexlen = 0;
3617 	indexpos = -1;
3618 	ncores = num_cores;
3619 	(void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3620 	sx_slock(&corefilename_lock);
3621 	for (i = 0; format[i] != '\0'; i++) {
3622 		switch (format[i]) {
3623 		case '%':	/* Format character */
3624 			i++;
3625 			switch (format[i]) {
3626 			case '%':
3627 				sbuf_putc(&sb, '%');
3628 				break;
3629 			case 'H':	/* hostname */
3630 				if (hostname == NULL) {
3631 					hostname = malloc(MAXHOSTNAMELEN,
3632 					    M_TEMP, M_WAITOK);
3633 				}
3634 				getcredhostname(td->td_ucred, hostname,
3635 				    MAXHOSTNAMELEN);
3636 				sbuf_printf(&sb, "%s", hostname);
3637 				break;
3638 			case 'I':	/* autoincrementing index */
3639 				if (indexpos != -1) {
3640 					sbuf_printf(&sb, "%%I");
3641 					break;
3642 				}
3643 
3644 				indexpos = sbuf_len(&sb);
3645 				sbuf_printf(&sb, "%u", ncores - 1);
3646 				indexlen = sbuf_len(&sb) - indexpos;
3647 				break;
3648 			case 'N':	/* process name */
3649 				sbuf_printf(&sb, "%s", comm);
3650 				break;
3651 			case 'P':	/* process id */
3652 				sbuf_printf(&sb, "%u", pid);
3653 				break;
3654 			case 'S':	/* signal number */
3655 				sbuf_printf(&sb, "%i", signum);
3656 				break;
3657 			case 'U':	/* user id */
3658 				sbuf_printf(&sb, "%u", uid);
3659 				break;
3660 			default:
3661 				log(LOG_ERR,
3662 				    "Unknown format character %c in "
3663 				    "corename `%s'\n", format[i], format);
3664 				break;
3665 			}
3666 			break;
3667 		default:
3668 			sbuf_putc(&sb, format[i]);
3669 			break;
3670 		}
3671 	}
3672 	sx_sunlock(&corefilename_lock);
3673 	free(hostname, M_TEMP);
3674 	if (compress == COMPRESS_GZIP)
3675 		sbuf_printf(&sb, GZIP_SUFFIX);
3676 	else if (compress == COMPRESS_ZSTD)
3677 		sbuf_printf(&sb, ZSTD_SUFFIX);
3678 	if (sbuf_error(&sb) != 0) {
3679 		log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3680 		    "long\n", (long)pid, comm, (u_long)uid);
3681 		sbuf_delete(&sb);
3682 		free(name, M_TEMP);
3683 		return (ENOMEM);
3684 	}
3685 	sbuf_finish(&sb);
3686 	sbuf_delete(&sb);
3687 
3688 	if (indexpos != -1) {
3689 		error = corefile_open_last(td, name, indexpos, indexlen, ncores,
3690 		    vpp);
3691 		if (error != 0) {
3692 			log(LOG_ERR,
3693 			    "pid %d (%s), uid (%u):  Path `%s' failed "
3694 			    "on initial open test, error = %d\n",
3695 			    pid, comm, uid, name, error);
3696 		}
3697 	} else {
3698 		cmode = S_IRUSR | S_IWUSR;
3699 		oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3700 		    (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3701 		flags = O_CREAT | FWRITE | O_NOFOLLOW;
3702 		if ((td->td_proc->p_flag & P_SUGID) != 0)
3703 			flags |= O_EXCL;
3704 
3705 		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3706 		error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3707 		    NULL);
3708 		if (error == 0) {
3709 			*vpp = nd.ni_vp;
3710 			NDFREE(&nd, NDF_ONLY_PNBUF);
3711 		}
3712 	}
3713 
3714 	if (error != 0) {
3715 #ifdef AUDIT
3716 		audit_proc_coredump(td, name, error);
3717 #endif
3718 		free(name, M_TEMP);
3719 		return (error);
3720 	}
3721 	*namep = name;
3722 	return (0);
3723 }
3724 
3725 /*
3726  * Dump a process' core.  The main routine does some
3727  * policy checking, and creates the name of the coredump;
3728  * then it passes on a vnode and a size limit to the process-specific
3729  * coredump routine if there is one; if there _is not_ one, it returns
3730  * ENOSYS; otherwise it returns the error from the process-specific routine.
3731  */
3732 
3733 static int
3734 coredump(struct thread *td)
3735 {
3736 	struct proc *p = td->td_proc;
3737 	struct ucred *cred = td->td_ucred;
3738 	struct vnode *vp;
3739 	struct flock lf;
3740 	struct vattr vattr;
3741 	size_t fullpathsize;
3742 	int error, error1, locked;
3743 	char *name;			/* name of corefile */
3744 	void *rl_cookie;
3745 	off_t limit;
3746 	char *fullpath, *freepath = NULL;
3747 	struct sbuf *sb;
3748 
3749 	PROC_LOCK_ASSERT(p, MA_OWNED);
3750 	MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3751 
3752 	if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
3753 	    (p->p_flag2 & P2_NOTRACE) != 0) {
3754 		PROC_UNLOCK(p);
3755 		return (EFAULT);
3756 	}
3757 
3758 	/*
3759 	 * Note that the bulk of limit checking is done after
3760 	 * the corefile is created.  The exception is if the limit
3761 	 * for corefiles is 0, in which case we don't bother
3762 	 * creating the corefile at all.  This layout means that
3763 	 * a corefile is truncated instead of not being created,
3764 	 * if it is larger than the limit.
3765 	 */
3766 	limit = (off_t)lim_cur(td, RLIMIT_CORE);
3767 	if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3768 		PROC_UNLOCK(p);
3769 		return (EFBIG);
3770 	}
3771 	PROC_UNLOCK(p);
3772 
3773 	error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
3774 	    compress_user_cores, p->p_sig, &vp, &name);
3775 	if (error != 0)
3776 		return (error);
3777 
3778 	/*
3779 	 * Don't dump to non-regular files or files with links.
3780 	 * Do not dump into system files. Effective user must own the corefile.
3781 	 */
3782 	if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
3783 	    vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
3784 	    vattr.va_uid != cred->cr_uid) {
3785 		VOP_UNLOCK(vp);
3786 		error = EFAULT;
3787 		goto out;
3788 	}
3789 
3790 	VOP_UNLOCK(vp);
3791 
3792 	/* Postpone other writers, including core dumps of other processes. */
3793 	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
3794 
3795 	lf.l_whence = SEEK_SET;
3796 	lf.l_start = 0;
3797 	lf.l_len = 0;
3798 	lf.l_type = F_WRLCK;
3799 	locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3800 
3801 	VATTR_NULL(&vattr);
3802 	vattr.va_size = 0;
3803 	if (set_core_nodump_flag)
3804 		vattr.va_flags = UF_NODUMP;
3805 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3806 	VOP_SETATTR(vp, &vattr, cred);
3807 	VOP_UNLOCK(vp);
3808 	PROC_LOCK(p);
3809 	p->p_acflag |= ACORE;
3810 	PROC_UNLOCK(p);
3811 
3812 	if (p->p_sysent->sv_coredump != NULL) {
3813 		error = p->p_sysent->sv_coredump(td, vp, limit, 0);
3814 	} else {
3815 		error = ENOSYS;
3816 	}
3817 
3818 	if (locked) {
3819 		lf.l_type = F_UNLCK;
3820 		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3821 	}
3822 	vn_rangelock_unlock(vp, rl_cookie);
3823 
3824 	/*
3825 	 * Notify the userland helper that a process triggered a core dump.
3826 	 * This allows the helper to run an automated debugging session.
3827 	 */
3828 	if (error != 0 || coredump_devctl == 0)
3829 		goto out;
3830 	sb = sbuf_new_auto();
3831 	if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0)
3832 		goto out2;
3833 	sbuf_printf(sb, "comm=\"");
3834 	devctl_safe_quote_sb(sb, fullpath);
3835 	free(freepath, M_TEMP);
3836 	sbuf_printf(sb, "\" core=\"");
3837 
3838 	/*
3839 	 * We can't look up the core file vp directly.  When we're replacing a core,
3840 	 * and at other random times, we flush the name cache, so the lookup would fail.
3841 	 * Instead, if the path of the core is relative, prepend the current directory.
3842 	 */
3843 	if (name[0] != '/') {
3844 		fullpathsize = MAXPATHLEN;
3845 		freepath = malloc(fullpathsize, M_TEMP, M_WAITOK);
3846 		if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) {
3847 			free(freepath, M_TEMP);
3848 			goto out2;
3849 		}
3850 		devctl_safe_quote_sb(sb, fullpath);
3851 		free(freepath, M_TEMP);
3852 		sbuf_putc(sb, '/');
3853 	}
3854 	devctl_safe_quote_sb(sb, name);
3855 	sbuf_printf(sb, "\"");
3856 	if (sbuf_finish(sb) == 0)
3857 		devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
3858 out2:
3859 	sbuf_delete(sb);
3860 out:
3861 	error1 = vn_close(vp, FWRITE, cred, td);
3862 	if (error == 0)
3863 		error = error1;
3864 #ifdef AUDIT
3865 	audit_proc_coredump(td, name, error);
3866 #endif
3867 	free(name, M_TEMP);
3868 	return (error);
3869 }
3870 
3871 /*
3872  * Nonexistent system call -- signal the process (which may want to handle
3873  * it).  Flag an error in case it won't see the signal (blocked or ignored).
3874  */
3875 #ifndef _SYS_SYSPROTO_H_
3876 struct nosys_args {
3877 	int	dummy;
3878 };
3879 #endif
3880 /* ARGSUSED */
3881 int
3882 nosys(struct thread *td, struct nosys_args *args)
3883 {
3884 	struct proc *p;
3885 
3886 	p = td->td_proc;
3887 
3888 	PROC_LOCK(p);
3889 	tdsignal(td, SIGSYS);
3890 	PROC_UNLOCK(p);
3891 	if (kern_lognosys == 1 || kern_lognosys == 3) {
3892 		uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3893 		    td->td_sa.code);
3894 	}
3895 	if (kern_lognosys == 2 || kern_lognosys == 3 ||
3896 	    (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
3897 		printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3898 		    td->td_sa.code);
3899 	}
3900 	return (ENOSYS);
3901 }
3902 
3903 /*
3904  * Send a SIGIO or SIGURG signal to a process or process group using stored
3905  * credentials rather than those of the current process.
3906  */
3907 void
3908 pgsigio(struct sigio **sigiop, int sig, int checkctty)
3909 {
3910 	ksiginfo_t ksi;
3911 	struct sigio *sigio;
3912 
3913 	ksiginfo_init(&ksi);
3914 	ksi.ksi_signo = sig;
3915 	ksi.ksi_code = SI_KERNEL;
3916 
3917 	SIGIO_LOCK();
3918 	sigio = *sigiop;
3919 	if (sigio == NULL) {
3920 		SIGIO_UNLOCK();
3921 		return;
3922 	}
3923 	if (sigio->sio_pgid > 0) {
3924 		PROC_LOCK(sigio->sio_proc);
3925 		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
3926 			kern_psignal(sigio->sio_proc, sig);
3927 		PROC_UNLOCK(sigio->sio_proc);
3928 	} else if (sigio->sio_pgid < 0) {
3929 		struct proc *p;
3930 
3931 		PGRP_LOCK(sigio->sio_pgrp);
3932 		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
3933 			PROC_LOCK(p);
3934 			if (p->p_state == PRS_NORMAL &&
3935 			    CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
3936 			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
3937 				kern_psignal(p, sig);
3938 			PROC_UNLOCK(p);
3939 		}
3940 		PGRP_UNLOCK(sigio->sio_pgrp);
3941 	}
3942 	SIGIO_UNLOCK();
3943 }
3944 
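/*
 * Attach an EVFILT_SIGNAL knote to the current process.  Signal knotes
 * share the process knote list; filt_signal() below distinguishes the
 * two kinds of hints.
 */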
3945 static int
3946 filt_sigattach(struct knote *kn)
3947 {
3948 	struct proc *p = curproc;
3949 
3950 	kn->kn_ptr.p_proc = p;
3951 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
3952 
3953 	knlist_add(p->p_klist, kn, 0);
3954 
3955 	return (0);
3956 }
3957 
3958 static void
3959 filt_sigdetach(struct knote *kn)
3960 {
3961 	struct proc *p = kn->kn_ptr.p_proc;
3962 
3963 	knlist_remove(p->p_klist, kn, 0);
3964 }
3965 
3966 /*
3967  * Signal knotes are shared with proc knotes, so we apply a mask to
3968  * the hint in order to differentiate signal hints from process hints.
3969  * This could be avoided by using a signal-specific knote list, but it
3970  * probably isn't worth the trouble.
3971  */
3972 static int
3973 filt_signal(struct knote *kn, long hint)
3974 {
3975 
3976 	if (hint & NOTE_SIGNAL) {
3977 		hint &= ~NOTE_SIGNAL;
3978 
3979 		if (kn->kn_id == hint)
3980 			kn->kn_data++;
3981 	}
3982 	return (kn->kn_data != 0);
3983 }
3984 
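/*
 * A sigacts structure is reference-counted so that it may be shared
 * between processes (e.g. via rfork(RFSIGSHARE)); sigacts_alloc()
 * returns a new instance holding a single reference.
 */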
3985 struct sigacts *
3986 sigacts_alloc(void)
3987 {
3988 	struct sigacts *ps;
3989 
3990 	ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
3991 	refcount_init(&ps->ps_refcnt, 1);
3992 	mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
3993 	return (ps);
3994 }
3995 
3996 void
3997 sigacts_free(struct sigacts *ps)
3998 {
3999 
4000 	if (refcount_release(&ps->ps_refcnt) == 0)
4001 		return;
4002 	mtx_destroy(&ps->ps_mtx);
4003 	free(ps, M_SUBPROC);
4004 }
4005 
4006 struct sigacts *
4007 sigacts_hold(struct sigacts *ps)
4008 {
4009 
4010 	refcount_acquire(&ps->ps_refcnt);
4011 	return (ps);
4012 }
4013 
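/*
 * Copy signal dispositions from src to dest.  Only the fields laid out
 * before ps_refcnt are copied, so the destination keeps its own
 * reference count.
 */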
4014 void
4015 sigacts_copy(struct sigacts *dest, struct sigacts *src)
4016 {
4017 
4018 	KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
4019 	mtx_lock(&src->ps_mtx);
4020 	bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
4021 	mtx_unlock(&src->ps_mtx);
4022 }
4023 
4024 int
4025 sigacts_shared(struct sigacts *ps)
4026 {
4027 
4028 	return (ps->ps_refcnt > 1);
4029 }
4030 
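/*
 * Reset all caught signals to their default disposition, and discard
 * queued instances of those whose default action is to ignore them.
 * Called with the proc lock and ps_mtx held.
 */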
4031 void
4032 sig_drop_caught(struct proc *p)
4033 {
4034 	int sig;
4035 	struct sigacts *ps;
4036 
4037 	ps = p->p_sigacts;
4038 	PROC_LOCK_ASSERT(p, MA_OWNED);
4039 	mtx_assert(&ps->ps_mtx, MA_OWNED);
4040 	while (SIGNOTEMPTY(ps->ps_sigcatch)) {
4041 		sig = sig_ffs(&ps->ps_sigcatch);
4042 		sigdflt(ps, sig);
4043 		if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
4044 			sigqueue_delete_proc(p, sig);
4045 	}
4046 }
4047 
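/*
 * Access to the thread's sigfastblock word failed.  Drop the
 * fast-block state so that the thread can continue to make syscalls,
 * and optionally post a synchronous SIGSEGV for the faulting access.
 */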
4048 static void
4049 sigfastblock_failed(struct thread *td, bool sendsig, bool write)
4050 {
4051 	ksiginfo_t ksi;
4052 
4053 	/*
4054 	 * Prevent further fetches and SIGSEGVs, allowing the thread to
4055 	 * issue syscalls despite the corruption.
4056 	 */
4057 	sigfastblock_clear(td);
4058 
4059 	if (!sendsig)
4060 		return;
4061 	ksiginfo_init_trap(&ksi);
4062 	ksi.ksi_signo = SIGSEGV;
4063 	ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
4064 	ksi.ksi_addr = td->td_sigblock_ptr;
4065 	trapsignal(td, &ksi);
4066 }
4067 
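/*
 * Fetch the userspace sigfastblock word and cache its value, sans the
 * flag bits, in td_sigblock_val.  Returns false if the word could not
 * be read, in which case sigfastblock_failed() has been invoked.
 */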
4068 static bool
4069 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
4070 {
4071 	uint32_t res;
4072 
4073 	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4074 		return (true);
4075 	if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
4076 		sigfastblock_failed(td, sendsig, false);
4077 		return (false);
4078 	}
4079 	*valp = res;
4080 	td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
4081 	return (true);
4082 }
4083 
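/*
 * Optionally have the process re-evaluate delivery of signals masked by
 * this thread, then mark the thread for a signal check on the next
 * return to userspace.
 */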
4084 static void
4085 sigfastblock_resched(struct thread *td, bool resched)
4086 {
4087 	struct proc *p;
4088 
4089 	if (resched) {
4090 		p = td->td_proc;
4091 		PROC_LOCK(p);
4092 		reschedule_signals(p, td->td_sigmask, 0);
4093 		PROC_UNLOCK(p);
4094 	}
4095 	thread_lock(td);
4096 	td->td_flags |= TDF_ASTPENDING | TDF_NEEDSIGCHK;
4097 	thread_unlock(td);
4098 }
4099 
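/*
 * sigfastblock() system call.  Userspace registers a 32-bit word
 * (SIGFASTBLOCK_SETPTR) which, while non-zero, defers delivery of
 * asynchronous signals to the thread without requiring a syscall per
 * block/unblock.  If a signal arrives while the word is set, the kernel
 * turns on the SIGFASTBLOCK_PEND flag in it, and userspace is expected
 * to call sigfastblock(SIGFASTBLOCK_UNBLOCK) to get the deferred
 * signals delivered.  SIGFASTBLOCK_UNSETPTR deregisters the word.
 */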
4100 int
4101 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
4102 {
4103 	struct proc *p;
4104 	int error, res;
4105 	uint32_t oldval;
4106 
4107 	error = 0;
4108 	p = td->td_proc;
4109 	switch (uap->cmd) {
4110 	case SIGFASTBLOCK_SETPTR:
4111 		if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
4112 			error = EBUSY;
4113 			break;
4114 		}
4115 		if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
4116 			error = EINVAL;
4117 			break;
4118 		}
4119 		td->td_pflags |= TDP_SIGFASTBLOCK;
4120 		td->td_sigblock_ptr = uap->ptr;
4121 		break;
4122 
4123 	case SIGFASTBLOCK_UNBLOCK:
4124 		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4125 			error = EINVAL;
4126 			break;
4127 		}
4128 
4129 		for (;;) {
4130 			res = casueword32(td->td_sigblock_ptr,
4131 			    SIGFASTBLOCK_PEND, &oldval, 0);
4132 			if (res == -1) {
4133 				error = EFAULT;
4134 				sigfastblock_failed(td, false, true);
4135 				break;
4136 			}
4137 			if (res == 0)
4138 				break;
4139 			MPASS(res == 1);
4140 			if (oldval != SIGFASTBLOCK_PEND) {
4141 				error = EBUSY;
4142 				break;
4143 			}
4144 			error = thread_check_susp(td, false);
4145 			if (error != 0)
4146 				break;
4147 		}
4148 		if (error != 0)
4149 			break;
4150 
4151 		/*
4152 		 * td_sigblock_val is cleared below, but not on a
4153 		 * syscall exit.  The end effect is that a single
4154 		 * interruptible sleep, while the user sigblock word is
4155 		 * set, might return EINTR or ERESTART to usermode
4156 		 * without delivering the signal.  All further sleeps,
4157 		 * until userspace clears the word and does
4158 		 * sigfastblock(UNBLOCK), observe the current word and
4159 		 * are no longer interrupted.  This is a slight
4160 		 * non-conformance; the alternative would be to read the
4161 		 * sigblock word on each syscall entry.
4162 		 */
4163 		td->td_sigblock_val = 0;
4164 
4165 		/*
4166 		 * Rely on the normal AST mechanism to deliver pending
4167 		 * signals to the current thread, but notify other
4168 		 * threads about the fake unblock.
4169 		 */
4170 		sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);
4171 
4172 		break;
4173 
4174 	case SIGFASTBLOCK_UNSETPTR:
4175 		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4176 			error = EINVAL;
4177 			break;
4178 		}
4179 		if (!sigfastblock_fetch_sig(td, false, &oldval)) {
4180 			error = EFAULT;
4181 			break;
4182 		}
4183 		if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
4184 			error = EBUSY;
4185 			break;
4186 		}
4187 		sigfastblock_clear(td);
4188 		break;
4189 
4190 	default:
4191 		error = EINVAL;
4192 		break;
4193 	}
4194 	return (error);
4195 }
4196 
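/*
 * Discard the thread's sigfastblock state.  If an unblock notification
 * was pending or signals are queued for the thread, force them to be
 * rescheduled so that nothing is lost.
 */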
4197 void
4198 sigfastblock_clear(struct thread *td)
4199 {
4200 	bool resched;
4201 
4202 	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4203 		return;
4204 	td->td_sigblock_val = 0;
4205 	resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
4206 	    SIGPENDING(td);
4207 	td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
4208 	sigfastblock_resched(td, resched);
4209 }
4210 
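/*
 * Re-read the userspace sigfastblock word, raising SIGSEGV if the
 * access fails.
 */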
4211 void
4212 sigfastblock_fetch(struct thread *td)
4213 {
4214 	uint32_t val;
4215 
4216 	(void)sigfastblock_fetch_sig(td, true, &val);
4217 }
4218 
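/*
 * If a deferred-signal notification is outstanding (TDP_SIGFASTPENDING),
 * publish it to userspace by setting SIGFASTBLOCK_PEND in the sigblock
 * word, retrying the CAS until it succeeds or the access faults.
 */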
4219 static void
4220 sigfastblock_setpend1(struct thread *td)
4221 {
4222 	int res;
4223 	uint32_t oldval;
4224 
4225 	if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
4226 		return;
4227 	res = fueword32((void *)td->td_sigblock_ptr, &oldval);
4228 	if (res == -1) {
4229 		sigfastblock_failed(td, true, false);
4230 		return;
4231 	}
4232 	for (;;) {
4233 		res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
4234 		    oldval | SIGFASTBLOCK_PEND);
4235 		if (res == -1) {
4236 			sigfastblock_failed(td, true, true);
4237 			return;
4238 		}
4239 		if (res == 0) {
4240 			td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
4241 			td->td_pflags &= ~TDP_SIGFASTPENDING;
4242 			break;
4243 		}
4244 		MPASS(res == 1);
4245 		if (thread_check_susp(td, false) != 0)
4246 			break;
4247 	}
4248 }
4249 
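/*
 * Publish the pending-signal flag for the current thread and, if
 * requested, have the process re-evaluate delivery of signals that were
 * deferred by fast-blocked threads.
 */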
4250 void
4251 sigfastblock_setpend(struct thread *td, bool resched)
4252 {
4253 	struct proc *p;
4254 
4255 	sigfastblock_setpend1(td);
4256 	if (resched) {
4257 		p = td->td_proc;
4258 		PROC_LOCK(p);
4259 		reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);
4260 		PROC_UNLOCK(p);
4261 	}
4262 }
4263