xref: /freebsd/sys/kern/kern_sig.c (revision a6b7d5cddacd4aa14ee058195eedfe207a69029e)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1989, 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include "opt_capsicum.h"
38 #include "opt_ktrace.h"
39 
40 #include <sys/param.h>
41 #include <sys/capsicum.h>
42 #include <sys/ctype.h>
43 #include <sys/systm.h>
44 #include <sys/signalvar.h>
45 #include <sys/vnode.h>
46 #include <sys/acct.h>
47 #include <sys/capsicum.h>
48 #include <sys/compressor.h>
49 #include <sys/condvar.h>
50 #include <sys/devctl.h>
51 #include <sys/event.h>
52 #include <sys/fcntl.h>
53 #include <sys/imgact.h>
54 #include <sys/jail.h>
55 #include <sys/kernel.h>
56 #include <sys/ktr.h>
57 #include <sys/ktrace.h>
58 #include <sys/limits.h>
59 #include <sys/lock.h>
60 #include <sys/malloc.h>
61 #include <sys/mutex.h>
62 #include <sys/refcount.h>
63 #include <sys/namei.h>
64 #include <sys/proc.h>
65 #include <sys/procdesc.h>
66 #include <sys/ptrace.h>
67 #include <sys/posix4.h>
68 #include <sys/racct.h>
69 #include <sys/resourcevar.h>
70 #include <sys/sdt.h>
71 #include <sys/sbuf.h>
72 #include <sys/sleepqueue.h>
73 #include <sys/smp.h>
74 #include <sys/stat.h>
75 #include <sys/sx.h>
76 #include <sys/syscall.h>
77 #include <sys/syscallsubr.h>
78 #include <sys/sysctl.h>
79 #include <sys/sysent.h>
80 #include <sys/syslog.h>
81 #include <sys/sysproto.h>
82 #include <sys/timers.h>
83 #include <sys/unistd.h>
84 #include <sys/vmmeter.h>
85 #include <sys/wait.h>
86 #include <vm/vm.h>
87 #include <vm/vm_extern.h>
88 #include <vm/uma.h>
89 
90 #include <machine/cpu.h>
91 
92 #include <security/audit/audit.h>
93 
94 #define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
95 
96 SDT_PROVIDER_DECLARE(proc);
97 SDT_PROBE_DEFINE3(proc, , , signal__send,
98     "struct thread *", "struct proc *", "int");
99 SDT_PROBE_DEFINE2(proc, , , signal__clear,
100     "int", "ksiginfo_t *");
101 SDT_PROBE_DEFINE3(proc, , , signal__discard,
102     "struct thread *", "struct proc *", "int");
103 
104 static int	coredump(struct thread *);
105 static int	killpg1(struct thread *td, int sig, int pgid, int all,
106 		    ksiginfo_t *ksi);
107 static int	issignal(struct thread *td);
108 static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
109 static int	sigprop(int sig);
110 static void	tdsigwakeup(struct thread *, int, sig_t, int);
111 static void	sig_suspend_threads(struct thread *, struct proc *);
112 static int	filt_sigattach(struct knote *kn);
113 static void	filt_sigdetach(struct knote *kn);
114 static int	filt_signal(struct knote *kn, long hint);
115 static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
116 static void	sigqueue_start(void);
117 static void	sigfastblock_setpend(struct thread *td, bool resched);
118 static void	sig_handle_first_stop(struct thread *td, struct proc *p,
119     int sig, bool ext);
120 
121 static uma_zone_t	ksiginfo_zone = NULL;
122 const struct filterops sig_filtops = {
123 	.f_isfd = 0,
124 	.f_attach = filt_sigattach,
125 	.f_detach = filt_sigdetach,
126 	.f_event = filt_signal,
127 };
128 
129 static int	kern_logsigexit = 1;
130 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
131     &kern_logsigexit, 0,
132     "Log processes quitting on abnormal signals to syslog(3)");
133 
134 static int	kern_forcesigexit = 1;
135 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
136     &kern_forcesigexit, 0, "Force trap signal to be handled");
137 
138 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
139     "POSIX real time signal");
140 
141 static int	max_pending_per_proc = 128;
142 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
143     &max_pending_per_proc, 0, "Max pending signals per proc");
144 
145 static int	preallocate_siginfo = 1024;
146 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
147     &preallocate_siginfo, 0, "Preallocated signal memory size");
148 
149 static int	signal_overflow = 0;
150 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
151     &signal_overflow, 0, "Number of signals that overflowed");
152 
153 static int	signal_alloc_fail = 0;
154 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
155     &signal_alloc_fail, 0, "Number of signals that failed to be allocated");
156 
157 static int	kern_lognosys = 0;
158 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
159     "Log invalid syscalls");
160 
161 static int	kern_signosys = 1;
162 SYSCTL_INT(_kern, OID_AUTO, signosys, CTLFLAG_RWTUN, &kern_signosys, 0,
163     "Send SIGSYS on return from invalid syscall");
164 
165 __read_frequently bool sigfastblock_fetch_always = false;
166 SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
167     &sigfastblock_fetch_always, 0,
168     "Fetch sigfastblock word on each syscall entry for proper "
169     "blocking semantic");
170 
171 static bool	kern_sig_discard_ign = true;
172 SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
173     &kern_sig_discard_ign, 0,
174     "Discard ignored signals on delivery, otherwise queue them to "
175     "the target queue");
176 
177 static bool pt_attach_transparent = true;
178 SYSCTL_BOOL(_debug, OID_AUTO, ptrace_attach_transparent, CTLFLAG_RWTUN,
179     &pt_attach_transparent, 0,
180     "Hide wakes from PT_ATTACH on interruptible sleeps");
181 
182 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
183 
184 /*
185  * Policy -- Can ucred cr1 send SIGIO to process cr2?
186  * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
187  * in the right situations.
188  */
189 #define CANSIGIO(cr1, cr2) \
190 	((cr1)->cr_uid == 0 || \
191 	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
192 	    (cr1)->cr_uid == (cr2)->cr_ruid || \
193 	    (cr1)->cr_ruid == (cr2)->cr_uid || \
194 	    (cr1)->cr_uid == (cr2)->cr_uid)
195 
196 static int	sugid_coredump;
197 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
198     &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
199 
200 static int	capmode_coredump;
201 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
202     &capmode_coredump, 0, "Allow processes in capability mode to dump core");
203 
204 static int	do_coredump = 1;
205 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
206 	&do_coredump, 0, "Enable/Disable coredumps");
207 
208 static int	set_core_nodump_flag = 0;
209 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
210 	0, "Enable setting the NODUMP flag on coredump files");
211 
212 static int	coredump_devctl = 0;
213 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
214 	0, "Generate a devctl notification when processes coredump");
215 
216 /*
217  * Signal properties and actions.
218  * The array below categorizes the signals and their default actions
219  * according to the following properties:
220  */
221 #define	SIGPROP_KILL		0x01	/* terminates process by default */
222 #define	SIGPROP_CORE		0x02	/* ditto and coredumps */
223 #define	SIGPROP_STOP		0x04	/* suspend process */
224 #define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
225 #define	SIGPROP_IGNORE		0x10	/* ignore by default */
226 #define	SIGPROP_CONT		0x20	/* continue if suspended */
227 
228 static const int sigproptbl[NSIG] = {
229 	[SIGHUP] =	SIGPROP_KILL,
230 	[SIGINT] =	SIGPROP_KILL,
231 	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
232 	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
233 	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
234 	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
235 	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
236 	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
237 	[SIGKILL] =	SIGPROP_KILL,
238 	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
239 	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
240 	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
241 	[SIGPIPE] =	SIGPROP_KILL,
242 	[SIGALRM] =	SIGPROP_KILL,
243 	[SIGTERM] =	SIGPROP_KILL,
244 	[SIGURG] =	SIGPROP_IGNORE,
245 	[SIGSTOP] =	SIGPROP_STOP,
246 	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
247 	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
248 	[SIGCHLD] =	SIGPROP_IGNORE,
249 	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
250 	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
251 	[SIGIO] =	SIGPROP_IGNORE,
252 	[SIGXCPU] =	SIGPROP_KILL,
253 	[SIGXFSZ] =	SIGPROP_KILL,
254 	[SIGVTALRM] =	SIGPROP_KILL,
255 	[SIGPROF] =	SIGPROP_KILL,
256 	[SIGWINCH] =	SIGPROP_IGNORE,
257 	[SIGINFO] =	SIGPROP_IGNORE,
258 	[SIGUSR1] =	SIGPROP_KILL,
259 	[SIGUSR2] =	SIGPROP_KILL,
260 };
261 
262 #define	_SIG_FOREACH_ADVANCE(i, set) ({					\
263 	int __found;							\
264 	for (;;) {							\
265 		if (__bits != 0) {					\
266 			int __sig = ffs(__bits);			\
267 			__bits &= ~(1u << (__sig - 1));			\
268 			sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
269 			__found = 1;					\
270 			break;						\
271 		}							\
272 		if (++__i == _SIG_WORDS) {				\
273 			__found = 0;					\
274 			break;						\
275 		}							\
276 		__bits = (set)->__bits[__i];				\
277 	}								\
278 	__found != 0;							\
279 })
280 
281 #define	SIG_FOREACH(i, set)						\
282 	for (int32_t __i = -1, __bits = 0;				\
283 	    _SIG_FOREACH_ADVANCE(i, set); )				\
284 
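/*
 * Illustrative sketch of SIG_FOREACH() usage (a hypothetical caller, not
 * one taken from this file).  The macro walks every signal number present
 * in a sigset_t; _SIG_FOREACH_ADVANCE() stores the current signal number
 * into a local variable named 'sig'.
 *
 *	int sig;
 *
 *	SIG_FOREACH(sig, &td->td_sigqueue.sq_signals) {
 *		if (!SIGISMEMBER(td->td_sigmask, sig)) {
 *			(act on the pending, unblocked signal 'sig')
 *		}
 *	}
 */
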
285 static sigset_t fastblock_mask;
286 
287 static void
288 ast_sig(struct thread *td, int tda)
289 {
290 	struct proc *p;
291 	int old_boundary, sig;
292 	bool resched_sigs;
293 
294 	p = td->td_proc;
295 
296 #ifdef DIAGNOSTIC
297 	if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
298 	    TDAI(TDA_AST))) == 0) {
299 		PROC_LOCK(p);
300 		thread_lock(td);
301 		/*
302 		 * Note that TDA_SIG should be re-read from
303 		 * td_ast, since a signal might have been delivered
304 		 * after we cleared td_flags above.  This is one of
305 		 * the reasons for the looping check for the AST condition.
306 		 * See comment in userret() about P_PPWAIT.
307 		 */
308 		if ((p->p_flag & P_PPWAIT) == 0 &&
309 		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
310 			if (SIGPENDING(td) && ((tda | td->td_ast) &
311 			    (TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
312 				thread_unlock(td); /* fix dumps */
313 				panic(
314 				    "failed2 to set signal flags for ast p %p "
315 				    "td %p tda %#x td_ast %#x fl %#x",
316 				    p, td, tda, td->td_ast, td->td_flags);
317 			}
318 		}
319 		thread_unlock(td);
320 		PROC_UNLOCK(p);
321 	}
322 #endif
323 
324 	/*
325 	 * Check for signals. Unlocked reads of p_pendingcnt or
326 	 * p_siglist might cause process-directed signal to be handled
327 	 * p_siglist might cause a process-directed signal to be handled
328 	 */
329 	if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
330 	    !SIGISEMPTY(p->p_siglist)) {
331 		sigfastblock_fetch(td);
332 		PROC_LOCK(p);
333 		old_boundary = ~TDB_BOUNDARY | (td->td_dbgflags & TDB_BOUNDARY);
334 		td->td_dbgflags |= TDB_BOUNDARY;
335 		mtx_lock(&p->p_sigacts->ps_mtx);
336 		while ((sig = cursig(td)) != 0) {
337 			KASSERT(sig >= 0, ("sig %d", sig));
338 			postsig(sig);
339 		}
340 		mtx_unlock(&p->p_sigacts->ps_mtx);
341 		td->td_dbgflags &= old_boundary;
342 		PROC_UNLOCK(p);
343 		resched_sigs = true;
344 	} else {
345 		resched_sigs = false;
346 	}
347 
348 	/*
349 	 * Handle deferred update of the fast sigblock value, after
350 	 * the postsig() loop was performed.
351 	 */
352 	sigfastblock_setpend(td, resched_sigs);
353 
354 	/*
355 	 * Clear td_sa.code: signal to ptrace that syscall arguments
356 	 * are unavailable after this point. This AST handler is the
357 	 * last chance for ptracestop() to signal the tracer before
358 	 * the tracee returns to userspace.
359 	 */
360 	td->td_sa.code = 0;
361 }
362 
363 static void
364 ast_sigsuspend(struct thread *td, int tda __unused)
365 {
366 	MPASS((td->td_pflags & TDP_OLDMASK) != 0);
367 	td->td_pflags &= ~TDP_OLDMASK;
368 	kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
369 }
370 
371 static void
372 sigqueue_start(void)
373 {
374 	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
375 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
376 	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
377 	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
378 	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
379 	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
380 	SIGFILLSET(fastblock_mask);
381 	SIG_CANTMASK(fastblock_mask);
382 	ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);
383 
384 	/*
385 	 * TDA_PSELECT is for the case where the signal mask should be restored
386 	 * before delivering any signals so that we do not deliver any that are
387 	 * blocked by the normal thread mask.  It is mutually exclusive with
388 	 * TDA_SIGSUSPEND, which should be used if we *do* want to deliver
389 	 * signals that are normally blocked, e.g., if it interrupted our sleep.
390 	 */
391 	ast_register(TDA_PSELECT, ASTR_ASTF_REQUIRED | ASTR_TDP,
392 	    TDP_OLDMASK, ast_sigsuspend);
393 	ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
394 	    TDP_OLDMASK, ast_sigsuspend);
395 }
396 
397 ksiginfo_t *
398 ksiginfo_alloc(int mwait)
399 {
400 	MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);
401 
402 	if (ksiginfo_zone == NULL)
403 		return (NULL);
404 	return (uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
405 }
406 
407 void
408 ksiginfo_free(ksiginfo_t *ksi)
409 {
410 	uma_zfree(ksiginfo_zone, ksi);
411 }
412 
413 static __inline bool
414 ksiginfo_tryfree(ksiginfo_t *ksi)
415 {
416 	if ((ksi->ksi_flags & KSI_EXT) == 0) {
417 		uma_zfree(ksiginfo_zone, ksi);
418 		return (true);
419 	}
420 	return (false);
421 }
422 
423 void
424 sigqueue_init(sigqueue_t *list, struct proc *p)
425 {
426 	SIGEMPTYSET(list->sq_signals);
427 	SIGEMPTYSET(list->sq_kill);
428 	SIGEMPTYSET(list->sq_ptrace);
429 	TAILQ_INIT(&list->sq_list);
430 	list->sq_proc = p;
431 	list->sq_flags = SQ_INIT;
432 }
433 
434 /*
435  * Get a signal's ksiginfo.
436  * Return:
437  *	0	-	signal not found
438  *	others	-	signal number
439  */
440 static int
441 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
442 {
443 	struct proc *p = sq->sq_proc;
444 	struct ksiginfo *ksi, *next;
445 	int count = 0;
446 
447 	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
448 
449 	if (!SIGISMEMBER(sq->sq_signals, signo))
450 		return (0);
451 
452 	if (SIGISMEMBER(sq->sq_ptrace, signo)) {
453 		count++;
454 		SIGDELSET(sq->sq_ptrace, signo);
455 		si->ksi_flags |= KSI_PTRACE;
456 	}
457 	if (SIGISMEMBER(sq->sq_kill, signo)) {
458 		count++;
459 		if (count == 1)
460 			SIGDELSET(sq->sq_kill, signo);
461 	}
462 
463 	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
464 		if (ksi->ksi_signo == signo) {
465 			if (count == 0) {
466 				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
467 				ksi->ksi_sigq = NULL;
468 				ksiginfo_copy(ksi, si);
469 				if (ksiginfo_tryfree(ksi) && p != NULL)
470 					p->p_pendingcnt--;
471 			}
472 			if (++count > 1)
473 				break;
474 		}
475 	}
476 
477 	if (count <= 1)
478 		SIGDELSET(sq->sq_signals, signo);
479 	si->ksi_signo = signo;
480 	return (signo);
481 }
482 
483 void
484 sigqueue_take(ksiginfo_t *ksi)
485 {
486 	struct ksiginfo *kp;
487 	struct proc	*p;
488 	sigqueue_t	*sq;
489 
490 	if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
491 		return;
492 
493 	p = sq->sq_proc;
494 	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
495 	ksi->ksi_sigq = NULL;
496 	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
497 		p->p_pendingcnt--;
498 
499 	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
500 	     kp = TAILQ_NEXT(kp, ksi_link)) {
501 		if (kp->ksi_signo == ksi->ksi_signo)
502 			break;
503 	}
504 	if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
505 	    !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
506 		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
507 }
508 
509 static int
510 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
511 {
512 	struct proc *p = sq->sq_proc;
513 	struct ksiginfo *ksi;
514 	int ret = 0;
515 
516 	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
517 
518 	/*
519 	 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
520 	 * for these signals.
521 	 */
522 	if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
523 		SIGADDSET(sq->sq_kill, signo);
524 		goto out_set_bit;
525 	}
526 
527 	/* directly insert the ksi, don't copy it */
528 	if (si->ksi_flags & KSI_INS) {
529 		if (si->ksi_flags & KSI_HEAD)
530 			TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
531 		else
532 			TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
533 		si->ksi_sigq = sq;
534 		goto out_set_bit;
535 	}
536 
537 	if (__predict_false(ksiginfo_zone == NULL)) {
538 		SIGADDSET(sq->sq_kill, signo);
539 		goto out_set_bit;
540 	}
541 
542 	if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
543 		signal_overflow++;
544 		ret = EAGAIN;
545 	} else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) {
546 		signal_alloc_fail++;
547 		ret = EAGAIN;
548 	} else {
549 		if (p != NULL)
550 			p->p_pendingcnt++;
551 		ksiginfo_copy(si, ksi);
552 		ksi->ksi_signo = signo;
553 		if (si->ksi_flags & KSI_HEAD)
554 			TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
555 		else
556 			TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
557 		ksi->ksi_sigq = sq;
558 	}
559 
560 	if (ret != 0) {
561 		if ((si->ksi_flags & KSI_PTRACE) != 0) {
562 			SIGADDSET(sq->sq_ptrace, signo);
563 			ret = 0;
564 			goto out_set_bit;
565 		} else if ((si->ksi_flags & KSI_TRAP) != 0 ||
566 		    (si->ksi_flags & KSI_SIGQ) == 0) {
567 			SIGADDSET(sq->sq_kill, signo);
568 			ret = 0;
569 			goto out_set_bit;
570 		}
571 		return (ret);
572 	}
573 
574 out_set_bit:
575 	SIGADDSET(sq->sq_signals, signo);
576 	return (ret);
577 }
578 
579 void
580 sigqueue_flush(sigqueue_t *sq)
581 {
582 	struct proc *p = sq->sq_proc;
583 	ksiginfo_t *ksi;
584 
585 	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
586 
587 	if (p != NULL)
588 		PROC_LOCK_ASSERT(p, MA_OWNED);
589 
590 	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
591 		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
592 		ksi->ksi_sigq = NULL;
593 		if (ksiginfo_tryfree(ksi) && p != NULL)
594 			p->p_pendingcnt--;
595 	}
596 
597 	SIGEMPTYSET(sq->sq_signals);
598 	SIGEMPTYSET(sq->sq_kill);
599 	SIGEMPTYSET(sq->sq_ptrace);
600 }
601 
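/*
 * Illustrative lifecycle sketch for the sigqueue helpers above (a
 * hypothetical caller, assuming the proc lock is held where the
 * individual functions require it): a queue is prepared with
 * sigqueue_init(), signals are queued with sigqueue_add(), consumed
 * with sigqueue_get(), and drained with sigqueue_flush().
 *
 *	sigqueue_t sq;
 *	ksiginfo_t ksi;
 *
 *	sigqueue_init(&sq, p);
 *	(void)sigqueue_add(&sq, SIGTERM, NULL);
 *	ksiginfo_init(&ksi);
 *	if (sigqueue_get(&sq, SIGTERM, &ksi) == SIGTERM)
 *		(ksi now describes the dequeued signal)
 *	sigqueue_flush(&sq);
 */
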
602 static void
603 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
604 {
605 	sigset_t tmp;
606 	struct proc *p1, *p2;
607 	ksiginfo_t *ksi, *next;
608 
609 	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
610 	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
611 	p1 = src->sq_proc;
612 	p2 = dst->sq_proc;
613 	/* Move siginfo to target list */
614 	TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
615 		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
616 			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
617 			if (p1 != NULL)
618 				p1->p_pendingcnt--;
619 			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
620 			ksi->ksi_sigq = dst;
621 			if (p2 != NULL)
622 				p2->p_pendingcnt++;
623 		}
624 	}
625 
626 	/* Move pending bits to target list */
627 	tmp = src->sq_kill;
628 	SIGSETAND(tmp, *set);
629 	SIGSETOR(dst->sq_kill, tmp);
630 	SIGSETNAND(src->sq_kill, tmp);
631 
632 	tmp = src->sq_ptrace;
633 	SIGSETAND(tmp, *set);
634 	SIGSETOR(dst->sq_ptrace, tmp);
635 	SIGSETNAND(src->sq_ptrace, tmp);
636 
637 	tmp = src->sq_signals;
638 	SIGSETAND(tmp, *set);
639 	SIGSETOR(dst->sq_signals, tmp);
640 	SIGSETNAND(src->sq_signals, tmp);
641 }
642 
643 #if 0
644 static void
645 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
646 {
647 	sigset_t set;
648 
649 	SIGEMPTYSET(set);
650 	SIGADDSET(set, signo);
651 	sigqueue_move_set(src, dst, &set);
652 }
653 #endif
654 
655 static void
656 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
657 {
658 	struct proc *p = sq->sq_proc;
659 	ksiginfo_t *ksi, *next;
660 
661 	KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
662 
663 	/* Remove siginfo queue */
664 	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
665 		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
666 			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
667 			ksi->ksi_sigq = NULL;
668 			if (ksiginfo_tryfree(ksi) && p != NULL)
669 				p->p_pendingcnt--;
670 		}
671 	}
672 	SIGSETNAND(sq->sq_kill, *set);
673 	SIGSETNAND(sq->sq_ptrace, *set);
674 	SIGSETNAND(sq->sq_signals, *set);
675 }
676 
677 void
678 sigqueue_delete(sigqueue_t *sq, int signo)
679 {
680 	sigset_t set;
681 
682 	SIGEMPTYSET(set);
683 	SIGADDSET(set, signo);
684 	sigqueue_delete_set(sq, &set);
685 }
686 
687 /* Remove a set of signals for a process */
688 static void
689 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
690 {
691 	sigqueue_t worklist;
692 	struct thread *td0;
693 
694 	PROC_LOCK_ASSERT(p, MA_OWNED);
695 
696 	sigqueue_init(&worklist, NULL);
697 	sigqueue_move_set(&p->p_sigqueue, &worklist, set);
698 
699 	FOREACH_THREAD_IN_PROC(p, td0)
700 		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
701 
702 	sigqueue_flush(&worklist);
703 }
704 
705 void
706 sigqueue_delete_proc(struct proc *p, int signo)
707 {
708 	sigset_t set;
709 
710 	SIGEMPTYSET(set);
711 	SIGADDSET(set, signo);
712 	sigqueue_delete_set_proc(p, &set);
713 }
714 
715 static void
716 sigqueue_delete_stopmask_proc(struct proc *p)
717 {
718 	sigset_t set;
719 
720 	SIGEMPTYSET(set);
721 	SIGADDSET(set, SIGSTOP);
722 	SIGADDSET(set, SIGTSTP);
723 	SIGADDSET(set, SIGTTIN);
724 	SIGADDSET(set, SIGTTOU);
725 	sigqueue_delete_set_proc(p, &set);
726 }
727 
728 /*
729  * Determine signal that should be delivered to thread td, the current
730  * thread, 0 if none.  If there is a pending stop signal with default
731  * action, the process stops in issignal().
732  */
733 int
734 cursig(struct thread *td)
735 {
736 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
737 	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
738 	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
739 	return (SIGPENDING(td) ? issignal(td) : 0);
740 }
741 
742 /*
743  * Arrange for ast() to handle unmasked pending signals on return to user
744  * mode.  This must be called whenever a signal is added to td_sigqueue or
745  * unmasked in td_sigmask.
746  */
747 void
748 signotify(struct thread *td)
749 {
750 
751 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
752 
753 	if (SIGPENDING(td))
754 		ast_sched(td, TDA_SIG);
755 }
756 
757 /*
758  * Returns 1 (true) if altstack is configured for the thread, and the
759  * passed stack bottom address falls into the altstack range.  Handles
760  * the 43 compat special case where the alt stack size is zero.
761  */
762 int
763 sigonstack(size_t sp)
764 {
765 	struct thread *td;
766 
767 	td = curthread;
768 	if ((td->td_pflags & TDP_ALTSTACK) == 0)
769 		return (0);
770 #if defined(COMPAT_43)
771 	if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
772 		return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
773 #endif
774 	return (sp >= (size_t)td->td_sigstk.ss_sp &&
775 	    sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
776 }
777 
778 static __inline int
779 sigprop(int sig)
780 {
781 
782 	if (sig > 0 && sig < nitems(sigproptbl))
783 		return (sigproptbl[sig]);
784 	return (0);
785 }
786 
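/*
 * Illustrative property check (a sketch; kern_sigaction() below performs
 * a similar test with SIGPROP_IGNORE):
 *
 *	if ((sigprop(sig) & SIGPROP_CORE) != 0)
 *		(the default action for 'sig' kills the process and
 *		 produces a core dump)
 */
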
787 static bool
788 sigact_flag_test(const struct sigaction *act, int flag)
789 {
790 
791 	/*
792 	 * SA_SIGINFO is reset when signal disposition is set to
793 	 * ignore or default.  Other flags are kept according to user
794 	 * settings.
795 	 */
796 	return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
797 	    ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
798 	    (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
799 }
800 
801 /*
802  * kern_sigaction
803  * sigaction
804  * freebsd4_sigaction
805  * osigaction
806  */
807 int
808 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
809     struct sigaction *oact, int flags)
810 {
811 	struct sigacts *ps;
812 	struct proc *p = td->td_proc;
813 
814 	if (!_SIG_VALID(sig))
815 		return (EINVAL);
816 	if (act != NULL && act->sa_handler != SIG_DFL &&
817 	    act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
818 	    SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
819 	    SA_NOCLDWAIT | SA_SIGINFO)) != 0)
820 		return (EINVAL);
821 
822 	PROC_LOCK(p);
823 	ps = p->p_sigacts;
824 	mtx_lock(&ps->ps_mtx);
825 	if (oact) {
826 		memset(oact, 0, sizeof(*oact));
827 		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
828 		if (SIGISMEMBER(ps->ps_sigonstack, sig))
829 			oact->sa_flags |= SA_ONSTACK;
830 		if (!SIGISMEMBER(ps->ps_sigintr, sig))
831 			oact->sa_flags |= SA_RESTART;
832 		if (SIGISMEMBER(ps->ps_sigreset, sig))
833 			oact->sa_flags |= SA_RESETHAND;
834 		if (SIGISMEMBER(ps->ps_signodefer, sig))
835 			oact->sa_flags |= SA_NODEFER;
836 		if (SIGISMEMBER(ps->ps_siginfo, sig)) {
837 			oact->sa_flags |= SA_SIGINFO;
838 			oact->sa_sigaction =
839 			    (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
840 		} else
841 			oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
842 		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
843 			oact->sa_flags |= SA_NOCLDSTOP;
844 		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
845 			oact->sa_flags |= SA_NOCLDWAIT;
846 	}
847 	if (act) {
848 		if ((sig == SIGKILL || sig == SIGSTOP) &&
849 		    act->sa_handler != SIG_DFL) {
850 			mtx_unlock(&ps->ps_mtx);
851 			PROC_UNLOCK(p);
852 			return (EINVAL);
853 		}
854 
855 		/*
856 		 * Change setting atomically.
857 		 */
858 
859 		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
860 		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
861 		if (sigact_flag_test(act, SA_SIGINFO)) {
862 			ps->ps_sigact[_SIG_IDX(sig)] =
863 			    (__sighandler_t *)act->sa_sigaction;
864 			SIGADDSET(ps->ps_siginfo, sig);
865 		} else {
866 			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
867 			SIGDELSET(ps->ps_siginfo, sig);
868 		}
869 		if (!sigact_flag_test(act, SA_RESTART))
870 			SIGADDSET(ps->ps_sigintr, sig);
871 		else
872 			SIGDELSET(ps->ps_sigintr, sig);
873 		if (sigact_flag_test(act, SA_ONSTACK))
874 			SIGADDSET(ps->ps_sigonstack, sig);
875 		else
876 			SIGDELSET(ps->ps_sigonstack, sig);
877 		if (sigact_flag_test(act, SA_RESETHAND))
878 			SIGADDSET(ps->ps_sigreset, sig);
879 		else
880 			SIGDELSET(ps->ps_sigreset, sig);
881 		if (sigact_flag_test(act, SA_NODEFER))
882 			SIGADDSET(ps->ps_signodefer, sig);
883 		else
884 			SIGDELSET(ps->ps_signodefer, sig);
885 		if (sig == SIGCHLD) {
886 			if (act->sa_flags & SA_NOCLDSTOP)
887 				ps->ps_flag |= PS_NOCLDSTOP;
888 			else
889 				ps->ps_flag &= ~PS_NOCLDSTOP;
890 			if (act->sa_flags & SA_NOCLDWAIT) {
891 				/*
892 				 * Paranoia: since SA_NOCLDWAIT is implemented
893 				 * by reparenting the dying child to PID 1 (and
894 				 * trust it to reap the zombie), PID 1 itself
895 				 * is forbidden to set SA_NOCLDWAIT.
896 				 */
897 				if (p->p_pid == 1)
898 					ps->ps_flag &= ~PS_NOCLDWAIT;
899 				else
900 					ps->ps_flag |= PS_NOCLDWAIT;
901 			} else
902 				ps->ps_flag &= ~PS_NOCLDWAIT;
903 			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
904 				ps->ps_flag |= PS_CLDSIGIGN;
905 			else
906 				ps->ps_flag &= ~PS_CLDSIGIGN;
907 		}
908 		/*
909 		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
910 		 * and for signals set to SIG_DFL where the default is to
911 		 * ignore. However, don't put SIGCONT in ps_sigignore, as we
912 		 * have to restart the process.
913 		 */
914 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
915 		    (sigprop(sig) & SIGPROP_IGNORE &&
916 		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
917 			/* never to be seen again */
918 			sigqueue_delete_proc(p, sig);
919 			if (sig != SIGCONT)
920 				/* easier in psignal */
921 				SIGADDSET(ps->ps_sigignore, sig);
922 			SIGDELSET(ps->ps_sigcatch, sig);
923 		} else {
924 			SIGDELSET(ps->ps_sigignore, sig);
925 			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
926 				SIGDELSET(ps->ps_sigcatch, sig);
927 			else
928 				SIGADDSET(ps->ps_sigcatch, sig);
929 		}
930 #ifdef COMPAT_FREEBSD4
931 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
932 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
933 		    (flags & KSA_FREEBSD4) == 0)
934 			SIGDELSET(ps->ps_freebsd4, sig);
935 		else
936 			SIGADDSET(ps->ps_freebsd4, sig);
937 #endif
938 #ifdef COMPAT_43
939 		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
940 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
941 		    (flags & KSA_OSIGSET) == 0)
942 			SIGDELSET(ps->ps_osigset, sig);
943 		else
944 			SIGADDSET(ps->ps_osigset, sig);
945 #endif
946 	}
947 	mtx_unlock(&ps->ps_mtx);
948 	PROC_UNLOCK(p);
949 	return (0);
950 }
951 
952 #ifndef _SYS_SYSPROTO_H_
953 struct sigaction_args {
954 	int	sig;
955 	struct	sigaction *act;
956 	struct	sigaction *oact;
957 };
958 #endif
959 int
960 sys_sigaction(struct thread *td, struct sigaction_args *uap)
961 {
962 	struct sigaction act, oact;
963 	struct sigaction *actp, *oactp;
964 	int error;
965 
966 	actp = (uap->act != NULL) ? &act : NULL;
967 	oactp = (uap->oact != NULL) ? &oact : NULL;
968 	if (actp) {
969 		error = copyin(uap->act, actp, sizeof(act));
970 		if (error)
971 			return (error);
972 	}
973 	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
974 	if (oactp && !error)
975 		error = copyout(oactp, uap->oact, sizeof(oact));
976 	return (error);
977 }
978 
979 #ifdef COMPAT_FREEBSD4
980 #ifndef _SYS_SYSPROTO_H_
981 struct freebsd4_sigaction_args {
982 	int	sig;
983 	struct	sigaction *act;
984 	struct	sigaction *oact;
985 };
986 #endif
987 int
988 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
989 {
990 	struct sigaction act, oact;
991 	struct sigaction *actp, *oactp;
992 	int error;
993 
994 	actp = (uap->act != NULL) ? &act : NULL;
995 	oactp = (uap->oact != NULL) ? &oact : NULL;
996 	if (actp) {
997 		error = copyin(uap->act, actp, sizeof(act));
998 		if (error)
999 			return (error);
1000 	}
1001 	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
1002 	if (oactp && !error)
1003 		error = copyout(oactp, uap->oact, sizeof(oact));
1004 	return (error);
1005 }
1006 #endif	/* COMPAT_FREEBSD4 */
1007 
1008 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1009 #ifndef _SYS_SYSPROTO_H_
1010 struct osigaction_args {
1011 	int	signum;
1012 	struct	osigaction *nsa;
1013 	struct	osigaction *osa;
1014 };
1015 #endif
1016 int
1017 osigaction(struct thread *td, struct osigaction_args *uap)
1018 {
1019 	struct osigaction sa;
1020 	struct sigaction nsa, osa;
1021 	struct sigaction *nsap, *osap;
1022 	int error;
1023 
1024 	if (uap->signum <= 0 || uap->signum >= ONSIG)
1025 		return (EINVAL);
1026 
1027 	nsap = (uap->nsa != NULL) ? &nsa : NULL;
1028 	osap = (uap->osa != NULL) ? &osa : NULL;
1029 
1030 	if (nsap) {
1031 		error = copyin(uap->nsa, &sa, sizeof(sa));
1032 		if (error)
1033 			return (error);
1034 		nsap->sa_handler = sa.sa_handler;
1035 		nsap->sa_flags = sa.sa_flags;
1036 		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
1037 	}
1038 	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1039 	if (osap && !error) {
1040 		sa.sa_handler = osap->sa_handler;
1041 		sa.sa_flags = osap->sa_flags;
1042 		SIG2OSIG(osap->sa_mask, sa.sa_mask);
1043 		error = copyout(&sa, uap->osa, sizeof(sa));
1044 	}
1045 	return (error);
1046 }
1047 
1048 #if !defined(__i386__)
1049 /* Avoid replicating the same stub everywhere */
1050 int
1051 osigreturn(struct thread *td, struct osigreturn_args *uap)
1052 {
1053 
1054 	return (nosys(td, (struct nosys_args *)uap));
1055 }
1056 #endif
1057 #endif /* COMPAT_43 */
1058 
1059 /*
1060  * Initialize signal state for process 0;
1061  * set to ignore signals that are ignored by default.
1062  */
1063 void
1064 siginit(struct proc *p)
1065 {
1066 	int i;
1067 	struct sigacts *ps;
1068 
1069 	PROC_LOCK(p);
1070 	ps = p->p_sigacts;
1071 	mtx_lock(&ps->ps_mtx);
1072 	for (i = 1; i <= NSIG; i++) {
1073 		if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
1074 			SIGADDSET(ps->ps_sigignore, i);
1075 		}
1076 	}
1077 	mtx_unlock(&ps->ps_mtx);
1078 	PROC_UNLOCK(p);
1079 }
1080 
1081 /*
1082  * Reset specified signal to the default disposition.
1083  */
1084 static void
1085 sigdflt(struct sigacts *ps, int sig)
1086 {
1087 
1088 	mtx_assert(&ps->ps_mtx, MA_OWNED);
1089 	SIGDELSET(ps->ps_sigcatch, sig);
1090 	if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
1091 		SIGADDSET(ps->ps_sigignore, sig);
1092 	ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1093 	SIGDELSET(ps->ps_siginfo, sig);
1094 }
1095 
1096 /*
1097  * Reset signals for an exec of the specified process.
1098  */
1099 void
1100 execsigs(struct proc *p)
1101 {
1102 	struct sigacts *ps;
1103 	struct thread *td;
1104 
1105 	/*
1106 	 * Reset caught signals.  Held signals remain held
1107 	 * through td_sigmask (unless they were caught,
1108 	 * and are now ignored by default).
1109 	 */
1110 	PROC_LOCK_ASSERT(p, MA_OWNED);
1111 	ps = p->p_sigacts;
1112 	mtx_lock(&ps->ps_mtx);
1113 	sig_drop_caught(p);
1114 
1115 	/*
1116 	 * Reset stack state to the user stack.
1117 	 * Clear set of signals caught on the signal stack.
1118 	 */
1119 	td = curthread;
1120 	MPASS(td->td_proc == p);
1121 	td->td_sigstk.ss_flags = SS_DISABLE;
1122 	td->td_sigstk.ss_size = 0;
1123 	td->td_sigstk.ss_sp = 0;
1124 	td->td_pflags &= ~TDP_ALTSTACK;
1125 	/*
1126 	 * Reset the "no zombies if child dies" flag, as Solaris does.
1127 	 */
1128 	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1129 	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1130 		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1131 	mtx_unlock(&ps->ps_mtx);
1132 }
1133 
1134 /*
1135  * kern_sigprocmask()
1136  *
1137  *	Manipulate signal mask.
1138  */
1139 int
1140 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1141     int flags)
1142 {
1143 	sigset_t new_block, oset1;
1144 	struct proc *p;
1145 	int error;
1146 
1147 	p = td->td_proc;
1148 	if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1149 		PROC_LOCK_ASSERT(p, MA_OWNED);
1150 	else
1151 		PROC_LOCK(p);
1152 	mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1153 	    ? MA_OWNED : MA_NOTOWNED);
1154 	if (oset != NULL)
1155 		*oset = td->td_sigmask;
1156 
1157 	error = 0;
1158 	if (set != NULL) {
1159 		switch (how) {
1160 		case SIG_BLOCK:
1161 			SIG_CANTMASK(*set);
1162 			oset1 = td->td_sigmask;
1163 			SIGSETOR(td->td_sigmask, *set);
1164 			new_block = td->td_sigmask;
1165 			SIGSETNAND(new_block, oset1);
1166 			break;
1167 		case SIG_UNBLOCK:
1168 			SIGSETNAND(td->td_sigmask, *set);
1169 			signotify(td);
1170 			goto out;
1171 		case SIG_SETMASK:
1172 			SIG_CANTMASK(*set);
1173 			oset1 = td->td_sigmask;
1174 			if (flags & SIGPROCMASK_OLD)
1175 				SIGSETLO(td->td_sigmask, *set);
1176 			else
1177 				td->td_sigmask = *set;
1178 			new_block = td->td_sigmask;
1179 			SIGSETNAND(new_block, oset1);
1180 			signotify(td);
1181 			break;
1182 		default:
1183 			error = EINVAL;
1184 			goto out;
1185 		}
1186 
1187 		/*
1188 		 * The new_block set contains signals that were not previously
1189 		 * blocked, but are blocked now.
1190 		 *
1191 		 * In case we block any signal that was not previously blocked
1192 		 * for td, and process has the signal pending, try to schedule
1193 		 * signal delivery to some thread that does not block the
1194 		 * signal, possibly waking it up.
1195 		 */
1196 		if (p->p_numthreads != 1)
1197 			reschedule_signals(p, new_block, flags);
1198 	}
1199 
1200 out:
1201 	if (!(flags & SIGPROCMASK_PROC_LOCKED))
1202 		PROC_UNLOCK(p);
1203 	return (error);
1204 }
1205 
1206 #ifndef _SYS_SYSPROTO_H_
1207 struct sigprocmask_args {
1208 	int	how;
1209 	const sigset_t *set;
1210 	sigset_t *oset;
1211 };
1212 #endif
1213 int
1214 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1215 {
1216 	sigset_t set, oset;
1217 	sigset_t *setp, *osetp;
1218 	int error;
1219 
1220 	setp = (uap->set != NULL) ? &set : NULL;
1221 	osetp = (uap->oset != NULL) ? &oset : NULL;
1222 	if (setp) {
1223 		error = copyin(uap->set, setp, sizeof(set));
1224 		if (error)
1225 			return (error);
1226 	}
1227 	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1228 	if (osetp && !error) {
1229 		error = copyout(osetp, uap->oset, sizeof(oset));
1230 	}
1231 	return (error);
1232 }
1233 
1234 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1235 #ifndef _SYS_SYSPROTO_H_
1236 struct osigprocmask_args {
1237 	int	how;
1238 	osigset_t mask;
1239 };
1240 #endif
1241 int
1242 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1243 {
1244 	sigset_t set, oset;
1245 	int error;
1246 
1247 	OSIG2SIG(uap->mask, set);
1248 	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1249 	SIG2OSIG(oset, td->td_retval[0]);
1250 	return (error);
1251 }
1252 #endif /* COMPAT_43 */
1253 
1254 int
1255 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1256 {
1257 	ksiginfo_t ksi;
1258 	sigset_t set;
1259 	int error;
1260 
1261 	error = copyin(uap->set, &set, sizeof(set));
1262 	if (error) {
1263 		td->td_retval[0] = error;
1264 		return (0);
1265 	}
1266 
1267 	error = kern_sigtimedwait(td, set, &ksi, NULL);
1268 	if (error) {
1269 		/*
1270 		 * sigwait() function shall not return EINTR, but
1271 		 * the syscall does.  Non-ancient libc provides the
1272 		 * wrapper which hides EINTR.  Otherwise, the EINTR return
1273 		 * is used by libthr to handle the required cancellation
1274 		 * point in sigwait().
1275 		 */
1276 		if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1277 			return (ERESTART);
1278 		td->td_retval[0] = error;
1279 		return (0);
1280 	}
1281 
1282 	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1283 	td->td_retval[0] = error;
1284 	return (0);
1285 }
1286 
1287 int
1288 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1289 {
1290 	struct timespec ts;
1291 	struct timespec *timeout;
1292 	sigset_t set;
1293 	ksiginfo_t ksi;
1294 	int error;
1295 
1296 	if (uap->timeout) {
1297 		error = copyin(uap->timeout, &ts, sizeof(ts));
1298 		if (error)
1299 			return (error);
1300 
1301 		timeout = &ts;
1302 	} else
1303 		timeout = NULL;
1304 
1305 	error = copyin(uap->set, &set, sizeof(set));
1306 	if (error)
1307 		return (error);
1308 
1309 	error = kern_sigtimedwait(td, set, &ksi, timeout);
1310 	if (error)
1311 		return (error);
1312 
1313 	if (uap->info)
1314 		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1315 
1316 	if (error == 0)
1317 		td->td_retval[0] = ksi.ksi_signo;
1318 	return (error);
1319 }
1320 
1321 int
1322 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1323 {
1324 	ksiginfo_t ksi;
1325 	sigset_t set;
1326 	int error;
1327 
1328 	error = copyin(uap->set, &set, sizeof(set));
1329 	if (error)
1330 		return (error);
1331 
1332 	error = kern_sigtimedwait(td, set, &ksi, NULL);
1333 	if (error)
1334 		return (error);
1335 
1336 	if (uap->info)
1337 		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1338 
1339 	if (error == 0)
1340 		td->td_retval[0] = ksi.ksi_signo;
1341 	return (error);
1342 }
1343 
1344 static void
1345 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1346 {
1347 	struct thread *thr;
1348 
1349 	FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1350 		if (thr == td)
1351 			thr->td_si = *si;
1352 		else
1353 			thr->td_si.si_signo = 0;
1354 	}
1355 }
1356 
1357 int
1358 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1359 	struct timespec *timeout)
1360 {
1361 	struct sigacts *ps;
1362 	sigset_t saved_mask, new_block;
1363 	struct proc *p;
1364 	int error, sig, timevalid = 0;
1365 	sbintime_t sbt, precision, tsbt;
1366 	struct timespec ts;
1367 	bool traced;
1368 
1369 	p = td->td_proc;
1370 	error = 0;
1371 	traced = false;
1372 
1373 	/* Ensure the sigfastblock value is up to date. */
1374 	sigfastblock_fetch(td);
1375 
1376 	if (timeout != NULL) {
1377 		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1378 			timevalid = 1;
1379 			ts = *timeout;
1380 			if (ts.tv_sec < INT32_MAX / 2) {
1381 				tsbt = tstosbt(ts);
1382 				precision = tsbt;
1383 				precision >>= tc_precexp;
1384 				if (TIMESEL(&sbt, tsbt))
1385 					sbt += tc_tick_sbt;
1386 				sbt += tsbt;
1387 			} else
1388 				precision = sbt = 0;
1389 		}
1390 	} else
1391 		precision = sbt = 0;
1392 	ksiginfo_init(ksi);
1393 	/* Some signals can not be waited for. */
1394 	SIG_CANTMASK(waitset);
1395 	ps = p->p_sigacts;
1396 	PROC_LOCK(p);
1397 	saved_mask = td->td_sigmask;
1398 	SIGSETNAND(td->td_sigmask, waitset);
1399 	if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
1400 	    !kern_sig_discard_ign) {
1401 		thread_lock(td);
1402 		td->td_flags |= TDF_SIGWAIT;
1403 		thread_unlock(td);
1404 	}
1405 	for (;;) {
1406 		mtx_lock(&ps->ps_mtx);
1407 		sig = cursig(td);
1408 		mtx_unlock(&ps->ps_mtx);
1409 		KASSERT(sig >= 0, ("sig %d", sig));
1410 		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1411 			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1412 			    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1413 				error = 0;
1414 				break;
1415 			}
1416 		}
1417 
1418 		if (error != 0)
1419 			break;
1420 
1421 		/*
1422 		 * POSIX says this must be checked after looking for pending
1423 		 * signals.
1424 		 */
1425 		if (timeout != NULL && !timevalid) {
1426 			error = EINVAL;
1427 			break;
1428 		}
1429 
1430 		if (traced) {
1431 			error = EINTR;
1432 			break;
1433 		}
1434 
1435 		error = msleep_sbt(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
1436 		    "sigwait", sbt, precision, C_ABSOLUTE);
1437 
1438 		/* The syscalls can not be restarted. */
1439 		if (error == ERESTART)
1440 			error = EINTR;
1441 
1442 		/*
1443 		 * If PTRACE_SCE or PTRACE_SCX were set after
1444 		 * userspace entered the syscall, return spurious
1445 		 * EINTR after wait was done.  Only do this as last
1446 		 * resort after rechecking for possible queued signals
1447 		 * and expired timeouts.
1448 		 */
1449 		if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
1450 			traced = true;
1451 	}
1452 	thread_lock(td);
1453 	td->td_flags &= ~TDF_SIGWAIT;
1454 	thread_unlock(td);
1455 
1456 	new_block = saved_mask;
1457 	SIGSETNAND(new_block, td->td_sigmask);
1458 	td->td_sigmask = saved_mask;
1459 	/*
1460 	 * Fewer signals can be delivered to us, reschedule signal
1461 	 * notification.
1462 	 */
1463 	if (p->p_numthreads != 1)
1464 		reschedule_signals(p, new_block, 0);
1465 
1466 	if (error == 0) {
1467 		SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1468 
1469 		if (ksi->ksi_code == SI_TIMER)
1470 			itimer_accept(p, ksi->ksi_timerid, ksi);
1471 
1472 #ifdef KTRACE
1473 		if (KTRPOINT(td, KTR_PSIG)) {
1474 			sig_t action;
1475 
1476 			mtx_lock(&ps->ps_mtx);
1477 			action = ps->ps_sigact[_SIG_IDX(sig)];
1478 			mtx_unlock(&ps->ps_mtx);
1479 			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1480 		}
1481 #endif
1482 		if (sig == SIGKILL) {
1483 			proc_td_siginfo_capture(td, &ksi->ksi_info);
1484 			sigexit(td, sig);
1485 		}
1486 	}
1487 	PROC_UNLOCK(p);
1488 	return (error);
1489 }
1490 
1491 #ifndef _SYS_SYSPROTO_H_
1492 struct sigpending_args {
1493 	sigset_t	*set;
1494 };
1495 #endif
1496 int
1497 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1498 {
1499 	struct proc *p = td->td_proc;
1500 	sigset_t pending;
1501 
1502 	PROC_LOCK(p);
1503 	pending = p->p_sigqueue.sq_signals;
1504 	SIGSETOR(pending, td->td_sigqueue.sq_signals);
1505 	PROC_UNLOCK(p);
1506 	return (copyout(&pending, uap->set, sizeof(sigset_t)));
1507 }
1508 
1509 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1510 #ifndef _SYS_SYSPROTO_H_
1511 struct osigpending_args {
1512 	int	dummy;
1513 };
1514 #endif
1515 int
1516 osigpending(struct thread *td, struct osigpending_args *uap)
1517 {
1518 	struct proc *p = td->td_proc;
1519 	sigset_t pending;
1520 
1521 	PROC_LOCK(p);
1522 	pending = p->p_sigqueue.sq_signals;
1523 	SIGSETOR(pending, td->td_sigqueue.sq_signals);
1524 	PROC_UNLOCK(p);
1525 	SIG2OSIG(pending, td->td_retval[0]);
1526 	return (0);
1527 }
1528 #endif /* COMPAT_43 */
1529 
1530 #if defined(COMPAT_43)
1531 /*
1532  * Generalized interface signal handler, 4.3-compatible.
1533  */
1534 #ifndef _SYS_SYSPROTO_H_
1535 struct osigvec_args {
1536 	int	signum;
1537 	struct	sigvec *nsv;
1538 	struct	sigvec *osv;
1539 };
1540 #endif
1541 /* ARGSUSED */
1542 int
1543 osigvec(struct thread *td, struct osigvec_args *uap)
1544 {
1545 	struct sigvec vec;
1546 	struct sigaction nsa, osa;
1547 	struct sigaction *nsap, *osap;
1548 	int error;
1549 
1550 	if (uap->signum <= 0 || uap->signum >= ONSIG)
1551 		return (EINVAL);
1552 	nsap = (uap->nsv != NULL) ? &nsa : NULL;
1553 	osap = (uap->osv != NULL) ? &osa : NULL;
1554 	if (nsap) {
1555 		error = copyin(uap->nsv, &vec, sizeof(vec));
1556 		if (error)
1557 			return (error);
1558 		nsap->sa_handler = vec.sv_handler;
1559 		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1560 		nsap->sa_flags = vec.sv_flags;
1561 		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
1562 	}
1563 	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1564 	if (osap && !error) {
1565 		vec.sv_handler = osap->sa_handler;
1566 		SIG2OSIG(osap->sa_mask, vec.sv_mask);
1567 		vec.sv_flags = osap->sa_flags;
1568 		vec.sv_flags &= ~SA_NOCLDWAIT;
1569 		vec.sv_flags ^= SA_RESTART;
1570 		error = copyout(&vec, uap->osv, sizeof(vec));
1571 	}
1572 	return (error);
1573 }
1574 
1575 #ifndef _SYS_SYSPROTO_H_
1576 struct osigblock_args {
1577 	int	mask;
1578 };
1579 #endif
1580 int
1581 osigblock(struct thread *td, struct osigblock_args *uap)
1582 {
1583 	sigset_t set, oset;
1584 
1585 	OSIG2SIG(uap->mask, set);
1586 	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1587 	SIG2OSIG(oset, td->td_retval[0]);
1588 	return (0);
1589 }
1590 
1591 #ifndef _SYS_SYSPROTO_H_
1592 struct osigsetmask_args {
1593 	int	mask;
1594 };
1595 #endif
1596 int
1597 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1598 {
1599 	sigset_t set, oset;
1600 
1601 	OSIG2SIG(uap->mask, set);
1602 	kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1603 	SIG2OSIG(oset, td->td_retval[0]);
1604 	return (0);
1605 }
1606 #endif /* COMPAT_43 */
1607 
1608 /*
1609  * Suspend calling thread until signal, providing mask to be set in the
1610  * meantime.
1611  */
1612 #ifndef _SYS_SYSPROTO_H_
1613 struct sigsuspend_args {
1614 	const sigset_t *sigmask;
1615 };
1616 #endif
1617 /* ARGSUSED */
1618 int
1619 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1620 {
1621 	sigset_t mask;
1622 	int error;
1623 
1624 	error = copyin(uap->sigmask, &mask, sizeof(mask));
1625 	if (error)
1626 		return (error);
1627 	return (kern_sigsuspend(td, mask));
1628 }
1629 
1630 int
1631 kern_sigsuspend(struct thread *td, sigset_t mask)
1632 {
1633 	struct proc *p = td->td_proc;
1634 	int has_sig, sig;
1635 
1636 	/* Ensure the sigfastblock value is up to date. */
1637 	sigfastblock_fetch(td);
1638 
1639 	/*
1640 	 * When returning from sigsuspend, we want
1641 	 * the old mask to be restored after the
1642 	 * signal handler has finished.  Thus, we
1643 	 * save it here and mark the sigacts structure
1644 	 * to indicate this.
1645 	 */
1646 	PROC_LOCK(p);
1647 	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1648 	    SIGPROCMASK_PROC_LOCKED);
1649 	td->td_pflags |= TDP_OLDMASK;
1650 	ast_sched(td, TDA_SIGSUSPEND);
1651 
1652 	/*
1653 	 * Process signals now.  Otherwise, we can get a spurious wakeup
1654 	 * when a signal entered the process queue but was delivered to
1655 	 * another thread, while sigsuspend should return only on signal
1656 	 * delivery.
1657 	 */
1658 	(p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1659 	for (has_sig = 0; !has_sig;) {
1660 		while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1661 			0) == 0)
1662 			/* void */;
1663 		thread_suspend_check(0);
1664 		mtx_lock(&p->p_sigacts->ps_mtx);
1665 		while ((sig = cursig(td)) != 0) {
1666 			KASSERT(sig >= 0, ("sig %d", sig));
1667 			has_sig += postsig(sig);
1668 		}
1669 		mtx_unlock(&p->p_sigacts->ps_mtx);
1670 
1671 		/*
1672 		 * If PTRACE_SCE or PTRACE_SCX were set after
1673 		 * userspace entered the syscall, return spurious
1674 		 * EINTR.
1675 		 */
1676 		if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
1677 			has_sig += 1;
1678 	}
1679 	PROC_UNLOCK(p);
1680 	td->td_errno = EINTR;
1681 	td->td_pflags |= TDP_NERRNO;
1682 	return (EJUSTRETURN);
1683 }
1684 
1685 #ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1686 /*
1687  * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
1688  * convention: libc stub passes mask, not pointer, to save a copyin.
1689  */
1690 #ifndef _SYS_SYSPROTO_H_
1691 struct osigsuspend_args {
1692 	osigset_t mask;
1693 };
1694 #endif
1695 /* ARGSUSED */
1696 int
1697 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1698 {
1699 	sigset_t mask;
1700 
1701 	OSIG2SIG(uap->mask, mask);
1702 	return (kern_sigsuspend(td, mask));
1703 }
1704 #endif /* COMPAT_43 */
1705 
1706 #if defined(COMPAT_43)
1707 #ifndef _SYS_SYSPROTO_H_
1708 struct osigstack_args {
1709 	struct	sigstack *nss;
1710 	struct	sigstack *oss;
1711 };
1712 #endif
1713 /* ARGSUSED */
1714 int
1715 osigstack(struct thread *td, struct osigstack_args *uap)
1716 {
1717 	struct sigstack nss, oss;
1718 	int error = 0;
1719 
1720 	if (uap->nss != NULL) {
1721 		error = copyin(uap->nss, &nss, sizeof(nss));
1722 		if (error)
1723 			return (error);
1724 	}
1725 	oss.ss_sp = td->td_sigstk.ss_sp;
1726 	oss.ss_onstack = sigonstack(cpu_getstack(td));
1727 	if (uap->nss != NULL) {
1728 		td->td_sigstk.ss_sp = nss.ss_sp;
1729 		td->td_sigstk.ss_size = 0;
1730 		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1731 		td->td_pflags |= TDP_ALTSTACK;
1732 	}
1733 	if (uap->oss != NULL)
1734 		error = copyout(&oss, uap->oss, sizeof(oss));
1735 
1736 	return (error);
1737 }
1738 #endif /* COMPAT_43 */
1739 
1740 #ifndef _SYS_SYSPROTO_H_
1741 struct sigaltstack_args {
1742 	stack_t	*ss;
1743 	stack_t	*oss;
1744 };
1745 #endif
1746 /* ARGSUSED */
1747 int
1748 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1749 {
1750 	stack_t ss, oss;
1751 	int error;
1752 
1753 	if (uap->ss != NULL) {
1754 		error = copyin(uap->ss, &ss, sizeof(ss));
1755 		if (error)
1756 			return (error);
1757 	}
1758 	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1759 	    (uap->oss != NULL) ? &oss : NULL);
1760 	if (error)
1761 		return (error);
1762 	if (uap->oss != NULL)
1763 		error = copyout(&oss, uap->oss, sizeof(stack_t));
1764 	return (error);
1765 }
1766 
1767 int
1768 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1769 {
1770 	struct proc *p = td->td_proc;
1771 	int oonstack;
1772 
1773 	oonstack = sigonstack(cpu_getstack(td));
1774 
1775 	if (oss != NULL) {
1776 		*oss = td->td_sigstk;
1777 		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1778 		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1779 	}
1780 
1781 	if (ss != NULL) {
1782 		if (oonstack)
1783 			return (EPERM);
1784 		if ((ss->ss_flags & ~SS_DISABLE) != 0)
1785 			return (EINVAL);
1786 		if (!(ss->ss_flags & SS_DISABLE)) {
1787 			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1788 				return (ENOMEM);
1789 
1790 			td->td_sigstk = *ss;
1791 			td->td_pflags |= TDP_ALTSTACK;
1792 		} else {
1793 			td->td_pflags &= ~TDP_ALTSTACK;
1794 		}
1795 	}
1796 	return (0);
1797 }
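
/*
 * Illustrative userspace counterpart (a sketch; error handling omitted).
 * kern_sigaltstack() above rejects a change with EPERM while the thread
 * is already running on the alternate stack, and with ENOMEM when
 * ss_size is below sv_minsigstksz.
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	(void)sigaltstack(&ss, NULL);
 */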
1798 
1799 struct killpg1_ctx {
1800 	struct thread *td;
1801 	ksiginfo_t *ksi;
1802 	int sig;
1803 	bool sent;
1804 	bool found;
1805 	int ret;
1806 };
1807 
1808 static void
1809 killpg1_sendsig_locked(struct proc *p, struct killpg1_ctx *arg)
1810 {
1811 	int err;
1812 
1813 	err = p_cansignal(arg->td, p, arg->sig);
1814 	if (err == 0 && arg->sig != 0)
1815 		pksignal(p, arg->sig, arg->ksi);
1816 	if (err != ESRCH)
1817 		arg->found = true;
1818 	if (err == 0)
1819 		arg->sent = true;
1820 	else if (arg->ret == 0 && err != ESRCH && err != EPERM)
1821 		arg->ret = err;
1822 }
1823 
1824 static void
1825 killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
1826 {
1827 
1828 	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1829 	    (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
1830 		return;
1831 
1832 	PROC_LOCK(p);
1833 	killpg1_sendsig_locked(p, arg);
1834 	PROC_UNLOCK(p);
1835 }
1836 
1837 static void
1838 kill_processes_prison_cb(struct proc *p, void *arg)
1839 {
1840 	struct killpg1_ctx *ctx = arg;
1841 
1842 	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1843 	    (p == ctx->td->td_proc) || p->p_state == PRS_NEW)
1844 		return;
1845 
1846 	killpg1_sendsig_locked(p, ctx);
1847 }
1848 
1849 /*
1850  * Common code for kill process group/broadcast kill.
1851  * td is the calling thread, as usual.
1852  */
1853 static int
1854 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1855 {
1856 	struct proc *p;
1857 	struct pgrp *pgrp;
1858 	struct killpg1_ctx arg;
1859 
1860 	arg.td = td;
1861 	arg.ksi = ksi;
1862 	arg.sig = sig;
1863 	arg.sent = false;
1864 	arg.found = false;
1865 	arg.ret = 0;
1866 	if (all) {
1867 		/*
1868 		 * broadcast
1869 		 */
1870 		prison_proc_iterate(td->td_ucred->cr_prison,
1871 		    kill_processes_prison_cb, &arg);
1872 	} else {
1873 again:
1874 		sx_slock(&proctree_lock);
1875 		if (pgid == 0) {
1876 			/*
1877 			 * zero pgid means send to my process group.
1878 			 */
1879 			pgrp = td->td_proc->p_pgrp;
1880 			PGRP_LOCK(pgrp);
1881 		} else {
1882 			pgrp = pgfind(pgid);
1883 			if (pgrp == NULL) {
1884 				sx_sunlock(&proctree_lock);
1885 				return (ESRCH);
1886 			}
1887 		}
1888 		sx_sunlock(&proctree_lock);
1889 		if (!sx_try_xlock(&pgrp->pg_killsx)) {
1890 			PGRP_UNLOCK(pgrp);
1891 			sx_xlock(&pgrp->pg_killsx);
1892 			sx_xunlock(&pgrp->pg_killsx);
1893 			goto again;
1894 		}
1895 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1896 			killpg1_sendsig(p, false, &arg);
1897 		}
1898 		PGRP_UNLOCK(pgrp);
1899 		sx_xunlock(&pgrp->pg_killsx);
1900 	}
1901 	MPASS(arg.ret != 0 || arg.found || !arg.sent);
1902 	if (arg.ret == 0 && !arg.sent)
1903 		arg.ret = arg.found ? EPERM : ESRCH;
1904 	return (arg.ret);
1905 }
1906 
1907 #ifndef _SYS_SYSPROTO_H_
1908 struct kill_args {
1909 	int	pid;
1910 	int	signum;
1911 };
1912 #endif
1913 /* ARGSUSED */
1914 int
1915 sys_kill(struct thread *td, struct kill_args *uap)
1916 {
1917 
1918 	return (kern_kill(td, uap->pid, uap->signum));
1919 }
1920 
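/*
 * Kernel back end for kill(2).  A positive pid targets a single
 * process; pid 0, -1, and other negative values are dispatched to
 * killpg1() for process-group or broadcast delivery.
 */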
1921 int
1922 kern_kill(struct thread *td, pid_t pid, int signum)
1923 {
1924 	ksiginfo_t ksi;
1925 	struct proc *p;
1926 	int error;
1927 
1928 	/*
1929 	 * A process in capability mode can send signals only to itself.
1930 	 * The main rationale behind this is that abort(3) is implemented as
1931 	 * kill(getpid(), SIGABRT).
1932 	 */
1933 	if (pid != td->td_proc->p_pid) {
1934 		if (CAP_TRACING(td))
1935 			ktrcapfail(CAPFAIL_SIGNAL, &signum);
1936 		if (IN_CAPABILITY_MODE(td))
1937 			return (ECAPMODE);
1938 	}
1939 
1940 	AUDIT_ARG_SIGNUM(signum);
1941 	AUDIT_ARG_PID(pid);
1942 	if ((u_int)signum > _SIG_MAXSIG)
1943 		return (EINVAL);
1944 
1945 	ksiginfo_init(&ksi);
1946 	ksi.ksi_signo = signum;
1947 	ksi.ksi_code = SI_USER;
1948 	ksi.ksi_pid = td->td_proc->p_pid;
1949 	ksi.ksi_uid = td->td_ucred->cr_ruid;
1950 
1951 	if (pid > 0) {
1952 		/* kill single process */
1953 		if ((p = pfind_any(pid)) == NULL)
1954 			return (ESRCH);
1955 		AUDIT_ARG_PROCESS(p);
1956 		error = p_cansignal(td, p, signum);
1957 		if (error == 0 && signum)
1958 			pksignal(p, signum, &ksi);
1959 		PROC_UNLOCK(p);
1960 		return (error);
1961 	}
1962 	switch (pid) {
1963 	case -1:		/* broadcast signal */
1964 		return (killpg1(td, signum, 0, 1, &ksi));
1965 	case 0:			/* signal own process group */
1966 		return (killpg1(td, signum, 0, 0, &ksi));
1967 	default:		/* negative explicit process group */
1968 		return (killpg1(td, signum, -pid, 0, &ksi));
1969 	}
1970 	/* NOTREACHED */
1971 }
1972 
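/*
 * pdkill(2): send a signal to the process identified by a process
 * descriptor, subject to the usual p_cansignal() check.
 */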
1973 int
1974 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1975 {
1976 	struct proc *p;
1977 	int error;
1978 
1979 	AUDIT_ARG_SIGNUM(uap->signum);
1980 	AUDIT_ARG_FD(uap->fd);
1981 	if ((u_int)uap->signum > _SIG_MAXSIG)
1982 		return (EINVAL);
1983 
1984 	error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1985 	if (error)
1986 		return (error);
1987 	AUDIT_ARG_PROCESS(p);
1988 	error = p_cansignal(td, p, uap->signum);
1989 	if (error == 0 && uap->signum)
1990 		kern_psignal(p, uap->signum);
1991 	PROC_UNLOCK(p);
1992 	return (error);
1993 }
1994 
1995 #if defined(COMPAT_43)
1996 #ifndef _SYS_SYSPROTO_H_
1997 struct okillpg_args {
1998 	int	pgid;
1999 	int	signum;
2000 };
2001 #endif
2002 /* ARGSUSED */
2003 int
2004 okillpg(struct thread *td, struct okillpg_args *uap)
2005 {
2006 	ksiginfo_t ksi;
2007 
2008 	AUDIT_ARG_SIGNUM(uap->signum);
2009 	AUDIT_ARG_PID(uap->pgid);
2010 	if ((u_int)uap->signum > _SIG_MAXSIG)
2011 		return (EINVAL);
2012 
2013 	ksiginfo_init(&ksi);
2014 	ksi.ksi_signo = uap->signum;
2015 	ksi.ksi_code = SI_USER;
2016 	ksi.ksi_pid = td->td_proc->p_pid;
2017 	ksi.ksi_uid = td->td_ucred->cr_ruid;
2018 	return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
2019 }
2020 #endif /* COMPAT_43 */
2021 
2022 #ifndef _SYS_SYSPROTO_H_
2023 struct sigqueue_args {
2024 	pid_t pid;
2025 	int signum;
2026 	/* union sigval */ void *value;
2027 };
2028 #endif
2029 int
2030 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
2031 {
2032 	union sigval sv;
2033 
2034 	sv.sival_ptr = uap->value;
2035 
2036 	return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
2037 }
2038 
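/*
 * Kernel back end for sigqueue(2).  If __SIGQUEUE_TID is set in
 * signumf, the signal is directed at the thread with the given id
 * inside the current process; otherwise it is sent to the process
 * identified by pid.
 */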
2039 int
2040 kern_sigqueue(struct thread *td, pid_t pid, int signumf, union sigval *value)
2041 {
2042 	ksiginfo_t ksi;
2043 	struct proc *p;
2044 	struct thread *td2;
2045 	u_int signum;
2046 	int error;
2047 
2048 	signum = signumf & ~__SIGQUEUE_TID;
2049 	if (signum > _SIG_MAXSIG)
2050 		return (EINVAL);
2051 
2052 	/*
2053 	 * The specification says sigqueue can only send a signal to a
2054 	 * single process.
2055 	 */
2056 	if (pid <= 0)
2057 		return (EINVAL);
2058 
2059 	if ((signumf & __SIGQUEUE_TID) == 0) {
2060 		if ((p = pfind_any(pid)) == NULL)
2061 			return (ESRCH);
2062 		td2 = NULL;
2063 	} else {
2064 		p = td->td_proc;
2065 		td2 = tdfind((lwpid_t)pid, p->p_pid);
2066 		if (td2 == NULL)
2067 			return (ESRCH);
2068 	}
2069 
2070 	error = p_cansignal(td, p, signum);
2071 	if (error == 0 && signum != 0) {
2072 		ksiginfo_init(&ksi);
2073 		ksi.ksi_flags = KSI_SIGQ;
2074 		ksi.ksi_signo = signum;
2075 		ksi.ksi_code = SI_QUEUE;
2076 		ksi.ksi_pid = td->td_proc->p_pid;
2077 		ksi.ksi_uid = td->td_ucred->cr_ruid;
2078 		ksi.ksi_value = *value;
2079 		error = tdsendsignal(p, td2, ksi.ksi_signo, &ksi);
2080 	}
2081 	PROC_UNLOCK(p);
2082 	return (error);
2083 }
2084 
2085 /*
2086  * Send a signal to a process group.  If checktty is 1,
2087  * Send a signal to a process group.  If checkctty is 1,
2088  */
2089 void
2090 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
2091 {
2092 	struct proc *p;
2093 
2094 	if (pgrp) {
2095 		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
2096 		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
2097 			PROC_LOCK(p);
2098 			if (p->p_state == PRS_NORMAL &&
2099 			    (checkctty == 0 || p->p_flag & P_CONTROLT))
2100 				pksignal(p, sig, ksi);
2101 			PROC_UNLOCK(p);
2102 		}
2103 	}
2104 }
2105 
2106 /*
2107  * Recalculate the signal mask and reset the signal disposition after
2108  * the usermode frame for delivery is formed.  Should be called after the
2109  * machine-specific routine, because sysent->sv_sendsig() needs correct
2110  * ps_siginfo and signal mask.
2111  */
2112 static void
2113 postsig_done(int sig, struct thread *td, struct sigacts *ps)
2114 {
2115 	sigset_t mask;
2116 
2117 	mtx_assert(&ps->ps_mtx, MA_OWNED);
2118 	td->td_ru.ru_nsignals++;
2119 	mask = ps->ps_catchmask[_SIG_IDX(sig)];
2120 	if (!SIGISMEMBER(ps->ps_signodefer, sig))
2121 		SIGADDSET(mask, sig);
2122 	kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
2123 	    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
2124 	if (SIGISMEMBER(ps->ps_sigreset, sig))
2125 		sigdflt(ps, sig);
2126 }
2127 
2128 /*
2129  * Send a signal caused by a trap to the current thread.  If it will be
2130  * caught immediately, deliver it with correct code.  Otherwise, post it
2131  * normally.
2132  */
2133 void
2134 trapsignal(struct thread *td, ksiginfo_t *ksi)
2135 {
2136 	struct sigacts *ps;
2137 	struct proc *p;
2138 	sigset_t sigmask;
2139 	int sig;
2140 
2141 	p = td->td_proc;
2142 	sig = ksi->ksi_signo;
2143 	KASSERT(_SIG_VALID(sig), ("invalid signal"));
2144 
2145 	sigfastblock_fetch(td);
2146 	PROC_LOCK(p);
2147 	ps = p->p_sigacts;
2148 	mtx_lock(&ps->ps_mtx);
2149 	sigmask = td->td_sigmask;
2150 	if (td->td_sigblock_val != 0)
2151 		SIGSETOR(sigmask, fastblock_mask);
2152 	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
2153 	    !SIGISMEMBER(sigmask, sig)) {
2154 #ifdef KTRACE
2155 		if (KTRPOINT(curthread, KTR_PSIG))
2156 			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
2157 			    &td->td_sigmask, ksi->ksi_code);
2158 #endif
2159 		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
2160 		    ksi, &td->td_sigmask);
2161 		postsig_done(sig, td, ps);
2162 		mtx_unlock(&ps->ps_mtx);
2163 	} else {
2164 		/*
2165 		 * Avoid a possible infinite loop if the thread is
2166 		 * masking the signal or the process is ignoring
2167 		 * the signal.
2168 		 */
2169 		if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
2170 		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
2171 			SIGDELSET(td->td_sigmask, sig);
2172 			SIGDELSET(ps->ps_sigcatch, sig);
2173 			SIGDELSET(ps->ps_sigignore, sig);
2174 			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2175 			td->td_pflags &= ~TDP_SIGFASTBLOCK;
2176 			td->td_sigblock_val = 0;
2177 		}
2178 		mtx_unlock(&ps->ps_mtx);
2179 		p->p_sig = sig;		/* XXX to verify code */
2180 		tdsendsignal(p, td, sig, ksi);
2181 	}
2182 	PROC_UNLOCK(p);
2183 }
2184 
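/*
 * Select a thread within the process to which the signal can be
 * delivered: prefer the current thread if it does not block the
 * signal, then any thread that does not block it, and fall back to
 * the first thread in the process.
 */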
2185 static struct thread *
2186 sigtd(struct proc *p, int sig, bool fast_sigblock)
2187 {
2188 	struct thread *td, *signal_td;
2189 
2190 	PROC_LOCK_ASSERT(p, MA_OWNED);
2191 	MPASS(!fast_sigblock || p == curproc);
2192 
2193 	/*
2194 	 * Check if current thread can handle the signal without
2195 	 * switching context to another thread.
2196 	 */
2197 	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) &&
2198 	    (!fast_sigblock || curthread->td_sigblock_val == 0))
2199 		return (curthread);
2200 
2201 	/* Find a non-stopped thread that does not mask the signal. */
2202 	signal_td = NULL;
2203 	FOREACH_THREAD_IN_PROC(p, td) {
2204 		if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock ||
2205 		    td != curthread || td->td_sigblock_val == 0) &&
2206 		    (td->td_flags & TDF_BOUNDARY) == 0) {
2207 			signal_td = td;
2208 			break;
2209 		}
2210 	}
2211 	/* Select random (first) thread if no better match was found. */
2212 	if (signal_td == NULL)
2213 		signal_td = FIRST_THREAD_IN_PROC(p);
2214 	return (signal_td);
2215 }
2216 
2217 /*
2218  * Send the signal to the process.  If the signal has an action, the action
2219  * is usually performed by the target process rather than the caller; we add
2220  * the signal to the set of pending signals for the process.
2221  *
2222  * Exceptions:
2223  *   o When a stop signal is sent to a sleeping process that takes the
2224  *     default action, the process is stopped without awakening it.
2225  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
2226  *     regardless of the signal action (e.g., blocked or ignored).
2227  *
2228  * Other ignored signals are discarded immediately.
2229  *
2230  * NB: This function may be entered from the debugger via the "kill" DDB
2231  * command.  There is little that can be done to mitigate the possibly messy
2232  * side effects of this unwise possibility.
2233  */
2234 void
2235 kern_psignal(struct proc *p, int sig)
2236 {
2237 	ksiginfo_t ksi;
2238 
2239 	ksiginfo_init(&ksi);
2240 	ksi.ksi_signo = sig;
2241 	ksi.ksi_code = SI_KERNEL;
2242 	(void) tdsendsignal(p, NULL, sig, &ksi);
2243 }
2244 
2245 int
2246 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2247 {
2248 
2249 	return (tdsendsignal(p, NULL, sig, ksi));
2250 }
2251 
2252 /* Utility function for finding a thread to send signal event to. */
2253 int
2254 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
2255 {
2256 	struct thread *td;
2257 
2258 	if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2259 		td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2260 		if (td == NULL)
2261 			return (ESRCH);
2262 		*ttd = td;
2263 	} else {
2264 		*ttd = NULL;
2265 		PROC_LOCK(p);
2266 	}
2267 	return (0);
2268 }
2269 
2270 void
2271 tdsignal(struct thread *td, int sig)
2272 {
2273 	ksiginfo_t ksi;
2274 
2275 	ksiginfo_init(&ksi);
2276 	ksi.ksi_signo = sig;
2277 	ksi.ksi_code = SI_KERNEL;
2278 	(void) tdsendsignal(td->td_proc, td, sig, &ksi);
2279 }
2280 
2281 void
2282 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2283 {
2284 
2285 	(void) tdsendsignal(td->td_proc, td, sig, ksi);
2286 }
2287 
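/*
 * Abort the thread's interruptible sleep, but only when there is an
 * error to report or the thread is sleeping in sigwait(); otherwise
 * just release the thread lock.
 */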
2288 static void
2289 sig_sleepq_abort(struct thread *td, int intrval)
2290 {
2291 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2292 
2293 	if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0)
2294 		thread_unlock(td);
2295 	else
2296 		sleepq_abort(td, intrval);
2297 }
2298 
2299 int
2300 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2301 {
2302 	sig_t action;
2303 	sigqueue_t *sigqueue;
2304 	struct sigacts *ps;
2305 	int intrval, prop, ret;
2306 
2307 	MPASS(td == NULL || p == td->td_proc);
2308 	PROC_LOCK_ASSERT(p, MA_OWNED);
2309 
2310 	if (!_SIG_VALID(sig))
2311 		panic("%s(): invalid signal %d", __func__, sig);
2312 
2313 	KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2314 
2315 	/*
2316 	 * IEEE Std 1003.1-2001: return success when killing a zombie.
2317 	 */
2318 	if (p->p_state == PRS_ZOMBIE) {
2319 		if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2320 			ksiginfo_tryfree(ksi);
2321 		return (0);
2322 	}
2323 
2324 	ps = p->p_sigacts;
2325 	KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2326 	prop = sigprop(sig);
2327 
2328 	if (td == NULL) {
2329 		td = sigtd(p, sig, false);
2330 		sigqueue = &p->p_sigqueue;
2331 	} else
2332 		sigqueue = &td->td_sigqueue;
2333 
2334 	SDT_PROBE3(proc, , , signal__send, td, p, sig);
2335 
2336 	/*
2337 	 * If the signal is being ignored, then we forget about it
2338 	 * immediately, except when the target process executes
2339 	 * sigwait().  (Note: we don't set SIGCONT in ps_sigignore,
2340 	 * and if it is set to SIG_IGN, action will be SIG_DFL here.)
2341 	 */
2342 	mtx_lock(&ps->ps_mtx);
2343 	if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2344 		if (kern_sig_discard_ign &&
2345 		    (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) {
2346 			SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2347 
2348 			mtx_unlock(&ps->ps_mtx);
2349 			if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2350 				ksiginfo_tryfree(ksi);
2351 			return (0);
2352 		} else {
2353 			action = SIG_CATCH;
2354 			intrval = 0;
2355 		}
2356 	} else {
2357 		if (SIGISMEMBER(td->td_sigmask, sig))
2358 			action = SIG_HOLD;
2359 		else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2360 			action = SIG_CATCH;
2361 		else
2362 			action = SIG_DFL;
2363 		if (SIGISMEMBER(ps->ps_sigintr, sig))
2364 			intrval = EINTR;
2365 		else
2366 			intrval = ERESTART;
2367 	}
2368 	mtx_unlock(&ps->ps_mtx);
2369 
2370 	if (prop & SIGPROP_CONT)
2371 		sigqueue_delete_stopmask_proc(p);
2372 	else if (prop & SIGPROP_STOP) {
2373 		if (pt_attach_transparent &&
2374 		    (p->p_flag & P_TRACED) != 0 &&
2375 		    (p->p_flag2 & P2_PTRACE_FSTP) != 0) {
2376 			td->td_dbgflags |= TDB_FSTP;
2377 			PROC_SLOCK(p);
2378 			sig_handle_first_stop(td, p, sig, true);
2379 			PROC_SUNLOCK(p);
2380 			return (0);
2381 		}
2382 
2383 		/*
2384 		 * If sending a tty stop signal to a member of an orphaned
2385 		 * process group, discard the signal here if the action
2386 		 * is default; don't stop the process below if sleeping,
2387 		 * and don't clear any pending SIGCONT.
2388 		 */
2389 		if ((prop & SIGPROP_TTYSTOP) != 0 &&
2390 		    (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2391 		    action == SIG_DFL) {
2392 			if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2393 				ksiginfo_tryfree(ksi);
2394 			return (0);
2395 		}
2396 		sigqueue_delete_proc(p, SIGCONT);
2397 		if (p->p_flag & P_CONTINUED) {
2398 			p->p_flag &= ~P_CONTINUED;
2399 			PROC_LOCK(p->p_pptr);
2400 			sigqueue_take(p->p_ksi);
2401 			PROC_UNLOCK(p->p_pptr);
2402 		}
2403 	}
2404 
2405 	ret = sigqueue_add(sigqueue, sig, ksi);
2406 	if (ret != 0)
2407 		return (ret);
2408 	signotify(td);
2409 	/*
2410 	 * Defer further processing for signals which are held,
2411 	 * except that stopped processes must be continued by SIGCONT.
2412 	 */
2413 	if (action == SIG_HOLD &&
2414 	    !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2415 		return (0);
2416 
2417 	/*
2418 	 * Some signals have a process-wide effect and a per-thread
2419 	 * component.  Most processing occurs when the process next
2420 	 * tries to cross the user boundary, however there are some
2421 	 * times when processing needs to be done immediately, such as
2422 	 * waking up threads so that they can cross the user boundary.
2423 	 * We try to do the per-process part here.
2424 	 */
2425 	if (P_SHOULDSTOP(p)) {
2426 		KASSERT(!(p->p_flag & P_WEXIT),
2427 		    ("signal to stopped but exiting process"));
2428 		if (sig == SIGKILL) {
2429 			/*
2430 			 * If traced process is already stopped,
2431 			 * then no further action is necessary.
2432 			 */
2433 			if (p->p_flag & P_TRACED)
2434 				return (0);
2435 			/*
2436 			 * SIGKILL sets process running.
2437 			 * It will die elsewhere.
2438 			 * All threads must be restarted.
2439 			 */
2440 			p->p_flag &= ~P_STOPPED_SIG;
2441 			goto runfast;
2442 		}
2443 
2444 		if (prop & SIGPROP_CONT) {
2445 			/*
2446 			 * If traced process is already stopped,
2447 			 * then no further action is necessary.
2448 			 */
2449 			if (p->p_flag & P_TRACED)
2450 				return (0);
2451 			/*
2452 			 * If SIGCONT is default (or ignored), we continue the
2453 			 * process but don't leave the signal in sigqueue as
2454 			 * it has no further action.  If SIGCONT is held, we
2455 			 * continue the process and leave the signal in
2456 			 * sigqueue.  If the process catches SIGCONT, let it
2457 			 * handle the signal itself.  If it isn't waiting on
2458 			 * an event, it goes back to run state.
2459 			 * Otherwise, process goes back to sleep state.
2460 			 */
2461 			p->p_flag &= ~P_STOPPED_SIG;
2462 			PROC_SLOCK(p);
2463 			if (p->p_numthreads == p->p_suspcount) {
2464 				PROC_SUNLOCK(p);
2465 				p->p_flag |= P_CONTINUED;
2466 				p->p_xsig = SIGCONT;
2467 				PROC_LOCK(p->p_pptr);
2468 				childproc_continued(p);
2469 				PROC_UNLOCK(p->p_pptr);
2470 				PROC_SLOCK(p);
2471 			}
2472 			if (action == SIG_DFL) {
2473 				thread_unsuspend(p);
2474 				PROC_SUNLOCK(p);
2475 				sigqueue_delete(sigqueue, sig);
2476 				goto out_cont;
2477 			}
2478 			if (action == SIG_CATCH) {
2479 				/*
2480 				 * The process wants to catch it so it needs
2481 				 * to run at least one thread, but which one?
2482 				 */
2483 				PROC_SUNLOCK(p);
2484 				goto runfast;
2485 			}
2486 			/*
2487 			 * The signal is not ignored or caught.
2488 			 */
2489 			thread_unsuspend(p);
2490 			PROC_SUNLOCK(p);
2491 			goto out_cont;
2492 		}
2493 
2494 		if (prop & SIGPROP_STOP) {
2495 			/*
2496 			 * If traced process is already stopped,
2497 			 * then no further action is necessary.
2498 			 */
2499 			if (p->p_flag & P_TRACED)
2500 				return (0);
2501 			/*
2502 			 * Already stopped, don't need to stop again
2503 			 * (If we did, the shell could get confused.)
2504 			 * Just make sure the signal STOP bit is set.
2505 			 */
2506 			p->p_flag |= P_STOPPED_SIG;
2507 			sigqueue_delete(sigqueue, sig);
2508 			return (0);
2509 		}
2510 
2511 		/*
2512 		 * All other kinds of signals:
2513 		 * If a thread is sleeping interruptibly, simulate a
2514 		 * wakeup so that when it is continued it will be made
2515 		 * runnable and can look at the signal.  However, don't make
2516 		 * the PROCESS runnable, leave it stopped.
2517 		 * It may run a bit until it hits a thread_suspend_check().
2518 		 */
2519 		PROC_SLOCK(p);
2520 		thread_lock(td);
2521 		if (TD_CAN_ABORT(td))
2522 			sig_sleepq_abort(td, intrval);
2523 		else
2524 			thread_unlock(td);
2525 		PROC_SUNLOCK(p);
2526 		return (0);
2527 		/*
2528 		 * Mutexes are short lived. Threads waiting on them will
2529 		 * hit thread_suspend_check() soon.
2530 		 */
2531 	} else if (p->p_state == PRS_NORMAL) {
2532 		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2533 			tdsigwakeup(td, sig, action, intrval);
2534 			return (0);
2535 		}
2536 
2537 		MPASS(action == SIG_DFL);
2538 
2539 		if (prop & SIGPROP_STOP) {
2540 			if (p->p_flag & (P_PPWAIT|P_WEXIT))
2541 				return (0);
2542 			p->p_flag |= P_STOPPED_SIG;
2543 			p->p_xsig = sig;
2544 			PROC_SLOCK(p);
2545 			sig_suspend_threads(td, p);
2546 			if (p->p_numthreads == p->p_suspcount) {
2547 				/*
2548 				 * Only a thread sending a signal to another
2549 				 * process can reach here.  A thread signalling
2550 				 * its own process does not suspend itself
2551 				 * here, so in that case p_numthreads should
2552 				 * never be equal to p_suspcount.
2553 				 */
2554 				thread_stopped(p);
2555 				PROC_SUNLOCK(p);
2556 				sigqueue_delete_proc(p, p->p_xsig);
2557 			} else
2558 				PROC_SUNLOCK(p);
2559 			return (0);
2560 		}
2561 	} else {
2562 		/* Not in "NORMAL" state.  Discard the signal. */
2563 		sigqueue_delete(sigqueue, sig);
2564 		return (0);
2565 	}
2566 
2567 	/*
2568 	 * The process is not stopped so we need to apply the signal to all the
2569 	 * running threads.
2570 	 */
2571 runfast:
2572 	tdsigwakeup(td, sig, action, intrval);
2573 	PROC_SLOCK(p);
2574 	thread_unsuspend(p);
2575 	PROC_SUNLOCK(p);
2576 out_cont:
2577 	itimer_proc_continue(p);
2578 	kqtimer_proc_continue(p);
2579 
2580 	return (0);
2581 }
2582 
2583 /*
2584  * The force of a signal has been directed against a single
2585  * thread.  We need to see what we can do about knocking it
2586  * out of any sleep it may be in etc.
2587  */
2588 static void
2589 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2590 {
2591 	struct proc *p = td->td_proc;
2592 	int prop;
2593 
2594 	PROC_LOCK_ASSERT(p, MA_OWNED);
2595 	prop = sigprop(sig);
2596 
2597 	PROC_SLOCK(p);
2598 	thread_lock(td);
2599 	/*
2600 	 * Bring the priority of a thread up if we want it to get
2601 	 * killed in this lifetime.  Be careful to avoid bumping the
2602 	 * priority of the idle thread, since we still allow signals to be
2603 	 * sent to kernel processes.
2604 	 */
2605 	if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2606 	    td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2607 		sched_prio(td, PUSER);
2608 	if (TD_ON_SLEEPQ(td)) {
2609 		/*
2610 		 * If thread is sleeping uninterruptibly
2611 		 * we can't interrupt the sleep... the signal will
2612 		 * be noticed when the process returns through
2613 		 * trap() or syscall().
2614 		 */
2615 		if ((td->td_flags & TDF_SINTR) == 0)
2616 			goto out;
2617 		/*
2618 		 * If SIGCONT is default (or ignored) and process is
2619 		 * asleep, we are finished; the process should not
2620 		 * be awakened.
2621 		 */
2622 		if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2623 			thread_unlock(td);
2624 			PROC_SUNLOCK(p);
2625 			sigqueue_delete(&p->p_sigqueue, sig);
2626 			/*
2627 			 * It may be on either list in this state.
2628 			 * Remove from both for now.
2629 			 */
2630 			sigqueue_delete(&td->td_sigqueue, sig);
2631 			return;
2632 		}
2633 
2634 		/*
2635 		 * Don't awaken a sleeping thread for SIGSTOP if the
2636 		 * STOP signal is deferred.
2637 		 */
2638 		if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2639 		    TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2640 			goto out;
2641 
2642 		/*
2643 		 * Give low priority threads a better chance to run.
2644 		 */
2645 		if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2646 			sched_prio(td, PUSER);
2647 
2648 		sig_sleepq_abort(td, intrval);
2649 		PROC_SUNLOCK(p);
2650 		return;
2651 	}
2652 
2653 	/*
2654 	 * Other states do nothing with the signal immediately,
2655 	 * other than kicking ourselves if we are running.
2656 	 * It will either never be noticed, or noticed very soon.
2657 	 */
2658 #ifdef SMP
2659 	if (TD_IS_RUNNING(td) && td != curthread)
2660 		forward_signal(td);
2661 #endif
2662 
2663 out:
2664 	PROC_SUNLOCK(p);
2665 	thread_unlock(td);
2666 }
2667 
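/*
 * Perform a debugger-requested core dump (PT_COREDUMP) on behalf of
 * the traced thread, writing to the vnode supplied in the request.
 */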
2668 static void
2669 ptrace_coredumpreq(struct thread *td, struct proc *p,
2670     struct thr_coredump_req *tcq)
2671 {
2672 	void *rl_cookie;
2673 
2674 	if (p->p_sysent->sv_coredump == NULL) {
2675 		tcq->tc_error = ENOSYS;
2676 		return;
2677 	}
2678 
2679 	rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX);
2680 	tcq->tc_error = p->p_sysent->sv_coredump(td, tcq->tc_vp,
2681 	    tcq->tc_limit, tcq->tc_flags);
2682 	vn_rangelock_unlock(tcq->tc_vp, rl_cookie);
2683 }
2684 
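/*
 * Execute a debugger-requested system call (PT_SC_REMOTE) in the
 * context of the traced thread, preserving the thread's own return
 * values and errno across the call.
 */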
2685 static void
2686 ptrace_syscallreq(struct thread *td, struct proc *p,
2687     struct thr_syscall_req *tsr)
2688 {
2689 	struct sysentvec *sv;
2690 	struct sysent *se;
2691 	register_t rv_saved[2];
2692 	int error, nerror;
2693 	int sc;
2694 	bool audited, sy_thr_static;
2695 
2696 	sv = p->p_sysent;
2697 	if (sv->sv_table == NULL || sv->sv_size < tsr->ts_sa.code) {
2698 		tsr->ts_ret.sr_error = ENOSYS;
2699 		return;
2700 	}
2701 
2702 	sc = tsr->ts_sa.code;
2703 	if (sc == SYS_syscall || sc == SYS___syscall) {
2704 		sc = tsr->ts_sa.args[0];
2705 		memmove(&tsr->ts_sa.args[0], &tsr->ts_sa.args[1],
2706 		    sizeof(register_t) * (tsr->ts_nargs - 1));
2707 	}
2708 
2709 	tsr->ts_sa.callp = se = &sv->sv_table[sc];
2710 
2711 	VM_CNT_INC(v_syscall);
2712 	td->td_pticks = 0;
2713 	if (__predict_false(td->td_cowgen != atomic_load_int(
2714 	    &td->td_proc->p_cowgen)))
2715 		thread_cow_update(td);
2716 
2717 	td->td_sa = tsr->ts_sa;
2718 
2719 #ifdef CAPABILITY_MODE
2720 	if ((se->sy_flags & SYF_CAPENABLED) == 0) {
2721 		if (CAP_TRACING(td))
2722 			ktrcapfail(CAPFAIL_SYSCALL, NULL);
2723 		if (IN_CAPABILITY_MODE(td)) {
2724 			tsr->ts_ret.sr_error = ECAPMODE;
2725 			return;
2726 		}
2727 	}
2728 #endif
2729 
2730 	sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0;
2731 	audited = AUDIT_SYSCALL_ENTER(sc, td) != 0;
2732 
2733 	if (!sy_thr_static) {
2734 		error = syscall_thread_enter(td, &se);
2735 		sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0;
2736 		if (error != 0) {
2737 			tsr->ts_ret.sr_error = error;
2738 			return;
2739 		}
2740 	}
2741 
2742 	rv_saved[0] = td->td_retval[0];
2743 	rv_saved[1] = td->td_retval[1];
2744 	nerror = td->td_errno;
2745 	td->td_retval[0] = 0;
2746 	td->td_retval[1] = 0;
2747 
2748 #ifdef KDTRACE_HOOKS
2749 	if (se->sy_entry != 0)
2750 		(*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_ENTRY, 0);
2751 #endif
2752 	tsr->ts_ret.sr_error = se->sy_call(td, tsr->ts_sa.args);
2753 #ifdef KDTRACE_HOOKS
2754 	if (se->sy_return != 0)
2755 		(*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_RETURN,
2756 		    tsr->ts_ret.sr_error != 0 ? -1 : td->td_retval[0]);
2757 #endif
2758 
2759 	tsr->ts_ret.sr_retval[0] = td->td_retval[0];
2760 	tsr->ts_ret.sr_retval[1] = td->td_retval[1];
2761 	td->td_retval[0] = rv_saved[0];
2762 	td->td_retval[1] = rv_saved[1];
2763 	td->td_errno = nerror;
2764 
2765 	if (audited)
2766 		AUDIT_SYSCALL_EXIT(error, td);
2767 	if (!sy_thr_static)
2768 		syscall_thread_exit(td, se);
2769 }
2770 
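/*
 * Dispatch a pending remote request (core dump or remote syscall)
 * recorded in td_dbgflags, then clear the flag and wake up the
 * debugger waiting for completion.
 */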
2771 static void
2772 ptrace_remotereq(struct thread *td, int flag)
2773 {
2774 	struct proc *p;
2775 
2776 	MPASS(td == curthread);
2777 	p = td->td_proc;
2778 	PROC_LOCK_ASSERT(p, MA_OWNED);
2779 	if ((td->td_dbgflags & flag) == 0)
2780 		return;
2781 	KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped"));
2782 	KASSERT(td->td_remotereq != NULL, ("td_remotereq is NULL"));
2783 
2784 	PROC_UNLOCK(p);
2785 	switch (flag) {
2786 	case TDB_COREDUMPREQ:
2787 		ptrace_coredumpreq(td, p, td->td_remotereq);
2788 		break;
2789 	case TDB_SCREMOTEREQ:
2790 		ptrace_syscallreq(td, p, td->td_remotereq);
2791 		break;
2792 	default:
2793 		__unreachable();
2794 	}
2795 	PROC_LOCK(p);
2796 
2797 	MPASS((td->td_dbgflags & flag) != 0);
2798 	td->td_dbgflags &= ~flag;
2799 	td->td_remotereq = NULL;
2800 	wakeup(p);
2801 }
2802 
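/*
 * Ask every thread in the process to suspend, interrupting
 * interruptible sleeps where permitted and nudging running threads on
 * other CPUs so that they notice the request.
 */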
2803 static void
2804 sig_suspend_threads(struct thread *td, struct proc *p)
2805 {
2806 	struct thread *td2;
2807 
2808 	PROC_LOCK_ASSERT(p, MA_OWNED);
2809 	PROC_SLOCK_ASSERT(p, MA_OWNED);
2810 
2811 	FOREACH_THREAD_IN_PROC(p, td2) {
2812 		thread_lock(td2);
2813 		ast_sched_locked(td2, TDA_SUSPEND);
2814 		if (TD_IS_SLEEPING(td2) && (td2->td_flags & TDF_SINTR) != 0) {
2815 			if (td2->td_flags & TDF_SBDRY) {
2816 				/*
2817 				 * Once a thread is asleep with
2818 				 * TDF_SBDRY and without TDF_SERESTART
2819 				 * or TDF_SEINTR set, it should never
2820 				 * become suspended due to this check.
2821 				 */
2822 				KASSERT(!TD_IS_SUSPENDED(td2),
2823 				    ("thread with deferred stops suspended"));
2824 				if (TD_SBDRY_INTR(td2)) {
2825 					sleepq_abort(td2, TD_SBDRY_ERRNO(td2));
2826 					continue;
2827 				}
2828 			} else if (!TD_IS_SUSPENDED(td2))
2829 				thread_suspend_one(td2);
2830 		} else if (!TD_IS_SUSPENDED(td2)) {
2831 #ifdef SMP
2832 			if (TD_IS_RUNNING(td2) && td2 != td)
2833 				forward_signal(td2);
2834 #endif
2835 		}
2836 		thread_unlock(td2);
2837 	}
2838 }
2839 
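/*
 * Record the stopping signal and reporting thread for the first stop
 * after a ptrace attach, mark the process stopped for tracing, and
 * suspend its other threads.
 */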
2840 static void
2841 sig_handle_first_stop(struct thread *td, struct proc *p, int sig, bool ext)
2842 {
2843 	if ((td->td_dbgflags & TDB_FSTP) == 0 &&
2844 	    ((p->p_flag2 & P2_PTRACE_FSTP) != 0 ||
2845 	    p->p_xthread != NULL))
2846 		return;
2847 
2848 	p->p_xsig = sig;
2849 	p->p_xthread = td;
2850 
2851 	/*
2852 	 * If we are on sleepqueue already, let sleepqueue
2853 	 * code decide if it needs to go sleep after attach.
2854 	 */
2855 	if (ext || td->td_wchan == NULL)
2856 		td->td_dbgflags &= ~TDB_FSTP;
2857 
2858 	p->p_flag2 &= ~P2_PTRACE_FSTP;
2859 	p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2860 	sig_suspend_threads(td, p);
2861 }
2862 
2863 /*
2864  * Stop the process for an event deemed interesting to the debugger. If si is
2865  * non-NULL, this is a signal exchange; the new signal requested by the
2866  * debugger will be returned for handling. If si is NULL, this is some other
2867  * type of interesting event. The debugger may request a signal be delivered in
2868  * that case as well, however it will be deferred until it can be handled.
2869  */
2870 int
2871 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2872 {
2873 	struct proc *p = td->td_proc;
2874 	struct thread *td2;
2875 	ksiginfo_t ksi;
2876 
2877 	PROC_LOCK_ASSERT(p, MA_OWNED);
2878 	KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2879 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2880 	    &p->p_mtx.lock_object, "Stopping for traced signal");
2881 
2882 	td->td_xsig = sig;
2883 
2884 	if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2885 		td->td_dbgflags |= TDB_XSIG;
2886 		CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2887 		    td->td_tid, p->p_pid, td->td_dbgflags, sig);
2888 		PROC_SLOCK(p);
2889 		while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2890 			if (P_KILLED(p)) {
2891 				/*
2892 				 * Ensure that, if we've been PT_KILLed, the
2893 				 * exit status reflects that. Another thread
2894 				 * may also be in ptracestop(), having just
2895 				 * received the SIGKILL, but this thread was
2896 				 * unsuspended first.
2897 				 */
2898 				td->td_dbgflags &= ~TDB_XSIG;
2899 				td->td_xsig = SIGKILL;
2900 				p->p_ptevents = 0;
2901 				break;
2902 			}
2903 			if (p->p_flag & P_SINGLE_EXIT &&
2904 			    !(td->td_dbgflags & TDB_EXIT)) {
2905 				/*
2906 				 * Ignore ptrace stops except for thread exit
2907 				 * events when the process exits.
2908 				 */
2909 				td->td_dbgflags &= ~TDB_XSIG;
2910 				PROC_SUNLOCK(p);
2911 				return (0);
2912 			}
2913 
2914 			/*
2915 			 * Make wait(2) work.  Ensure that right after the
2916 			 * attach, the thread which was decided to become the
2917 			 * leader of attach gets reported to the waiter.
2918 			 * Otherwise, just avoid overwriting another thread's
2919 			 * assignment to p_xthread.  If another thread has
2920 			 * already set p_xthread, the current thread will get
2921 			 * a chance to report itself upon the next iteration.
2922 			 */
2923 			sig_handle_first_stop(td, p, sig, false);
2924 
2925 			if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2926 				td->td_dbgflags &= ~TDB_STOPATFORK;
2927 			}
2928 stopme:
2929 			td->td_dbgflags |= TDB_SSWITCH;
2930 			thread_suspend_switch(td, p);
2931 			td->td_dbgflags &= ~TDB_SSWITCH;
2932 			if ((td->td_dbgflags & (TDB_COREDUMPREQ |
2933 			    TDB_SCREMOTEREQ)) != 0) {
2934 				MPASS((td->td_dbgflags & (TDB_COREDUMPREQ |
2935 				    TDB_SCREMOTEREQ)) !=
2936 				    (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2937 				PROC_SUNLOCK(p);
2938 				ptrace_remotereq(td, td->td_dbgflags &
2939 				    (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2940 				PROC_SLOCK(p);
2941 				goto stopme;
2942 			}
2943 			if (p->p_xthread == td)
2944 				p->p_xthread = NULL;
2945 			if (!(p->p_flag & P_TRACED))
2946 				break;
2947 			if (td->td_dbgflags & TDB_SUSPEND) {
2948 				if (p->p_flag & P_SINGLE_EXIT)
2949 					break;
2950 				goto stopme;
2951 			}
2952 		}
2953 		PROC_SUNLOCK(p);
2954 	}
2955 
2956 	if (si != NULL && sig == td->td_xsig) {
2957 		/* Parent wants us to take the original signal unchanged. */
2958 		si->ksi_flags |= KSI_HEAD;
2959 		if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2960 			si->ksi_signo = 0;
2961 	} else if (td->td_xsig != 0) {
2962 		/*
2963 		 * If parent wants us to take a new signal, then it will leave
2964 		 * it in td->td_xsig; otherwise we just look for signals again.
2965 		 */
2966 		ksiginfo_init(&ksi);
2967 		ksi.ksi_signo = td->td_xsig;
2968 		ksi.ksi_flags |= KSI_PTRACE;
2969 		td2 = sigtd(p, td->td_xsig, false);
2970 		tdsendsignal(p, td2, td->td_xsig, &ksi);
2971 		if (td != td2)
2972 			return (0);
2973 	}
2974 
2975 	return (td->td_xsig);
2976 }
2977 
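/*
 * After a signal mask change, re-examine the pending set restricted to
 * 'block' and notify or wake a suitable thread for each signal that
 * became deliverable.
 */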
2978 static void
2979 reschedule_signals(struct proc *p, sigset_t block, int flags)
2980 {
2981 	struct sigacts *ps;
2982 	struct thread *td;
2983 	int sig;
2984 	bool fastblk, pslocked;
2985 
2986 	PROC_LOCK_ASSERT(p, MA_OWNED);
2987 	ps = p->p_sigacts;
2988 	pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2989 	mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2990 	if (SIGISEMPTY(p->p_siglist))
2991 		return;
2992 	SIGSETAND(block, p->p_siglist);
2993 	fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2994 	SIG_FOREACH(sig, &block) {
2995 		td = sigtd(p, sig, fastblk);
2996 
2997 		/*
2998 		 * If sigtd() selected us even though sigfastblock is
2999 		 * blocking, do not activate the AST or wake us, to avoid
3000 		 * a loop in the AST handler.
3001 		 */
3002 		if (fastblk && td == curthread)
3003 			continue;
3004 
3005 		signotify(td);
3006 		if (!pslocked)
3007 			mtx_lock(&ps->ps_mtx);
3008 		if (p->p_flag & P_TRACED ||
3009 		    (SIGISMEMBER(ps->ps_sigcatch, sig) &&
3010 		    !SIGISMEMBER(td->td_sigmask, sig))) {
3011 			tdsigwakeup(td, sig, SIG_CATCH,
3012 			    (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
3013 			    ERESTART));
3014 		}
3015 		if (!pslocked)
3016 			mtx_unlock(&ps->ps_mtx);
3017 	}
3018 }
3019 
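/*
 * Called when a thread exits: flush its private signal queue and, if
 * other threads remain, make sure any signals it might have been
 * chosen to deliver are redirected to them.
 */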
3020 void
3021 tdsigcleanup(struct thread *td)
3022 {
3023 	struct proc *p;
3024 	sigset_t unblocked;
3025 
3026 	p = td->td_proc;
3027 	PROC_LOCK_ASSERT(p, MA_OWNED);
3028 
3029 	sigqueue_flush(&td->td_sigqueue);
3030 	if (p->p_numthreads == 1)
3031 		return;
3032 
3033 	/*
3034 	 * Since we cannot handle signals, notify signal post code
3035 	 * about this by filling the sigmask.
3036 	 *
3037 	 * Also, if needed, wake up thread(s) that do not block the
3038 	 * same signals as the exiting thread, since the thread might
3039 	 * have been selected for delivery and woken up.
3040 	 */
3041 	SIGFILLSET(unblocked);
3042 	SIGSETNAND(unblocked, td->td_sigmask);
3043 	SIGFILLSET(td->td_sigmask);
3044 	reschedule_signals(p, unblocked, 0);
3045 
3046 }
3047 
3048 static int
3049 sigdeferstop_curr_flags(int cflags)
3050 {
3051 
3052 	MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
3053 	    (cflags & TDF_SBDRY) != 0);
3054 	return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
3055 }
3056 
3057 /*
3058  * Defer the delivery of SIGSTOP for the current thread, according to
3059  * the requested mode.  Returns previous flags, which must be restored
3060  * by sigallowstop().
3061  *
3062  * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
3063  * cleared by the current thread, which allow the lock-less read-only
3064  * accesses below.
3065  */
3066 int
3067 sigdeferstop_impl(int mode)
3068 {
3069 	struct thread *td;
3070 	int cflags, nflags;
3071 
3072 	td = curthread;
3073 	cflags = sigdeferstop_curr_flags(td->td_flags);
3074 	switch (mode) {
3075 	case SIGDEFERSTOP_NOP:
3076 		nflags = cflags;
3077 		break;
3078 	case SIGDEFERSTOP_OFF:
3079 		nflags = 0;
3080 		break;
3081 	case SIGDEFERSTOP_SILENT:
3082 		nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
3083 		break;
3084 	case SIGDEFERSTOP_EINTR:
3085 		nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
3086 		break;
3087 	case SIGDEFERSTOP_ERESTART:
3088 		nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
3089 		break;
3090 	default:
3091 		panic("sigdeferstop: invalid mode %x", mode);
3092 		break;
3093 	}
3094 	if (cflags == nflags)
3095 		return (SIGDEFERSTOP_VAL_NCHG);
3096 	thread_lock(td);
3097 	td->td_flags = (td->td_flags & ~cflags) | nflags;
3098 	thread_unlock(td);
3099 	return (cflags);
3100 }
3101 
3102 /*
3103  * Restores the STOP handling mode, typically permitting the delivery
3104  * of SIGSTOP for the current thread.  This does not immediately
3105  * suspend if a stop was posted.  Instead, the thread will suspend
3106  * either via ast() or a subsequent interruptible sleep.
3107  */
3108 void
3109 sigallowstop_impl(int prev)
3110 {
3111 	struct thread *td;
3112 	int cflags;
3113 
3114 	KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
3115 	KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
3116 	    ("sigallowstop: incorrect previous mode %x", prev));
3117 	td = curthread;
3118 	cflags = sigdeferstop_curr_flags(td->td_flags);
3119 	if (cflags != prev) {
3120 		thread_lock(td);
3121 		td->td_flags = (td->td_flags & ~cflags) | prev;
3122 		thread_unlock(td);
3123 	}
3124 }
3125 
3126 enum sigstatus {
3127 	SIGSTATUS_HANDLE,
3128 	SIGSTATUS_HANDLED,
3129 	SIGSTATUS_IGNORE,
3130 	SIGSTATUS_SBDRY_STOP,
3131 };
3132 
3133 /*
3134  * The thread has signal "sig" pending.  Figure out what to do with it:
3135  *
3136  * _HANDLE     -> the caller should handle the signal
3137  * _HANDLED    -> handled internally, reload pending signal set
3138  * _IGNORE     -> ignored, remove from the set of pending signals and try the
3139  *                next pending signal
3140  * _SBDRY_STOP -> the signal should stop the thread but this is not
3141  *                permitted in the current context
3142  */
3143 static enum sigstatus
3144 sigprocess(struct thread *td, int sig)
3145 {
3146 	struct proc *p;
3147 	struct sigacts *ps;
3148 	struct sigqueue *queue;
3149 	ksiginfo_t ksi;
3150 	int prop;
3151 
3152 	KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig));
3153 
3154 	p = td->td_proc;
3155 	ps = p->p_sigacts;
3156 	mtx_assert(&ps->ps_mtx, MA_OWNED);
3157 	PROC_LOCK_ASSERT(p, MA_OWNED);
3158 
3159 	/*
3160 	 * We should allow pending but ignored signals below
3161 	 * if there is an active sigwait(), or if P_TRACED was
3162 	 * set when they were posted.
3163 	 */
3164 	if (SIGISMEMBER(ps->ps_sigignore, sig) &&
3165 	    (p->p_flag & P_TRACED) == 0 &&
3166 	    (td->td_flags & TDF_SIGWAIT) == 0) {
3167 		return (SIGSTATUS_IGNORE);
3168 	}
3169 
3170 	/*
3171 	 * If the process is going to single-thread mode to prepare
3172 	 * for exit, there is no sense in delivering any signal
3173 	 * to usermode.  Another important consequence is that
3174 	 * msleep(..., PCATCH, ...) now is only interruptible by a
3175 	 * suspend request.
3176 	 */
3177 	if ((p->p_flag2 & P2_WEXIT) != 0)
3178 		return (SIGSTATUS_IGNORE);
3179 
3180 	if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
3181 		/*
3182 		 * If traced, always stop.
3183 		 * Remove old signal from queue before the stop.
3184 		 * XXX shrug off debugger, it causes siginfo to
3185 		 * be thrown away.
3186 		 */
3187 		queue = &td->td_sigqueue;
3188 		ksiginfo_init(&ksi);
3189 		if (sigqueue_get(queue, sig, &ksi) == 0) {
3190 			queue = &p->p_sigqueue;
3191 			sigqueue_get(queue, sig, &ksi);
3192 		}
3193 		td->td_si = ksi.ksi_info;
3194 
3195 		mtx_unlock(&ps->ps_mtx);
3196 		sig = ptracestop(td, sig, &ksi);
3197 		mtx_lock(&ps->ps_mtx);
3198 
3199 		td->td_si.si_signo = 0;
3200 
3201 		/*
3202 		 * Keep looking if the debugger discarded or
3203 		 * replaced the signal.
3204 		 */
3205 		if (sig == 0)
3206 			return (SIGSTATUS_HANDLED);
3207 
3208 		/*
3209 		 * If the signal became masked, re-queue it.
3210 		 */
3211 		if (SIGISMEMBER(td->td_sigmask, sig)) {
3212 			ksi.ksi_flags |= KSI_HEAD;
3213 			sigqueue_add(&p->p_sigqueue, sig, &ksi);
3214 			return (SIGSTATUS_HANDLED);
3215 		}
3216 
3217 		/*
3218 		 * If the traced bit got turned off, requeue the signal and
3219 		 * reload the set of pending signals.  This ensures that p_sig*
3220 		 * and p_sigact are consistent.
3221 		 */
3222 		if ((p->p_flag & P_TRACED) == 0) {
3223 			if ((ksi.ksi_flags & KSI_PTRACE) == 0) {
3224 				ksi.ksi_flags |= KSI_HEAD;
3225 				sigqueue_add(queue, sig, &ksi);
3226 			}
3227 			return (SIGSTATUS_HANDLED);
3228 		}
3229 	}
3230 
3231 	/*
3232 	 * Decide whether the signal should be returned.
3233 	 * Return the signal's number, or fall through
3234 	 * to clear it from the pending mask.
3235 	 */
3236 	switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
3237 	case (intptr_t)SIG_DFL:
3238 		/*
3239 		 * Don't take default actions on system processes.
3240 		 */
3241 		if (p->p_pid <= 1) {
3242 #ifdef DIAGNOSTIC
3243 			/*
3244 			 * Are you sure you want to ignore SIGSEGV
3245 			 * in init? XXX
3246 			 */
3247 			printf("Process (pid %lu) got signal %d\n",
3248 				(u_long)p->p_pid, sig);
3249 #endif
3250 			return (SIGSTATUS_IGNORE);
3251 		}
3252 
3253 		/*
3254 		 * If there is a pending stop signal to process with
3255 		 * default action, stop here, then clear the signal.
3256 		 * Traced or exiting processes should ignore stops.
3257 		 * Additionally, a member of an orphaned process group
3258 		 * should ignore tty stops.
3259 		 */
3260 		prop = sigprop(sig);
3261 		if (prop & SIGPROP_STOP) {
3262 			mtx_unlock(&ps->ps_mtx);
3263 			if ((p->p_flag & (P_TRACED | P_WEXIT |
3264 			    P_SINGLE_EXIT)) != 0 || ((p->p_pgrp->
3265 			    pg_flags & PGRP_ORPHANED) != 0 &&
3266 			    (prop & SIGPROP_TTYSTOP) != 0)) {
3267 				mtx_lock(&ps->ps_mtx);
3268 				return (SIGSTATUS_IGNORE);
3269 			}
3270 			if (TD_SBDRY_INTR(td)) {
3271 				KASSERT((td->td_flags & TDF_SBDRY) != 0,
3272 				    ("lost TDF_SBDRY"));
3273 				mtx_lock(&ps->ps_mtx);
3274 				return (SIGSTATUS_SBDRY_STOP);
3275 			}
3276 			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
3277 			    &p->p_mtx.lock_object, "Catching SIGSTOP");
3278 			sigqueue_delete(&td->td_sigqueue, sig);
3279 			sigqueue_delete(&p->p_sigqueue, sig);
3280 			p->p_flag |= P_STOPPED_SIG;
3281 			p->p_xsig = sig;
3282 			PROC_SLOCK(p);
3283 			sig_suspend_threads(td, p);
3284 			thread_suspend_switch(td, p);
3285 			PROC_SUNLOCK(p);
3286 			mtx_lock(&ps->ps_mtx);
3287 			return (SIGSTATUS_HANDLED);
3288 		} else if ((prop & SIGPROP_IGNORE) != 0 &&
3289 		    (td->td_flags & TDF_SIGWAIT) == 0) {
3290 			/*
3291 			 * Default action is to ignore; drop it if
3292 			 * not in kern_sigtimedwait().
3293 			 */
3294 			return (SIGSTATUS_IGNORE);
3295 		} else {
3296 			return (SIGSTATUS_HANDLE);
3297 		}
3298 
3299 	case (intptr_t)SIG_IGN:
3300 		if ((td->td_flags & TDF_SIGWAIT) == 0)
3301 			return (SIGSTATUS_IGNORE);
3302 		else
3303 			return (SIGSTATUS_HANDLE);
3304 
3305 	default:
3306 		/*
3307 		 * This signal has an action, let postsig() process it.
3308 		 */
3309 		return (SIGSTATUS_HANDLE);
3310 	}
3311 }
3312 
3313 /*
3314  * If the current process has received a signal (should be caught or cause
3315  * termination, should interrupt current syscall), return the signal number.
3316  * Stop signals with default action are processed immediately, then cleared;
3317  * they aren't returned.  This is checked after each entry to the system for
3318  * a syscall or trap (though this can usually be done without calling
3319  * issignal by checking the pending signal masks in cursig.) The normal call
3320  * sequence is
3321  *
3322  *	while (sig = cursig(curthread))
3323  *		postsig(sig);
3324  */
3325 static int
3326 issignal(struct thread *td)
3327 {
3328 	struct proc *p;
3329 	sigset_t sigpending;
3330 	int sig;
3331 
3332 	p = td->td_proc;
3333 	PROC_LOCK_ASSERT(p, MA_OWNED);
3334 
3335 	for (;;) {
3336 		sigpending = td->td_sigqueue.sq_signals;
3337 		SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
3338 		SIGSETNAND(sigpending, td->td_sigmask);
3339 
3340 		if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
3341 		    (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
3342 			SIG_STOPSIGMASK(sigpending);
3343 		if (SIGISEMPTY(sigpending))	/* no signal to send */
3344 			return (0);
3345 
3346 		/*
3347 		 * Do fast sigblock if requested by usermode.  Since
3348 		 * we do know that there was a signal pending at this
3349 		 * point, set the FAST_SIGBLOCK_PEND as indicator for
3350 		 * usermode to perform a dummy call to
3351 		 * FAST_SIGBLOCK_UNBLOCK, which causes immediate
3352 		 * delivery of postponed pending signal.
3353 		 */
3354 		if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
3355 			if (td->td_sigblock_val != 0)
3356 				SIGSETNAND(sigpending, fastblock_mask);
3357 			if (SIGISEMPTY(sigpending)) {
3358 				td->td_pflags |= TDP_SIGFASTPENDING;
3359 				return (0);
3360 			}
3361 		}
3362 
3363 		if (!pt_attach_transparent &&
3364 		    (p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
3365 		    (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
3366 		    SIGISMEMBER(sigpending, SIGSTOP)) {
3367 			/*
3368 			 * If debugger just attached, always consume
3369 			 * SIGSTOP from ptrace(PT_ATTACH) first, to
3370 			 * execute the debugger attach ritual in
3371 			 * order.
3372 			 */
3373 			td->td_dbgflags |= TDB_FSTP;
3374 			SIGEMPTYSET(sigpending);
3375 			SIGADDSET(sigpending, SIGSTOP);
3376 		}
3377 
3378 		SIG_FOREACH(sig, &sigpending) {
3379 			switch (sigprocess(td, sig)) {
3380 			case SIGSTATUS_HANDLE:
3381 				return (sig);
3382 			case SIGSTATUS_HANDLED:
3383 				goto next;
3384 			case SIGSTATUS_IGNORE:
3385 				sigqueue_delete(&td->td_sigqueue, sig);
3386 				sigqueue_delete(&p->p_sigqueue, sig);
3387 				break;
3388 			case SIGSTATUS_SBDRY_STOP:
3389 				return (-1);
3390 			}
3391 		}
3392 next:;
3393 	}
3394 }
3395 
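/*
 * If the process is now fully stopped on a job-control signal, report
 * the stop to the parent process.
 */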
3396 void
3397 thread_stopped(struct proc *p)
3398 {
3399 	int n;
3400 
3401 	PROC_LOCK_ASSERT(p, MA_OWNED);
3402 	PROC_SLOCK_ASSERT(p, MA_OWNED);
3403 	n = p->p_suspcount;
3404 	if (p == curproc)
3405 		n++;
3406 	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3407 		PROC_SUNLOCK(p);
3408 		p->p_flag &= ~P_WAITED;
3409 		PROC_LOCK(p->p_pptr);
3410 		childproc_stopped(p, (p->p_flag & P_TRACED) ?
3411 			CLD_TRAPPED : CLD_STOPPED);
3412 		PROC_UNLOCK(p->p_pptr);
3413 		PROC_SLOCK(p);
3414 	}
3415 }
3416 
3417 /*
3418  * Take the action for the specified signal
3419  * from the current set of pending signals.
3420  */
3421 int
3422 postsig(int sig)
3423 {
3424 	struct thread *td;
3425 	struct proc *p;
3426 	struct sigacts *ps;
3427 	sig_t action;
3428 	ksiginfo_t ksi;
3429 	sigset_t returnmask;
3430 
3431 	KASSERT(sig != 0, ("postsig"));
3432 
3433 	td = curthread;
3434 	p = td->td_proc;
3435 	PROC_LOCK_ASSERT(p, MA_OWNED);
3436 	ps = p->p_sigacts;
3437 	mtx_assert(&ps->ps_mtx, MA_OWNED);
3438 	ksiginfo_init(&ksi);
3439 	if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3440 	    sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3441 		return (0);
3442 	ksi.ksi_signo = sig;
3443 	if (ksi.ksi_code == SI_TIMER)
3444 		itimer_accept(p, ksi.ksi_timerid, &ksi);
3445 	action = ps->ps_sigact[_SIG_IDX(sig)];
3446 #ifdef KTRACE
3447 	if (KTRPOINT(td, KTR_PSIG))
3448 		ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3449 		    &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3450 #endif
3451 
3452 	if (action == SIG_DFL) {
3453 		/*
3454 		 * Default action, where the default is to kill
3455 		 * the process.  (Other cases were ignored above.)
3456 		 */
3457 		mtx_unlock(&ps->ps_mtx);
3458 		proc_td_siginfo_capture(td, &ksi.ksi_info);
3459 		sigexit(td, sig);
3460 		/* NOTREACHED */
3461 	} else {
3462 		/*
3463 		 * If we get here, the signal must be caught.
3464 		 */
3465 		KASSERT(action != SIG_IGN, ("postsig action %p", action));
3466 		KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3467 		    ("postsig action: blocked sig %d", sig));
3468 
3469 		/*
3470 		 * Set the new mask value and also defer further
3471 		 * occurrences of this signal.
3472 		 *
3473 		 * Special case: user has done a sigsuspend.  Here the
3474 		 * current mask is not of interest, but rather the
3475 		 * mask from before the sigsuspend is what we want
3476 		 * restored after the signal processing is completed.
3477 		 */
3478 		if (td->td_pflags & TDP_OLDMASK) {
3479 			returnmask = td->td_oldsigmask;
3480 			td->td_pflags &= ~TDP_OLDMASK;
3481 		} else
3482 			returnmask = td->td_sigmask;
3483 
3484 		if (p->p_sig == sig) {
3485 			p->p_sig = 0;
3486 		}
3487 		(*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3488 		postsig_done(sig, td, ps);
3489 	}
3490 	return (1);
3491 }
3492 
3493 int
3494 sig_ast_checksusp(struct thread *td)
3495 {
3496 	struct proc *p __diagused;
3497 	int ret;
3498 
3499 	p = td->td_proc;
3500 	PROC_LOCK_ASSERT(p, MA_OWNED);
3501 
3502 	if (!td_ast_pending(td, TDA_SUSPEND))
3503 		return (0);
3504 
3505 	ret = thread_suspend_check(1);
3506 	MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3507 	return (ret);
3508 }
3509 
3510 int
3511 sig_ast_needsigchk(struct thread *td)
3512 {
3513 	struct proc *p;
3514 	struct sigacts *ps;
3515 	int ret, sig;
3516 
3517 	p = td->td_proc;
3518 	PROC_LOCK_ASSERT(p, MA_OWNED);
3519 
3520 	if (!td_ast_pending(td, TDA_SIG))
3521 		return (0);
3522 
3523 	ps = p->p_sigacts;
3524 	mtx_lock(&ps->ps_mtx);
3525 	sig = cursig(td);
3526 	if (sig == -1) {
3527 		mtx_unlock(&ps->ps_mtx);
3528 		KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3529 		KASSERT(TD_SBDRY_INTR(td),
3530 		    ("lost TDF_SERESTART or TDF_SEINTR"));
3531 		KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3532 		    (TDF_SEINTR | TDF_SERESTART),
3533 		    ("both TDF_SEINTR and TDF_SERESTART"));
3534 		ret = TD_SBDRY_ERRNO(td);
3535 	} else if (sig != 0) {
3536 		ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3537 		mtx_unlock(&ps->ps_mtx);
3538 	} else {
3539 		mtx_unlock(&ps->ps_mtx);
3540 		ret = 0;
3541 	}
3542 
3543 	/*
3544 	 * Do not go into sleep if this thread was the ptrace(2)
3545 	 * attach leader.  cursig() consumed SIGSTOP from PT_ATTACH,
3546 	 * but we usually act on the signal by interrupting sleep, and
3547 	 * should do that here as well.
3548 	 */
3549 	if ((td->td_dbgflags & TDB_FSTP) != 0) {
3550 		if (ret == 0)
3551 			ret = EINTR;
3552 		td->td_dbgflags &= ~TDB_FSTP;
3553 	}
3554 
3555 	return (ret);
3556 }
3557 
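/*
 * Check for a pending suspension request or signal without sleeping.
 * Returns 0 if nothing is pending, otherwise the EINTR/ERESTART value
 * the interrupted operation should return.
 */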
3558 int
3559 sig_intr(void)
3560 {
3561 	struct thread *td;
3562 	struct proc *p;
3563 	int ret;
3564 
3565 	td = curthread;
3566 	if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
3567 		return (0);
3568 
3569 	p = td->td_proc;
3570 
3571 	PROC_LOCK(p);
3572 	ret = sig_ast_checksusp(td);
3573 	if (ret == 0)
3574 		ret = sig_ast_needsigchk(td);
3575 	PROC_UNLOCK(p);
3576 	return (ret);
3577 }
3578 
3579 bool
3580 curproc_sigkilled(void)
3581 {
3582 	struct thread *td;
3583 	struct proc *p;
3584 	struct sigacts *ps;
3585 	bool res;
3586 
3587 	td = curthread;
3588 	if (!td_ast_pending(td, TDA_SIG))
3589 		return (false);
3590 
3591 	p = td->td_proc;
3592 	PROC_LOCK(p);
3593 	ps = p->p_sigacts;
3594 	mtx_lock(&ps->ps_mtx);
3595 	res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) ||
3596 	    SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL);
3597 	mtx_unlock(&ps->ps_mtx);
3598 	PROC_UNLOCK(p);
3599 	return (res);
3600 }
3601 
3602 void
3603 proc_wkilled(struct proc *p)
3604 {
3605 
3606 	PROC_LOCK_ASSERT(p, MA_OWNED);
3607 	if ((p->p_flag & P_WKILLED) == 0)
3608 		p->p_flag |= P_WKILLED;
3609 }
3610 
3611 /*
3612  * Kill the current process for stated reason.
3613  */
3614 void
3615 killproc(struct proc *p, const char *why)
3616 {
3617 
3618 	PROC_LOCK_ASSERT(p, MA_OWNED);
3619 	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3620 	    p->p_comm);
3621 	log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3622 	    p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3623 	    p->p_ucred->cr_uid, why);
3624 	proc_wkilled(p);
3625 	kern_psignal(p, SIGKILL);
3626 }
3627 
3628 /*
3629  * Force the current process to exit with the specified signal, dumping core
3630  * if appropriate.  We bypass the normal tests for masked and caught signals,
3631  * allowing unrecoverable failures to terminate the process without changing
3632  * signal state.  Mark the accounting record with the signal termination.
3633  * If dumping core, save the signal number for the debugger.  Calls exit and
3634  * does not return.
3635  */
3636 void
3637 sigexit(struct thread *td, int sig)
3638 {
3639 	struct proc *p = td->td_proc;
3640 	const char *coreinfo;
3641 	int rv;
3642 	bool logexit;
3643 
3644 	PROC_LOCK_ASSERT(p, MA_OWNED);
3645 	proc_set_p2_wexit(p);
3646 
3647 	p->p_acflag |= AXSIG;
3648 	if ((p->p_flag2 & P2_LOGSIGEXIT_CTL) == 0)
3649 		logexit = kern_logsigexit != 0;
3650 	else
3651 		logexit = (p->p_flag2 & P2_LOGSIGEXIT_ENABLE) != 0;
3652 
3653 	/*
3654 	 * We must be single-threading to generate a core dump.  This
3655 	 * ensures that the registers in the core file are up-to-date.
3656 	 * Also, the ELF dump handler assumes that the thread list doesn't
3657 	 * change out from under it.
3658 	 *
3659 	 * XXX If another thread attempts to single-thread before us
3660 	 *     (e.g. via fork()), we won't get a dump at all.
3661 	 */
3662 	if ((sigprop(sig) & SIGPROP_CORE) &&
3663 	    thread_single(p, SINGLE_NO_EXIT) == 0) {
3664 		p->p_sig = sig;
3665 		/*
3666 		 * Log signals which would cause core dumps
3667 		 * (Log as LOG_INFO to appease those who don't want
3668 		 * these messages.)
3669 		 * XXX : Todo, as well as euid, write out ruid too
3670 		 * Note that coredump() drops proc lock.
3671 		 */
3672 		rv = coredump(td);
3673 		switch (rv) {
3674 		case 0:
3675 			sig |= WCOREFLAG;
3676 			coreinfo = " (core dumped)";
3677 			break;
3678 		case EFAULT:
3679 			coreinfo = " (no core dump - bad address)";
3680 			break;
3681 		case EINVAL:
3682 			coreinfo = " (no core dump - invalid argument)";
3683 			break;
3684 		case EFBIG:
3685 			coreinfo = " (no core dump - too large)";
3686 			break;
3687 		default:
3688 			coreinfo = " (no core dump - other error)";
3689 			break;
3690 		}
3691 		if (logexit)
3692 			log(LOG_INFO,
3693 			    "pid %d (%s), jid %d, uid %d: exited on "
3694 			    "signal %d%s\n", p->p_pid, p->p_comm,
3695 			    p->p_ucred->cr_prison->pr_id,
3696 			    td->td_ucred->cr_uid,
3697 			    sig &~ WCOREFLAG, coreinfo);
3698 	} else
3699 		PROC_UNLOCK(p);
3700 	exit1(td, 0, sig);
3701 	/* NOTREACHED */
3702 }
3703 
3704 /*
3705  * Send a queued SIGCHLD to the parent when the child process's
3706  * state changes.
3707  */
3708 static void
3709 sigparent(struct proc *p, int reason, int status)
3710 {
3711 	PROC_LOCK_ASSERT(p, MA_OWNED);
3712 	PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3713 
3714 	if (p->p_ksi != NULL) {
3715 		p->p_ksi->ksi_signo  = SIGCHLD;
3716 		p->p_ksi->ksi_code   = reason;
3717 		p->p_ksi->ksi_status = status;
3718 		p->p_ksi->ksi_pid    = p->p_pid;
3719 		p->p_ksi->ksi_uid    = p->p_ucred->cr_ruid;
3720 		if (KSI_ONQ(p->p_ksi))
3721 			return;
3722 	}
3723 	pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3724 }
3725 
3726 static void
3727 childproc_jobstate(struct proc *p, int reason, int sig)
3728 {
3729 	struct sigacts *ps;
3730 
3731 	PROC_LOCK_ASSERT(p, MA_OWNED);
3732 	PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3733 
3734 	/*
3735 	 * Wake up the parent sleeping in kern_wait() and also send
3736 	 * SIGCHLD to it.  SIGCHLD alone does not guarantee that the
3737 	 * parent will wake up, because the parent may have masked
3738 	 * the signal.
3739 	 */
3740 	p->p_pptr->p_flag |= P_STATCHILD;
3741 	wakeup(p->p_pptr);
3742 
3743 	ps = p->p_pptr->p_sigacts;
3744 	mtx_lock(&ps->ps_mtx);
3745 	if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3746 		mtx_unlock(&ps->ps_mtx);
3747 		sigparent(p, reason, sig);
3748 	} else
3749 		mtx_unlock(&ps->ps_mtx);
3750 }
3751 
3752 void
3753 childproc_stopped(struct proc *p, int reason)
3754 {
3755 
3756 	childproc_jobstate(p, reason, p->p_xsig);
3757 }
3758 
3759 void
3760 childproc_continued(struct proc *p)
3761 {
3762 	childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3763 }
3764 
3765 void
3766 childproc_exited(struct proc *p)
3767 {
3768 	int reason, status;
3769 
3770 	if (WCOREDUMP(p->p_xsig)) {
3771 		reason = CLD_DUMPED;
3772 		status = WTERMSIG(p->p_xsig);
3773 	} else if (WIFSIGNALED(p->p_xsig)) {
3774 		reason = CLD_KILLED;
3775 		status = WTERMSIG(p->p_xsig);
3776 	} else {
3777 		reason = CLD_EXITED;
3778 		status = p->p_xexit;
3779 	}
3780 	/*
3781 	 * XXX avoid calling wakeup(p->p_pptr), the work is
3782 	 * done in exit1().
3783 	 */
3784 	sigparent(p, reason, status);
3785 }
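
/*
 * On the parent's side the fields filled in by sigparent() show up in
 * the SIGCHLD siginfo.  A minimal userspace sketch of a consumer
 * (standard sigaction(2)/SA_SIGINFO API; illustrative only, not code
 * from this file):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void
 *	chld_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// si_code is CLD_EXITED, CLD_KILLED, CLD_DUMPED, ...;
 *		// si_status is the exit status or terminating signal.
 *		printf("child %d: code %d status %d\n",
 *		    (int)si->si_pid, si->si_code, si->si_status);
 *	}
 *
 *	// Installed with:
 *	//	struct sigaction sa = { .sa_sigaction = chld_handler,
 *	//	    .sa_flags = SA_SIGINFO };
 *	//	sigaction(SIGCHLD, &sa, NULL);
 *
 * (printf() is not async-signal-safe; a real handler would only record
 * the information for later processing.)
 */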
3786 
3787 #define	MAX_NUM_CORE_FILES 100000
3788 #ifndef NUM_CORE_FILES
3789 #define	NUM_CORE_FILES 5
3790 #endif
3791 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3792 static int num_cores = NUM_CORE_FILES;
3793 
3794 static int
3795 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
3796 {
3797 	int error;
3798 	int new_val;
3799 
3800 	new_val = num_cores;
3801 	error = sysctl_handle_int(oidp, &new_val, 0, req);
3802 	if (error != 0 || req->newptr == NULL)
3803 		return (error);
3804 	if (new_val > MAX_NUM_CORE_FILES)
3805 		new_val = MAX_NUM_CORE_FILES;
3806 	if (new_val < 0)
3807 		new_val = 0;
3808 	num_cores = new_val;
3809 	return (0);
3810 }
3811 SYSCTL_PROC(_debug, OID_AUTO, ncores,
3812     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int),
3813     sysctl_debug_num_cores_check, "I",
3814     "Maximum number of generated process corefiles while using index format");
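
/*
 * Example usage of the index format (an illustrative sketch assuming
 * the standard sysctl(8) utility; the exact commands are not taken
 * from this file):
 *
 *	sysctl kern.corefile='%N.%I.core'	# use the %I index
 *	sysctl debug.ncores=10			# keep up to 10 files
 *
 * With these settings a crashing process "foo" produces foo.0.core
 * through foo.9.core; once all ten names exist, the oldest file is
 * reused (see corefile_open_last() below).
 */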
3815 
3816 #define	GZIP_SUFFIX	".gz"
3817 #define	ZSTD_SUFFIX	".zst"
3818 
3819 int compress_user_cores = 0;
3820 
3821 static int
3822 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3823 {
3824 	int error, val;
3825 
3826 	val = compress_user_cores;
3827 	error = sysctl_handle_int(oidp, &val, 0, req);
3828 	if (error != 0 || req->newptr == NULL)
3829 		return (error);
3830 	if (val != 0 && !compressor_avail(val))
3831 		return (EINVAL);
3832 	compress_user_cores = val;
3833 	return (error);
3834 }
3835 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores,
3836     CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3837     sysctl_compress_user_cores, "I",
3838     "Enable compression of user corefiles ("
3839     __XSTRING(COMPRESS_GZIP) " = gzip, "
3840     __XSTRING(COMPRESS_ZSTD) " = zstd)");
3841 
3842 int compress_user_cores_level = 6;
3843 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3844     &compress_user_cores_level, 0,
3845     "Corefile compression level");
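
/*
 * Example usage (an illustrative sketch assuming the standard sysctl(8)
 * and loader tunable interfaces; commands not taken from this file):
 *
 *	# The numeric values of COMPRESS_GZIP/COMPRESS_ZSTD come from
 *	# sys/compressor.h and are echoed in the sysctl description above.
 *	sysctl kern.compress_user_cores=2	# assumed COMPRESS_ZSTD value
 *	sysctl kern.compress_user_cores_level=9	# favor smaller files
 *
 * The handler above rejects values for which compressor_avail() reports
 * no support, so the matching compressor must be present in the kernel.
 */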
3846 
3847 /*
3848  * Protect the access to corefilename[] by allproc_lock.
3849  */
3850 #define	corefilename_lock	allproc_lock
3851 
3852 static char corefilename[MAXPATHLEN] = {"%N.core"};
3853 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3854 
3855 static int
3856 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3857 {
3858 	int error;
3859 
3860 	sx_xlock(&corefilename_lock);
3861 	error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3862 	    req);
3863 	sx_xunlock(&corefilename_lock);
3864 
3865 	return (error);
3866 }
3867 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3868     CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3869     "Process corefile name format string");
3870 
3871 static void
3872 vnode_close_locked(struct thread *td, struct vnode *vp)
3873 {
3874 
3875 	VOP_UNLOCK(vp);
3876 	vn_close(vp, FWRITE, td->td_ucred, td);
3877 }
3878 
3879 /*
3880  * If the core format has a %I in it, then we need to check
3881  * for existing corefiles before choosing a name.
3882  * To do this we iterate over 0..ncores-1 to find a
3883  * core file name that is not yet in use.  If all names are
3884  * already in use we reuse the oldest core file.
3885  */
3886 static int
3887 corefile_open_last(struct thread *td, char *name, int indexpos,
3888     int indexlen, int ncores, struct vnode **vpp)
3889 {
3890 	struct vnode *oldvp, *nextvp, *vp;
3891 	struct vattr vattr;
3892 	struct nameidata nd;
3893 	int error, i, flags, oflags, cmode;
3894 	char ch;
3895 	struct timespec lasttime;
3896 
3897 	nextvp = oldvp = NULL;
3898 	cmode = S_IRUSR | S_IWUSR;
3899 	oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3900 	    (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3901 
3902 	for (i = 0; i < ncores; i++) {
3903 		flags = O_CREAT | FWRITE | O_NOFOLLOW;
3904 
3905 		ch = name[indexpos + indexlen];
3906 		(void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3907 		    i);
3908 		name[indexpos + indexlen] = ch;
3909 
3910 		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
3911 		error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3912 		    NULL);
3913 		if (error != 0)
3914 			break;
3915 
3916 		vp = nd.ni_vp;
3917 		NDFREE_PNBUF(&nd);
3918 		if ((flags & O_CREAT) == O_CREAT) {
3919 			nextvp = vp;
3920 			break;
3921 		}
3922 
3923 		error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3924 		if (error != 0) {
3925 			vnode_close_locked(td, vp);
3926 			break;
3927 		}
3928 
3929 		if (oldvp == NULL ||
3930 		    lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3931 		    (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3932 		    lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3933 			if (oldvp != NULL)
3934 				vn_close(oldvp, FWRITE, td->td_ucred, td);
3935 			oldvp = vp;
3936 			VOP_UNLOCK(oldvp);
3937 			lasttime = vattr.va_mtime;
3938 		} else {
3939 			vnode_close_locked(td, vp);
3940 		}
3941 	}
3942 
3943 	if (oldvp != NULL) {
3944 		if (nextvp == NULL) {
3945 			if ((td->td_proc->p_flag & P_SUGID) != 0) {
3946 				error = EFAULT;
3947 				vn_close(oldvp, FWRITE, td->td_ucred, td);
3948 			} else {
3949 				nextvp = oldvp;
3950 				error = vn_lock(nextvp, LK_EXCLUSIVE);
3951 				if (error != 0) {
3952 					vn_close(nextvp, FWRITE, td->td_ucred,
3953 					    td);
3954 					nextvp = NULL;
3955 				}
3956 			}
3957 		} else {
3958 			vn_close(oldvp, FWRITE, td->td_ucred, td);
3959 		}
3960 	}
3961 	if (error != 0) {
3962 		if (nextvp != NULL)
3963 			vnode_close_locked(td, oldvp);
3964 	} else {
3965 		*vpp = nextvp;
3966 	}
3967 
3968 	return (error);
3969 }
3970 
3971 /*
3972  * corefile_open(comm, uid, pid, td, compress, signum, vpp, namep)
3973  * Expand the format described in corefilename, using comm, uid, pid and
3974  * signum, and open/create the core file.
3975  * corefilename is a printf-like string with these specifiers (%% is a literal '%'):
3976  *	%H	hostname		%I	autoincrementing index
3977  *	%N	name of process (comm)	%P	process id (pid)
3978  *	%S	signal number (signum)	%U	user id (uid)
3979  * For example, "%N.core" is the default; core dumps can be disabled
3980  * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3981  * This is controlled by the sysctl variable kern.corefile (see above).
3982  */
3983 static int
3984 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3985     int compress, int signum, struct vnode **vpp, char **namep)
3986 {
3987 	struct sbuf sb;
3988 	struct nameidata nd;
3989 	const char *format;
3990 	char *hostname, *name;
3991 	int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3992 
3993 	hostname = NULL;
3994 	format = corefilename;
3995 	name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3996 	indexlen = 0;
3997 	indexpos = -1;
3998 	ncores = num_cores;
3999 	(void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
4000 	sx_slock(&corefilename_lock);
4001 	for (i = 0; format[i] != '\0'; i++) {
4002 		switch (format[i]) {
4003 		case '%':	/* Format character */
4004 			i++;
4005 			switch (format[i]) {
4006 			case '%':
4007 				sbuf_putc(&sb, '%');
4008 				break;
4009 			case 'H':	/* hostname */
4010 				if (hostname == NULL) {
4011 					hostname = malloc(MAXHOSTNAMELEN,
4012 					    M_TEMP, M_WAITOK);
4013 				}
4014 				getcredhostname(td->td_ucred, hostname,
4015 				    MAXHOSTNAMELEN);
4016 				sbuf_cat(&sb, hostname);
4017 				break;
4018 			case 'I':	/* autoincrementing index */
4019 				if (indexpos != -1) {
4020 					sbuf_printf(&sb, "%%I");
4021 					break;
4022 				}
4023 
4024 				indexpos = sbuf_len(&sb);
4025 				sbuf_printf(&sb, "%u", ncores - 1);
4026 				indexlen = sbuf_len(&sb) - indexpos;
4027 				break;
4028 			case 'N':	/* process name */
4029 				sbuf_printf(&sb, "%s", comm);
4030 				break;
4031 			case 'P':	/* process id */
4032 				sbuf_printf(&sb, "%u", pid);
4033 				break;
4034 			case 'S':	/* signal number */
4035 				sbuf_printf(&sb, "%i", signum);
4036 				break;
4037 			case 'U':	/* user id */
4038 				sbuf_printf(&sb, "%u", uid);
4039 				break;
4040 			default:
4041 				log(LOG_ERR,
4042 				    "Unknown format character %c in "
4043 				    "corename `%s'\n", format[i], format);
4044 				break;
4045 			}
4046 			break;
4047 		default:
4048 			sbuf_putc(&sb, format[i]);
4049 			break;
4050 		}
4051 	}
4052 	sx_sunlock(&corefilename_lock);
4053 	free(hostname, M_TEMP);
4054 	if (compress == COMPRESS_GZIP)
4055 		sbuf_cat(&sb, GZIP_SUFFIX);
4056 	else if (compress == COMPRESS_ZSTD)
4057 		sbuf_cat(&sb, ZSTD_SUFFIX);
4058 	if (sbuf_error(&sb) != 0) {
4059 		log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
4060 		    "long\n", (long)pid, comm, (u_long)uid);
4061 		sbuf_delete(&sb);
4062 		free(name, M_TEMP);
4063 		return (ENOMEM);
4064 	}
4065 	sbuf_finish(&sb);
4066 	sbuf_delete(&sb);
4067 
4068 	if (indexpos != -1) {
4069 		error = corefile_open_last(td, name, indexpos, indexlen, ncores,
4070 		    vpp);
4071 		if (error != 0) {
4072 			log(LOG_ERR,
4073 			    "pid %d (%s), uid (%u):  Path `%s' failed "
4074 			    "on initial open test, error = %d\n",
4075 			    pid, comm, uid, name, error);
4076 		}
4077 	} else {
4078 		cmode = S_IRUSR | S_IWUSR;
4079 		oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
4080 		    (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
4081 		flags = O_CREAT | FWRITE | O_NOFOLLOW;
4082 		if ((td->td_proc->p_flag & P_SUGID) != 0)
4083 			flags |= O_EXCL;
4084 
4085 		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
4086 		error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
4087 		    NULL);
4088 		if (error == 0) {
4089 			*vpp = nd.ni_vp;
4090 			NDFREE_PNBUF(&nd);
4091 		}
4092 	}
4093 
4094 	if (error != 0) {
4095 #ifdef AUDIT
4096 		audit_proc_coredump(td, name, error);
4097 #endif
4098 		free(name, M_TEMP);
4099 		return (error);
4100 	}
4101 	*namep = name;
4102 	return (0);
4103 }
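
/*
 * Example expansion of the format string (illustrative only): with
 *
 *	kern.corefile=/var/coredumps/%H.%N.%P.%S.core
 *
 * a SIGSEGV (signal 11) in pid 1234 of "myprog" on host "db1" would be
 * written to
 *
 *	/var/coredumps/db1.myprog.1234.11.core
 *
 * assuming /var/coredumps exists and the effective user may create the
 * file there (see the checks in coredump() below).
 */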
4104 
4105 /*
4106  * Dump a process' core.  The main routine does some
4107  * policy checking, and creates the name of the coredump;
4108  * then it passes on a vnode and a size limit to the process-specific
4109  * coredump routine if there is one; if there _is not_ one, it returns
4110  * ENOSYS; otherwise it returns the error from the process-specific routine.
4111  */
4112 
4113 static int
4114 coredump(struct thread *td)
4115 {
4116 	struct proc *p = td->td_proc;
4117 	struct ucred *cred = td->td_ucred;
4118 	struct vnode *vp;
4119 	struct flock lf;
4120 	struct vattr vattr;
4121 	size_t fullpathsize;
4122 	int error, error1, locked;
4123 	char *name;			/* name of corefile */
4124 	void *rl_cookie;
4125 	off_t limit;
4126 	char *fullpath, *freepath = NULL;
4127 	struct sbuf *sb;
4128 
4129 	PROC_LOCK_ASSERT(p, MA_OWNED);
4130 	MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
4131 
4132 	if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
4133 	    (p->p_flag2 & P2_NOTRACE) != 0) {
4134 		PROC_UNLOCK(p);
4135 		return (EFAULT);
4136 	}
4137 
4138 	/*
4139 	 * Note that the bulk of limit checking is done after
4140 	 * the corefile is created.  The exception is if the limit
4141 	 * for corefiles is 0, in which case we don't bother
4142 	 * creating the corefile at all.  This layout means that
4143 	 * a corefile is truncated instead of not being created,
4144 	 * if it is larger than the limit.
4145 	 */
4146 	limit = (off_t)lim_cur(td, RLIMIT_CORE);
4147 	if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
4148 		PROC_UNLOCK(p);
4149 		return (EFBIG);
4150 	}
4151 	PROC_UNLOCK(p);
4152 
4153 	error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
4154 	    compress_user_cores, p->p_sig, &vp, &name);
4155 	if (error != 0)
4156 		return (error);
4157 
4158 	/*
4159 	 * Don't dump to non-regular files or files with links.
4160 	 * Do not dump into system files. Effective user must own the corefile.
4161 	 */
4162 	if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
4163 	    vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
4164 	    vattr.va_uid != cred->cr_uid) {
4165 		VOP_UNLOCK(vp);
4166 		error = EFAULT;
4167 		goto out;
4168 	}
4169 
4170 	VOP_UNLOCK(vp);
4171 
4172 	/* Postpone other writers, including core dumps of other processes. */
4173 	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
4174 
4175 	lf.l_whence = SEEK_SET;
4176 	lf.l_start = 0;
4177 	lf.l_len = 0;
4178 	lf.l_type = F_WRLCK;
4179 	locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
4180 
4181 	VATTR_NULL(&vattr);
4182 	vattr.va_size = 0;
4183 	if (set_core_nodump_flag)
4184 		vattr.va_flags = UF_NODUMP;
4185 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4186 	VOP_SETATTR(vp, &vattr, cred);
4187 	VOP_UNLOCK(vp);
4188 	PROC_LOCK(p);
4189 	p->p_acflag |= ACORE;
4190 	PROC_UNLOCK(p);
4191 
4192 	if (p->p_sysent->sv_coredump != NULL) {
4193 		error = p->p_sysent->sv_coredump(td, vp, limit, 0);
4194 	} else {
4195 		error = ENOSYS;
4196 	}
4197 
4198 	if (locked) {
4199 		lf.l_type = F_UNLCK;
4200 		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
4201 	}
4202 	vn_rangelock_unlock(vp, rl_cookie);
4203 
4204 	/*
4205 	 * Notify the userland helper that a process triggered a core dump.
4206 	 * This allows the helper to run an automated debugging session.
4207 	 */
4208 	if (error != 0 || coredump_devctl == 0)
4209 		goto out;
4210 	sb = sbuf_new_auto();
4211 	if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0)
4212 		goto out2;
4213 	sbuf_cat(sb, "comm=\"");
4214 	devctl_safe_quote_sb(sb, fullpath);
4215 	free(freepath, M_TEMP);
4216 	sbuf_cat(sb, "\" core=\"");
4217 
4218 	/*
4219 	 * We can't look up the core file vp directly.  When we're replacing a core,
4220 	 * and at other times, we flush the name cache, so the lookup would fail.
4221 	 * Instead, if the path of the core is relative, add the current dir in front of it.
4222 	 */
4223 	if (name[0] != '/') {
4224 		fullpathsize = MAXPATHLEN;
4225 		freepath = malloc(fullpathsize, M_TEMP, M_WAITOK);
4226 		if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) {
4227 			free(freepath, M_TEMP);
4228 			goto out2;
4229 		}
4230 		devctl_safe_quote_sb(sb, fullpath);
4231 		free(freepath, M_TEMP);
4232 		sbuf_putc(sb, '/');
4233 	}
4234 	devctl_safe_quote_sb(sb, name);
4235 	sbuf_putc(sb, '"');
4236 	if (sbuf_finish(sb) == 0)
4237 		devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
4238 out2:
4239 	sbuf_delete(sb);
4240 out:
4241 	error1 = vn_close(vp, FWRITE, cred, td);
4242 	if (error == 0)
4243 		error = error1;
4244 #ifdef AUDIT
4245 	audit_proc_coredump(td, name, error);
4246 #endif
4247 	free(name, M_TEMP);
4248 	return (error);
4249 }
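
/*
 * An automated debugging helper can hook the devctl notification sent
 * above with a devd(8) rule along these lines (an illustrative sketch;
 * the $comm/$core expansion assumes devd's usual key=value parsing of
 * the notification data):
 *
 *	notify 10 {
 *		match "system"		"kernel";
 *		match "subsystem"	"signal";
 *		match "type"		"coredump";
 *		action "logger coredump from $comm saved as $core";
 *	};
 */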
4250 
4251 /*
4252  * Nonexistent system call -- signal the process (it may want to handle it).
4253  * Flag an error in case the signal is not seen immediately (blocked or ignored).
4254  */
4255 #ifndef _SYS_SYSPROTO_H_
4256 struct nosys_args {
4257 	int	dummy;
4258 };
4259 #endif
4260 /* ARGSUSED */
4261 int
4262 nosys(struct thread *td, struct nosys_args *args)
4263 {
4264 	struct proc *p;
4265 
4266 	p = td->td_proc;
4267 
4268 	if (SV_PROC_FLAG(p, SV_SIGSYS) != 0 && kern_signosys) {
4269 		PROC_LOCK(p);
4270 		tdsignal(td, SIGSYS);
4271 		PROC_UNLOCK(p);
4272 	}
4273 	if (kern_lognosys == 1 || kern_lognosys == 3) {
4274 		uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4275 		    td->td_sa.code);
4276 	}
4277 	if (kern_lognosys == 2 || kern_lognosys == 3 ||
4278 	    (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
4279 		printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4280 		    td->td_sa.code);
4281 	}
4282 	return (ENOSYS);
4283 }
4284 
4285 /*
4286  * Send a SIGIO or SIGURG signal to a process or process group using stored
4287  * credentials rather than those of the current process.
4288  */
4289 void
4290 pgsigio(struct sigio **sigiop, int sig, int checkctty)
4291 {
4292 	ksiginfo_t ksi;
4293 	struct sigio *sigio;
4294 
4295 	ksiginfo_init(&ksi);
4296 	ksi.ksi_signo = sig;
4297 	ksi.ksi_code = SI_KERNEL;
4298 
4299 	SIGIO_LOCK();
4300 	sigio = *sigiop;
4301 	if (sigio == NULL) {
4302 		SIGIO_UNLOCK();
4303 		return;
4304 	}
4305 	if (sigio->sio_pgid > 0) {
4306 		PROC_LOCK(sigio->sio_proc);
4307 		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
4308 			kern_psignal(sigio->sio_proc, sig);
4309 		PROC_UNLOCK(sigio->sio_proc);
4310 	} else if (sigio->sio_pgid < 0) {
4311 		struct proc *p;
4312 
4313 		PGRP_LOCK(sigio->sio_pgrp);
4314 		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
4315 			PROC_LOCK(p);
4316 			if (p->p_state == PRS_NORMAL &&
4317 			    CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
4318 			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
4319 				kern_psignal(p, sig);
4320 			PROC_UNLOCK(p);
4321 		}
4322 		PGRP_UNLOCK(sigio->sio_pgrp);
4323 	}
4324 	SIGIO_UNLOCK();
4325 }
4326 
4327 static int
4328 filt_sigattach(struct knote *kn)
4329 {
4330 	struct proc *p = curproc;
4331 
4332 	kn->kn_ptr.p_proc = p;
4333 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
4334 
4335 	knlist_add(p->p_klist, kn, 0);
4336 
4337 	return (0);
4338 }
4339 
4340 static void
4341 filt_sigdetach(struct knote *kn)
4342 {
4343 	knlist_remove(kn->kn_knlist, kn, 0);
4344 }
4345 
4346 /*
4347  * signal knotes are shared with proc knotes, so we apply a mask to
4348  * the hint in order to differentiate them from process hints.  This
4349  * could be avoided by using a signal-specific knote list, but probably
4350  * isn't worth the trouble.
4351  */
4352 static int
4353 filt_signal(struct knote *kn, long hint)
4354 {
4355 
4356 	if (hint & NOTE_SIGNAL) {
4357 		hint &= ~NOTE_SIGNAL;
4358 
4359 		if (kn->kn_id == hint)
4360 			kn->kn_data++;
4361 	}
4362 	return (kn->kn_data != 0);
4363 }
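
/*
 * Userspace consumes this filter through kqueue(2)/EVFILT_SIGNAL.  A
 * minimal sketch (standard kevent(2) API; illustrative only, not code
 * from this file; error handling omitted):
 *
 *	#include <sys/event.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct kevent kev;
 *		int kq = kqueue();
 *
 *		signal(SIGUSR1, SIG_IGN);	// delivery still counted
 *		EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *		kevent(kq, &kev, 1, NULL, 0, NULL);	// register
 *		kevent(kq, NULL, 0, &kev, 1, NULL);	// wait for SIGUSR1
 *		printf("SIGUSR1 posted %ld times\n", (long)kev.data);
 *		return (0);
 *	}
 *
 * kev.data reports how many times the signal was posted since the last
 * retrieval (EV_CLEAR is forced in filt_sigattach() above), matching
 * the kn_data counter maintained by filt_signal().
 */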
4364 
4365 struct sigacts *
4366 sigacts_alloc(void)
4367 {
4368 	struct sigacts *ps;
4369 
4370 	ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
4371 	refcount_init(&ps->ps_refcnt, 1);
4372 	mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
4373 	return (ps);
4374 }
4375 
4376 void
4377 sigacts_free(struct sigacts *ps)
4378 {
4379 
4380 	if (refcount_release(&ps->ps_refcnt) == 0)
4381 		return;
4382 	mtx_destroy(&ps->ps_mtx);
4383 	free(ps, M_SUBPROC);
4384 }
4385 
4386 struct sigacts *
4387 sigacts_hold(struct sigacts *ps)
4388 {
4389 
4390 	refcount_acquire(&ps->ps_refcnt);
4391 	return (ps);
4392 }
4393 
4394 void
4395 sigacts_copy(struct sigacts *dest, struct sigacts *src)
4396 {
4397 
4398 	KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
4399 	mtx_lock(&src->ps_mtx);
4400 	bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
4401 	mtx_unlock(&src->ps_mtx);
4402 }
4403 
4404 int
4405 sigacts_shared(struct sigacts *ps)
4406 {
4407 
4408 	return (ps->ps_refcnt > 1);
4409 }
4410 
4411 void
4412 sig_drop_caught(struct proc *p)
4413 {
4414 	int sig;
4415 	struct sigacts *ps;
4416 
4417 	ps = p->p_sigacts;
4418 	PROC_LOCK_ASSERT(p, MA_OWNED);
4419 	mtx_assert(&ps->ps_mtx, MA_OWNED);
4420 	SIG_FOREACH(sig, &ps->ps_sigcatch) {
4421 		sigdflt(ps, sig);
4422 		if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
4423 			sigqueue_delete_proc(p, sig);
4424 	}
4425 }
4426 
4427 static void
4428 sigfastblock_failed(struct thread *td, bool sendsig, bool write)
4429 {
4430 	ksiginfo_t ksi;
4431 
4432 	/*
4433 	 * Prevent further fetches and SIGSEGVs, allowing the thread to
4434 	 * issue syscalls despite the corruption.
4435 	 */
4436 	sigfastblock_clear(td);
4437 
4438 	if (!sendsig)
4439 		return;
4440 	ksiginfo_init_trap(&ksi);
4441 	ksi.ksi_signo = SIGSEGV;
4442 	ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
4443 	ksi.ksi_addr = td->td_sigblock_ptr;
4444 	trapsignal(td, &ksi);
4445 }
4446 
4447 static bool
4448 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
4449 {
4450 	uint32_t res;
4451 
4452 	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4453 		return (true);
4454 	if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
4455 		sigfastblock_failed(td, sendsig, false);
4456 		return (false);
4457 	}
4458 	*valp = res;
4459 	td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
4460 	return (true);
4461 }
4462 
4463 static void
4464 sigfastblock_resched(struct thread *td, bool resched)
4465 {
4466 	struct proc *p;
4467 
4468 	if (resched) {
4469 		p = td->td_proc;
4470 		PROC_LOCK(p);
4471 		reschedule_signals(p, td->td_sigmask, 0);
4472 		PROC_UNLOCK(p);
4473 	}
4474 	ast_sched(td, TDA_SIG);
4475 }
4476 
4477 int
4478 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
4479 {
4480 	struct proc *p;
4481 	int error, res;
4482 	uint32_t oldval;
4483 
4484 	error = 0;
4485 	p = td->td_proc;
4486 	switch (uap->cmd) {
4487 	case SIGFASTBLOCK_SETPTR:
4488 		if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
4489 			error = EBUSY;
4490 			break;
4491 		}
4492 		if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
4493 			error = EINVAL;
4494 			break;
4495 		}
4496 		td->td_pflags |= TDP_SIGFASTBLOCK;
4497 		td->td_sigblock_ptr = uap->ptr;
4498 		break;
4499 
4500 	case SIGFASTBLOCK_UNBLOCK:
4501 		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4502 			error = EINVAL;
4503 			break;
4504 		}
4505 
4506 		for (;;) {
4507 			res = casueword32(td->td_sigblock_ptr,
4508 			    SIGFASTBLOCK_PEND, &oldval, 0);
4509 			if (res == -1) {
4510 				error = EFAULT;
4511 				sigfastblock_failed(td, false, true);
4512 				break;
4513 			}
4514 			if (res == 0)
4515 				break;
4516 			MPASS(res == 1);
4517 			if (oldval != SIGFASTBLOCK_PEND) {
4518 				error = EBUSY;
4519 				break;
4520 			}
4521 			error = thread_check_susp(td, false);
4522 			if (error != 0)
4523 				break;
4524 		}
4525 		if (error != 0)
4526 			break;
4527 
4528 		/*
4529 		 * td_sigblock_val is cleared here, but not on a
4530 		 * syscall exit.  The end effect is that a single
4531 		 * interruptible sleep, while the user sigblock word is
4532 		 * set, might return EINTR or ERESTART to usermode
4533 		 * without delivering a signal.  All further sleeps,
4534 		 * until userspace clears the word and does
4535 		 * sigfastblock(UNBLOCK), observe the current word and
4536 		 * are no longer interrupted.  This is a slight
4537 		 * non-conformance; the alternative would be to read the
4538 		 * sigblock word on each syscall entry.
4539 		 */
4540 		td->td_sigblock_val = 0;
4541 
4542 		/*
4543 		 * Rely on the normal AST mechanism to deliver pending
4544 		 * signals to the current thread, but notify other
4545 		 * threads about the fake unblock.
4546 		 */
4547 		sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);
4548 
4549 		break;
4550 
4551 	case SIGFASTBLOCK_UNSETPTR:
4552 		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4553 			error = EINVAL;
4554 			break;
4555 		}
4556 		if (!sigfastblock_fetch_sig(td, false, &oldval)) {
4557 			error = EFAULT;
4558 			break;
4559 		}
4560 		if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
4561 			error = EBUSY;
4562 			break;
4563 		}
4564 		sigfastblock_clear(td);
4565 		break;
4566 
4567 	default:
4568 		error = EINVAL;
4569 		break;
4570 	}
4571 	return (error);
4572 }
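
/*
 * The matching userspace protocol is documented in sigfastblock(2); in
 * rough outline (an illustrative, per-thread sketch with error handling
 * omitted; the constants and the wrapper come from <sys/signalvar.h>
 * and libc, not from this file):
 *
 *	#include <sys/signalvar.h>
 *
 *	static uint32_t sigblock_word;
 *
 *	// Register the word once per thread.
 *	sigfastblock(SIGFASTBLOCK_SETPTR, &sigblock_word);
 *
 *	// Enter a signal-deferred section without a syscall.
 *	sigblock_word += SIGFASTBLOCK_INC;
 *	...
 *	// Leave it; if the kernel flagged a pending signal, request
 *	// delivery explicitly.
 *	sigblock_word -= SIGFASTBLOCK_INC;
 *	if (sigblock_word == SIGFASTBLOCK_PEND)
 *		sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
 *
 * The kernel side of this handshake is the fueword32()/casueword32()
 * traffic in sigfastblock_fetch_sig() and sigfastblock_setpend1().
 */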
4573 
4574 void
4575 sigfastblock_clear(struct thread *td)
4576 {
4577 	bool resched;
4578 
4579 	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4580 		return;
4581 	td->td_sigblock_val = 0;
4582 	resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
4583 	    SIGPENDING(td);
4584 	td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
4585 	sigfastblock_resched(td, resched);
4586 }
4587 
4588 void
4589 sigfastblock_fetch(struct thread *td)
4590 {
4591 	uint32_t val;
4592 
4593 	(void)sigfastblock_fetch_sig(td, true, &val);
4594 }
4595 
4596 static void
4597 sigfastblock_setpend1(struct thread *td)
4598 {
4599 	int res;
4600 	uint32_t oldval;
4601 
4602 	if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
4603 		return;
4604 	res = fueword32((void *)td->td_sigblock_ptr, &oldval);
4605 	if (res == -1) {
4606 		sigfastblock_failed(td, true, false);
4607 		return;
4608 	}
4609 	for (;;) {
4610 		res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
4611 		    oldval | SIGFASTBLOCK_PEND);
4612 		if (res == -1) {
4613 			sigfastblock_failed(td, true, true);
4614 			return;
4615 		}
4616 		if (res == 0) {
4617 			td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
4618 			td->td_pflags &= ~TDP_SIGFASTPENDING;
4619 			break;
4620 		}
4621 		MPASS(res == 1);
4622 		if (thread_check_susp(td, false) != 0)
4623 			break;
4624 	}
4625 }
4626 
4627 static void
4628 sigfastblock_setpend(struct thread *td, bool resched)
4629 {
4630 	struct proc *p;
4631 
4632 	sigfastblock_setpend1(td);
4633 	if (resched) {
4634 		p = td->td_proc;
4635 		PROC_LOCK(p);
4636 		reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);
4637 		PROC_UNLOCK(p);
4638 	}
4639 }
4640