/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/condvar.h>
#include <sys/devctl.h>
#include <sys/event.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/ptrace.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/ucoredump.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/wait.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/cpu.h>

#include <security/audit/audit.h>

#define	ONSIG	32	/* NSIG for osig* syscalls.  XXX. */
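
/*
 * DTrace probes: signal-send fires when a signal is sent to a thread or
 * process, signal-clear when a sigtimedwait()-family call accepts a
 * pending signal, and signal-discard when a signal is dropped without
 * delivery.
 */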
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , signal__send,
    "struct thread *", "struct proc *", "int");
SDT_PROBE_DEFINE2(proc, , , signal__clear,
    "int", "ksiginfo_t *");
SDT_PROBE_DEFINE3(proc, , , signal__discard,
    "struct thread *", "struct proc *", "int");

static int	killpg1(struct thread *td, int sig, int pgid, int all,
		    ksiginfo_t *ksi);
static int	issignal(struct thread *td);
static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static bool	sig_suspend_threads(struct thread *, struct proc *);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
static void	sigqueue_start(void);
static void	sigfastblock_setpend(struct thread *td, bool resched);
static void	sig_handle_first_stop(struct thread *td, struct proc *p,
    int sig);

static uma_zone_t	ksiginfo_zone = NULL;
const struct filterops sig_filtops = {
	.f_isfd = 0,
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
};

static int	kern_forcesigexit = 1;
SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
    &kern_forcesigexit, 0, "Force trap signal to be handled");

static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "POSIX real time signals");

static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	preallocate_siginfo = 1024;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals overflowed");

static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "Number of failed signal allocations");

static int	kern_lognosys = 0;
SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
    "Log invalid syscalls");

static int	kern_signosys = 1;
SYSCTL_INT(_kern, OID_AUTO, signosys, CTLFLAG_RWTUN, &kern_signosys, 0,
    "Send SIGSYS on return from invalid syscall");

__read_frequently bool sigfastblock_fetch_always = false;
SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
    &sigfastblock_fetch_always, 0,
    "Fetch sigfastblock word on each syscall entry for proper "
    "blocking semantics");

static bool	kern_sig_discard_ign = true;
SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
    &kern_sig_discard_ign, 0,
    "Discard ignored signals on delivery, otherwise queue them to "
    "the target queue");

bool pt_attach_transparent = true;
SYSCTL_BOOL(_debug, OID_AUTO, ptrace_attach_transparent, CTLFLAG_RWTUN,
    &pt_attach_transparent, 0,
    "Hide wakes from PT_ATTACH on interruptible sleeps");

SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);

/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define	CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SIGPROP_KILL		0x01	/* terminates process by default */
#define	SIGPROP_CORE		0x02	/* ditto and coredumps */
#define	SIGPROP_STOP		0x04	/* suspend process */
#define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
#define	SIGPROP_IGNORE		0x10	/* ignore by default */
#define	SIGPROP_CONT		0x20	/* continue if suspended */

static const int sigproptbl[NSIG] = {
	[SIGHUP] =	SIGPROP_KILL,
	[SIGINT] =	SIGPROP_KILL,
	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGKILL] =	SIGPROP_KILL,
	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGPIPE] =	SIGPROP_KILL,
	[SIGALRM] =	SIGPROP_KILL,
	[SIGTERM] =	SIGPROP_KILL,
	[SIGURG] =	SIGPROP_IGNORE,
	[SIGSTOP] =	SIGPROP_STOP,
	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
	[SIGCHLD] =	SIGPROP_IGNORE,
	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGIO] =	SIGPROP_IGNORE,
	[SIGXCPU] =	SIGPROP_KILL,
	[SIGXFSZ] =	SIGPROP_KILL,
	[SIGVTALRM] =	SIGPROP_KILL,
	[SIGPROF] =	SIGPROP_KILL,
	[SIGWINCH] =	SIGPROP_IGNORE,
	[SIGINFO] =	SIGPROP_IGNORE,
	[SIGUSR1] =	SIGPROP_KILL,
	[SIGUSR2] =	SIGPROP_KILL,
};

#define	_SIG_FOREACH_ADVANCE(i, set) ({					\
	int __found;							\
	for (;;) {							\
		if (__bits != 0) {					\
			int __sig = ffs(__bits);			\
			__bits &= ~(1u << (__sig - 1));			\
			sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
			__found = 1;					\
			break;						\
		}							\
		if (++__i == _SIG_WORDS) {				\
			__found = 0;					\
			break;						\
		}							\
		__bits = (set)->__bits[__i];				\
	}								\
	__found != 0;							\
})

#define	SIG_FOREACH(i, set)						\
	for (int32_t __i = -1, __bits = 0;				\
	    _SIG_FOREACH_ADVANCE(i, set); )

static sigset_t fastblock_mask;
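
/*
 * AST handler run on return to user mode when TDA_SIG is scheduled:
 * drains the thread's pending signals via cursig()/postsig() until
 * none remain deliverable.
 */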
static void
ast_sig(struct thread *td, int tda)
{
	struct proc *p;
	int old_boundary, sig;
	bool resched_sigs;

	p = td->td_proc;

#ifdef DIAGNOSTIC
	if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
	    TDAI(TDA_AST))) == 0) {
		PROC_LOCK(p);
		thread_lock(td);
		/*
		 * Note that TDA_SIG should be re-read from td_ast,
		 * since a signal might have been delivered after we
		 * cleared td_flags above.  This is one of the reasons
		 * for the looping check for the AST condition.
		 * See the comment in userret() about P_PPWAIT.
		 */
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && ((tda | td->td_ast) &
			    (TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
				thread_unlock(td);	/* fix dumps */
				panic(
				    "failed2 to set signal flags for ast p %p "
				    "td %p tda %#x td_ast %#x fl %#x",
				    p, td, tda, td->td_ast, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Check for signals.  Unlocked reads of p_pendingcnt or
	 * p_siglist might cause a process-directed signal to be
	 * handled later.
	 */
	if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		sigfastblock_fetch(td);
		PROC_LOCK(p);
		old_boundary = ~TDB_BOUNDARY | (td->td_dbgflags & TDB_BOUNDARY);
		td->td_dbgflags |= TDB_BOUNDARY;
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);
		td->td_dbgflags &= old_boundary;
		PROC_UNLOCK(p);
		resched_sigs = true;
	} else {
		resched_sigs = false;
	}

	/*
	 * Handle deferred update of the fast sigblock value, after
	 * the postsig() loop was performed.
	 */
	sigfastblock_setpend(td, resched_sigs);

	/*
	 * Clear td_sa.code: signal to ptrace that syscall arguments
	 * are unavailable after this point.  This AST handler is the
	 * last chance for ptracestop() to signal the tracer before
	 * the tracee returns to userspace.
	 */
	td->td_sa.code = 0;
}

static void
ast_sigsuspend(struct thread *td, int tda __unused)
{
	MPASS((td->td_pflags & TDP_OLDMASK) != 0);
	td->td_pflags &= ~TDP_OLDMASK;
	kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
}
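
/*
 * SYSINIT-time initialization of the signal subsystem: create the
 * ksiginfo UMA zone, publish the POSIX realtime signal limits, and
 * register the signal-related AST handlers.
 */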
static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
	SIGFILLSET(fastblock_mask);
	SIG_CANTMASK(fastblock_mask);
	ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);

	/*
	 * TDA_PSELECT is for the case where the signal mask should be restored
	 * before delivering any signals so that we do not deliver any that are
	 * blocked by the normal thread mask.  It is mutually exclusive with
	 * TDA_SIGSUSPEND, which should be used if we *do* want to deliver
	 * signals that are normally blocked, e.g., if it interrupted our sleep.
	 */
	ast_register(TDA_PSELECT, ASTR_ASTF_REQUIRED | ASTR_TDP,
	    TDP_OLDMASK, ast_sigsuspend);
	ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
	    TDP_OLDMASK, ast_sigsuspend);
}

ksiginfo_t *
ksiginfo_alloc(int mwait)
{
	MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);

	if (ksiginfo_zone == NULL)
		return (NULL);
	return (uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
}

void
ksiginfo_free(ksiginfo_t *ksi)
{
	uma_zfree(ksiginfo_zone, ksi);
}

static __inline bool
ksiginfo_tryfree(ksiginfo_t *ksi)
{
	if ((ksi->ksi_flags & KSI_EXT) == 0) {
		uma_zfree(ksiginfo_zone, ksi);
		return (true);
	}
	return (false);
}

void
sigqueue_init(sigqueue_t *list, struct proc *p)
{
	SIGEMPTYSET(list->sq_signals);
	SIGEMPTYSET(list->sq_kill);
	SIGEMPTYSET(list->sq_ptrace);
	TAILQ_INIT(&list->sq_list);
	list->sq_proc = p;
	list->sq_flags = SQ_INIT;
}

/*
 * Get a signal's ksiginfo.
 * Return:
 *	0	- signal not found
 *	others	- signal number
 */
static int
sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi, *next;
	int count = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (!SIGISMEMBER(sq->sq_signals, signo))
		return (0);

	if (SIGISMEMBER(sq->sq_ptrace, signo)) {
		count++;
		SIGDELSET(sq->sq_ptrace, signo);
		si->ksi_flags |= KSI_PTRACE;
	}
	if (SIGISMEMBER(sq->sq_kill, signo)) {
		count++;
		if (count == 1)
			SIGDELSET(sq->sq_kill, signo);
	}

	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (ksi->ksi_signo == signo) {
			if (count == 0) {
				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
				ksi->ksi_sigq = NULL;
				ksiginfo_copy(ksi, si);
				if (ksiginfo_tryfree(ksi) && p != NULL)
					p->p_pendingcnt--;
			}
			if (++count > 1)
				break;
		}
	}

	if (count <= 1)
		SIGDELSET(sq->sq_signals, signo);
	si->ksi_signo = signo;
	return (signo);
}

void
sigqueue_take(ksiginfo_t *ksi)
{
	struct ksiginfo *kp;
	struct proc *p;
	sigqueue_t *sq;

	if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
		return;

	p = sq->sq_proc;
	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
	ksi->ksi_sigq = NULL;
	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
		p->p_pendingcnt--;

	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
	    kp = TAILQ_NEXT(kp, ksi_link)) {
		if (kp->ksi_signo == ksi->ksi_signo)
			break;
	}
	if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
	    !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
}
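
/*
 * Add signal signo, with optional siginfo si, to sigqueue sq.  On
 * queue overflow or allocation failure, trap and kill-style signals
 * degrade to the plain pending bitmasks, while queued (KSI_SIGQ)
 * signals fail with EAGAIN.
 */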
static int
sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi;
	int ret = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	/*
	 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
	 * for these signals.
	 */
	if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	/* directly insert the ksi, don't copy it */
	if (si->ksi_flags & KSI_INS) {
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
		si->ksi_sigq = sq;
		goto out_set_bit;
	}

	if (__predict_false(ksiginfo_zone == NULL)) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
		signal_overflow++;
		ret = EAGAIN;
	} else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) {
		signal_alloc_fail++;
		ret = EAGAIN;
	} else {
		if (p != NULL)
			p->p_pendingcnt++;
		ksiginfo_copy(si, ksi);
		ksi->ksi_signo = signo;
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = sq;
	}

	if (ret != 0) {
		if ((si->ksi_flags & KSI_PTRACE) != 0) {
			SIGADDSET(sq->sq_ptrace, signo);
			ret = 0;
			goto out_set_bit;
		} else if ((si->ksi_flags & KSI_TRAP) != 0 ||
		    (si->ksi_flags & KSI_SIGQ) == 0) {
			SIGADDSET(sq->sq_kill, signo);
			ret = 0;
			goto out_set_bit;
		}
		return (ret);
	}

out_set_bit:
	SIGADDSET(sq->sq_signals, signo);
	return (ret);
}

void
sigqueue_flush(sigqueue_t *sq)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (p != NULL)
		PROC_LOCK_ASSERT(p, MA_OWNED);

	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = NULL;
		if (ksiginfo_tryfree(ksi) && p != NULL)
			p->p_pendingcnt--;
	}

	SIGEMPTYSET(sq->sq_signals);
	SIGEMPTYSET(sq->sq_kill);
	SIGEMPTYSET(sq->sq_ptrace);
}
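
/*
 * Move the signals in *set, together with any queued siginfo, from
 * sigqueue src to sigqueue dst, updating the pending counts of the
 * owning processes.
 */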
static void
sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
{
	sigset_t tmp;
	struct proc *p1, *p2;
	ksiginfo_t *ksi, *next;

	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
	p1 = src->sq_proc;
	p2 = dst->sq_proc;
	/* Move siginfo to target list */
	TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
			if (p1 != NULL)
				p1->p_pendingcnt--;
			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = dst;
			if (p2 != NULL)
				p2->p_pendingcnt++;
		}
	}

	/* Move pending bits to target list */
	tmp = src->sq_kill;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_kill, tmp);
	SIGSETNAND(src->sq_kill, tmp);

	tmp = src->sq_ptrace;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_ptrace, tmp);
	SIGSETNAND(src->sq_ptrace, tmp);

	tmp = src->sq_signals;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_signals, tmp);
	SIGSETNAND(src->sq_signals, tmp);
}

#if 0
static void
sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_move_set(src, dst, &set);
}
#endif

static void
sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi, *next;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	/* Remove siginfo queue */
	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = NULL;
			if (ksiginfo_tryfree(ksi) && p != NULL)
				p->p_pendingcnt--;
		}
	}
	SIGSETNAND(sq->sq_kill, *set);
	SIGSETNAND(sq->sq_ptrace, *set);
	SIGSETNAND(sq->sq_signals, *set);
}

void
sigqueue_delete(sigqueue_t *sq, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set(sq, &set);
}

/* Remove a set of signals for a process */
static void
sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
{
	sigqueue_t worklist;
	struct thread *td0;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	sigqueue_init(&worklist, NULL);
	sigqueue_move_set(&p->p_sigqueue, &worklist, set);

	FOREACH_THREAD_IN_PROC(p, td0)
		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);

	sigqueue_flush(&worklist);
}

void
sigqueue_delete_proc(struct proc *p, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set_proc(p, &set);
}

static void
sigqueue_delete_stopmask_proc(struct proc *p)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, SIGSTOP);
	SIGADDSET(set, SIGTSTP);
	SIGADDSET(set, SIGTTIN);
	SIGADDSET(set, SIGTTOU);
	sigqueue_delete_set_proc(p, &set);
}

/*
 * Determine the signal that should be delivered to thread td, the
 * current thread; 0 if none.  If there is a pending stop signal with
 * default action, the process stops in issignal().
 */
int
cursig(struct thread *td)
{
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	return (SIGPENDING(td) ? issignal(td) : 0);
}

/*
 * Arrange for ast() to handle unmasked pending signals on return to user
 * mode.  This must be called whenever a signal is added to td_sigqueue or
 * unmasked in td_sigmask.
 */
void
signotify(struct thread *td)
{

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);

	if (SIGPENDING(td))
		ast_sched(td, TDA_SIG);
}

/*
 * Returns 1 (true) if altstack is configured for the thread, and the
 * passed stack bottom address falls into the altstack range.  Handles
 * the COMPAT_43 special case where the alt stack size is zero.
 */
int
sigonstack(size_t sp)
{
	struct thread *td;

	td = curthread;
	if ((td->td_pflags & TDP_ALTSTACK) == 0)
		return (0);
#if defined(COMPAT_43)
	if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
		return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
#endif
	return (sp >= (size_t)td->td_sigstk.ss_sp &&
	    sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
}

static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < nitems(sigproptbl))
		return (sigproptbl[sig]);
	return (0);
}

bool
sig_do_core(int sig)
{

	return ((sigprop(sig) & SIGPROP_CORE) != 0);
}

static bool
sigact_flag_test(const struct sigaction *act, int flag)
{

	/*
	 * SA_SIGINFO is reset when the signal disposition is set to
	 * ignore or default.  Other flags are kept according to user
	 * settings.
	 */
	return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
	    ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
	    (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
}

/*
 * kern_sigaction
 * sigaction
 * freebsd4_sigaction
 * osigaction
 */
int
kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
    struct sigaction *oact, int flags)
{
	struct sigacts *ps;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);
	if (act != NULL && act->sa_handler != SIG_DFL &&
	    act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
	    SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
	    SA_NOCLDWAIT | SA_SIGINFO)) != 0)
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if (oact) {
		memset(oact, 0, sizeof(*oact));
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig)) {
			oact->sa_flags |= SA_SIGINFO;
			oact->sa_sigaction =
			    (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
		} else
			oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */

		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (sigact_flag_test(act, SA_SIGINFO)) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!sigact_flag_test(act, SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (sigact_flag_test(act, SA_ONSTACK))
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (sigact_flag_test(act, SA_RESETHAND))
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (sigact_flag_test(act, SA_NODEFER))
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trusting it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore.  However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SIGPROP_IGNORE &&
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			/* never to be seen again */
			sigqueue_delete_proc(p, sig);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct sigaction_args {
	int	sig;
	struct sigaction *act;
	struct sigaction *oact;
};
#endif
int
sys_sigaction(struct thread *td, struct sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}

#ifdef COMPAT_FREEBSD4
#ifndef _SYS_SYSPROTO_H_
struct freebsd4_sigaction_args {
	int	sig;
	struct sigaction *act;
	struct sigaction *oact;
};
#endif
int
freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}
#endif	/* COMPAT_FREEBSD4 */

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigaction_args {
	int	signum;
	struct osigaction *nsa;
	struct osigaction *osa;
};
#endif
int
osigaction(struct thread *td, struct osigaction_args *uap)
{
	struct osigaction sa;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);

	nsap = (uap->nsa != NULL) ? &nsa : NULL;
	osap = (uap->osa != NULL) ? &osa : NULL;

	if (nsap) {
		error = copyin(uap->nsa, &sa, sizeof(sa));
		if (error)
			return (error);
		nsap->sa_handler = sa.sa_handler;
		nsap->sa_flags = sa.sa_flags;
		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		sa.sa_handler = osap->sa_handler;
		sa.sa_flags = osap->sa_flags;
		SIG2OSIG(osap->sa_mask, sa.sa_mask);
		error = copyout(&sa, uap->osa, sizeof(sa));
	}
	return (error);
}

#if !defined(__i386__)
/* Avoid replicating the same stub everywhere */
int
osigreturn(struct thread *td, struct osigreturn_args *uap)
{
	return (kern_nosys(td, 0));
}
#endif
#endif	/* COMPAT_43 */

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	int i;
	struct sigacts *ps;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	for (i = 1; i <= NSIG; i++) {
		if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
			SIGADDSET(ps->ps_sigignore, i);
		}
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
}

/*
 * Reset specified signal to the default disposition.
 */
static void
sigdflt(struct sigacts *ps, int sig)
{

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIGDELSET(ps->ps_sigcatch, sig);
	if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
		SIGADDSET(ps->ps_sigignore, sig);
	ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	SIGDELSET(ps->ps_siginfo, sig);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig_drop_caught(p);

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td = curthread;
	MPASS(td->td_proc == p);
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset the "no zombies if child dies" flag, as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}

/*
 * kern_sigprocmask()
 *
 *	Manipulate signal mask.
 */
int
kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
    int flags)
{
	sigset_t new_block, oset1;
	struct proc *p;
	int error;

	p = td->td_proc;
	if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
		PROC_LOCK_ASSERT(p, MA_OWNED);
	else
		PROC_LOCK(p);
	mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
	    ? MA_OWNED : MA_NOTOWNED);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			SIGSETOR(td->td_sigmask, *set);
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			goto out;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			if (flags & SIGPROCMASK_OLD)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			signotify(td);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		/*
		 * The new_block set contains signals that were not previously
		 * blocked, but are blocked now.
		 *
		 * In case we block any signal that was not previously blocked
		 * for td, and the process has the signal pending, try to
		 * schedule signal delivery to some thread that does not block
		 * the signal, possibly waking it up.
		 */
		if (p->p_numthreads != 1)
			reschedule_signals(p, new_block, flags);
	}

out:
	if (!(flags & SIGPROCMASK_PROC_LOCKED))
		PROC_UNLOCK(p);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sigprocmask_args {
	int	how;
	const sigset_t *set;
	sigset_t *oset;
};
#endif
int
sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigprocmask_args {
	int	how;
	osigset_t mask;
};
#endif
int
osigprocmask(struct thread *td, struct osigprocmask_args *uap)
{
	sigset_t set, oset;
	int error;

	OSIG2SIG(uap->mask, set);
	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
	SIG2OSIG(oset, td->td_retval[0]);
	return (error);
}
#endif	/* COMPAT_43 */

int
sys_sigwait(struct thread *td, struct sigwait_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error) {
		td->td_retval[0] = error;
		return (0);
	}

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error) {
		/*
		 * The sigwait() function shall not return EINTR, but
		 * the syscall does.  A non-ancient libc provides a
		 * wrapper that hides EINTR.  Otherwise, the EINTR
		 * return is used by libthr to handle the required
		 * cancellation point in sigwait().
		 */
		if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
			return (ERESTART);
		td->td_retval[0] = error;
		return (0);
	}

	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
	td->td_retval[0] = error;
	return (0);
}

int
sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	ksiginfo_t ksi;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		timeout = &ts;
	} else
		timeout = NULL;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, timeout);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

int
sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

static void
proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
{
	struct thread *thr;

	FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
		if (thr == td)
			thr->td_si = *si;
		else
			thr->td_si.si_signo = 0;
	}
}
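
/*
 * Common code for sigwait(), sigtimedwait() and sigwaitinfo():
 * temporarily unblock the signals in waitset, sleep until one of them
 * becomes pending or the timeout expires, and return its ksiginfo.
 */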
int
kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
    struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t saved_mask, new_block;
	struct proc *p;
	int error, sig, timevalid = 0;
	sbintime_t sbt, precision, tsbt;
	struct timespec ts;
	bool traced;

	p = td->td_proc;
	error = 0;
	traced = false;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	if (timeout != NULL) {
		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			ts = *timeout;
			if (ts.tv_sec < INT32_MAX / 2) {
				tsbt = tstosbt(ts);
				precision = tsbt;
				precision >>= tc_precexp;
				if (TIMESEL(&sbt, tsbt))
					sbt += tc_tick_sbt;
				sbt += tsbt;
			} else
				precision = sbt = 0;
		}
	} else
		precision = sbt = 0;
	ksiginfo_init(ksi);
	/* Some signals cannot be waited for. */
	SIG_CANTMASK(waitset);
	ps = p->p_sigacts;
	PROC_LOCK(p);
	saved_mask = td->td_sigmask;
	SIGSETNAND(td->td_sigmask, waitset);
	if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
	    !kern_sig_discard_ign) {
		thread_lock(td);
		td->td_flags |= TDF_SIGWAIT;
		thread_unlock(td);
	}
	for (;;) {
		mtx_lock(&ps->ps_mtx);
		sig = cursig(td);
		mtx_unlock(&ps->ps_mtx);
		KASSERT(sig >= 0, ("sig %d", sig));
		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
			    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
				error = 0;
				break;
			}
		}

		if (error != 0)
			break;

		/*
		 * POSIX says this must be checked after looking for pending
		 * signals.
		 */
		if (timeout != NULL && !timevalid) {
			error = EINVAL;
			break;
		}

		if (traced) {
			error = EINTR;
			break;
		}

		error = msleep_sbt(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
		    "sigwait", sbt, precision, C_ABSOLUTE);

		/* These syscalls cannot be restarted. */
		if (error == ERESTART)
			error = EINTR;

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return a spurious
		 * EINTR after the wait is done.  Only do this as a
		 * last resort after rechecking for possible queued
		 * signals and expired timeouts.
		 */
		if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
			traced = true;
	}
	thread_lock(td);
	td->td_flags &= ~TDF_SIGWAIT;
	thread_unlock(td);

	new_block = saved_mask;
	SIGSETNAND(new_block, td->td_sigmask);
	td->td_sigmask = saved_mask;
	/*
	 * Fewer signals can be delivered to us now; reschedule signal
	 * notification.
	 */
	if (p->p_numthreads != 1)
		reschedule_signals(p, new_block, 0);

	if (error == 0) {
		SDT_PROBE2(proc, , , signal__clear, sig, ksi);

		if (ksi->ksi_code == SI_TIMER)
			itimer_accept(p, ksi->ksi_timerid, ksi);

#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG)) {
			sig_t action;

			mtx_lock(&ps->ps_mtx);
			action = ps->ps_sigact[_SIG_IDX(sig)];
			mtx_unlock(&ps->ps_mtx);
			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
		}
#endif
		if (sig == SIGKILL) {
			proc_td_siginfo_capture(td, &ksi->ksi_info);
			sigexit(td, sig);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sigpending_args {
	sigset_t *set;
};
#endif
int
sys_sigpending(struct thread *td, struct sigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	return (copyout(&pending, uap->set, sizeof(sigset_t)));
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
int
osigpending(struct thread *td, struct osigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	SIG2OSIG(pending, td->td_retval[0]);
	return (0);
}
#endif	/* COMPAT_43 */

#if defined(COMPAT_43)
/*
 * Generalized interface signal handler, 4.3-compatible.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigvec_args {
	int	signum;
	struct sigvec *nsv;
	struct sigvec *osv;
};
#endif
/* ARGSUSED */
int
osigvec(struct thread *td, struct osigvec_args *uap)
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		vec.sv_flags &= ~SA_NOCLDWAIT;
		vec.sv_flags ^= SA_RESTART;
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct osigblock_args {
	int	mask;
};
#endif
int
osigblock(struct thread *td, struct osigblock_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct osigsetmask_args {
	int	mask;
};
#endif
int
osigsetmask(struct thread *td, struct osigsetmask_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}
#endif	/* COMPAT_43 */

/*
 * Suspend calling thread until signal, providing mask to be set in the
 * meantime.
 */
#ifndef _SYS_SYSPROTO_H_
struct sigsuspend_args {
	const sigset_t *sigmask;
};
#endif
/* ARGSUSED */
int
sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);
	return (kern_sigsuspend(td, mask));
}
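
/*
 * Common code for sigsuspend() and its compat variants: install the
 * temporary signal mask, sleep until a signal is actually delivered
 * to this thread, and let an AST restore the old mask afterwards.
 */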
int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;
	int has_sig, sig;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the thread to
	 * restore it afterwards.
	 */
	PROC_LOCK(p);
	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
	    SIGPROCMASK_PROC_LOCKED);
	td->td_pflags |= TDP_OLDMASK;
	ast_sched(td, TDA_SIGSUSPEND);

	/*
	 * Process signals now.  Otherwise, we can get a spurious
	 * wakeup due to a signal entering the process queue but being
	 * delivered to another thread, while sigsuspend should return
	 * only on signal delivery to this thread.
	 */
	(p->p_sysent->sv_set_syscall_retval)(td, EINTR);
	for (has_sig = 0; !has_sig;) {
		while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
		    "sigsusp", 0) == 0)
			/* void */;
		thread_suspend_check(0);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			has_sig += postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return a spurious
		 * EINTR.
		 */
		if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
			has_sig += 1;
	}
	PROC_UNLOCK(p);
	td->td_errno = EINTR;
	td->td_pflags |= TDP_NERRNO;
	return (EJUSTRETURN);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 * convention: libc stub passes mask, not pointer, to save a copyin.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigsuspend_args {
	osigset_t mask;
};
#endif
/* ARGSUSED */
int
osigsuspend(struct thread *td, struct osigsuspend_args *uap)
{
	sigset_t mask;

	OSIG2SIG(uap->mask, mask);
	return (kern_sigsuspend(td, mask));
}
#endif	/* COMPAT_43 */

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct sigstack *nss;
	struct sigstack *oss;
};
#endif
/* ARGSUSED */
int
osigstack(struct thread *td, struct osigstack_args *uap)
{
	struct sigstack nss, oss;
	int error = 0;

	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	oss.ss_sp = td->td_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		td->td_sigstk.ss_sp = nss.ss_sp;
		td->td_sigstk.ss_size = 0;
		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		td->td_pflags |= TDP_ALTSTACK;
	}
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
#endif	/* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t	*ss;
	stack_t	*oss;
};
#endif
/* ARGSUSED */
int
sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}

int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
				return (ENOMEM);

			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}
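
/*
 * State shared by the killpg1() iteration callbacks: the sending
 * thread, the signal being sent, and the accumulated outcome of the
 * permission checks across all candidate processes.
 */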
struct killpg1_ctx {
	struct thread *td;
	ksiginfo_t *ksi;
	int sig;
	bool sent;
	bool found;
	int ret;
};

static void
killpg1_sendsig_locked(struct proc *p, struct killpg1_ctx *arg)
{
	int err;

	err = p_cansignal(arg->td, p, arg->sig);
	if (err == 0 && arg->sig != 0)
		pksignal(p, arg->sig, arg->ksi);
	if (err != ESRCH)
		arg->found = true;
	if (err == 0)
		arg->sent = true;
	else if (arg->ret == 0 && err != ESRCH && err != EPERM)
		arg->ret = err;
}

static void
killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
{

	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
	    (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
		return;

	PROC_LOCK(p);
	killpg1_sendsig_locked(p, arg);
	PROC_UNLOCK(p);
}

static void
kill_processes_prison_cb(struct proc *p, void *arg)
{
	struct killpg1_ctx *ctx = arg;

	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
	    (p == ctx->td->td_proc) || p->p_state == PRS_NEW)
		return;

	killpg1_sendsig_locked(p, ctx);
}

/*
 * Common code for kill process group/broadcast kill.
 * td is the calling thread, as usual.
 */
static int
killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
{
	struct proc *p;
	struct pgrp *pgrp;
	struct killpg1_ctx arg;

	arg.td = td;
	arg.ksi = ksi;
	arg.sig = sig;
	arg.sent = false;
	arg.found = false;
	arg.ret = 0;
	if (all) {
		/*
		 * broadcast
		 */
		prison_proc_iterate(td->td_ucred->cr_prison,
		    kill_processes_prison_cb, &arg);
	} else {
again:
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		if (!sx_try_xlock(&pgrp->pg_killsx)) {
			PGRP_UNLOCK(pgrp);
			sx_xlock(&pgrp->pg_killsx);
			sx_xunlock(&pgrp->pg_killsx);
			goto again;
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			killpg1_sendsig(p, false, &arg);
		}
		PGRP_UNLOCK(pgrp);
		sx_xunlock(&pgrp->pg_killsx);
	}
	MPASS(arg.ret != 0 || arg.found || !arg.sent);
	if (arg.ret == 0 && !arg.sent)
		arg.ret = arg.found ? EPERM : ESRCH;
	return (arg.ret);
}

#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int	pid;
	int	signum;
};
#endif
/* ARGSUSED */
int
sys_kill(struct thread *td, struct kill_args *uap)
{

	return (kern_kill(td, uap->pid, uap->signum));
}

int
kern_kill(struct thread *td, pid_t pid, int signum)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	/*
	 * A process in capability mode can send signals only to itself.
	 * The main rationale behind this is that abort(3) is implemented as
	 * kill(getpid(), SIGABRT).
	 */
	if (pid != td->td_proc->p_pid) {
		if (CAP_TRACING(td))
			ktrcapfail(CAPFAIL_SIGNAL, &signum);
		if (IN_CAPABILITY_MODE(td))
			return (ECAPMODE);
	}

	AUDIT_ARG_SIGNUM(signum);
	AUDIT_ARG_PID(pid);
	if ((u_int)signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;

	if (pid > 0) {
		/* kill single process */
		if ((p = pfind_any(pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, signum);
		if (error == 0 && signum)
			pksignal(p, signum, &ksi);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (pid) {
	case -1:	/* broadcast signal */
		return (killpg1(td, signum, 0, 1, &ksi));
	case 0:		/* signal own process group */
		return (killpg1(td, signum, 0, 0, &ksi));
	default:	/* negative explicit process group */
		return (killpg1(td, signum, -pid, 0, &ksi));
	}
	/* NOTREACHED */
}

int
sys_pdkill(struct thread *td, struct pdkill_args *uap)
{
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_FD(uap->fd);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
	if (error)
		return (error);
	AUDIT_ARG_PROCESS(p);
	error = p_cansignal(td, p, uap->signum);
	if (error == 0 && uap->signum)
		kern_psignal(p, uap->signum);
	PROC_UNLOCK(p);
	return (error);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct okillpg_args {
	int	pgid;
	int	signum;
};
#endif
/* ARGSUSED */
int
okillpg(struct thread *td, struct okillpg_args *uap)
{
	ksiginfo_t ksi;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_PID(uap->pgid);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
}
#endif	/* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigqueue_args {
	pid_t	pid;
	int	signum;
	/* union sigval */ void *value;
};
#endif
int
sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
{
	union sigval sv;

	sv.sival_ptr = uap->value;

	return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
}
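
/*
 * Common code for sigqueue(): resolve the target process, or the
 * target thread for the __SIGQUEUE_TID extension, and send the queued
 * signal with the caller-supplied value.
 */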
int
kern_sigqueue(struct thread *td, pid_t pid, int signumf, union sigval *value)
{
	ksiginfo_t ksi;
	struct proc *p;
	struct thread *td2;
	u_int signum;
	int error;

	signum = signumf & ~__SIGQUEUE_TID;
	if (signum > _SIG_MAXSIG)
		return (EINVAL);

	/*
	 * The specification says sigqueue() can only send a signal to
	 * a single process.
	 */
	if (pid <= 0)
		return (EINVAL);

	if ((signumf & __SIGQUEUE_TID) == 0) {
		if ((p = pfind_any(pid)) == NULL)
			return (ESRCH);
		td2 = NULL;
	} else {
		p = td->td_proc;
		td2 = tdfind((lwpid_t)pid, p->p_pid);
		if (td2 == NULL)
			return (ESRCH);
	}

	error = p_cansignal(td, p, signum);
	if (error == 0 && signum != 0) {
		ksiginfo_init(&ksi);
		ksi.ksi_flags = KSI_SIGQ;
		ksi.ksi_signo = signum;
		ksi.ksi_code = SI_QUEUE;
		ksi.ksi_pid = td->td_proc->p_pid;
		ksi.ksi_uid = td->td_ucred->cr_ruid;
		ksi.ksi_value = *value;
		error = tdsendsignal(p, td2, ksi.ksi_signo, &ksi);
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
{
	struct proc *p;

	if (pgrp) {
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    (checkctty == 0 || p->p_flag & P_CONTROLT))
				pksignal(p, sig, ksi);
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Recalculate the signal mask and reset the signal disposition after
 * usermode frame for delivery is formed.  Should be called after
 * mach-specific routine, because sysent->sv_sendsig() needs correct
 * ps_siginfo and signal mask.
 */
static void
postsig_done(int sig, struct thread *td, struct sigacts *ps)
{
	sigset_t mask;

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	td->td_ru.ru_nsignals++;
	mask = ps->ps_catchmask[_SIG_IDX(sig)];
	if (!SIGISMEMBER(ps->ps_signodefer, sig))
		SIGADDSET(mask, sig);
	kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
	    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
	if (SIGISMEMBER(ps->ps_sigreset, sig))
		sigdflt(ps, sig);
}

/*
 * Send a signal caused by a trap to the current thread.  If it will be
 * caught immediately, deliver it with correct code.  Otherwise, post it
 * normally.
 */
void
trapsignal(struct thread *td, ksiginfo_t *ksi)
{
	struct sigacts *ps;
	struct proc *p;
	sigset_t sigmask;
	int sig;

	p = td->td_proc;
	sig = ksi->ksi_signo;
	KASSERT(_SIG_VALID(sig), ("invalid signal"));

	sigfastblock_fetch(td);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sigmask = td->td_sigmask;
	if (td->td_sigblock_val != 0)
		SIGSETOR(sigmask, fastblock_mask);
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(sigmask, sig)) {
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, ksi->ksi_code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
		    ksi, &td->td_sigmask);
		postsig_done(sig, td, ps);
		mtx_unlock(&ps->ps_mtx);
	} else {
		/*
		 * Avoid a possible infinite loop if the thread is
		 * masking the signal or the process is ignoring it.
		 */
		if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
			SIGDELSET(td->td_sigmask, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
			SIGDELSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
			td->td_pflags &= ~TDP_SIGFASTBLOCK;
			td->td_sigblock_val = 0;
		}
		mtx_unlock(&ps->ps_mtx);
		p->p_sig = sig;		/* XXX to verify code */
		tdsendsignal(p, td, sig, ksi);
	}
	PROC_UNLOCK(p);
}
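
/*
 * Select the thread in process p that should receive sig: prefer the
 * current thread if it does not mask the signal, then any non-stopped
 * thread not masking it, falling back to the first thread in the
 * process.
 */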
2149 */ 2150 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) || 2151 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) { 2152 SIGDELSET(td->td_sigmask, sig); 2153 SIGDELSET(ps->ps_sigcatch, sig); 2154 SIGDELSET(ps->ps_sigignore, sig); 2155 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 2156 td->td_pflags &= ~TDP_SIGFASTBLOCK; 2157 td->td_sigblock_val = 0; 2158 } 2159 mtx_unlock(&ps->ps_mtx); 2160 p->p_sig = sig; /* XXX to verify code */ 2161 tdsendsignal(p, td, sig, ksi); 2162 } 2163 PROC_UNLOCK(p); 2164 } 2165 2166 static struct thread * 2167 sigtd(struct proc *p, int sig, bool fast_sigblock) 2168 { 2169 struct thread *td, *signal_td; 2170 2171 PROC_LOCK_ASSERT(p, MA_OWNED); 2172 MPASS(!fast_sigblock || p == curproc); 2173 2174 /* 2175 * Check if current thread can handle the signal without 2176 * switching context to another thread. 2177 */ 2178 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) && 2179 (!fast_sigblock || curthread->td_sigblock_val == 0)) 2180 return (curthread); 2181 2182 /* Find a non-stopped thread that does not mask the signal. */ 2183 signal_td = NULL; 2184 FOREACH_THREAD_IN_PROC(p, td) { 2185 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock || 2186 td != curthread || td->td_sigblock_val == 0) && 2187 (td->td_flags & TDF_BOUNDARY) == 0) { 2188 signal_td = td; 2189 break; 2190 } 2191 } 2192 /* Select random (first) thread if no better match was found. */ 2193 if (signal_td == NULL) 2194 signal_td = FIRST_THREAD_IN_PROC(p); 2195 return (signal_td); 2196 } 2197 2198 /* 2199 * Send the signal to the process. If the signal has an action, the action 2200 * is usually performed by the target process rather than the caller; we add 2201 * the signal to the set of pending signals for the process. 2202 * 2203 * Exceptions: 2204 * o When a stop signal is sent to a sleeping process that takes the 2205 * default action, the process is stopped without awakening it. 2206 * o SIGCONT restarts stopped processes (or puts them back to sleep) 2207 * regardless of the signal action (eg, blocked or ignored). 2208 * 2209 * Other ignored signals are discarded immediately. 2210 * 2211 * NB: This function may be entered from the debugger via the "kill" DDB 2212 * command. There is little that can be done to mitigate the possibly messy 2213 * side effects of this unwise possibility. 2214 */ 2215 void 2216 kern_psignal(struct proc *p, int sig) 2217 { 2218 ksiginfo_t ksi; 2219 2220 ksiginfo_init(&ksi); 2221 ksi.ksi_signo = sig; 2222 ksi.ksi_code = SI_KERNEL; 2223 (void) tdsendsignal(p, NULL, sig, &ksi); 2224 } 2225 2226 int 2227 pksignal(struct proc *p, int sig, ksiginfo_t *ksi) 2228 { 2229 2230 return (tdsendsignal(p, NULL, sig, ksi)); 2231 } 2232 2233 /* Utility function for finding a thread to send signal event to. 
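 * On success the process is returned locked in either case: tdfind()
 * returns with the process lock held, and the non-SIGEV_THREAD_ID
 * path takes the lock explicitly.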
*/ 2234 int 2235 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd) 2236 { 2237 struct thread *td; 2238 2239 if (sigev->sigev_notify == SIGEV_THREAD_ID) { 2240 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid); 2241 if (td == NULL) 2242 return (ESRCH); 2243 *ttd = td; 2244 } else { 2245 *ttd = NULL; 2246 PROC_LOCK(p); 2247 } 2248 return (0); 2249 } 2250 2251 void 2252 tdsignal(struct thread *td, int sig) 2253 { 2254 ksiginfo_t ksi; 2255 2256 ksiginfo_init(&ksi); 2257 ksi.ksi_signo = sig; 2258 ksi.ksi_code = SI_KERNEL; 2259 (void) tdsendsignal(td->td_proc, td, sig, &ksi); 2260 } 2261 2262 void 2263 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi) 2264 { 2265 2266 (void) tdsendsignal(td->td_proc, td, sig, ksi); 2267 } 2268 2269 static void 2270 sig_sleepq_abort(struct thread *td, int intrval) 2271 { 2272 THREAD_LOCK_ASSERT(td, MA_OWNED); 2273 2274 if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0) 2275 thread_unlock(td); 2276 else 2277 sleepq_abort(td, intrval); 2278 } 2279 2280 int 2281 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) 2282 { 2283 sig_t action; 2284 sigqueue_t *sigqueue; 2285 struct sigacts *ps; 2286 int intrval, prop, ret; 2287 2288 MPASS(td == NULL || p == td->td_proc); 2289 PROC_LOCK_ASSERT(p, MA_OWNED); 2290 2291 if (!_SIG_VALID(sig)) 2292 panic("%s(): invalid signal %d", __func__, sig); 2293 2294 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__)); 2295 2296 /* 2297 * IEEE Std 1003.1-2001: return success when killing a zombie. 2298 */ 2299 if (p->p_state == PRS_ZOMBIE) { 2300 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2301 ksiginfo_tryfree(ksi); 2302 return (0); 2303 } 2304 2305 ps = p->p_sigacts; 2306 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig); 2307 prop = sigprop(sig); 2308 2309 if (td == NULL) { 2310 td = sigtd(p, sig, false); 2311 sigqueue = &p->p_sigqueue; 2312 } else 2313 sigqueue = &td->td_sigqueue; 2314 2315 SDT_PROBE3(proc, , , signal__send, td, p, sig); 2316 2317 /* 2318 * If the signal is being ignored, then we forget about it 2319 * immediately, except when the target process executes 2320 * sigwait(). (Note: we don't set SIGCONT in ps_sigignore, 2321 * and if it is set to SIG_IGN, action will be SIG_DFL here.) 
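 * Whether an ignored signal is discarded or queued anyway is
 * controlled by the kern.sig_discard_ign sysctl and the ABI's
 * SV_SIG_DISCIGN flag, both checked just below.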
2322 */ 2323 mtx_lock(&ps->ps_mtx); 2324 if (SIGISMEMBER(ps->ps_sigignore, sig)) { 2325 if (kern_sig_discard_ign && 2326 (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) { 2327 SDT_PROBE3(proc, , , signal__discard, td, p, sig); 2328 2329 mtx_unlock(&ps->ps_mtx); 2330 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2331 ksiginfo_tryfree(ksi); 2332 return (0); 2333 } else { 2334 action = SIG_CATCH; 2335 intrval = 0; 2336 } 2337 } else { 2338 if (SIGISMEMBER(td->td_sigmask, sig)) 2339 action = SIG_HOLD; 2340 else if (SIGISMEMBER(ps->ps_sigcatch, sig)) 2341 action = SIG_CATCH; 2342 else 2343 action = SIG_DFL; 2344 if (SIGISMEMBER(ps->ps_sigintr, sig)) 2345 intrval = EINTR; 2346 else 2347 intrval = ERESTART; 2348 } 2349 mtx_unlock(&ps->ps_mtx); 2350 2351 if (prop & SIGPROP_CONT) 2352 sigqueue_delete_stopmask_proc(p); 2353 else if (prop & SIGPROP_STOP) { 2354 if (pt_attach_transparent && 2355 (p->p_flag & P_TRACED) != 0 && 2356 (p->p_flag2 & P2_PTRACE_FSTP) != 0) { 2357 PROC_SLOCK(p); 2358 sig_handle_first_stop(NULL, p, sig); 2359 PROC_SUNLOCK(p); 2360 return (0); 2361 } 2362 2363 /* 2364 * If sending a tty stop signal to a member of an orphaned 2365 * process group, discard the signal here if the action 2366 * is default; don't stop the process below if sleeping, 2367 * and don't clear any pending SIGCONT. 2368 */ 2369 if ((prop & SIGPROP_TTYSTOP) != 0 && 2370 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 && 2371 action == SIG_DFL) { 2372 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2373 ksiginfo_tryfree(ksi); 2374 return (0); 2375 } 2376 sigqueue_delete_proc(p, SIGCONT); 2377 if (p->p_flag & P_CONTINUED) { 2378 p->p_flag &= ~P_CONTINUED; 2379 PROC_LOCK(p->p_pptr); 2380 sigqueue_take(p->p_ksi); 2381 PROC_UNLOCK(p->p_pptr); 2382 } 2383 } 2384 2385 ret = sigqueue_add(sigqueue, sig, ksi); 2386 if (ret != 0) 2387 return (ret); 2388 signotify(td); 2389 /* 2390 * Defer further processing for signals which are held, 2391 * except that stopped processes must be continued by SIGCONT. 2392 */ 2393 if (action == SIG_HOLD && 2394 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG))) 2395 return (0); 2396 2397 /* 2398 * Some signals have a process-wide effect and a per-thread 2399 * component. Most processing occurs when the process next 2400 * tries to cross the user boundary, however there are some 2401 * times when processing needs to be done immediately, such as 2402 * waking up threads so that they can cross the user boundary. 2403 * We try to do the per-process part here. 2404 */ 2405 if (P_SHOULDSTOP(p)) { 2406 KASSERT(!(p->p_flag & P_WEXIT), 2407 ("signal to stopped but exiting process")); 2408 if (sig == SIGKILL) { 2409 /* 2410 * If traced process is already stopped, 2411 * then no further action is necessary. 2412 */ 2413 if (p->p_flag & P_TRACED) 2414 return (0); 2415 /* 2416 * SIGKILL sets process running. 2417 * It will die elsewhere. 2418 * All threads must be restarted. 2419 */ 2420 p->p_flag &= ~P_STOPPED_SIG; 2421 goto runfast; 2422 } 2423 2424 if (prop & SIGPROP_CONT) { 2425 /* 2426 * If traced process is already stopped, 2427 * then no further action is necessary. 2428 */ 2429 if (p->p_flag & P_TRACED) 2430 return (0); 2431 /* 2432 * If SIGCONT is default (or ignored), we continue the 2433 * process but don't leave the signal in sigqueue as 2434 * it has no further action. If SIGCONT is held, we 2435 * continue the process and leave the signal in 2436 * sigqueue. If the process catches SIGCONT, let it 2437 * handle the signal itself. 
If it isn't waiting on 2438 * an event, it goes back to run state. 2439 * Otherwise, process goes back to sleep state. 2440 */ 2441 p->p_flag &= ~P_STOPPED_SIG; 2442 PROC_SLOCK(p); 2443 if (p->p_numthreads == p->p_suspcount) { 2444 PROC_SUNLOCK(p); 2445 PROC_LOCK(p->p_pptr); 2446 childproc_continued(p); 2447 PROC_UNLOCK(p->p_pptr); 2448 PROC_SLOCK(p); 2449 } 2450 if (action == SIG_DFL) { 2451 thread_unsuspend(p); 2452 PROC_SUNLOCK(p); 2453 sigqueue_delete(sigqueue, sig); 2454 goto out_cont; 2455 } 2456 if (action == SIG_CATCH) { 2457 /* 2458 * The process wants to catch it so it needs 2459 * to run at least one thread, but which one? 2460 */ 2461 PROC_SUNLOCK(p); 2462 goto runfast; 2463 } 2464 /* 2465 * The signal is not ignored or caught. 2466 */ 2467 thread_unsuspend(p); 2468 PROC_SUNLOCK(p); 2469 goto out_cont; 2470 } 2471 2472 if (prop & SIGPROP_STOP) { 2473 /* 2474 * If traced process is already stopped, 2475 * then no further action is necessary. 2476 */ 2477 if (p->p_flag & P_TRACED) 2478 return (0); 2479 /* 2480 * Already stopped, don't need to stop again 2481 * (If we did the shell could get confused). 2482 * Just make sure the signal STOP bit set. 2483 */ 2484 p->p_flag |= P_STOPPED_SIG; 2485 sigqueue_delete(sigqueue, sig); 2486 return (0); 2487 } 2488 2489 /* 2490 * All other kinds of signals: 2491 * If a thread is sleeping interruptibly, simulate a 2492 * wakeup so that when it is continued it will be made 2493 * runnable and can look at the signal. However, don't make 2494 * the PROCESS runnable, leave it stopped. 2495 * It may run a bit until it hits a thread_suspend_check(). 2496 */ 2497 PROC_SLOCK(p); 2498 thread_lock(td); 2499 if (TD_CAN_ABORT(td)) 2500 sig_sleepq_abort(td, intrval); 2501 else 2502 thread_unlock(td); 2503 PROC_SUNLOCK(p); 2504 return (0); 2505 /* 2506 * Mutexes are short lived. Threads waiting on them will 2507 * hit thread_suspend_check() soon. 2508 */ 2509 } else if (p->p_state == PRS_NORMAL) { 2510 if (p->p_flag & P_TRACED || action == SIG_CATCH) { 2511 tdsigwakeup(td, sig, action, intrval); 2512 return (0); 2513 } 2514 2515 MPASS(action == SIG_DFL); 2516 2517 if (prop & SIGPROP_STOP) { 2518 if (p->p_flag & (P_PPWAIT|P_WEXIT)) 2519 return (0); 2520 p->p_flag |= P_STOPPED_SIG; 2521 p->p_xsig = sig; 2522 PROC_SLOCK(p); 2523 sig_suspend_threads(td, p); 2524 if (p->p_numthreads == p->p_suspcount) { 2525 /* 2526 * only thread sending signal to another 2527 * process can reach here, if thread is sending 2528 * signal to its process, because thread does 2529 * not suspend itself here, p_numthreads 2530 * should never be equal to p_suspcount. 2531 */ 2532 thread_stopped(p); 2533 PROC_SUNLOCK(p); 2534 sigqueue_delete_proc(p, p->p_xsig); 2535 } else 2536 PROC_SUNLOCK(p); 2537 return (0); 2538 } 2539 } else { 2540 /* Not in "NORMAL" state. discard the signal. */ 2541 sigqueue_delete(sigqueue, sig); 2542 return (0); 2543 } 2544 2545 /* 2546 * The process is not stopped so we need to apply the signal to all the 2547 * running threads. 2548 */ 2549 runfast: 2550 tdsigwakeup(td, sig, action, intrval); 2551 PROC_SLOCK(p); 2552 thread_unsuspend(p); 2553 PROC_SUNLOCK(p); 2554 out_cont: 2555 itimer_proc_continue(p); 2556 kqtimer_proc_continue(p); 2557 2558 return (0); 2559 } 2560 2561 /* 2562 * The force of a signal has been directed against a single 2563 * thread. We need to see what we can do about knocking it 2564 * out of any sleep it may be in etc. 
2565 */ 2566 static void 2567 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval) 2568 { 2569 struct proc *p = td->td_proc; 2570 int prop; 2571 2572 PROC_LOCK_ASSERT(p, MA_OWNED); 2573 prop = sigprop(sig); 2574 2575 PROC_SLOCK(p); 2576 thread_lock(td); 2577 /* 2578 * Bring the priority of a thread up if we want it to get 2579 * killed in this lifetime. Be careful to avoid bumping the 2580 * priority of the idle thread, since we still allow kernel 2581 * processes to be signalled. 2582 */ 2583 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 && 2584 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td)) 2585 sched_prio(td, PUSER); 2586 if (TD_ON_SLEEPQ(td)) { 2587 /* 2588 * If the thread is sleeping uninterruptibly, 2589 * we can't interrupt the sleep... the signal will 2590 * be noticed when the process returns through 2591 * trap() or syscall(). 2592 */ 2593 if ((td->td_flags & TDF_SINTR) == 0) 2594 goto out; 2595 /* 2596 * If SIGCONT is default (or ignored) and the process 2597 * is asleep, we are finished; the process should not 2598 * be awakened. 2599 */ 2600 if ((prop & SIGPROP_CONT) && action == SIG_DFL) { 2601 thread_unlock(td); 2602 PROC_SUNLOCK(p); 2603 sigqueue_delete(&p->p_sigqueue, sig); 2604 /* 2605 * It may be on either list in this state. 2606 * Remove from both for now. 2607 */ 2608 sigqueue_delete(&td->td_sigqueue, sig); 2609 return; 2610 } 2611 2612 /* 2613 * Don't awaken a sleeping thread for SIGSTOP if the 2614 * STOP signal is deferred. 2615 */ 2616 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY | 2617 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) 2618 goto out; 2619 2620 /* 2621 * Give low priority threads a better chance to run. 2622 */ 2623 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td)) 2624 sched_prio(td, PUSER); 2625 2626 sig_sleepq_abort(td, intrval); 2627 PROC_SUNLOCK(p); 2628 return; 2629 } 2630 2631 /* 2632 * Other states do nothing with the signal immediately, 2633 * other than kicking ourselves if we are running. 2634 * It will either never be noticed, or noticed very soon.
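 * (On SMP, a remote running thread is nudged with an IPI via
 * forward_signal() below so that it notices the signal promptly.)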
2635 */ 2636 #ifdef SMP 2637 if (TD_IS_RUNNING(td) && td != curthread) 2638 forward_signal(td); 2639 #endif 2640 2641 out: 2642 PROC_SUNLOCK(p); 2643 thread_unlock(td); 2644 } 2645 2646 static void 2647 ptrace_coredumpreq(struct thread *td, struct proc *p, 2648 struct thr_coredump_req *tcq) 2649 { 2650 struct coredump_vnode_ctx wctx; 2651 struct coredump_writer cdw; 2652 void *rl_cookie; 2653 2654 if (p->p_sysent->sv_coredump == NULL) { 2655 tcq->tc_error = ENOSYS; 2656 return; 2657 } 2658 2659 memset(&wctx, 0, sizeof(wctx)); 2660 wctx.vp = tcq->tc_vp; 2661 wctx.fcred = NOCRED; 2662 2663 memset(&cdw, 0, sizeof(cdw)); 2664 cdw.ctx = &wctx; 2665 cdw.write_fn = core_vn_write; 2666 cdw.extend_fn = core_vn_extend; 2667 2668 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX); 2669 tcq->tc_error = p->p_sysent->sv_coredump(td, &cdw, 2670 tcq->tc_limit, tcq->tc_flags); 2671 vn_rangelock_unlock(tcq->tc_vp, rl_cookie); 2672 } 2673 2674 static void 2675 ptrace_syscallreq(struct thread *td, struct proc *p, 2676 struct thr_syscall_req *tsr) 2677 { 2678 struct sysentvec *sv; 2679 struct sysent *se; 2680 register_t rv_saved[2]; 2681 int error, nerror; 2682 int sc; 2683 bool audited, sy_thr_static; 2684 2685 sv = p->p_sysent; 2686 if (sv->sv_table == NULL || sv->sv_size < tsr->ts_sa.code) { 2687 tsr->ts_ret.sr_error = ENOSYS; 2688 return; 2689 } 2690 2691 sc = tsr->ts_sa.code; 2692 if (sc == SYS_syscall || sc == SYS___syscall) { 2693 sc = tsr->ts_sa.args[0]; 2694 memmove(&tsr->ts_sa.args[0], &tsr->ts_sa.args[1], 2695 sizeof(register_t) * (tsr->ts_nargs - 1)); 2696 } 2697 2698 tsr->ts_sa.callp = se = &sv->sv_table[sc]; 2699 2700 VM_CNT_INC(v_syscall); 2701 td->td_pticks = 0; 2702 if (__predict_false(td->td_cowgen != atomic_load_int( 2703 &td->td_proc->p_cowgen))) 2704 thread_cow_update(td); 2705 2706 td->td_sa = tsr->ts_sa; 2707 2708 #ifdef CAPABILITY_MODE 2709 if ((se->sy_flags & SYF_CAPENABLED) == 0) { 2710 if (CAP_TRACING(td)) 2711 ktrcapfail(CAPFAIL_SYSCALL, NULL); 2712 if (IN_CAPABILITY_MODE(td)) { 2713 tsr->ts_ret.sr_error = ECAPMODE; 2714 return; 2715 } 2716 } 2717 #endif 2718 2719 sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0; 2720 audited = AUDIT_SYSCALL_ENTER(sc, td) != 0; 2721 2722 if (!sy_thr_static) { 2723 error = syscall_thread_enter(td, &se); 2724 sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0; 2725 if (error != 0) { 2726 tsr->ts_ret.sr_error = error; 2727 return; 2728 } 2729 } 2730 2731 rv_saved[0] = td->td_retval[0]; 2732 rv_saved[1] = td->td_retval[1]; 2733 nerror = td->td_errno; 2734 td->td_retval[0] = 0; 2735 td->td_retval[1] = 0; 2736 2737 #ifdef KDTRACE_HOOKS 2738 if (se->sy_entry != 0) 2739 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_ENTRY, 0); 2740 #endif 2741 tsr->ts_ret.sr_error = se->sy_call(td, tsr->ts_sa.args); 2742 #ifdef KDTRACE_HOOKS 2743 if (se->sy_return != 0) 2744 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_RETURN, 2745 tsr->ts_ret.sr_error != 0 ?
-1 : td->td_retval[0]); 2746 #endif 2747 2748 tsr->ts_ret.sr_retval[0] = td->td_retval[0]; 2749 tsr->ts_ret.sr_retval[1] = td->td_retval[1]; 2750 td->td_retval[0] = rv_saved[0]; 2751 td->td_retval[1] = rv_saved[1]; 2752 td->td_errno = nerror; 2753 2754 if (audited) 2755 AUDIT_SYSCALL_EXIT(error, td); 2756 if (!sy_thr_static) 2757 syscall_thread_exit(td, se); 2758 } 2759 2760 static void 2761 ptrace_remotereq(struct thread *td, int flag) 2762 { 2763 struct proc *p; 2764 2765 MPASS(td == curthread); 2766 p = td->td_proc; 2767 PROC_LOCK_ASSERT(p, MA_OWNED); 2768 if ((td->td_dbgflags & flag) == 0) 2769 return; 2770 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped")); 2771 KASSERT(td->td_remotereq != NULL, ("td_remotereq is NULL")); 2772 2773 PROC_UNLOCK(p); 2774 switch (flag) { 2775 case TDB_COREDUMPREQ: 2776 ptrace_coredumpreq(td, p, td->td_remotereq); 2777 break; 2778 case TDB_SCREMOTEREQ: 2779 ptrace_syscallreq(td, p, td->td_remotereq); 2780 break; 2781 default: 2782 __unreachable(); 2783 } 2784 PROC_LOCK(p); 2785 2786 MPASS((td->td_dbgflags & flag) != 0); 2787 td->td_dbgflags &= ~flag; 2788 td->td_remotereq = NULL; 2789 wakeup(p); 2790 } 2791 2792 /* 2793 * Suspend threads of the process p, either by directly setting the 2794 * inhibitor for the thread sleeping interruptibly, or by making the 2795 * thread suspend at the userspace boundary by scheduling a suspend AST. 2796 * 2797 * Returns true if some threads were suspended directly from the 2798 * sleeping state, and false if all threads are forced to process AST. 2799 */ 2800 static bool 2801 sig_suspend_threads(struct thread *td, struct proc *p) 2802 { 2803 struct thread *td2; 2804 bool res; 2805 2806 PROC_LOCK_ASSERT(p, MA_OWNED); 2807 PROC_SLOCK_ASSERT(p, MA_OWNED); 2808 2809 res = false; 2810 FOREACH_THREAD_IN_PROC(p, td2) { 2811 thread_lock(td2); 2812 ast_sched_locked(td2, TDA_SUSPEND); 2813 if (TD_IS_SLEEPING(td2) && (td2->td_flags & TDF_SINTR) != 0) { 2814 if (td2->td_flags & TDF_SBDRY) { 2815 /* 2816 * Once a thread is asleep with 2817 * TDF_SBDRY and without TDF_SERESTART 2818 * or TDF_SEINTR set, it should never 2819 * become suspended due to this check. 2820 */ 2821 KASSERT(!TD_IS_SUSPENDED(td2), 2822 ("thread with deferred stops suspended")); 2823 if (TD_SBDRY_INTR(td2)) { 2824 sleepq_abort(td2, TD_SBDRY_ERRNO(td2)); 2825 continue; 2826 } 2827 } else if (!TD_IS_SUSPENDED(td2)) { 2828 thread_suspend_one(td2); 2829 res = true; 2830 } 2831 } else if (!TD_IS_SUSPENDED(td2)) { 2832 #ifdef SMP 2833 if (TD_IS_RUNNING(td2) && td2 != td) 2834 forward_signal(td2); 2835 #endif 2836 } 2837 thread_unlock(td2); 2838 } 2839 return (res); 2840 } 2841 2842 static void 2843 sig_handle_first_stop(struct thread *td, struct proc *p, int sig) 2844 { 2845 if (td != NULL && (td->td_dbgflags & TDB_FSTP) == 0 && 2846 ((p->p_flag2 & P2_PTRACE_FSTP) != 0 || p->p_xthread != NULL)) 2847 return; 2848 2849 p->p_xsig = sig; 2850 p->p_xthread = td; 2851 2852 /* 2853 * If we are on sleepqueue already, let sleepqueue 2854 * code decide if it needs to go sleep after attach. 2855 */ 2856 if (td != NULL && td->td_wchan == NULL) 2857 td->td_dbgflags &= ~TDB_FSTP; 2858 2859 p->p_flag2 &= ~P2_PTRACE_FSTP; 2860 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE; 2861 if (sig_suspend_threads(td, p) && td == NULL) 2862 thread_stopped(p); 2863 } 2864 2865 /* 2866 * Stop the process for an event deemed interesting to the debugger. If si is 2867 * non-NULL, this is a signal exchange; the new signal requested by the 2868 * debugger will be returned for handling. 
If si is NULL, this is some other 2869 * type of interesting event. The debugger may request a signal be delivered in 2870 * that case as well, however it will be deferred until it can be handled. 2871 */ 2872 int 2873 ptracestop(struct thread *td, int sig, ksiginfo_t *si) 2874 { 2875 struct proc *p = td->td_proc; 2876 struct thread *td2; 2877 ksiginfo_t ksi; 2878 2879 PROC_LOCK_ASSERT(p, MA_OWNED); 2880 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process")); 2881 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 2882 &p->p_mtx.lock_object, "Stopping for traced signal"); 2883 2884 td->td_xsig = sig; 2885 2886 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) { 2887 td->td_dbgflags |= TDB_XSIG; 2888 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d", 2889 td->td_tid, p->p_pid, td->td_dbgflags, sig); 2890 PROC_SLOCK(p); 2891 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) { 2892 if (P_KILLED(p)) { 2893 /* 2894 * Ensure that, if we've been PT_KILLed, the 2895 * exit status reflects that. Another thread 2896 * may also be in ptracestop(), having just 2897 * received the SIGKILL, but this thread was 2898 * unsuspended first. 2899 */ 2900 td->td_dbgflags &= ~TDB_XSIG; 2901 td->td_xsig = SIGKILL; 2902 p->p_ptevents = 0; 2903 break; 2904 } 2905 if (p->p_flag & P_SINGLE_EXIT && 2906 !(td->td_dbgflags & TDB_EXIT)) { 2907 /* 2908 * Ignore ptrace stops except for thread exit 2909 * events when the process exits. 2910 */ 2911 td->td_dbgflags &= ~TDB_XSIG; 2912 PROC_SUNLOCK(p); 2913 return (0); 2914 } 2915 2916 /* 2917 * Make wait(2) work. Ensure that right after the 2918 * attach, the thread which was decided to become the 2919 * leader of attach gets reported to the waiter. 2920 * Otherwise, just avoid overwriting another thread's 2921 * assignment to p_xthread. If another thread has 2922 * already set p_xthread, the current thread will get 2923 * a chance to report itself upon the next iteration. 2924 */ 2925 sig_handle_first_stop(td, p, sig); 2926 2927 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) { 2928 td->td_dbgflags &= ~TDB_STOPATFORK; 2929 } 2930 stopme: 2931 td->td_dbgflags |= TDB_SSWITCH; 2932 thread_suspend_switch(td, p); 2933 td->td_dbgflags &= ~TDB_SSWITCH; 2934 if ((td->td_dbgflags & (TDB_COREDUMPREQ | 2935 TDB_SCREMOTEREQ)) != 0) { 2936 MPASS((td->td_dbgflags & (TDB_COREDUMPREQ | 2937 TDB_SCREMOTEREQ)) != 2938 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ)); 2939 PROC_SUNLOCK(p); 2940 ptrace_remotereq(td, td->td_dbgflags & 2941 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ)); 2942 PROC_SLOCK(p); 2943 goto stopme; 2944 } 2945 if (p->p_xthread == td) 2946 p->p_xthread = NULL; 2947 if (!(p->p_flag & P_TRACED)) 2948 break; 2949 if (td->td_dbgflags & TDB_SUSPEND) { 2950 if (p->p_flag & P_SINGLE_EXIT) 2951 break; 2952 goto stopme; 2953 } 2954 } 2955 PROC_SUNLOCK(p); 2956 } 2957 2958 if (si != NULL && sig == td->td_xsig) { 2959 /* Parent wants us to take the original signal unchanged. */ 2960 si->ksi_flags |= KSI_HEAD; 2961 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0) 2962 si->ksi_signo = 0; 2963 } else if (td->td_xsig != 0) { 2964 /* 2965 * If parent wants us to take a new signal, then it will leave 2966 * it in td->td_xsig; otherwise we just look for signals again. 
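 *
 * For example (illustrative), a debugger that intercepted SIGINT but
 * wants the debuggee to die of SIGTERM instead can resume it with
 *
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, SIGTERM);
 *
 * The replacement signal shows up here in td->td_xsig and is re-sent
 * through tdsendsignal() below.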
2967 */ 2968 ksiginfo_init(&ksi); 2969 ksi.ksi_signo = td->td_xsig; 2970 ksi.ksi_flags |= KSI_PTRACE; 2971 td2 = sigtd(p, td->td_xsig, false); 2972 tdsendsignal(p, td2, td->td_xsig, &ksi); 2973 if (td != td2) 2974 return (0); 2975 } 2976 2977 return (td->td_xsig); 2978 } 2979 2980 static void 2981 reschedule_signals(struct proc *p, sigset_t block, int flags) 2982 { 2983 struct sigacts *ps; 2984 struct thread *td; 2985 int sig; 2986 bool fastblk, pslocked; 2987 2988 PROC_LOCK_ASSERT(p, MA_OWNED); 2989 ps = p->p_sigacts; 2990 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0; 2991 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED); 2992 if (SIGISEMPTY(p->p_siglist)) 2993 return; 2994 SIGSETAND(block, p->p_siglist); 2995 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0; 2996 SIG_FOREACH(sig, &block) { 2997 td = sigtd(p, sig, fastblk); 2998 2999 /* 3000 * If sigtd() selected us despite sigfastblock is 3001 * blocking, do not activate AST or wake us, to avoid 3002 * loop in AST handler. 3003 */ 3004 if (fastblk && td == curthread) 3005 continue; 3006 3007 signotify(td); 3008 if (!pslocked) 3009 mtx_lock(&ps->ps_mtx); 3010 if (p->p_flag & P_TRACED || 3011 (SIGISMEMBER(ps->ps_sigcatch, sig) && 3012 !SIGISMEMBER(td->td_sigmask, sig))) { 3013 tdsigwakeup(td, sig, SIG_CATCH, 3014 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : 3015 ERESTART)); 3016 } 3017 if (!pslocked) 3018 mtx_unlock(&ps->ps_mtx); 3019 } 3020 } 3021 3022 void 3023 tdsigcleanup(struct thread *td) 3024 { 3025 struct proc *p; 3026 sigset_t unblocked; 3027 3028 p = td->td_proc; 3029 PROC_LOCK_ASSERT(p, MA_OWNED); 3030 3031 sigqueue_flush(&td->td_sigqueue); 3032 if (p->p_numthreads == 1) 3033 return; 3034 3035 /* 3036 * Since we cannot handle signals, notify signal post code 3037 * about this by filling the sigmask. 3038 * 3039 * Also, if needed, wake up thread(s) that do not block the 3040 * same signals as the exiting thread, since the thread might 3041 * have been selected for delivery and woken up. 3042 */ 3043 SIGFILLSET(unblocked); 3044 SIGSETNAND(unblocked, td->td_sigmask); 3045 SIGFILLSET(td->td_sigmask); 3046 reschedule_signals(p, unblocked, 0); 3047 3048 } 3049 3050 static int 3051 sigdeferstop_curr_flags(int cflags) 3052 { 3053 3054 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 || 3055 (cflags & TDF_SBDRY) != 0); 3056 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)); 3057 } 3058 3059 /* 3060 * Defer the delivery of SIGSTOP for the current thread, according to 3061 * the requested mode. Returns previous flags, which must be restored 3062 * by sigallowstop(). 3063 * 3064 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and 3065 * cleared by the current thread, which allow the lock-less read-only 3066 * accesses below. 
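 *
 * A typical in-kernel usage sketch (illustrative;
 * some_sleepable_operation() is a stand-in for any code that must
 * not be suspended midway):
 *
 *	int stop_state;
 *
 *	stop_state = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = some_sleepable_operation();
 *	sigallowstop(stop_state);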
3067 */ 3068 int 3069 sigdeferstop_impl(int mode) 3070 { 3071 struct thread *td; 3072 int cflags, nflags; 3073 3074 td = curthread; 3075 cflags = sigdeferstop_curr_flags(td->td_flags); 3076 switch (mode) { 3077 case SIGDEFERSTOP_NOP: 3078 nflags = cflags; 3079 break; 3080 case SIGDEFERSTOP_OFF: 3081 nflags = 0; 3082 break; 3083 case SIGDEFERSTOP_SILENT: 3084 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART); 3085 break; 3086 case SIGDEFERSTOP_EINTR: 3087 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART; 3088 break; 3089 case SIGDEFERSTOP_ERESTART: 3090 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR; 3091 break; 3092 default: 3093 panic("sigdeferstop: invalid mode %x", mode); 3094 break; 3095 } 3096 if (cflags == nflags) 3097 return (SIGDEFERSTOP_VAL_NCHG); 3098 thread_lock(td); 3099 td->td_flags = (td->td_flags & ~cflags) | nflags; 3100 thread_unlock(td); 3101 return (cflags); 3102 } 3103 3104 /* 3105 * Restores the STOP handling mode, typically permitting the delivery 3106 * of SIGSTOP for the current thread. This does not immediately 3107 * suspend if a stop was posted. Instead, the thread will suspend 3108 * either via ast() or a subsequent interruptible sleep. 3109 */ 3110 void 3111 sigallowstop_impl(int prev) 3112 { 3113 struct thread *td; 3114 int cflags; 3115 3116 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop")); 3117 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0, 3118 ("sigallowstop: incorrect previous mode %x", prev)); 3119 td = curthread; 3120 cflags = sigdeferstop_curr_flags(td->td_flags); 3121 if (cflags != prev) { 3122 thread_lock(td); 3123 td->td_flags = (td->td_flags & ~cflags) | prev; 3124 thread_unlock(td); 3125 } 3126 } 3127 3128 enum sigstatus { 3129 SIGSTATUS_HANDLE, 3130 SIGSTATUS_HANDLED, 3131 SIGSTATUS_IGNORE, 3132 SIGSTATUS_SBDRY_STOP, 3133 }; 3134 3135 /* 3136 * The thread has signal "sig" pending. Figure out what to do with it: 3137 * 3138 * _HANDLE -> the caller should handle the signal 3139 * _HANDLED -> handled internally, reload pending signal set 3140 * _IGNORE -> ignored, remove from the set of pending signals and try the 3141 * next pending signal 3142 * _SBDRY_STOP -> the signal should stop the thread but this is not 3143 * permitted in the current context 3144 */ 3145 static enum sigstatus 3146 sigprocess(struct thread *td, int sig) 3147 { 3148 struct proc *p; 3149 struct sigacts *ps; 3150 struct sigqueue *queue; 3151 ksiginfo_t ksi; 3152 int prop; 3153 3154 KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig)); 3155 3156 p = td->td_proc; 3157 ps = p->p_sigacts; 3158 mtx_assert(&ps->ps_mtx, MA_OWNED); 3159 PROC_LOCK_ASSERT(p, MA_OWNED); 3160 3161 /* 3162 * We should allow pending but ignored signals below 3163 * if there is sigwait() active, or P_TRACED was 3164 * on when they were posted. 3165 */ 3166 if (SIGISMEMBER(ps->ps_sigignore, sig) && 3167 (p->p_flag & P_TRACED) == 0 && 3168 (td->td_flags & TDF_SIGWAIT) == 0) { 3169 return (SIGSTATUS_IGNORE); 3170 } 3171 3172 /* 3173 * If the process is going to single-thread mode to prepare 3174 * for exit, there is no sense in delivering any signal 3175 * to usermode. Another important consequence is that 3176 * msleep(..., PCATCH, ...) now is only interruptible by a 3177 * suspend request. 3178 */ 3179 if ((p->p_flag2 & P2_WEXIT) != 0) 3180 return (SIGSTATUS_IGNORE); 3181 3182 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) { 3183 /* 3184 * If traced, always stop. 3185 * Remove old signal from queue before the stop. 
3186 * XXX shrug off debugger, it causes siginfo to 3187 * be thrown away. 3188 */ 3189 queue = &td->td_sigqueue; 3190 ksiginfo_init(&ksi); 3191 if (sigqueue_get(queue, sig, &ksi) == 0) { 3192 queue = &p->p_sigqueue; 3193 sigqueue_get(queue, sig, &ksi); 3194 } 3195 td->td_si = ksi.ksi_info; 3196 3197 mtx_unlock(&ps->ps_mtx); 3198 sig = ptracestop(td, sig, &ksi); 3199 mtx_lock(&ps->ps_mtx); 3200 3201 td->td_si.si_signo = 0; 3202 3203 /* 3204 * Keep looking if the debugger discarded or 3205 * replaced the signal. 3206 */ 3207 if (sig == 0) 3208 return (SIGSTATUS_HANDLED); 3209 3210 /* 3211 * If the signal became masked, re-queue it. 3212 */ 3213 if (SIGISMEMBER(td->td_sigmask, sig)) { 3214 ksi.ksi_flags |= KSI_HEAD; 3215 sigqueue_add(&p->p_sigqueue, sig, &ksi); 3216 return (SIGSTATUS_HANDLED); 3217 } 3218 3219 /* 3220 * If the traced bit got turned off, requeue the signal and 3221 * reload the set of pending signals. This ensures that p_sig* 3222 * and p_sigact are consistent. 3223 */ 3224 if ((p->p_flag & P_TRACED) == 0) { 3225 if ((ksi.ksi_flags & KSI_PTRACE) == 0) { 3226 ksi.ksi_flags |= KSI_HEAD; 3227 sigqueue_add(queue, sig, &ksi); 3228 } 3229 return (SIGSTATUS_HANDLED); 3230 } 3231 } 3232 3233 /* 3234 * Decide whether the signal should be returned. 3235 * Return the signal's number, or fall through 3236 * to clear it from the pending mask. 3237 */ 3238 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { 3239 case (intptr_t)SIG_DFL: 3240 /* 3241 * Don't take default actions on system processes. 3242 */ 3243 if (p->p_pid <= 1) { 3244 #ifdef DIAGNOSTIC 3245 /* 3246 * Are you sure you want to ignore SIGSEGV 3247 * in init? XXX 3248 */ 3249 printf("Process (pid %lu) got signal %d\n", 3250 (u_long)p->p_pid, sig); 3251 #endif 3252 return (SIGSTATUS_IGNORE); 3253 } 3254 3255 /* 3256 * If there is a pending stop signal to process with 3257 * default action, stop here, then clear the signal. 3258 * Traced or exiting processes should ignore stops. 3259 * Additionally, a member of an orphaned process group 3260 * should ignore tty stops. 3261 */ 3262 prop = sigprop(sig); 3263 if (prop & SIGPROP_STOP) { 3264 mtx_unlock(&ps->ps_mtx); 3265 if ((p->p_flag & (P_TRACED | P_WEXIT | 3266 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp-> 3267 pg_flags & PGRP_ORPHANED) != 0 && 3268 (prop & SIGPROP_TTYSTOP) != 0)) { 3269 mtx_lock(&ps->ps_mtx); 3270 return (SIGSTATUS_IGNORE); 3271 } 3272 if (TD_SBDRY_INTR(td)) { 3273 KASSERT((td->td_flags & TDF_SBDRY) != 0, 3274 ("lost TDF_SBDRY")); 3275 mtx_lock(&ps->ps_mtx); 3276 return (SIGSTATUS_SBDRY_STOP); 3277 } 3278 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 3279 &p->p_mtx.lock_object, "Catching SIGSTOP"); 3280 sigqueue_delete(&td->td_sigqueue, sig); 3281 sigqueue_delete(&p->p_sigqueue, sig); 3282 p->p_flag |= P_STOPPED_SIG; 3283 p->p_xsig = sig; 3284 PROC_SLOCK(p); 3285 sig_suspend_threads(td, p); 3286 thread_suspend_switch(td, p); 3287 PROC_SUNLOCK(p); 3288 mtx_lock(&ps->ps_mtx); 3289 return (SIGSTATUS_HANDLED); 3290 } else if ((prop & SIGPROP_IGNORE) != 0 && 3291 (td->td_flags & TDF_SIGWAIT) == 0) { 3292 /* 3293 * Default action is to ignore; drop it if 3294 * not in kern_sigtimedwait(). 3295 */ 3296 return (SIGSTATUS_IGNORE); 3297 } else { 3298 return (SIGSTATUS_HANDLE); 3299 } 3300 3301 case (intptr_t)SIG_IGN: 3302 if ((td->td_flags & TDF_SIGWAIT) == 0) 3303 return (SIGSTATUS_IGNORE); 3304 else 3305 return (SIGSTATUS_HANDLE); 3306 3307 default: 3308 /* 3309 * This signal has an action, let postsig() process it. 
3310 */ 3311 return (SIGSTATUS_HANDLE); 3312 } 3313 } 3314 3315 /* 3316 * If the current process has received a signal (should be caught or cause 3317 * termination, should interrupt current syscall), return the signal number. 3318 * Stop signals with default action are processed immediately, then cleared; 3319 * they aren't returned. This is checked after each entry to the system for 3320 * a syscall or trap (though this can usually be done without calling 3321 * issignal by checking the pending signal masks in cursig.) The normal call 3322 * sequence is 3323 * 3324 * while (sig = cursig(curthread)) 3325 * postsig(sig); 3326 */ 3327 static int 3328 issignal(struct thread *td) 3329 { 3330 struct proc *p; 3331 sigset_t sigpending; 3332 int sig; 3333 3334 p = td->td_proc; 3335 PROC_LOCK_ASSERT(p, MA_OWNED); 3336 3337 for (;;) { 3338 sigpending = td->td_sigqueue.sq_signals; 3339 SIGSETOR(sigpending, p->p_sigqueue.sq_signals); 3340 SIGSETNAND(sigpending, td->td_sigmask); 3341 3342 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags & 3343 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) 3344 SIG_STOPSIGMASK(sigpending); 3345 if (SIGISEMPTY(sigpending)) /* no signal to send */ 3346 return (0); 3347 3348 /* 3349 * Do fast sigblock if requested by usermode. Since 3350 * we do know that there was a signal pending at this 3351 * point, set the FAST_SIGBLOCK_PEND as indicator for 3352 * usermode to perform a dummy call to 3353 * FAST_SIGBLOCK_UNBLOCK, which causes immediate 3354 * delivery of postponed pending signal. 3355 */ 3356 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) { 3357 if (td->td_sigblock_val != 0) 3358 SIGSETNAND(sigpending, fastblock_mask); 3359 if (SIGISEMPTY(sigpending)) { 3360 td->td_pflags |= TDP_SIGFASTPENDING; 3361 return (0); 3362 } 3363 } 3364 3365 if (!pt_attach_transparent && 3366 (p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED && 3367 (p->p_flag2 & P2_PTRACE_FSTP) != 0 && 3368 SIGISMEMBER(sigpending, SIGSTOP)) { 3369 /* 3370 * If debugger just attached, always consume 3371 * SIGSTOP from ptrace(PT_ATTACH) first, to 3372 * execute the debugger attach ritual in 3373 * order. 3374 */ 3375 td->td_dbgflags |= TDB_FSTP; 3376 SIGEMPTYSET(sigpending); 3377 SIGADDSET(sigpending, SIGSTOP); 3378 } 3379 3380 SIG_FOREACH(sig, &sigpending) { 3381 switch (sigprocess(td, sig)) { 3382 case SIGSTATUS_HANDLE: 3383 return (sig); 3384 case SIGSTATUS_HANDLED: 3385 goto next; 3386 case SIGSTATUS_IGNORE: 3387 sigqueue_delete(&td->td_sigqueue, sig); 3388 sigqueue_delete(&p->p_sigqueue, sig); 3389 break; 3390 case SIGSTATUS_SBDRY_STOP: 3391 return (-1); 3392 } 3393 } 3394 next:; 3395 } 3396 } 3397 3398 void 3399 thread_stopped(struct proc *p) 3400 { 3401 int n; 3402 3403 PROC_LOCK_ASSERT(p, MA_OWNED); 3404 PROC_SLOCK_ASSERT(p, MA_OWNED); 3405 n = p->p_suspcount; 3406 if (p == curproc) 3407 n++; 3408 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) { 3409 PROC_SUNLOCK(p); 3410 p->p_flag &= ~P_WAITED; 3411 PROC_LOCK(p->p_pptr); 3412 childproc_stopped(p, (p->p_flag & P_TRACED) ? 3413 CLD_TRAPPED : CLD_STOPPED); 3414 PROC_UNLOCK(p->p_pptr); 3415 PROC_SLOCK(p); 3416 } 3417 } 3418 3419 /* 3420 * Take the action for the specified signal 3421 * from the current set of pending signals. 
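 * Returns 1 if an action was taken, or 0 if the signal was no longer
 * pending by the time the queues were searched.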
3422 */ 3423 int 3424 postsig(int sig) 3425 { 3426 struct thread *td; 3427 struct proc *p; 3428 struct sigacts *ps; 3429 sig_t action; 3430 ksiginfo_t ksi; 3431 sigset_t returnmask; 3432 3433 KASSERT(sig != 0, ("postsig")); 3434 3435 td = curthread; 3436 p = td->td_proc; 3437 PROC_LOCK_ASSERT(p, MA_OWNED); 3438 ps = p->p_sigacts; 3439 mtx_assert(&ps->ps_mtx, MA_OWNED); 3440 ksiginfo_init(&ksi); 3441 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 && 3442 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0) 3443 return (0); 3444 ksi.ksi_signo = sig; 3445 if (ksi.ksi_code == SI_TIMER) 3446 itimer_accept(p, ksi.ksi_timerid, &ksi); 3447 action = ps->ps_sigact[_SIG_IDX(sig)]; 3448 #ifdef KTRACE 3449 if (KTRPOINT(td, KTR_PSIG)) 3450 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ? 3451 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code); 3452 #endif 3453 3454 if (action == SIG_DFL) { 3455 /* 3456 * Default action, where the default is to kill 3457 * the process. (Other cases were ignored above.) 3458 */ 3459 mtx_unlock(&ps->ps_mtx); 3460 proc_td_siginfo_capture(td, &ksi.ksi_info); 3461 sigexit(td, sig); 3462 /* NOTREACHED */ 3463 } else { 3464 /* 3465 * If we get here, the signal must be caught. 3466 */ 3467 KASSERT(action != SIG_IGN, ("postsig action %p", action)); 3468 KASSERT(!SIGISMEMBER(td->td_sigmask, sig), 3469 ("postsig action: blocked sig %d", sig)); 3470 3471 /* 3472 * Set the new mask value and also defer further 3473 * occurrences of this signal. 3474 * 3475 * Special case: user has done a sigsuspend. Here the 3476 * current mask is not of interest, but rather the 3477 * mask from before the sigsuspend is what we want 3478 * restored after the signal processing is completed. 3479 */ 3480 if (td->td_pflags & TDP_OLDMASK) { 3481 returnmask = td->td_oldsigmask; 3482 td->td_pflags &= ~TDP_OLDMASK; 3483 } else 3484 returnmask = td->td_sigmask; 3485 3486 if (p->p_sig == sig) { 3487 p->p_sig = 0; 3488 } 3489 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask); 3490 postsig_done(sig, td, ps); 3491 } 3492 return (1); 3493 } 3494 3495 int 3496 sig_ast_checksusp(struct thread *td) 3497 { 3498 struct proc *p __diagused; 3499 int ret; 3500 3501 p = td->td_proc; 3502 PROC_LOCK_ASSERT(p, MA_OWNED); 3503 3504 if (!td_ast_pending(td, TDA_SUSPEND)) 3505 return (0); 3506 3507 ret = thread_suspend_check(1); 3508 MPASS(ret == 0 || ret == EINTR || ret == ERESTART); 3509 return (ret); 3510 } 3511 3512 int 3513 sig_ast_needsigchk(struct thread *td) 3514 { 3515 struct proc *p; 3516 struct sigacts *ps; 3517 int ret, sig; 3518 3519 p = td->td_proc; 3520 PROC_LOCK_ASSERT(p, MA_OWNED); 3521 3522 if (!td_ast_pending(td, TDA_SIG)) 3523 return (0); 3524 3525 ps = p->p_sigacts; 3526 mtx_lock(&ps->ps_mtx); 3527 sig = cursig(td); 3528 if (sig == -1) { 3529 mtx_unlock(&ps->ps_mtx); 3530 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY")); 3531 KASSERT(TD_SBDRY_INTR(td), 3532 ("lost TDF_SERESTART of TDF_SEINTR")); 3533 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 3534 (TDF_SEINTR | TDF_SERESTART), 3535 ("both TDF_SEINTR and TDF_SERESTART")); 3536 ret = TD_SBDRY_ERRNO(td); 3537 } else if (sig != 0) { 3538 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART; 3539 mtx_unlock(&ps->ps_mtx); 3540 } else { 3541 mtx_unlock(&ps->ps_mtx); 3542 ret = 0; 3543 } 3544 3545 /* 3546 * Do not go into sleep if this thread was the ptrace(2) 3547 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH, 3548 * but we usually act on the signal by interrupting sleep, and 3549 * should do that here as well. 
3550 */ 3551 if ((td->td_dbgflags & TDB_FSTP) != 0) { 3552 if (ret == 0) 3553 ret = EINTR; 3554 td->td_dbgflags &= ~TDB_FSTP; 3555 } 3556 3557 return (ret); 3558 } 3559 3560 int 3561 sig_intr(void) 3562 { 3563 struct thread *td; 3564 struct proc *p; 3565 int ret; 3566 3567 td = curthread; 3568 if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND)) 3569 return (0); 3570 3571 p = td->td_proc; 3572 3573 PROC_LOCK(p); 3574 ret = sig_ast_checksusp(td); 3575 if (ret == 0) 3576 ret = sig_ast_needsigchk(td); 3577 PROC_UNLOCK(p); 3578 return (ret); 3579 } 3580 3581 bool 3582 curproc_sigkilled(void) 3583 { 3584 struct thread *td; 3585 struct proc *p; 3586 struct sigacts *ps; 3587 bool res; 3588 3589 td = curthread; 3590 if (!td_ast_pending(td, TDA_SIG)) 3591 return (false); 3592 3593 p = td->td_proc; 3594 PROC_LOCK(p); 3595 ps = p->p_sigacts; 3596 mtx_lock(&ps->ps_mtx); 3597 res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) || 3598 SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL); 3599 mtx_unlock(&ps->ps_mtx); 3600 PROC_UNLOCK(p); 3601 return (res); 3602 } 3603 3604 void 3605 proc_wkilled(struct proc *p) 3606 { 3607 3608 PROC_LOCK_ASSERT(p, MA_OWNED); 3609 if ((p->p_flag & P_WKILLED) == 0) 3610 p->p_flag |= P_WKILLED; 3611 } 3612 3613 /* 3614 * Kill the specified process for the stated reason. 3615 */ 3616 void 3617 killproc(struct proc *p, const char *why) 3618 { 3619 3620 PROC_LOCK_ASSERT(p, MA_OWNED); 3621 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid, 3622 p->p_comm); 3623 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n", 3624 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id, 3625 p->p_ucred->cr_uid, why); 3626 proc_wkilled(p); 3627 kern_psignal(p, SIGKILL); 3628 } 3629 3630 /* 3631 * Send the queued SIGCHLD to the parent when the child process's 3632 * state changes. 3633 */ 3634 static void 3635 sigparent(struct proc *p, int reason, int status) 3636 { 3637 PROC_LOCK_ASSERT(p, MA_OWNED); 3638 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED); 3639 3640 if (p->p_ksi != NULL) { 3641 p->p_ksi->ksi_signo = SIGCHLD; 3642 p->p_ksi->ksi_code = reason; 3643 p->p_ksi->ksi_status = status; 3644 p->p_ksi->ksi_pid = p->p_pid; 3645 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid; 3646 if (KSI_ONQ(p->p_ksi)) 3647 return; 3648 } 3649 3650 /* 3651 * Do not consume p_ksi if the parent is a zombie, since the signal 3652 * is dropped immediately. Instead, keep it, since it might be 3653 * useful for the reaper. 3654 */ 3655 if (p->p_pptr->p_state != PRS_ZOMBIE) 3656 pksignal(p->p_pptr, SIGCHLD, p->p_ksi); 3657 } 3658 3659 static void 3660 childproc_jobstate(struct proc *p, int reason, int sig) 3661 { 3662 struct sigacts *ps; 3663 3664 PROC_LOCK_ASSERT(p, MA_OWNED); 3665 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED); 3666 3667 /* 3668 * Wake up the parent sleeping in kern_wait() and also send 3669 * SIGCHLD to the parent, but SIGCHLD alone does not guarantee 3670 * that the parent will wake up, because the parent may have 3671 * masked the signal.
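 * The wakeup() below is therefore what reliably unblocks the
 * parent; the SIGCHLD sent via sigparent() is advisory.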
3672 */ 3673 p->p_pptr->p_flag |= P_STATCHILD; 3674 wakeup(p->p_pptr); 3675 3676 ps = p->p_pptr->p_sigacts; 3677 mtx_lock(&ps->ps_mtx); 3678 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) { 3679 mtx_unlock(&ps->ps_mtx); 3680 sigparent(p, reason, sig); 3681 } else 3682 mtx_unlock(&ps->ps_mtx); 3683 } 3684 3685 void 3686 childproc_stopped(struct proc *p, int reason) 3687 { 3688 3689 childproc_jobstate(p, reason, p->p_xsig); 3690 } 3691 3692 void 3693 childproc_continued(struct proc *p) 3694 { 3695 PROC_LOCK_ASSERT(p, MA_OWNED); 3696 p->p_flag |= P_CONTINUED; 3697 p->p_xsig = SIGCONT; 3698 childproc_jobstate(p, CLD_CONTINUED, SIGCONT); 3699 } 3700 3701 void 3702 childproc_exited(struct proc *p) 3703 { 3704 int reason, status; 3705 3706 if (WCOREDUMP(p->p_xsig)) { 3707 reason = CLD_DUMPED; 3708 status = WTERMSIG(p->p_xsig); 3709 } else if (WIFSIGNALED(p->p_xsig)) { 3710 reason = CLD_KILLED; 3711 status = WTERMSIG(p->p_xsig); 3712 } else { 3713 reason = CLD_EXITED; 3714 status = p->p_xexit; 3715 } 3716 /* 3717 * XXX avoid calling wakeup(p->p_pptr), the work is 3718 * done in exit1(). 3719 */ 3720 sigparent(p, reason, status); 3721 } 3722 3723 /* 3724 * Nonexistent system call-- signal process (may want to handle it). Flag 3725 * error in case process won't see signal immediately (blocked or ignored). 3726 */ 3727 #ifndef _SYS_SYSPROTO_H_ 3728 struct nosys_args { 3729 int dummy; 3730 }; 3731 #endif 3732 /* ARGSUSED */ 3733 int 3734 nosys(struct thread *td, struct nosys_args *args) 3735 { 3736 return (kern_nosys(td, args->dummy)); 3737 } 3738 3739 int 3740 kern_nosys(struct thread *td, int dummy) 3741 { 3742 struct proc *p; 3743 3744 p = td->td_proc; 3745 3746 if (SV_PROC_FLAG(p, SV_SIGSYS) != 0 && kern_signosys) { 3747 PROC_LOCK(p); 3748 tdsignal(td, SIGSYS); 3749 PROC_UNLOCK(p); 3750 } 3751 if (kern_lognosys == 1 || kern_lognosys == 3) { 3752 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm, 3753 td->td_sa.code); 3754 } 3755 if (kern_lognosys == 2 || kern_lognosys == 3 || 3756 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) { 3757 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm, 3758 td->td_sa.code); 3759 } 3760 return (ENOSYS); 3761 } 3762 3763 /* 3764 * Send a SIGIO or SIGURG signal to a process or process group using stored 3765 * credentials rather than those of the current process. 
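 * This is the delivery end of the F_SETOWN/O_ASYNC machinery: a
 * process that registered itself with, e.g., fcntl(fd, F_SETOWN,
 * getpid()) and enabled O_ASYNC receives its SIGIO here when the
 * descriptor becomes ready (see fcntl(2)).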
3766 */ 3767 void 3768 pgsigio(struct sigio **sigiop, int sig, int checkctty) 3769 { 3770 ksiginfo_t ksi; 3771 struct sigio *sigio; 3772 3773 ksiginfo_init(&ksi); 3774 ksi.ksi_signo = sig; 3775 ksi.ksi_code = SI_KERNEL; 3776 3777 SIGIO_LOCK(); 3778 sigio = *sigiop; 3779 if (sigio == NULL) { 3780 SIGIO_UNLOCK(); 3781 return; 3782 } 3783 if (sigio->sio_pgid > 0) { 3784 PROC_LOCK(sigio->sio_proc); 3785 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred)) 3786 kern_psignal(sigio->sio_proc, sig); 3787 PROC_UNLOCK(sigio->sio_proc); 3788 } else if (sigio->sio_pgid < 0) { 3789 struct proc *p; 3790 3791 PGRP_LOCK(sigio->sio_pgrp); 3792 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) { 3793 PROC_LOCK(p); 3794 if (p->p_state == PRS_NORMAL && 3795 CANSIGIO(sigio->sio_ucred, p->p_ucred) && 3796 (checkctty == 0 || (p->p_flag & P_CONTROLT))) 3797 kern_psignal(p, sig); 3798 PROC_UNLOCK(p); 3799 } 3800 PGRP_UNLOCK(sigio->sio_pgrp); 3801 } 3802 SIGIO_UNLOCK(); 3803 } 3804 3805 static int 3806 filt_sigattach(struct knote *kn) 3807 { 3808 struct proc *p = curproc; 3809 3810 kn->kn_ptr.p_proc = p; 3811 kn->kn_flags |= EV_CLEAR; /* automatically set */ 3812 3813 knlist_add(p->p_klist, kn, 0); 3814 3815 return (0); 3816 } 3817 3818 static void 3819 filt_sigdetach(struct knote *kn) 3820 { 3821 knlist_remove(kn->kn_knlist, kn, 0); 3822 } 3823 3824 /* 3825 * signal knotes are shared with proc knotes, so we apply a mask to 3826 * the hint in order to differentiate them from process hints. This 3827 * could be avoided by using a signal-specific knote list, but probably 3828 * isn't worth the trouble. 3829 */ 3830 static int 3831 filt_signal(struct knote *kn, long hint) 3832 { 3833 3834 if (hint & NOTE_SIGNAL) { 3835 hint &= ~NOTE_SIGNAL; 3836 3837 if (kn->kn_id == hint) 3838 kn->kn_data++; 3839 } 3840 return (kn->kn_data != 0); 3841 } 3842 3843 struct sigacts * 3844 sigacts_alloc(void) 3845 { 3846 struct sigacts *ps; 3847 3848 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO); 3849 refcount_init(&ps->ps_refcnt, 1); 3850 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF); 3851 return (ps); 3852 } 3853 3854 void 3855 sigacts_free(struct sigacts *ps) 3856 { 3857 3858 if (refcount_release(&ps->ps_refcnt) == 0) 3859 return; 3860 mtx_destroy(&ps->ps_mtx); 3861 free(ps, M_SUBPROC); 3862 } 3863 3864 struct sigacts * 3865 sigacts_hold(struct sigacts *ps) 3866 { 3867 3868 refcount_acquire(&ps->ps_refcnt); 3869 return (ps); 3870 } 3871 3872 void 3873 sigacts_copy(struct sigacts *dest, struct sigacts *src) 3874 { 3875 3876 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest")); 3877 mtx_lock(&src->ps_mtx); 3878 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt)); 3879 mtx_unlock(&src->ps_mtx); 3880 } 3881 3882 int 3883 sigacts_shared(struct sigacts *ps) 3884 { 3885 3886 return (ps->ps_refcnt > 1); 3887 } 3888 3889 void 3890 sig_drop_caught(struct proc *p) 3891 { 3892 int sig; 3893 struct sigacts *ps; 3894 3895 ps = p->p_sigacts; 3896 PROC_LOCK_ASSERT(p, MA_OWNED); 3897 mtx_assert(&ps->ps_mtx, MA_OWNED); 3898 SIG_FOREACH(sig, &ps->ps_sigcatch) { 3899 sigdflt(ps, sig); 3900 if ((sigprop(sig) & SIGPROP_IGNORE) != 0) 3901 sigqueue_delete_proc(p, sig); 3902 } 3903 } 3904 3905 static void 3906 sigfastblock_failed(struct thread *td, bool sendsig, bool write) 3907 { 3908 ksiginfo_t ksi; 3909 3910 /* 3911 * Prevent further fetches and SIGSEGVs, allowing thread to 3912 * issue syscalls despite corruption. 
3913 */ 3914 sigfastblock_clear(td); 3915 3916 if (!sendsig) 3917 return; 3918 ksiginfo_init_trap(&ksi); 3919 ksi.ksi_signo = SIGSEGV; 3920 ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR; 3921 ksi.ksi_addr = td->td_sigblock_ptr; 3922 trapsignal(td, &ksi); 3923 } 3924 3925 static bool 3926 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp) 3927 { 3928 uint32_t res; 3929 3930 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) 3931 return (true); 3932 if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) { 3933 sigfastblock_failed(td, sendsig, false); 3934 return (false); 3935 } 3936 *valp = res; 3937 td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS; 3938 return (true); 3939 } 3940 3941 static void 3942 sigfastblock_resched(struct thread *td, bool resched) 3943 { 3944 struct proc *p; 3945 3946 if (resched) { 3947 p = td->td_proc; 3948 PROC_LOCK(p); 3949 reschedule_signals(p, td->td_sigmask, 0); 3950 PROC_UNLOCK(p); 3951 } 3952 ast_sched(td, TDA_SIG); 3953 } 3954 3955 int 3956 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap) 3957 { 3958 struct proc *p; 3959 int error, res; 3960 uint32_t oldval; 3961 3962 error = 0; 3963 p = td->td_proc; 3964 switch (uap->cmd) { 3965 case SIGFASTBLOCK_SETPTR: 3966 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) { 3967 error = EBUSY; 3968 break; 3969 } 3970 if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) { 3971 error = EINVAL; 3972 break; 3973 } 3974 td->td_pflags |= TDP_SIGFASTBLOCK; 3975 td->td_sigblock_ptr = uap->ptr; 3976 break; 3977 3978 case SIGFASTBLOCK_UNBLOCK: 3979 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) { 3980 error = EINVAL; 3981 break; 3982 } 3983 3984 for (;;) { 3985 res = casueword32(td->td_sigblock_ptr, 3986 SIGFASTBLOCK_PEND, &oldval, 0); 3987 if (res == -1) { 3988 error = EFAULT; 3989 sigfastblock_failed(td, false, true); 3990 break; 3991 } 3992 if (res == 0) 3993 break; 3994 MPASS(res == 1); 3995 if (oldval != SIGFASTBLOCK_PEND) { 3996 error = EBUSY; 3997 break; 3998 } 3999 error = thread_check_susp(td, false); 4000 if (error != 0) 4001 break; 4002 } 4003 if (error != 0) 4004 break; 4005 4006 /* 4007 * td_sigblock_val is cleared there, but not on a 4008 * syscall exit. The end effect is that a single 4009 * interruptible sleep, while user sigblock word is 4010 * set, might return EINTR or ERESTART to usermode 4011 * without delivering signal. All further sleeps, 4012 * until userspace clears the word and does 4013 * sigfastblock(UNBLOCK), observe current word and no 4014 * longer get interrupted. It is slight 4015 * non-conformance, with alternative to have read the 4016 * sigblock word on each syscall entry. 4017 */ 4018 td->td_sigblock_val = 0; 4019 4020 /* 4021 * Rely on normal ast mechanism to deliver pending 4022 * signals to current thread. But notify others about 4023 * fake unblock. 
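 * Roughly, the userspace side of the protocol (as followed by, e.g.,
 * libthr) is believed to be: block by adding SIGFASTBLOCK_INC to the
 * registered word, unblock by subtracting it, and if the result has
 * SIGFASTBLOCK_PEND set, call sigfastblock(SIGFASTBLOCK_UNBLOCK,
 * NULL) so that postponed signals get delivered; see sigfastblock(2).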
4024 */ 4025 sigfastblock_resched(td, error == 0 && p->p_numthreads != 1); 4026 4027 break; 4028 4029 case SIGFASTBLOCK_UNSETPTR: 4030 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) { 4031 error = EINVAL; 4032 break; 4033 } 4034 if (!sigfastblock_fetch_sig(td, false, &oldval)) { 4035 error = EFAULT; 4036 break; 4037 } 4038 if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) { 4039 error = EBUSY; 4040 break; 4041 } 4042 sigfastblock_clear(td); 4043 break; 4044 4045 default: 4046 error = EINVAL; 4047 break; 4048 } 4049 return (error); 4050 } 4051 4052 void 4053 sigfastblock_clear(struct thread *td) 4054 { 4055 bool resched; 4056 4057 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) 4058 return; 4059 td->td_sigblock_val = 0; 4060 resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 || 4061 SIGPENDING(td); 4062 td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING); 4063 sigfastblock_resched(td, resched); 4064 } 4065 4066 void 4067 sigfastblock_fetch(struct thread *td) 4068 { 4069 uint32_t val; 4070 4071 (void)sigfastblock_fetch_sig(td, true, &val); 4072 } 4073 4074 static void 4075 sigfastblock_setpend1(struct thread *td) 4076 { 4077 int res; 4078 uint32_t oldval; 4079 4080 if ((td->td_pflags & TDP_SIGFASTPENDING) == 0) 4081 return; 4082 res = fueword32((void *)td->td_sigblock_ptr, &oldval); 4083 if (res == -1) { 4084 sigfastblock_failed(td, true, false); 4085 return; 4086 } 4087 for (;;) { 4088 res = casueword32(td->td_sigblock_ptr, oldval, &oldval, 4089 oldval | SIGFASTBLOCK_PEND); 4090 if (res == -1) { 4091 sigfastblock_failed(td, true, true); 4092 return; 4093 } 4094 if (res == 0) { 4095 td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS; 4096 td->td_pflags &= ~TDP_SIGFASTPENDING; 4097 break; 4098 } 4099 MPASS(res == 1); 4100 if (thread_check_susp(td, false) != 0) 4101 break; 4102 } 4103 } 4104 4105 static void 4106 sigfastblock_setpend(struct thread *td, bool resched) 4107 { 4108 struct proc *p; 4109 4110 sigfastblock_setpend1(td); 4111 if (resched) { 4112 p = td->td_proc; 4113 PROC_LOCK(p); 4114 reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK); 4115 PROC_UNLOCK(p); 4116 } 4117 } 4118