/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/condvar.h>
#include <sys/devctl.h>
#include <sys/event.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/ptrace.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/ucoredump.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/wait.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/cpu.h>

#include <security/audit/audit.h>

#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , signal__send,
    "struct thread *", "struct proc *", "int");
SDT_PROBE_DEFINE2(proc, , , signal__clear,
    "int", "ksiginfo_t *");
SDT_PROBE_DEFINE3(proc, , , signal__discard,
    "struct thread *", "struct proc *", "int");

static int	killpg1(struct thread *td, int sig, int pgid, int all,
		    ksiginfo_t *ksi);
static int	issignal(struct thread *td);
static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static bool	sig_suspend_threads(struct thread *, struct proc *);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
static void	sigqueue_start(void);
static void	sigfastblock_setpend(struct thread *td, bool resched);
static void	sig_handle_first_stop(struct thread *td, struct proc *p,
    int sig);

static uma_zone_t	ksiginfo_zone = NULL;
const struct filterops sig_filtops = {
	.f_isfd = 0,
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
};

static int	kern_forcesigexit = 1;
SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
    &kern_forcesigexit, 0, "Force trap signal to be handled");

static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "POSIX real time signal");

static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	preallocate_siginfo = 1024;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals that overflowed");

static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "Number of failed signal allocations");

static int	kern_lognosys = 0;
SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
    "Log invalid syscalls");

static int	kern_signosys = 1;
SYSCTL_INT(_kern, OID_AUTO, signosys, CTLFLAG_RWTUN, &kern_signosys, 0,
    "Send SIGSYS on return from invalid syscall");

__read_frequently bool sigfastblock_fetch_always = false;
SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
    &sigfastblock_fetch_always, 0,
    "Fetch sigfastblock word on each syscall entry for proper "
    "blocking semantics");

static bool	kern_sig_discard_ign = true;
SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
    &kern_sig_discard_ign, 0,
    "Discard ignored signals on delivery, otherwise queue them to "
    "the target queue");

bool pt_attach_transparent = true;
SYSCTL_BOOL(_debug, OID_AUTO, ptrace_attach_transparent, CTLFLAG_RWTUN,
    &pt_attach_transparent, 0,
    "Hide wakes from PT_ATTACH on interruptible sleeps");

SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST + 3, sigqueue_start, NULL);
/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define	CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SIGPROP_KILL		0x01	/* terminates process by default */
#define	SIGPROP_CORE		0x02	/* ditto and coredumps */
#define	SIGPROP_STOP		0x04	/* suspend process */
#define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
#define	SIGPROP_IGNORE		0x10	/* ignore by default */
#define	SIGPROP_CONT		0x20	/* continue if suspended */

static const int sigproptbl[NSIG] = {
	[SIGHUP] =	SIGPROP_KILL,
	[SIGINT] =	SIGPROP_KILL,
	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGKILL] =	SIGPROP_KILL,
	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGPIPE] =	SIGPROP_KILL,
	[SIGALRM] =	SIGPROP_KILL,
	[SIGTERM] =	SIGPROP_KILL,
	[SIGURG] =	SIGPROP_IGNORE,
	[SIGSTOP] =	SIGPROP_STOP,
	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
	[SIGCHLD] =	SIGPROP_IGNORE,
	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGIO] =	SIGPROP_IGNORE,
	[SIGXCPU] =	SIGPROP_KILL,
	[SIGXFSZ] =	SIGPROP_KILL,
	[SIGVTALRM] =	SIGPROP_KILL,
	[SIGPROF] =	SIGPROP_KILL,
	[SIGWINCH] =	SIGPROP_IGNORE,
	[SIGINFO] =	SIGPROP_IGNORE,
	[SIGUSR1] =	SIGPROP_KILL,
	[SIGUSR2] =	SIGPROP_KILL,
};

#define	_SIG_FOREACH_ADVANCE(i, set) ({					\
	int __found;							\
	for (;;) {							\
		if (__bits != 0) {					\
			int __sig = ffs(__bits);			\
			__bits &= ~(1u << (__sig - 1));			\
			sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
			__found = 1;					\
			break;						\
		}							\
		if (++__i == _SIG_WORDS) {				\
			__found = 0;					\
			break;						\
		}							\
		__bits = (set)->__bits[__i];				\
	}								\
	__found != 0;							\
})

#define	SIG_FOREACH(i, set)						\
	for (int32_t __i = -1, __bits = 0;				\
	    _SIG_FOREACH_ADVANCE(i, set); )
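/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original file): SIG_FOREACH visits each signal recorded in a
 * sigset_t, in ascending order.  Note that _SIG_FOREACH_ADVANCE()
 * assigns to a variable literally named 'sig', so the loop variable
 * must carry that name:
 *
 *	int sig;
 *
 *	SIG_FOREACH(sig, &set) {
 *		... act on each pending signal 'sig' ...
 *	}
 */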
static sigset_t fastblock_mask;

static void
ast_sig(struct thread *td, int tda)
{
	struct proc *p;
	int old_boundary, sig;
	bool resched_sigs;

	p = td->td_proc;

#ifdef DIAGNOSTIC
	if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
	    TDAI(TDA_AST))) == 0) {
		PROC_LOCK(p);
		thread_lock(td);
		/*
		 * Note that TDA_SIG should be re-read from
		 * td_ast, since a signal might have been delivered
		 * after we cleared td_flags above.  This is one of
		 * the reasons for the looping check of the AST
		 * condition.  See comment in userret() about P_PPWAIT.
		 */
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && ((tda | td->td_ast) &
			    (TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
				thread_unlock(td);	/* fix dumps */
				panic(
				    "failed2 to set signal flags for ast p %p "
				    "td %p tda %#x td_ast %#x fl %#x",
				    p, td, tda, td->td_ast, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Check for signals.  Unlocked reads of p_pendingcnt or
	 * p_siglist might cause process-directed signal to be handled
	 * later.
	 */
	if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		sigfastblock_fetch(td);
		PROC_LOCK(p);
		old_boundary = ~TDB_BOUNDARY | (td->td_dbgflags & TDB_BOUNDARY);
		td->td_dbgflags |= TDB_BOUNDARY;
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);
		td->td_dbgflags &= old_boundary;
		PROC_UNLOCK(p);
		resched_sigs = true;
	} else {
		resched_sigs = false;
	}

	/*
	 * Handle deferred update of the fast sigblock value, after
	 * the postsig() loop was performed.
	 */
	sigfastblock_setpend(td, resched_sigs);

	/*
	 * Clear td_sa.code: signal to ptrace that syscall arguments
	 * are unavailable after this point.  This AST handler is the
	 * last chance for ptracestop() to signal the tracer before
	 * the tracee returns to userspace.
	 */
	td->td_sa.code = 0;
}

static void
ast_sigsuspend(struct thread *td, int tda __unused)
{
	MPASS((td->td_pflags & TDP_OLDMASK) != 0);
	td->td_pflags &= ~TDP_OLDMASK;
	kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
}

static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
	SIGFILLSET(fastblock_mask);
	SIG_CANTMASK(fastblock_mask);
	ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);

	/*
	 * TDA_PSELECT is for the case where the signal mask should be restored
	 * before delivering any signals so that we do not deliver any that are
	 * blocked by the normal thread mask.  It is mutually exclusive with
	 * TDA_SIGSUSPEND, which should be used if we *do* want to deliver
	 * signals that are normally blocked, e.g., if it interrupted our sleep.
	 */
	ast_register(TDA_PSELECT, ASTR_ASTF_REQUIRED | ASTR_TDP,
	    TDP_OLDMASK, ast_sigsuspend);
	ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
	    TDP_OLDMASK, ast_sigsuspend);
}
ksiginfo_t *
ksiginfo_alloc(int mwait)
{
	MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);

	if (ksiginfo_zone == NULL)
		return (NULL);
	return (uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
}

void
ksiginfo_free(ksiginfo_t *ksi)
{
	uma_zfree(ksiginfo_zone, ksi);
}

static __inline bool
ksiginfo_tryfree(ksiginfo_t *ksi)
{
	if ((ksi->ksi_flags & KSI_EXT) == 0) {
		uma_zfree(ksiginfo_zone, ksi);
		return (true);
	}
	return (false);
}

void
sigqueue_init(sigqueue_t *list, struct proc *p)
{
	SIGEMPTYSET(list->sq_signals);
	SIGEMPTYSET(list->sq_kill);
	SIGEMPTYSET(list->sq_ptrace);
	TAILQ_INIT(&list->sq_list);
	list->sq_proc = p;
	list->sq_flags = SQ_INIT;
}

/*
 * Get a signal's ksiginfo.
 * Return:
 *	0	-	signal not found
 *	others	-	signal number
 */
static int
sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi, *next;
	int count = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (!SIGISMEMBER(sq->sq_signals, signo))
		return (0);

	if (SIGISMEMBER(sq->sq_ptrace, signo)) {
		count++;
		SIGDELSET(sq->sq_ptrace, signo);
		si->ksi_flags |= KSI_PTRACE;
	}
	if (SIGISMEMBER(sq->sq_kill, signo)) {
		count++;
		if (count == 1)
			SIGDELSET(sq->sq_kill, signo);
	}

	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (ksi->ksi_signo == signo) {
			if (count == 0) {
				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
				ksi->ksi_sigq = NULL;
				ksiginfo_copy(ksi, si);
				if (ksiginfo_tryfree(ksi) && p != NULL)
					p->p_pendingcnt--;
			}
			if (++count > 1)
				break;
		}
	}

	if (count <= 1)
		SIGDELSET(sq->sq_signals, signo);
	si->ksi_signo = signo;
	return (signo);
}

void
sigqueue_take(ksiginfo_t *ksi)
{
	struct ksiginfo *kp;
	struct proc *p;
	sigqueue_t *sq;

	if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
		return;

	p = sq->sq_proc;
	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
	ksi->ksi_sigq = NULL;
	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
		p->p_pendingcnt--;

	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
	    kp = TAILQ_NEXT(kp, ksi_link)) {
		if (kp->ksi_signo == ksi->ksi_signo)
			break;
	}
	if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
	    !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
}
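/*
 * Illustrative sketch (editorial addition): the dequeue pattern used by
 * consumers such as kern_sigtimedwait() below checks the thread-local
 * queue first and falls back to the process-wide queue:
 *
 *	if (sigqueue_get(&td->td_sigqueue, sig, ksi) == 0)
 *		(void)sigqueue_get(&p->p_sigqueue, sig, ksi);
 */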
static int
sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi;
	int ret = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	/*
	 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
	 * for these signals.
	 */
	if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	/* directly insert the ksi, don't copy it */
	if (si->ksi_flags & KSI_INS) {
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
		si->ksi_sigq = sq;
		goto out_set_bit;
	}

	if (__predict_false(ksiginfo_zone == NULL)) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
		signal_overflow++;
		ret = EAGAIN;
	} else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) {
		signal_alloc_fail++;
		ret = EAGAIN;
	} else {
		if (p != NULL)
			p->p_pendingcnt++;
		ksiginfo_copy(si, ksi);
		ksi->ksi_signo = signo;
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = sq;
	}

	if (ret != 0) {
		if ((si->ksi_flags & KSI_PTRACE) != 0) {
			SIGADDSET(sq->sq_ptrace, signo);
			ret = 0;
			goto out_set_bit;
		} else if ((si->ksi_flags & KSI_TRAP) != 0 ||
		    (si->ksi_flags & KSI_SIGQ) == 0) {
			SIGADDSET(sq->sq_kill, signo);
			ret = 0;
			goto out_set_bit;
		}
		return (ret);
	}

out_set_bit:
	SIGADDSET(sq->sq_signals, signo);
	return (ret);
}

void
sigqueue_flush(sigqueue_t *sq)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (p != NULL)
		PROC_LOCK_ASSERT(p, MA_OWNED);

	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = NULL;
		if (ksiginfo_tryfree(ksi) && p != NULL)
			p->p_pendingcnt--;
	}

	SIGEMPTYSET(sq->sq_signals);
	SIGEMPTYSET(sq->sq_kill);
	SIGEMPTYSET(sq->sq_ptrace);
}

static void
sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
{
	sigset_t tmp;
	struct proc *p1, *p2;
	ksiginfo_t *ksi, *next;

	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
	p1 = src->sq_proc;
	p2 = dst->sq_proc;
	/* Move siginfo to target list */
	TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
			if (p1 != NULL)
				p1->p_pendingcnt--;
			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = dst;
			if (p2 != NULL)
				p2->p_pendingcnt++;
		}
	}

	/* Move pending bits to target list */
	tmp = src->sq_kill;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_kill, tmp);
	SIGSETNAND(src->sq_kill, tmp);

	tmp = src->sq_ptrace;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_ptrace, tmp);
	SIGSETNAND(src->sq_ptrace, tmp);

	tmp = src->sq_signals;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_signals, tmp);
	SIGSETNAND(src->sq_signals, tmp);
}

#if 0
static void
sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_move_set(src, dst, &set);
}
#endif
SQ_INIT, ("src sigqueue not inited")); 637 638 /* Remove siginfo queue */ 639 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) { 640 if (SIGISMEMBER(*set, ksi->ksi_signo)) { 641 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link); 642 ksi->ksi_sigq = NULL; 643 if (ksiginfo_tryfree(ksi) && p != NULL) 644 p->p_pendingcnt--; 645 } 646 } 647 SIGSETNAND(sq->sq_kill, *set); 648 SIGSETNAND(sq->sq_ptrace, *set); 649 SIGSETNAND(sq->sq_signals, *set); 650 } 651 652 void 653 sigqueue_delete(sigqueue_t *sq, int signo) 654 { 655 sigset_t set; 656 657 SIGEMPTYSET(set); 658 SIGADDSET(set, signo); 659 sigqueue_delete_set(sq, &set); 660 } 661 662 /* Remove a set of signals for a process */ 663 static void 664 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set) 665 { 666 sigqueue_t worklist; 667 struct thread *td0; 668 669 PROC_LOCK_ASSERT(p, MA_OWNED); 670 671 sigqueue_init(&worklist, NULL); 672 sigqueue_move_set(&p->p_sigqueue, &worklist, set); 673 674 FOREACH_THREAD_IN_PROC(p, td0) 675 sigqueue_move_set(&td0->td_sigqueue, &worklist, set); 676 677 sigqueue_flush(&worklist); 678 } 679 680 void 681 sigqueue_delete_proc(struct proc *p, int signo) 682 { 683 sigset_t set; 684 685 SIGEMPTYSET(set); 686 SIGADDSET(set, signo); 687 sigqueue_delete_set_proc(p, &set); 688 } 689 690 static void 691 sigqueue_delete_stopmask_proc(struct proc *p) 692 { 693 sigset_t set; 694 695 SIGEMPTYSET(set); 696 SIGADDSET(set, SIGSTOP); 697 SIGADDSET(set, SIGTSTP); 698 SIGADDSET(set, SIGTTIN); 699 SIGADDSET(set, SIGTTOU); 700 sigqueue_delete_set_proc(p, &set); 701 } 702 703 /* 704 * Determine signal that should be delivered to thread td, the current 705 * thread, 0 if none. If there is a pending stop signal with default 706 * action, the process stops in issignal(). 707 */ 708 int 709 cursig(struct thread *td) 710 { 711 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 712 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED); 713 THREAD_LOCK_ASSERT(td, MA_NOTOWNED); 714 return (SIGPENDING(td) ? issignal(td) : 0); 715 } 716 717 /* 718 * Arrange for ast() to handle unmasked pending signals on return to user 719 * mode. This must be called whenever a signal is added to td_sigqueue or 720 * unmasked in td_sigmask. 721 */ 722 void 723 signotify(struct thread *td) 724 { 725 726 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 727 728 if (SIGPENDING(td)) 729 ast_sched(td, TDA_SIG); 730 } 731 732 /* 733 * Returns 1 (true) if altstack is configured for the thread, and the 734 * passed stack bottom address falls into the altstack range. Handles 735 * the 43 compat special case where the alt stack size is zero. 736 */ 737 int 738 sigonstack(size_t sp) 739 { 740 struct thread *td; 741 742 td = curthread; 743 if ((td->td_pflags & TDP_ALTSTACK) == 0) 744 return (0); 745 #if defined(COMPAT_43) 746 if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0) 747 return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0); 748 #endif 749 return (sp >= (size_t)td->td_sigstk.ss_sp && 750 sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp); 751 } 752 753 static __inline int 754 sigprop(int sig) 755 { 756 757 if (sig > 0 && sig < nitems(sigproptbl)) 758 return (sigproptbl[sig]); 759 return (0); 760 } 761 762 bool 763 sig_do_core(int sig) 764 { 765 766 return ((sigprop(sig) & SIGPROP_CORE) != 0); 767 } 768 769 static bool 770 sigact_flag_test(const struct sigaction *act, int flag) 771 { 772 773 /* 774 * SA_SIGINFO is reset when signal disposition is set to 775 * ignore or default. Other flags are kept according to user 776 * settings. 
static bool
sigact_flag_test(const struct sigaction *act, int flag)
{
	/*
	 * SA_SIGINFO is reset when signal disposition is set to
	 * ignore or default.  Other flags are kept according to user
	 * settings.
	 */
	return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
	    ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
	    (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
}

/*
 * kern_sigaction
 * sigaction
 * freebsd4_sigaction
 * osigaction
 */
int
kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
    struct sigaction *oact, int flags)
{
	struct sigacts *ps;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);
	if (act != NULL && act->sa_handler != SIG_DFL &&
	    act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
	    SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
	    SA_NOCLDWAIT | SA_SIGINFO)) != 0)
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if (oact) {
		memset(oact, 0, sizeof(*oact));
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig)) {
			oact->sa_flags |= SA_SIGINFO;
			oact->sa_sigaction =
			    (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
		} else
			oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */
		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (sigact_flag_test(act, SA_SIGINFO)) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!sigact_flag_test(act, SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (sigact_flag_test(act, SA_ONSTACK))
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (sigact_flag_test(act, SA_RESETHAND))
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (sigact_flag_test(act, SA_NODEFER))
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trusting it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore.  However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SIGPROP_IGNORE &&
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			/* never to be seen again */
			sigqueue_delete_proc(p, sig);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
int
sys_sigaction(struct thread *td, struct sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}
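/*
 * Illustrative userland counterpart (editorial addition): the syscall
 * above backs sigaction(2).  A typical caller:
 *
 *	struct sigaction sa;
 *
 *	sa.sa_handler = handler;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	if (sigaction(SIGINT, &sa, NULL) == -1)
 *		err(1, "sigaction");
 *
 * Attempts to change the disposition of SIGKILL or SIGSTOP fail with
 * EINVAL, as enforced in kern_sigaction() above.
 */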
&oact : NULL; 978 if (actp) { 979 error = copyin(uap->act, actp, sizeof(act)); 980 if (error) 981 return (error); 982 } 983 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4); 984 if (oactp && !error) 985 error = copyout(oactp, uap->oact, sizeof(oact)); 986 return (error); 987 } 988 #endif /* COMAPT_FREEBSD4 */ 989 990 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 991 #ifndef _SYS_SYSPROTO_H_ 992 struct osigaction_args { 993 int signum; 994 struct osigaction *nsa; 995 struct osigaction *osa; 996 }; 997 #endif 998 int 999 osigaction(struct thread *td, struct osigaction_args *uap) 1000 { 1001 struct osigaction sa; 1002 struct sigaction nsa, osa; 1003 struct sigaction *nsap, *osap; 1004 int error; 1005 1006 if (uap->signum <= 0 || uap->signum >= ONSIG) 1007 return (EINVAL); 1008 1009 nsap = (uap->nsa != NULL) ? &nsa : NULL; 1010 osap = (uap->osa != NULL) ? &osa : NULL; 1011 1012 if (nsap) { 1013 error = copyin(uap->nsa, &sa, sizeof(sa)); 1014 if (error) 1015 return (error); 1016 nsap->sa_handler = sa.sa_handler; 1017 nsap->sa_flags = sa.sa_flags; 1018 OSIG2SIG(sa.sa_mask, nsap->sa_mask); 1019 } 1020 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET); 1021 if (osap && !error) { 1022 sa.sa_handler = osap->sa_handler; 1023 sa.sa_flags = osap->sa_flags; 1024 SIG2OSIG(osap->sa_mask, sa.sa_mask); 1025 error = copyout(&sa, uap->osa, sizeof(sa)); 1026 } 1027 return (error); 1028 } 1029 1030 #if !defined(__i386__) 1031 /* Avoid replicating the same stub everywhere */ 1032 int 1033 osigreturn(struct thread *td, struct osigreturn_args *uap) 1034 { 1035 return (kern_nosys(td, 0)); 1036 } 1037 #endif 1038 #endif /* COMPAT_43 */ 1039 1040 /* 1041 * Initialize signal state for process 0; 1042 * set to ignore signals that are ignored by default. 1043 */ 1044 void 1045 siginit(struct proc *p) 1046 { 1047 int i; 1048 struct sigacts *ps; 1049 1050 PROC_LOCK(p); 1051 ps = p->p_sigacts; 1052 mtx_lock(&ps->ps_mtx); 1053 for (i = 1; i <= NSIG; i++) { 1054 if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) { 1055 SIGADDSET(ps->ps_sigignore, i); 1056 } 1057 } 1058 mtx_unlock(&ps->ps_mtx); 1059 PROC_UNLOCK(p); 1060 } 1061 1062 /* 1063 * Reset specified signal to the default disposition. 1064 */ 1065 static void 1066 sigdflt(struct sigacts *ps, int sig) 1067 { 1068 1069 mtx_assert(&ps->ps_mtx, MA_OWNED); 1070 SIGDELSET(ps->ps_sigcatch, sig); 1071 if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT) 1072 SIGADDSET(ps->ps_sigignore, sig); 1073 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 1074 SIGDELSET(ps->ps_siginfo, sig); 1075 } 1076 1077 /* 1078 * Reset signals for an exec of the specified process. 1079 */ 1080 void 1081 execsigs(struct proc *p) 1082 { 1083 struct sigacts *ps; 1084 struct thread *td; 1085 1086 /* 1087 * Reset caught signals. Held signals remain held 1088 * through td_sigmask (unless they were caught, 1089 * and are now ignored by default). 1090 */ 1091 PROC_LOCK_ASSERT(p, MA_OWNED); 1092 ps = p->p_sigacts; 1093 mtx_lock(&ps->ps_mtx); 1094 sig_drop_caught(p); 1095 1096 /* 1097 * Reset stack state to the user stack. 1098 * Clear set of signals caught on the signal stack. 1099 */ 1100 td = curthread; 1101 MPASS(td->td_proc == p); 1102 td->td_sigstk.ss_flags = SS_DISABLE; 1103 td->td_sigstk.ss_size = 0; 1104 td->td_sigstk.ss_sp = 0; 1105 td->td_pflags &= ~TDP_ALTSTACK; 1106 /* 1107 * Reset no zombies if child dies flag as Solaris does. 
/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig_drop_caught(p);

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td = curthread;
	MPASS(td->td_proc == p);
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset the "no zombies if child dies" flag, as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}

/*
 * kern_sigprocmask()
 *
 * Manipulate signal mask.
 */
int
kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
    int flags)
{
	sigset_t new_block, oset1;
	struct proc *p;
	int error;

	p = td->td_proc;
	if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
		PROC_LOCK_ASSERT(p, MA_OWNED);
	else
		PROC_LOCK(p);
	mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
	    ? MA_OWNED : MA_NOTOWNED);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			SIGSETOR(td->td_sigmask, *set);
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			goto out;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			if (flags & SIGPROCMASK_OLD)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			signotify(td);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		/*
		 * The new_block set contains signals that were not previously
		 * blocked, but are blocked now.
		 *
		 * In case we block any signal that was not previously blocked
		 * for td, and the process has the signal pending, try to
		 * schedule signal delivery to some thread that does not block
		 * the signal, possibly waking it up.
		 */
		if (p->p_numthreads != 1)
			reschedule_signals(p, new_block, flags);
	}

out:
	if (!(flags & SIGPROCMASK_PROC_LOCKED))
		PROC_UNLOCK(p);
	return (error);
}
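/*
 * Illustrative userland counterpart (editorial addition):
 * kern_sigprocmask() backs sigprocmask(2).  Blocking a signal holds
 * queued instances until it is unblocked again:
 *
 *	sigset_t set, oset;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, &oset);	(SIGUSR1 now held)
 *	...
 *	sigprocmask(SIG_SETMASK, &oset, NULL);	(pending SIGUSR1 delivered)
 */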
&oset : NULL; 1203 if (setp) { 1204 error = copyin(uap->set, setp, sizeof(set)); 1205 if (error) 1206 return (error); 1207 } 1208 error = kern_sigprocmask(td, uap->how, setp, osetp, 0); 1209 if (osetp && !error) { 1210 error = copyout(osetp, uap->oset, sizeof(oset)); 1211 } 1212 return (error); 1213 } 1214 1215 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 1216 #ifndef _SYS_SYSPROTO_H_ 1217 struct osigprocmask_args { 1218 int how; 1219 osigset_t mask; 1220 }; 1221 #endif 1222 int 1223 osigprocmask(struct thread *td, struct osigprocmask_args *uap) 1224 { 1225 sigset_t set, oset; 1226 int error; 1227 1228 OSIG2SIG(uap->mask, set); 1229 error = kern_sigprocmask(td, uap->how, &set, &oset, 1); 1230 SIG2OSIG(oset, td->td_retval[0]); 1231 return (error); 1232 } 1233 #endif /* COMPAT_43 */ 1234 1235 int 1236 sys_sigwait(struct thread *td, struct sigwait_args *uap) 1237 { 1238 ksiginfo_t ksi; 1239 sigset_t set; 1240 int error; 1241 1242 error = copyin(uap->set, &set, sizeof(set)); 1243 if (error) { 1244 td->td_retval[0] = error; 1245 return (0); 1246 } 1247 1248 error = kern_sigtimedwait(td, set, &ksi, NULL); 1249 if (error) { 1250 /* 1251 * sigwait() function shall not return EINTR, but 1252 * the syscall does. Non-ancient libc provides the 1253 * wrapper which hides EINTR. Otherwise, EINTR return 1254 * is used by libthr to handle required cancellation 1255 * point in the sigwait(). 1256 */ 1257 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT) 1258 return (ERESTART); 1259 td->td_retval[0] = error; 1260 return (0); 1261 } 1262 1263 error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo)); 1264 td->td_retval[0] = error; 1265 return (0); 1266 } 1267 1268 int 1269 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap) 1270 { 1271 struct timespec ts; 1272 struct timespec *timeout; 1273 sigset_t set; 1274 ksiginfo_t ksi; 1275 int error; 1276 1277 if (uap->timeout) { 1278 error = copyin(uap->timeout, &ts, sizeof(ts)); 1279 if (error) 1280 return (error); 1281 1282 timeout = &ts; 1283 } else 1284 timeout = NULL; 1285 1286 error = copyin(uap->set, &set, sizeof(set)); 1287 if (error) 1288 return (error); 1289 1290 error = kern_sigtimedwait(td, set, &ksi, timeout); 1291 if (error) 1292 return (error); 1293 1294 if (uap->info) 1295 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t)); 1296 1297 if (error == 0) 1298 td->td_retval[0] = ksi.ksi_signo; 1299 return (error); 1300 } 1301 1302 int 1303 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap) 1304 { 1305 ksiginfo_t ksi; 1306 sigset_t set; 1307 int error; 1308 1309 error = copyin(uap->set, &set, sizeof(set)); 1310 if (error) 1311 return (error); 1312 1313 error = kern_sigtimedwait(td, set, &ksi, NULL); 1314 if (error) 1315 return (error); 1316 1317 if (uap->info) 1318 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t)); 1319 1320 if (error == 0) 1321 td->td_retval[0] = ksi.ksi_signo; 1322 return (error); 1323 } 1324 1325 static void 1326 proc_td_siginfo_capture(struct thread *td, siginfo_t *si) 1327 { 1328 struct thread *thr; 1329 1330 FOREACH_THREAD_IN_PROC(td->td_proc, thr) { 1331 if (thr == td) 1332 thr->td_si = *si; 1333 else 1334 thr->td_si.si_signo = 0; 1335 } 1336 } 1337 1338 int 1339 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi, 1340 struct timespec *timeout) 1341 { 1342 struct sigacts *ps; 1343 sigset_t saved_mask, new_block; 1344 struct proc *p; 1345 int error, sig, timevalid = 0; 1346 sbintime_t sbt, precision, tsbt; 1347 struct timespec ts; 1348 bool traced; 
int
kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
    struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t saved_mask, new_block;
	struct proc *p;
	int error, sig, timevalid = 0;
	sbintime_t sbt, precision, tsbt;
	struct timespec ts;
	bool traced;

	p = td->td_proc;
	error = 0;
	traced = false;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	if (timeout != NULL) {
		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			ts = *timeout;
			if (ts.tv_sec < INT32_MAX / 2) {
				tsbt = tstosbt(ts);
				precision = tsbt;
				precision >>= tc_precexp;
				if (TIMESEL(&sbt, tsbt))
					sbt += tc_tick_sbt;
				sbt += tsbt;
			} else
				precision = sbt = 0;
		}
	} else
		precision = sbt = 0;
	ksiginfo_init(ksi);
	/* Some signals cannot be waited for. */
	SIG_CANTMASK(waitset);
	ps = p->p_sigacts;
	PROC_LOCK(p);
	saved_mask = td->td_sigmask;
	SIGSETNAND(td->td_sigmask, waitset);
	if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
	    !kern_sig_discard_ign) {
		thread_lock(td);
		td->td_flags |= TDF_SIGWAIT;
		thread_unlock(td);
	}
	for (;;) {
		mtx_lock(&ps->ps_mtx);
		sig = cursig(td);
		mtx_unlock(&ps->ps_mtx);
		KASSERT(sig >= 0, ("sig %d", sig));
		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
			    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
				error = 0;
				break;
			}
		}

		if (error != 0)
			break;

		/*
		 * POSIX says this must be checked after looking for pending
		 * signals.
		 */
		if (timeout != NULL && !timevalid) {
			error = EINVAL;
			break;
		}

		if (traced) {
			error = EINTR;
			break;
		}

		error = msleep_sbt(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
		    "sigwait", sbt, precision, C_ABSOLUTE);

		/* The syscalls cannot be restarted. */
		if (error == ERESTART)
			error = EINTR;

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return spurious
		 * EINTR after the wait was done.  Only do this as a
		 * last resort after rechecking for possible queued
		 * signals and expired timeouts.
		 */
		if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
			traced = true;
	}
	thread_lock(td);
	td->td_flags &= ~TDF_SIGWAIT;
	thread_unlock(td);

	new_block = saved_mask;
	SIGSETNAND(new_block, td->td_sigmask);
	td->td_sigmask = saved_mask;
	/*
	 * Fewer signals can be delivered to us, reschedule signal
	 * notification.
	 */
	if (p->p_numthreads != 1)
		reschedule_signals(p, new_block, 0);

	if (error == 0) {
		SDT_PROBE2(proc, , , signal__clear, sig, ksi);

		if (ksi->ksi_code == SI_TIMER)
			itimer_accept(p, ksi->ksi_timerid, ksi);

#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG)) {
			sig_t action;

			mtx_lock(&ps->ps_mtx);
			action = ps->ps_sigact[_SIG_IDX(sig)];
			mtx_unlock(&ps->ps_mtx);
			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
		}
#endif
		if (sig == SIGKILL) {
			proc_td_siginfo_capture(td, &ksi->ksi_info);
			sigexit(td, sig);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sigpending_args {
	sigset_t *set;
};
#endif
int
sys_sigpending(struct thread *td, struct sigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	return (copyout(&pending, uap->set, sizeof(sigset_t)));
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
int
osigpending(struct thread *td, struct osigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	SIG2OSIG(pending, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */
&osa : NULL; 1535 if (nsap) { 1536 error = copyin(uap->nsv, &vec, sizeof(vec)); 1537 if (error) 1538 return (error); 1539 nsap->sa_handler = vec.sv_handler; 1540 OSIG2SIG(vec.sv_mask, nsap->sa_mask); 1541 nsap->sa_flags = vec.sv_flags; 1542 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */ 1543 } 1544 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET); 1545 if (osap && !error) { 1546 vec.sv_handler = osap->sa_handler; 1547 SIG2OSIG(osap->sa_mask, vec.sv_mask); 1548 vec.sv_flags = osap->sa_flags; 1549 vec.sv_flags &= ~SA_NOCLDWAIT; 1550 vec.sv_flags ^= SA_RESTART; 1551 error = copyout(&vec, uap->osv, sizeof(vec)); 1552 } 1553 return (error); 1554 } 1555 1556 #ifndef _SYS_SYSPROTO_H_ 1557 struct osigblock_args { 1558 int mask; 1559 }; 1560 #endif 1561 int 1562 osigblock(struct thread *td, struct osigblock_args *uap) 1563 { 1564 sigset_t set, oset; 1565 1566 OSIG2SIG(uap->mask, set); 1567 kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0); 1568 SIG2OSIG(oset, td->td_retval[0]); 1569 return (0); 1570 } 1571 1572 #ifndef _SYS_SYSPROTO_H_ 1573 struct osigsetmask_args { 1574 int mask; 1575 }; 1576 #endif 1577 int 1578 osigsetmask(struct thread *td, struct osigsetmask_args *uap) 1579 { 1580 sigset_t set, oset; 1581 1582 OSIG2SIG(uap->mask, set); 1583 kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0); 1584 SIG2OSIG(oset, td->td_retval[0]); 1585 return (0); 1586 } 1587 #endif /* COMPAT_43 */ 1588 1589 /* 1590 * Suspend calling thread until signal, providing mask to be set in the 1591 * meantime. 1592 */ 1593 #ifndef _SYS_SYSPROTO_H_ 1594 struct sigsuspend_args { 1595 const sigset_t *sigmask; 1596 }; 1597 #endif 1598 /* ARGSUSED */ 1599 int 1600 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap) 1601 { 1602 sigset_t mask; 1603 int error; 1604 1605 error = copyin(uap->sigmask, &mask, sizeof(mask)); 1606 if (error) 1607 return (error); 1608 return (kern_sigsuspend(td, mask)); 1609 } 1610 1611 int 1612 kern_sigsuspend(struct thread *td, sigset_t mask) 1613 { 1614 struct proc *p = td->td_proc; 1615 int has_sig, sig; 1616 1617 /* Ensure the sigfastblock value is up to date. */ 1618 sigfastblock_fetch(td); 1619 1620 /* 1621 * When returning from sigsuspend, we want 1622 * the old mask to be restored after the 1623 * signal handler has finished. Thus, we 1624 * save it here and mark the sigacts structure 1625 * to indicate this. 1626 */ 1627 PROC_LOCK(p); 1628 kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask, 1629 SIGPROCMASK_PROC_LOCKED); 1630 td->td_pflags |= TDP_OLDMASK; 1631 ast_sched(td, TDA_SIGSUSPEND); 1632 1633 /* 1634 * Process signals now. Otherwise, we can get spurious wakeup 1635 * due to signal entered process queue, but delivered to other 1636 * thread. But sigsuspend should return only on signal 1637 * delivery. 1638 */ 1639 (p->p_sysent->sv_set_syscall_retval)(td, EINTR); 1640 for (has_sig = 0; !has_sig;) { 1641 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH, 1642 "sigsusp", 0) == 0) 1643 /* void */; 1644 thread_suspend_check(0); 1645 mtx_lock(&p->p_sigacts->ps_mtx); 1646 while ((sig = cursig(td)) != 0) { 1647 KASSERT(sig >= 0, ("sig %d", sig)); 1648 has_sig += postsig(sig); 1649 } 1650 mtx_unlock(&p->p_sigacts->ps_mtx); 1651 1652 /* 1653 * If PTRACE_SCE or PTRACE_SCX were set after 1654 * userspace entered the syscall, return spurious 1655 * EINTR. 
		if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
			has_sig += 1;
	}
	PROC_UNLOCK(p);
	td->td_errno = EINTR;
	td->td_pflags |= TDP_NERRNO;
	return (EJUSTRETURN);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 * convention: libc stub passes mask, not pointer, to save a copyin.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigsuspend_args {
	osigset_t mask;
};
#endif
/* ARGSUSED */
int
osigsuspend(struct thread *td, struct osigsuspend_args *uap)
{
	sigset_t mask;

	OSIG2SIG(uap->mask, mask);
	return (kern_sigsuspend(td, mask));
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct	sigstack *nss;
	struct	sigstack *oss;
};
#endif
/* ARGSUSED */
int
osigstack(struct thread *td, struct osigstack_args *uap)
{
	struct sigstack nss, oss;
	int error = 0;

	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	oss.ss_sp = td->td_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		td->td_sigstk.ss_sp = nss.ss_sp;
		td->td_sigstk.ss_size = 0;
		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		td->td_pflags |= TDP_ALTSTACK;
	}
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t	*ss;
	stack_t	*oss;
};
#endif
/* ARGSUSED */
int
sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}
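/*
 * Illustrative userland counterpart (editorial addition): installing an
 * alternate stack so a handler registered with SA_ONSTACK can run even
 * after the main stack overflows:
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	if (sigaltstack(&ss, NULL) == -1)
 *		err(1, "sigaltstack");
 *
 * kern_sigaltstack() below refuses changes while running on the
 * alternate stack (EPERM) and undersized stacks (ENOMEM).
 */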
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
				return (ENOMEM);

			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}

struct killpg1_ctx {
	struct thread *td;
	ksiginfo_t *ksi;
	int sig;
	bool sent;
	bool found;
	int ret;
};

static void
killpg1_sendsig_locked(struct proc *p, struct killpg1_ctx *arg)
{
	int err;

	err = p_cansignal(arg->td, p, arg->sig);
	if (err == 0 && arg->sig != 0)
		pksignal(p, arg->sig, arg->ksi);
	if (err != ESRCH)
		arg->found = true;
	if (err == 0)
		arg->sent = true;
	else if (arg->ret == 0 && err != ESRCH && err != EPERM)
		arg->ret = err;
}

static void
killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
{
	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
	    (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
		return;

	PROC_LOCK(p);
	killpg1_sendsig_locked(p, arg);
	PROC_UNLOCK(p);
}

static void
kill_processes_prison_cb(struct proc *p, void *arg)
{
	struct killpg1_ctx *ctx = arg;

	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
	    (p == ctx->td->td_proc) || p->p_state == PRS_NEW)
		return;

	killpg1_sendsig_locked(p, ctx);
}

/*
 * Common code for kill process group/broadcast kill.
 * td is the calling thread, as usual.
 */
static int
killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
{
	struct proc *p;
	struct pgrp *pgrp;
	struct killpg1_ctx arg;

	arg.td = td;
	arg.ksi = ksi;
	arg.sig = sig;
	arg.sent = false;
	arg.found = false;
	arg.ret = 0;
	if (all) {
		/*
		 * broadcast
		 */
		prison_proc_iterate(td->td_ucred->cr_prison,
		    kill_processes_prison_cb, &arg);
	} else {
again:
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		if (!sx_try_xlock(&pgrp->pg_killsx)) {
			PGRP_UNLOCK(pgrp);
			sx_xlock(&pgrp->pg_killsx);
			sx_xunlock(&pgrp->pg_killsx);
			goto again;
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			killpg1_sendsig(p, false, &arg);
		}
		PGRP_UNLOCK(pgrp);
		sx_xunlock(&pgrp->pg_killsx);
	}
	MPASS(arg.ret != 0 || arg.found || !arg.sent);
	if (arg.ret == 0 && !arg.sent)
		arg.ret = arg.found ? EPERM : ESRCH;
	return (arg.ret);
}

#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int	pid;
	int	signum;
};
#endif
/* ARGSUSED */
int
sys_kill(struct thread *td, struct kill_args *uap)
{
	return (kern_kill(td, uap->pid, uap->signum));
}
int
kern_kill(struct thread *td, pid_t pid, int signum)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	/*
	 * A process in capability mode can send signals only to itself.
	 * The main rationale behind this is that abort(3) is implemented as
	 * kill(getpid(), SIGABRT).
	 */
	if (pid != td->td_proc->p_pid) {
		if (CAP_TRACING(td))
			ktrcapfail(CAPFAIL_SIGNAL, &signum);
		if (IN_CAPABILITY_MODE(td))
			return (ECAPMODE);
	}

	AUDIT_ARG_SIGNUM(signum);
	AUDIT_ARG_PID(pid);
	if ((u_int)signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;

	if (pid > 0) {
		/* kill single process */
		if ((p = pfind_any(pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, signum);
		if (error == 0 && signum)
			pksignal(p, signum, &ksi);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(td, signum, 0, 1, &ksi));
	case 0:			/* signal own process group */
		return (killpg1(td, signum, 0, 0, &ksi));
	default:		/* negative explicit process group */
		return (killpg1(td, signum, -pid, 0, &ksi));
	}
	/* NOTREACHED */
}

int
sys_pdkill(struct thread *td, struct pdkill_args *uap)
{
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_FD(uap->fd);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
	if (error)
		return (error);
	AUDIT_ARG_PROCESS(p);
	error = p_cansignal(td, p, uap->signum);
	if (error == 0 && uap->signum)
		kern_psignal(p, uap->signum);
	PROC_UNLOCK(p);
	return (error);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct okillpg_args {
	int	pgid;
	int	signum;
};
#endif
/* ARGSUSED */
int
okillpg(struct thread *td, struct okillpg_args *uap)
{
	ksiginfo_t ksi;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_PID(uap->pgid);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigqueue_args {
	pid_t pid;
	int signum;
	/* union sigval */ void *value;
};
#endif
int
sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
{
	union sigval sv;

	sv.sival_ptr = uap->value;

	return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
}
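/*
 * Illustrative userland counterpart (editorial addition): sigqueue(2)
 * pairs a signal with a value the receiver observes in si_value of an
 * SA_SIGINFO handler:
 *
 *	union sigval sv;
 *
 *	sv.sival_int = 42;
 *	if (sigqueue(pid, SIGUSR1, sv) == -1)
 *		err(1, "sigqueue");
 */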
int
kern_sigqueue(struct thread *td, pid_t pid, int signumf, union sigval *value)
{
	ksiginfo_t ksi;
	struct proc *p;
	struct thread *td2;
	u_int signum;
	int error;

	signum = signumf & ~__SIGQUEUE_TID;
	if (signum > _SIG_MAXSIG)
		return (EINVAL);

	/*
	 * The specification says sigqueue() can only send a signal to
	 * a single process.
	 */
	if (pid <= 0)
		return (EINVAL);

	if ((signumf & __SIGQUEUE_TID) == 0) {
		if ((p = pfind_any(pid)) == NULL)
			return (ESRCH);
		td2 = NULL;
	} else {
		p = td->td_proc;
		td2 = tdfind((lwpid_t)pid, p->p_pid);
		if (td2 == NULL)
			return (ESRCH);
	}

	error = p_cansignal(td, p, signum);
	if (error == 0 && signum != 0) {
		ksiginfo_init(&ksi);
		ksi.ksi_flags = KSI_SIGQ;
		ksi.ksi_signo = signum;
		ksi.ksi_code = SI_QUEUE;
		ksi.ksi_pid = td->td_proc->p_pid;
		ksi.ksi_uid = td->td_ucred->cr_ruid;
		ksi.ksi_value = *value;
		error = tdsendsignal(p, td2, ksi.ksi_signo, &ksi);
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
{
	struct proc *p;

	if (pgrp) {
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    (checkctty == 0 || p->p_flag & P_CONTROLT))
				pksignal(p, sig, ksi);
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Recalculate the signal mask and reset the signal disposition after
 * usermode frame for delivery is formed.  Should be called after
 * mach-specific routine, because sysent->sv_sendsig() needs correct
 * ps_siginfo and signal mask.
 */
static void
postsig_done(int sig, struct thread *td, struct sigacts *ps)
{
	sigset_t mask;

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	td->td_ru.ru_nsignals++;
	mask = ps->ps_catchmask[_SIG_IDX(sig)];
	if (!SIGISMEMBER(ps->ps_signodefer, sig))
		SIGADDSET(mask, sig);
	kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
	    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
	if (SIGISMEMBER(ps->ps_sigreset, sig))
		sigdflt(ps, sig);
}

/*
 * Send a signal caused by a trap to the current thread.  If it will be
 * caught immediately, deliver it with correct code.  Otherwise, post it
 * normally.
 */
void
trapsignal(struct thread *td, ksiginfo_t *ksi)
{
	struct sigacts *ps;
	struct proc *p;
	sigset_t sigmask;
	int sig;

	p = td->td_proc;
	sig = ksi->ksi_signo;
	KASSERT(_SIG_VALID(sig), ("invalid signal"));

	sigfastblock_fetch(td);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sigmask = td->td_sigmask;
	if (td->td_sigblock_val != 0)
		SIGSETOR(sigmask, fastblock_mask);
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(sigmask, sig)) {
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, ksi->ksi_code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
		    ksi, &td->td_sigmask);
		postsig_done(sig, td, ps);
		mtx_unlock(&ps->ps_mtx);
	} else {
		/*
		 * Avoid a possible infinite loop if the thread is
		 * masking the signal or the process is ignoring it.
		 */
2149 */ 2150 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) || 2151 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) { 2152 SIGDELSET(td->td_sigmask, sig); 2153 SIGDELSET(ps->ps_sigcatch, sig); 2154 SIGDELSET(ps->ps_sigignore, sig); 2155 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 2156 td->td_pflags &= ~TDP_SIGFASTBLOCK; 2157 td->td_sigblock_val = 0; 2158 } 2159 mtx_unlock(&ps->ps_mtx); 2160 p->p_sig = sig; /* XXX to verify code */ 2161 tdsendsignal(p, td, sig, ksi); 2162 } 2163 PROC_UNLOCK(p); 2164 } 2165 2166 static struct thread * 2167 sigtd(struct proc *p, int sig, bool fast_sigblock) 2168 { 2169 struct thread *td, *signal_td; 2170 2171 PROC_LOCK_ASSERT(p, MA_OWNED); 2172 MPASS(!fast_sigblock || p == curproc); 2173 2174 /* 2175 * Check if current thread can handle the signal without 2176 * switching context to another thread. 2177 */ 2178 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) && 2179 (!fast_sigblock || curthread->td_sigblock_val == 0)) 2180 return (curthread); 2181 2182 /* Find a non-stopped thread that does not mask the signal. */ 2183 signal_td = NULL; 2184 FOREACH_THREAD_IN_PROC(p, td) { 2185 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock || 2186 td != curthread || td->td_sigblock_val == 0) && 2187 (td->td_flags & TDF_BOUNDARY) == 0) { 2188 signal_td = td; 2189 break; 2190 } 2191 } 2192 /* Select random (first) thread if no better match was found. */ 2193 if (signal_td == NULL) 2194 signal_td = FIRST_THREAD_IN_PROC(p); 2195 return (signal_td); 2196 } 2197 2198 /* 2199 * Send the signal to the process. If the signal has an action, the action 2200 * is usually performed by the target process rather than the caller; we add 2201 * the signal to the set of pending signals for the process. 2202 * 2203 * Exceptions: 2204 * o When a stop signal is sent to a sleeping process that takes the 2205 * default action, the process is stopped without awakening it. 2206 * o SIGCONT restarts stopped processes (or puts them back to sleep) 2207 * regardless of the signal action (eg, blocked or ignored). 2208 * 2209 * Other ignored signals are discarded immediately. 2210 * 2211 * NB: This function may be entered from the debugger via the "kill" DDB 2212 * command. There is little that can be done to mitigate the possibly messy 2213 * side effects of this unwise possibility. 2214 */ 2215 void 2216 kern_psignal(struct proc *p, int sig) 2217 { 2218 ksiginfo_t ksi; 2219 2220 ksiginfo_init(&ksi); 2221 ksi.ksi_signo = sig; 2222 ksi.ksi_code = SI_KERNEL; 2223 (void) tdsendsignal(p, NULL, sig, &ksi); 2224 } 2225 2226 int 2227 pksignal(struct proc *p, int sig, ksiginfo_t *ksi) 2228 { 2229 2230 return (tdsendsignal(p, NULL, sig, ksi)); 2231 } 2232 2233 /* Utility function for finding a thread to send signal event to. 
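 * For SIGEV_THREAD_ID, the thread is looked up with tdfind(),
 * which returns with the process locked; otherwise *ttd is set to
 * NULL and the process is locked here.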
*/ 2234 int 2235 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd) 2236 { 2237 struct thread *td; 2238 2239 if (sigev->sigev_notify == SIGEV_THREAD_ID) { 2240 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid); 2241 if (td == NULL) 2242 return (ESRCH); 2243 *ttd = td; 2244 } else { 2245 *ttd = NULL; 2246 PROC_LOCK(p); 2247 } 2248 return (0); 2249 } 2250 2251 void 2252 tdsignal(struct thread *td, int sig) 2253 { 2254 ksiginfo_t ksi; 2255 2256 ksiginfo_init(&ksi); 2257 ksi.ksi_signo = sig; 2258 ksi.ksi_code = SI_KERNEL; 2259 (void) tdsendsignal(td->td_proc, td, sig, &ksi); 2260 } 2261 2262 void 2263 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi) 2264 { 2265 2266 (void) tdsendsignal(td->td_proc, td, sig, ksi); 2267 } 2268 2269 static void 2270 sig_sleepq_abort(struct thread *td, int intrval) 2271 { 2272 THREAD_LOCK_ASSERT(td, MA_OWNED); 2273 2274 if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0) 2275 thread_unlock(td); 2276 else 2277 sleepq_abort(td, intrval); 2278 } 2279 2280 int 2281 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) 2282 { 2283 sig_t action; 2284 sigqueue_t *sigqueue; 2285 struct sigacts *ps; 2286 int intrval, prop, ret; 2287 2288 MPASS(td == NULL || p == td->td_proc); 2289 PROC_LOCK_ASSERT(p, MA_OWNED); 2290 2291 if (!_SIG_VALID(sig)) 2292 panic("%s(): invalid signal %d", __func__, sig); 2293 2294 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__)); 2295 2296 /* 2297 * IEEE Std 1003.1-2001: return success when killing a zombie. 2298 */ 2299 if (p->p_state == PRS_ZOMBIE) { 2300 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2301 ksiginfo_tryfree(ksi); 2302 return (0); 2303 } 2304 2305 ps = p->p_sigacts; 2306 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig); 2307 prop = sigprop(sig); 2308 2309 if (td == NULL) { 2310 td = sigtd(p, sig, false); 2311 sigqueue = &p->p_sigqueue; 2312 } else 2313 sigqueue = &td->td_sigqueue; 2314 2315 SDT_PROBE3(proc, , , signal__send, td, p, sig); 2316 2317 /* 2318 * If the signal is being ignored, then we forget about it 2319 * immediately, except when the target process executes 2320 * sigwait(). (Note: we don't set SIGCONT in ps_sigignore, 2321 * and if it is set to SIG_IGN, action will be SIG_DFL here.) 
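 * The kern.sig_discard_ign sysctl and the SV_SIG_DISCIGN sysent
 * flag select between discarding such a signal here and queueing
 * it to the target anyway.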
2322 */ 2323 mtx_lock(&ps->ps_mtx); 2324 if (SIGISMEMBER(ps->ps_sigignore, sig)) { 2325 if (kern_sig_discard_ign && 2326 (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) { 2327 SDT_PROBE3(proc, , , signal__discard, td, p, sig); 2328 2329 mtx_unlock(&ps->ps_mtx); 2330 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2331 ksiginfo_tryfree(ksi); 2332 return (0); 2333 } else { 2334 action = SIG_CATCH; 2335 intrval = 0; 2336 } 2337 } else { 2338 if (SIGISMEMBER(td->td_sigmask, sig)) 2339 action = SIG_HOLD; 2340 else if (SIGISMEMBER(ps->ps_sigcatch, sig)) 2341 action = SIG_CATCH; 2342 else 2343 action = SIG_DFL; 2344 if (SIGISMEMBER(ps->ps_sigintr, sig)) 2345 intrval = EINTR; 2346 else 2347 intrval = ERESTART; 2348 } 2349 mtx_unlock(&ps->ps_mtx); 2350 2351 if (prop & SIGPROP_CONT) 2352 sigqueue_delete_stopmask_proc(p); 2353 else if (prop & SIGPROP_STOP) { 2354 if (pt_attach_transparent && 2355 (p->p_flag & P_TRACED) != 0 && 2356 (p->p_flag2 & P2_PTRACE_FSTP) != 0) { 2357 PROC_SLOCK(p); 2358 sig_handle_first_stop(NULL, p, sig); 2359 PROC_SUNLOCK(p); 2360 return (0); 2361 } 2362 2363 /* 2364 * If sending a tty stop signal to a member of an orphaned 2365 * process group, discard the signal here if the action 2366 * is default; don't stop the process below if sleeping, 2367 * and don't clear any pending SIGCONT. 2368 */ 2369 if ((prop & SIGPROP_TTYSTOP) != 0 && 2370 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 && 2371 action == SIG_DFL) { 2372 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2373 ksiginfo_tryfree(ksi); 2374 return (0); 2375 } 2376 sigqueue_delete_proc(p, SIGCONT); 2377 if (p->p_flag & P_CONTINUED) { 2378 p->p_flag &= ~P_CONTINUED; 2379 PROC_LOCK(p->p_pptr); 2380 sigqueue_take(p->p_ksi); 2381 PROC_UNLOCK(p->p_pptr); 2382 } 2383 } 2384 2385 ret = sigqueue_add(sigqueue, sig, ksi); 2386 if (ret != 0) 2387 return (ret); 2388 signotify(td); 2389 /* 2390 * Defer further processing for signals which are held, 2391 * except that stopped processes must be continued by SIGCONT. 2392 */ 2393 if (action == SIG_HOLD && 2394 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG))) 2395 return (0); 2396 2397 /* 2398 * Some signals have a process-wide effect and a per-thread 2399 * component. Most processing occurs when the process next 2400 * tries to cross the user boundary, however there are some 2401 * times when processing needs to be done immediately, such as 2402 * waking up threads so that they can cross the user boundary. 2403 * We try to do the per-process part here. 2404 */ 2405 if (P_SHOULDSTOP(p)) { 2406 KASSERT(!(p->p_flag & P_WEXIT), 2407 ("signal to stopped but exiting process")); 2408 if (sig == SIGKILL) { 2409 /* 2410 * If traced process is already stopped, 2411 * then no further action is necessary. 2412 */ 2413 if (p->p_flag & P_TRACED) 2414 return (0); 2415 /* 2416 * SIGKILL sets process running. 2417 * It will die elsewhere. 2418 * All threads must be restarted. 2419 */ 2420 p->p_flag &= ~P_STOPPED_SIG; 2421 goto runfast; 2422 } 2423 2424 if (prop & SIGPROP_CONT) { 2425 /* 2426 * If traced process is already stopped, 2427 * then no further action is necessary. 2428 */ 2429 if (p->p_flag & P_TRACED) 2430 return (0); 2431 /* 2432 * If SIGCONT is default (or ignored), we continue the 2433 * process but don't leave the signal in sigqueue as 2434 * it has no further action. If SIGCONT is held, we 2435 * continue the process and leave the signal in 2436 * sigqueue. If the process catches SIGCONT, let it 2437 * handle the signal itself. 
If it isn't waiting on
2438 * an event, it goes back to the run state.
2439 * Otherwise, the process goes back to the sleep state.
2440 */
2441 p->p_flag &= ~P_STOPPED_SIG;
2442 PROC_SLOCK(p);
2443 if (p->p_numthreads == p->p_suspcount) {
2444 PROC_SUNLOCK(p);
2445 PROC_LOCK(p->p_pptr);
2446 childproc_continued(p);
2447 PROC_UNLOCK(p->p_pptr);
2448 PROC_SLOCK(p);
2449 }
2450 if (action == SIG_DFL) {
2451 thread_unsuspend(p);
2452 PROC_SUNLOCK(p);
2453 sigqueue_delete(sigqueue, sig);
2454 goto out_cont;
2455 }
2456 if (action == SIG_CATCH) {
2457 /*
2458 * The process wants to catch it so it needs
2459 * to run at least one thread, but which one?
2460 */
2461 PROC_SUNLOCK(p);
2462 goto runfast;
2463 }
2464 /*
2465 * The signal is not ignored or caught.
2466 */
2467 thread_unsuspend(p);
2468 PROC_SUNLOCK(p);
2469 goto out_cont;
2470 }
2471
2472 if (prop & SIGPROP_STOP) {
2473 /*
2474 * If traced process is already stopped,
2475 * then no further action is necessary.
2476 */
2477 if (p->p_flag & P_TRACED)
2478 return (0);
2479 /*
2480 * Already stopped, don't need to stop again.
2481 * (If we did, the shell could get confused.)
2482 * Just make sure the signal STOP bit is set.
2483 */
2484 p->p_flag |= P_STOPPED_SIG;
2485 sigqueue_delete(sigqueue, sig);
2486 return (0);
2487 }
2488
2489 /*
2490 * All other kinds of signals:
2491 * If a thread is sleeping interruptibly, simulate a
2492 * wakeup so that when it is continued it will be made
2493 * runnable and can look at the signal. However, don't make
2494 * the PROCESS runnable; leave it stopped.
2495 * It may run a bit until it hits a thread_suspend_check().
2496 */
2497 PROC_SLOCK(p);
2498 thread_lock(td);
2499 if (TD_CAN_ABORT(td))
2500 sig_sleepq_abort(td, intrval);
2501 else
2502 thread_unlock(td);
2503 PROC_SUNLOCK(p);
2504 return (0);
2505 /*
2506 * Mutexes are short lived. Threads waiting on them will
2507 * hit thread_suspend_check() soon.
2508 */
2509 } else if (p->p_state == PRS_NORMAL) {
2510 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2511 tdsigwakeup(td, sig, action, intrval);
2512 return (0);
2513 }
2514
2515 MPASS(action == SIG_DFL);
2516
2517 if (prop & SIGPROP_STOP) {
2518 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2519 return (0);
2520 p->p_flag |= P_STOPPED_SIG;
2521 p->p_xsig = sig;
2522 PROC_SLOCK(p);
2523 sig_suspend_threads(td, p);
2524 if (p->p_numthreads == p->p_suspcount) {
2525 /*
2526 * Only a thread sending a signal to another
2527 * process can reach here: when a thread sends a
2528 * signal to its own process, p_numthreads can
2529 * never equal p_suspcount, because the sending
2530 * thread does not suspend itself here.
2531 */
2532 thread_stopped(p);
2533 PROC_SUNLOCK(p);
2534 sigqueue_delete_proc(p, p->p_xsig);
2535 } else
2536 PROC_SUNLOCK(p);
2537 return (0);
2538 }
2539 } else {
2540 /* Not in "NORMAL" state. Discard the signal. */
2541 sigqueue_delete(sigqueue, sig);
2542 return (0);
2543 }
2544
2545 /*
2546 * The process is not stopped so we need to apply the signal to all the
2547 * running threads.
2548 */
2549 runfast:
2550 tdsigwakeup(td, sig, action, intrval);
2551 PROC_SLOCK(p);
2552 thread_unsuspend(p);
2553 PROC_SUNLOCK(p);
2554 out_cont:
2555 itimer_proc_continue(p);
2556 kqtimer_proc_continue(p);
2557
2558 return (0);
2559 }
2560
2561 /*
2562 * The force of a signal has been directed against a single
2563 * thread. We need to see what we can do about knocking it
2564 * out of any sleep it may be in, etc.
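 * The chosen thread's priority may also be boosted so that a
 * killing signal is acted upon promptly.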
2565 */
2566 static void
2567 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2568 {
2569 struct proc *p = td->td_proc;
2570 int prop;
2571
2572 PROC_LOCK_ASSERT(p, MA_OWNED);
2573 prop = sigprop(sig);
2574
2575 PROC_SLOCK(p);
2576 thread_lock(td);
2577 /*
2578 * Bring the priority of a thread up if we want it to get
2579 * killed in this lifetime. Be careful to avoid bumping the
2580 * priority of the idle thread, since we still allow
2581 * signalling kernel processes.
2582 */
2583 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2584 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2585 sched_prio(td, PUSER);
2586 if (TD_ON_SLEEPQ(td)) {
2587 /*
2588 * If the thread is sleeping uninterruptibly,
2589 * we can't interrupt the sleep... the signal will
2590 * be noticed when the process returns through
2591 * trap() or syscall().
2592 */
2593 if ((td->td_flags & TDF_SINTR) == 0)
2594 goto out;
2595 /*
2596 * If SIGCONT is default (or ignored) and the process
2597 * is asleep, we are finished; the process should not
2598 * be awakened.
2599 */
2600 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2601 thread_unlock(td);
2602 PROC_SUNLOCK(p);
2603 sigqueue_delete(&p->p_sigqueue, sig);
2604 /*
2605 * It may be on either list in this state.
2606 * Remove from both for now.
2607 */
2608 sigqueue_delete(&td->td_sigqueue, sig);
2609 return;
2610 }
2611
2612 /*
2613 * Don't awaken a sleeping thread for SIGSTOP if the
2614 * STOP signal is deferred.
2615 */
2616 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2617 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2618 goto out;
2619
2620 /*
2621 * Give low-priority threads a better chance to run.
2622 */
2623 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2624 sched_prio(td, PUSER);
2625
2626 sig_sleepq_abort(td, intrval);
2627 PROC_SUNLOCK(p);
2628 return;
2629 }
2630
2631 /*
2632 * Other states do nothing with the signal immediately,
2633 * other than kicking ourselves if we are running.
2634 * It will either never be noticed, or noticed very soon.
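 * On SMP, a thread running on another CPU is nudged with an IPI
 * via forward_signal() so that it re-checks for pending signals.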
2635 */ 2636 #ifdef SMP 2637 if (TD_IS_RUNNING(td) && td != curthread) 2638 forward_signal(td); 2639 #endif 2640 2641 out: 2642 PROC_SUNLOCK(p); 2643 thread_unlock(td); 2644 } 2645 2646 static void 2647 ptrace_coredumpreq(struct thread *td, struct proc *p, 2648 struct thr_coredump_req *tcq) 2649 { 2650 struct coredump_vnode_ctx wctx; 2651 struct coredump_writer cdw; 2652 void *rl_cookie; 2653 2654 if (p->p_sysent->sv_coredump == NULL) { 2655 tcq->tc_error = ENOSYS; 2656 return; 2657 } 2658 2659 wctx.vp = tcq->tc_vp; 2660 wctx.fcred = NOCRED; 2661 2662 cdw.ctx = &wctx; 2663 cdw.write_fn = core_vn_write; 2664 cdw.extend_fn = core_vn_extend; 2665 2666 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX); 2667 tcq->tc_error = p->p_sysent->sv_coredump(td, &cdw, 2668 tcq->tc_limit, tcq->tc_flags); 2669 vn_rangelock_unlock(tcq->tc_vp, rl_cookie); 2670 } 2671 2672 static void 2673 ptrace_syscallreq(struct thread *td, struct proc *p, 2674 struct thr_syscall_req *tsr) 2675 { 2676 struct sysentvec *sv; 2677 struct sysent *se; 2678 register_t rv_saved[2]; 2679 int error, nerror; 2680 int sc; 2681 bool audited, sy_thr_static; 2682 2683 sv = p->p_sysent; 2684 if (sv->sv_table == NULL || sv->sv_size < tsr->ts_sa.code) { 2685 tsr->ts_ret.sr_error = ENOSYS; 2686 return; 2687 } 2688 2689 sc = tsr->ts_sa.code; 2690 if (sc == SYS_syscall || sc == SYS___syscall) { 2691 sc = tsr->ts_sa.args[0]; 2692 memmove(&tsr->ts_sa.args[0], &tsr->ts_sa.args[1], 2693 sizeof(register_t) * (tsr->ts_nargs - 1)); 2694 } 2695 2696 tsr->ts_sa.callp = se = &sv->sv_table[sc]; 2697 2698 VM_CNT_INC(v_syscall); 2699 td->td_pticks = 0; 2700 if (__predict_false(td->td_cowgen != atomic_load_int( 2701 &td->td_proc->p_cowgen))) 2702 thread_cow_update(td); 2703 2704 td->td_sa = tsr->ts_sa; 2705 2706 #ifdef CAPABILITY_MODE 2707 if ((se->sy_flags & SYF_CAPENABLED) == 0) { 2708 if (CAP_TRACING(td)) 2709 ktrcapfail(CAPFAIL_SYSCALL, NULL); 2710 if (IN_CAPABILITY_MODE(td)) { 2711 tsr->ts_ret.sr_error = ECAPMODE; 2712 return; 2713 } 2714 } 2715 #endif 2716 2717 sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0; 2718 audited = AUDIT_SYSCALL_ENTER(sc, td) != 0; 2719 2720 if (!sy_thr_static) { 2721 error = syscall_thread_enter(td, &se); 2722 sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0; 2723 if (error != 0) { 2724 tsr->ts_ret.sr_error = error; 2725 return; 2726 } 2727 } 2728 2729 rv_saved[0] = td->td_retval[0]; 2730 rv_saved[1] = td->td_retval[1]; 2731 nerror = td->td_errno; 2732 td->td_retval[0] = 0; 2733 td->td_retval[1] = 0; 2734 2735 #ifdef KDTRACE_HOOKS 2736 if (se->sy_entry != 0) 2737 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_ENTRY, 0); 2738 #endif 2739 tsr->ts_ret.sr_error = se->sy_call(td, tsr->ts_sa.args); 2740 #ifdef KDTRACE_HOOKS 2741 if (se->sy_return != 0) 2742 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_RETURN, 2743 tsr->ts_ret.sr_error != 0 ? 
-1 : td->td_retval[0]);
2744 #endif
2745
2746 tsr->ts_ret.sr_retval[0] = td->td_retval[0];
2747 tsr->ts_ret.sr_retval[1] = td->td_retval[1];
2748 td->td_retval[0] = rv_saved[0];
2749 td->td_retval[1] = rv_saved[1];
2750 td->td_errno = nerror;
2751
2752 if (audited)
2753 AUDIT_SYSCALL_EXIT(error, td);
2754 if (!sy_thr_static)
2755 syscall_thread_exit(td, se);
2756 }
2757
2758 static void
2759 ptrace_remotereq(struct thread *td, int flag)
2760 {
2761 struct proc *p;
2762
2763 MPASS(td == curthread);
2764 p = td->td_proc;
2765 PROC_LOCK_ASSERT(p, MA_OWNED);
2766 if ((td->td_dbgflags & flag) == 0)
2767 return;
2768 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped"));
2769 KASSERT(td->td_remotereq != NULL, ("td_remotereq is NULL"));
2770
2771 PROC_UNLOCK(p);
2772 switch (flag) {
2773 case TDB_COREDUMPREQ:
2774 ptrace_coredumpreq(td, p, td->td_remotereq);
2775 break;
2776 case TDB_SCREMOTEREQ:
2777 ptrace_syscallreq(td, p, td->td_remotereq);
2778 break;
2779 default:
2780 __unreachable();
2781 }
2782 PROC_LOCK(p);
2783
2784 MPASS((td->td_dbgflags & flag) != 0);
2785 td->td_dbgflags &= ~flag;
2786 td->td_remotereq = NULL;
2787 wakeup(p);
2788 }
2789
2790 /*
2791 * Suspend threads of the process p, either by directly setting the
2792 * inhibitor for a thread sleeping interruptibly, or by making the
2793 * thread suspend at the userspace boundary by scheduling a suspend AST.
2794 *
2795 * Returns true if some threads were suspended directly from the
2796 * sleeping state, and false if all threads are forced to process the AST.
2797 */
2798 static bool
2799 sig_suspend_threads(struct thread *td, struct proc *p)
2800 {
2801 struct thread *td2;
2802 bool res;
2803
2804 PROC_LOCK_ASSERT(p, MA_OWNED);
2805 PROC_SLOCK_ASSERT(p, MA_OWNED);
2806
2807 res = false;
2808 FOREACH_THREAD_IN_PROC(p, td2) {
2809 thread_lock(td2);
2810 ast_sched_locked(td2, TDA_SUSPEND);
2811 if (TD_IS_SLEEPING(td2) && (td2->td_flags & TDF_SINTR) != 0) {
2812 if (td2->td_flags & TDF_SBDRY) {
2813 /*
2814 * Once a thread is asleep with
2815 * TDF_SBDRY and without TDF_SERESTART
2816 * or TDF_SEINTR set, it should never
2817 * become suspended due to this check.
2818 */
2819 KASSERT(!TD_IS_SUSPENDED(td2),
2820 ("thread with deferred stops suspended"));
2821 if (TD_SBDRY_INTR(td2)) {
2822 sleepq_abort(td2, TD_SBDRY_ERRNO(td2));
2823 continue;
2824 }
2825 } else if (!TD_IS_SUSPENDED(td2)) {
2826 thread_suspend_one(td2);
2827 res = true;
2828 }
2829 } else if (!TD_IS_SUSPENDED(td2)) {
2830 #ifdef SMP
2831 if (TD_IS_RUNNING(td2) && td2 != td)
2832 forward_signal(td2);
2833 #endif
2834 }
2835 thread_unlock(td2);
2836 }
2837 return (res);
2838 }
2839
2840 static void
2841 sig_handle_first_stop(struct thread *td, struct proc *p, int sig)
2842 {
2843 if (td != NULL && (td->td_dbgflags & TDB_FSTP) == 0 &&
2844 ((p->p_flag2 & P2_PTRACE_FSTP) != 0 || p->p_xthread != NULL))
2845 return;
2846
2847 p->p_xsig = sig;
2848 p->p_xthread = td;
2849
2850 /*
2851 * If we are on the sleepqueue already, let the sleepqueue
2852 * code decide if it needs to go to sleep after the attach.
2853 */
2854 if (td != NULL && td->td_wchan == NULL)
2855 td->td_dbgflags &= ~TDB_FSTP;
2856
2857 p->p_flag2 &= ~P2_PTRACE_FSTP;
2858 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2859 if (sig_suspend_threads(td, p) && td == NULL)
2860 thread_stopped(p);
2861 }
2862
2863 /*
2864 * Stop the process for an event deemed interesting to the debugger. If si is
2865 * non-NULL, this is a signal exchange; the new signal requested by the
2866 * debugger will be returned for handling.
If si is NULL, this is some other
2867 * type of interesting event. The debugger may request a signal be delivered in
2868 * that case as well; however, it will be deferred until it can be handled.
2869 */
2870 int
2871 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2872 {
2873 struct proc *p = td->td_proc;
2874 struct thread *td2;
2875 ksiginfo_t ksi;
2876
2877 PROC_LOCK_ASSERT(p, MA_OWNED);
2878 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2879 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2880 &p->p_mtx.lock_object, "Stopping for traced signal");
2881
2882 td->td_xsig = sig;
2883
2884 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2885 td->td_dbgflags |= TDB_XSIG;
2886 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2887 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2888 PROC_SLOCK(p);
2889 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2890 if (P_KILLED(p)) {
2891 /*
2892 * Ensure that, if we've been PT_KILLed, the
2893 * exit status reflects that. Another thread
2894 * may also be in ptracestop(), having just
2895 * received the SIGKILL, but this thread was
2896 * unsuspended first.
2897 */
2898 td->td_dbgflags &= ~TDB_XSIG;
2899 td->td_xsig = SIGKILL;
2900 p->p_ptevents = 0;
2901 break;
2902 }
2903 if (p->p_flag & P_SINGLE_EXIT &&
2904 !(td->td_dbgflags & TDB_EXIT)) {
2905 /*
2906 * Ignore ptrace stops except for thread exit
2907 * events when the process exits.
2908 */
2909 td->td_dbgflags &= ~TDB_XSIG;
2910 PROC_SUNLOCK(p);
2911 return (0);
2912 }
2913
2914 /*
2915 * Make wait(2) work. Ensure that right after the
2916 * attach, the thread that was chosen to become the
2917 * leader of the attach gets reported to the waiter.
2918 * Otherwise, just avoid overwriting another thread's
2919 * assignment to p_xthread. If another thread has
2920 * already set p_xthread, the current thread will get
2921 * a chance to report itself upon the next iteration.
2922 */
2923 sig_handle_first_stop(td, p, sig);
2924
2925 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2926 td->td_dbgflags &= ~TDB_STOPATFORK;
2927 }
2928 stopme:
2929 td->td_dbgflags |= TDB_SSWITCH;
2930 thread_suspend_switch(td, p);
2931 td->td_dbgflags &= ~TDB_SSWITCH;
2932 if ((td->td_dbgflags & (TDB_COREDUMPREQ |
2933 TDB_SCREMOTEREQ)) != 0) {
2934 MPASS((td->td_dbgflags & (TDB_COREDUMPREQ |
2935 TDB_SCREMOTEREQ)) !=
2936 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2937 PROC_SUNLOCK(p);
2938 ptrace_remotereq(td, td->td_dbgflags &
2939 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2940 PROC_SLOCK(p);
2941 goto stopme;
2942 }
2943 if (p->p_xthread == td)
2944 p->p_xthread = NULL;
2945 if (!(p->p_flag & P_TRACED))
2946 break;
2947 if (td->td_dbgflags & TDB_SUSPEND) {
2948 if (p->p_flag & P_SINGLE_EXIT)
2949 break;
2950 goto stopme;
2951 }
2952 }
2953 PROC_SUNLOCK(p);
2954 }
2955
2956 if (si != NULL && sig == td->td_xsig) {
2957 /* Parent wants us to take the original signal unchanged. */
2958 si->ksi_flags |= KSI_HEAD;
2959 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2960 si->ksi_signo = 0;
2961 } else if (td->td_xsig != 0) {
2962 /*
2963 * If the parent wants us to take a new signal, then it will
2964 * leave it in td->td_xsig; otherwise we just look for signals again.
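 * The substituted signal is re-sent with KSI_PTRACE set, so that
 * the thread does not immediately stop for it in ptracestop() again.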
2965 */
2966 ksiginfo_init(&ksi);
2967 ksi.ksi_signo = td->td_xsig;
2968 ksi.ksi_flags |= KSI_PTRACE;
2969 td2 = sigtd(p, td->td_xsig, false);
2970 tdsendsignal(p, td2, td->td_xsig, &ksi);
2971 if (td != td2)
2972 return (0);
2973 }
2974
2975 return (td->td_xsig);
2976 }
2977
2978 static void
2979 reschedule_signals(struct proc *p, sigset_t block, int flags)
2980 {
2981 struct sigacts *ps;
2982 struct thread *td;
2983 int sig;
2984 bool fastblk, pslocked;
2985
2986 PROC_LOCK_ASSERT(p, MA_OWNED);
2987 ps = p->p_sigacts;
2988 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2989 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2990 if (SIGISEMPTY(p->p_siglist))
2991 return;
2992 SIGSETAND(block, p->p_siglist);
2993 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2994 SIG_FOREACH(sig, &block) {
2995 td = sigtd(p, sig, fastblk);
2996
2997 /*
2998 * If sigtd() selected us even though sigfastblock is
2999 * blocking signals, do not activate the AST or wake us,
3000 * to avoid a loop in the AST handler.
3001 */
3002 if (fastblk && td == curthread)
3003 continue;
3004
3005 signotify(td);
3006 if (!pslocked)
3007 mtx_lock(&ps->ps_mtx);
3008 if (p->p_flag & P_TRACED ||
3009 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
3010 !SIGISMEMBER(td->td_sigmask, sig))) {
3011 tdsigwakeup(td, sig, SIG_CATCH,
3012 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
3013 ERESTART));
3014 }
3015 if (!pslocked)
3016 mtx_unlock(&ps->ps_mtx);
3017 }
3018 }
3019
3020 void
3021 tdsigcleanup(struct thread *td)
3022 {
3023 struct proc *p;
3024 sigset_t unblocked;
3025
3026 p = td->td_proc;
3027 PROC_LOCK_ASSERT(p, MA_OWNED);
3028
3029 sigqueue_flush(&td->td_sigqueue);
3030 if (p->p_numthreads == 1)
3031 return;
3032
3033 /*
3034 * Since we cannot handle signals, notify the signal-posting
3035 * code about this by filling the sigmask.
3036 *
3037 * Also, if needed, wake up thread(s) that do not block the
3038 * same signals as the exiting thread, since the thread might
3039 * have been selected for delivery and woken up.
3040 */
3041 SIGFILLSET(unblocked);
3042 SIGSETNAND(unblocked, td->td_sigmask);
3043 SIGFILLSET(td->td_sigmask);
3044 reschedule_signals(p, unblocked, 0);
3045
3046 }
3047
3048 static int
3049 sigdeferstop_curr_flags(int cflags)
3050 {
3051
3052 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
3053 (cflags & TDF_SBDRY) != 0);
3054 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
3055 }
3056
3057 /*
3058 * Defer the delivery of SIGSTOP for the current thread, according to
3059 * the requested mode. Returns the previous flags, which must be restored
3060 * by sigallowstop().
3061 *
3062 * The TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
3063 * cleared by the current thread, which allows the lock-less read-only
3064 * accesses below.
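 * For example, SIGDEFERSTOP_SILENT defers stops without making
 * interrupted sleeps fail, while the _EINTR and _ERESTART modes make
 * them fail with the corresponding error. A typical (illustrative)
 * use of the sigdeferstop()/sigallowstop() pair around an operation
 * that must not stop midway would look like:
 *
 *	int ostop = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = some_sleeping_operation();	(hypothetical helper)
 *	sigallowstop(ostop);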
3065 */ 3066 int 3067 sigdeferstop_impl(int mode) 3068 { 3069 struct thread *td; 3070 int cflags, nflags; 3071 3072 td = curthread; 3073 cflags = sigdeferstop_curr_flags(td->td_flags); 3074 switch (mode) { 3075 case SIGDEFERSTOP_NOP: 3076 nflags = cflags; 3077 break; 3078 case SIGDEFERSTOP_OFF: 3079 nflags = 0; 3080 break; 3081 case SIGDEFERSTOP_SILENT: 3082 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART); 3083 break; 3084 case SIGDEFERSTOP_EINTR: 3085 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART; 3086 break; 3087 case SIGDEFERSTOP_ERESTART: 3088 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR; 3089 break; 3090 default: 3091 panic("sigdeferstop: invalid mode %x", mode); 3092 break; 3093 } 3094 if (cflags == nflags) 3095 return (SIGDEFERSTOP_VAL_NCHG); 3096 thread_lock(td); 3097 td->td_flags = (td->td_flags & ~cflags) | nflags; 3098 thread_unlock(td); 3099 return (cflags); 3100 } 3101 3102 /* 3103 * Restores the STOP handling mode, typically permitting the delivery 3104 * of SIGSTOP for the current thread. This does not immediately 3105 * suspend if a stop was posted. Instead, the thread will suspend 3106 * either via ast() or a subsequent interruptible sleep. 3107 */ 3108 void 3109 sigallowstop_impl(int prev) 3110 { 3111 struct thread *td; 3112 int cflags; 3113 3114 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop")); 3115 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0, 3116 ("sigallowstop: incorrect previous mode %x", prev)); 3117 td = curthread; 3118 cflags = sigdeferstop_curr_flags(td->td_flags); 3119 if (cflags != prev) { 3120 thread_lock(td); 3121 td->td_flags = (td->td_flags & ~cflags) | prev; 3122 thread_unlock(td); 3123 } 3124 } 3125 3126 enum sigstatus { 3127 SIGSTATUS_HANDLE, 3128 SIGSTATUS_HANDLED, 3129 SIGSTATUS_IGNORE, 3130 SIGSTATUS_SBDRY_STOP, 3131 }; 3132 3133 /* 3134 * The thread has signal "sig" pending. Figure out what to do with it: 3135 * 3136 * _HANDLE -> the caller should handle the signal 3137 * _HANDLED -> handled internally, reload pending signal set 3138 * _IGNORE -> ignored, remove from the set of pending signals and try the 3139 * next pending signal 3140 * _SBDRY_STOP -> the signal should stop the thread but this is not 3141 * permitted in the current context 3142 */ 3143 static enum sigstatus 3144 sigprocess(struct thread *td, int sig) 3145 { 3146 struct proc *p; 3147 struct sigacts *ps; 3148 struct sigqueue *queue; 3149 ksiginfo_t ksi; 3150 int prop; 3151 3152 KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig)); 3153 3154 p = td->td_proc; 3155 ps = p->p_sigacts; 3156 mtx_assert(&ps->ps_mtx, MA_OWNED); 3157 PROC_LOCK_ASSERT(p, MA_OWNED); 3158 3159 /* 3160 * We should allow pending but ignored signals below 3161 * if there is sigwait() active, or P_TRACED was 3162 * on when they were posted. 3163 */ 3164 if (SIGISMEMBER(ps->ps_sigignore, sig) && 3165 (p->p_flag & P_TRACED) == 0 && 3166 (td->td_flags & TDF_SIGWAIT) == 0) { 3167 return (SIGSTATUS_IGNORE); 3168 } 3169 3170 /* 3171 * If the process is going to single-thread mode to prepare 3172 * for exit, there is no sense in delivering any signal 3173 * to usermode. Another important consequence is that 3174 * msleep(..., PCATCH, ...) now is only interruptible by a 3175 * suspend request. 3176 */ 3177 if ((p->p_flag2 & P2_WEXIT) != 0) 3178 return (SIGSTATUS_IGNORE); 3179 3180 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) { 3181 /* 3182 * If traced, always stop. 3183 * Remove old signal from queue before the stop. 
3184 * XXX shrug off debugger, it causes siginfo to 3185 * be thrown away. 3186 */ 3187 queue = &td->td_sigqueue; 3188 ksiginfo_init(&ksi); 3189 if (sigqueue_get(queue, sig, &ksi) == 0) { 3190 queue = &p->p_sigqueue; 3191 sigqueue_get(queue, sig, &ksi); 3192 } 3193 td->td_si = ksi.ksi_info; 3194 3195 mtx_unlock(&ps->ps_mtx); 3196 sig = ptracestop(td, sig, &ksi); 3197 mtx_lock(&ps->ps_mtx); 3198 3199 td->td_si.si_signo = 0; 3200 3201 /* 3202 * Keep looking if the debugger discarded or 3203 * replaced the signal. 3204 */ 3205 if (sig == 0) 3206 return (SIGSTATUS_HANDLED); 3207 3208 /* 3209 * If the signal became masked, re-queue it. 3210 */ 3211 if (SIGISMEMBER(td->td_sigmask, sig)) { 3212 ksi.ksi_flags |= KSI_HEAD; 3213 sigqueue_add(&p->p_sigqueue, sig, &ksi); 3214 return (SIGSTATUS_HANDLED); 3215 } 3216 3217 /* 3218 * If the traced bit got turned off, requeue the signal and 3219 * reload the set of pending signals. This ensures that p_sig* 3220 * and p_sigact are consistent. 3221 */ 3222 if ((p->p_flag & P_TRACED) == 0) { 3223 if ((ksi.ksi_flags & KSI_PTRACE) == 0) { 3224 ksi.ksi_flags |= KSI_HEAD; 3225 sigqueue_add(queue, sig, &ksi); 3226 } 3227 return (SIGSTATUS_HANDLED); 3228 } 3229 } 3230 3231 /* 3232 * Decide whether the signal should be returned. 3233 * Return the signal's number, or fall through 3234 * to clear it from the pending mask. 3235 */ 3236 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { 3237 case (intptr_t)SIG_DFL: 3238 /* 3239 * Don't take default actions on system processes. 3240 */ 3241 if (p->p_pid <= 1) { 3242 #ifdef DIAGNOSTIC 3243 /* 3244 * Are you sure you want to ignore SIGSEGV 3245 * in init? XXX 3246 */ 3247 printf("Process (pid %lu) got signal %d\n", 3248 (u_long)p->p_pid, sig); 3249 #endif 3250 return (SIGSTATUS_IGNORE); 3251 } 3252 3253 /* 3254 * If there is a pending stop signal to process with 3255 * default action, stop here, then clear the signal. 3256 * Traced or exiting processes should ignore stops. 3257 * Additionally, a member of an orphaned process group 3258 * should ignore tty stops. 3259 */ 3260 prop = sigprop(sig); 3261 if (prop & SIGPROP_STOP) { 3262 mtx_unlock(&ps->ps_mtx); 3263 if ((p->p_flag & (P_TRACED | P_WEXIT | 3264 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp-> 3265 pg_flags & PGRP_ORPHANED) != 0 && 3266 (prop & SIGPROP_TTYSTOP) != 0)) { 3267 mtx_lock(&ps->ps_mtx); 3268 return (SIGSTATUS_IGNORE); 3269 } 3270 if (TD_SBDRY_INTR(td)) { 3271 KASSERT((td->td_flags & TDF_SBDRY) != 0, 3272 ("lost TDF_SBDRY")); 3273 mtx_lock(&ps->ps_mtx); 3274 return (SIGSTATUS_SBDRY_STOP); 3275 } 3276 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 3277 &p->p_mtx.lock_object, "Catching SIGSTOP"); 3278 sigqueue_delete(&td->td_sigqueue, sig); 3279 sigqueue_delete(&p->p_sigqueue, sig); 3280 p->p_flag |= P_STOPPED_SIG; 3281 p->p_xsig = sig; 3282 PROC_SLOCK(p); 3283 sig_suspend_threads(td, p); 3284 thread_suspend_switch(td, p); 3285 PROC_SUNLOCK(p); 3286 mtx_lock(&ps->ps_mtx); 3287 return (SIGSTATUS_HANDLED); 3288 } else if ((prop & SIGPROP_IGNORE) != 0 && 3289 (td->td_flags & TDF_SIGWAIT) == 0) { 3290 /* 3291 * Default action is to ignore; drop it if 3292 * not in kern_sigtimedwait(). 3293 */ 3294 return (SIGSTATUS_IGNORE); 3295 } else { 3296 return (SIGSTATUS_HANDLE); 3297 } 3298 3299 case (intptr_t)SIG_IGN: 3300 if ((td->td_flags & TDF_SIGWAIT) == 0) 3301 return (SIGSTATUS_IGNORE); 3302 else 3303 return (SIGSTATUS_HANDLE); 3304 3305 default: 3306 /* 3307 * This signal has an action, let postsig() process it. 
3308 */ 3309 return (SIGSTATUS_HANDLE); 3310 } 3311 } 3312 3313 /* 3314 * If the current process has received a signal (should be caught or cause 3315 * termination, should interrupt current syscall), return the signal number. 3316 * Stop signals with default action are processed immediately, then cleared; 3317 * they aren't returned. This is checked after each entry to the system for 3318 * a syscall or trap (though this can usually be done without calling 3319 * issignal by checking the pending signal masks in cursig.) The normal call 3320 * sequence is 3321 * 3322 * while (sig = cursig(curthread)) 3323 * postsig(sig); 3324 */ 3325 static int 3326 issignal(struct thread *td) 3327 { 3328 struct proc *p; 3329 sigset_t sigpending; 3330 int sig; 3331 3332 p = td->td_proc; 3333 PROC_LOCK_ASSERT(p, MA_OWNED); 3334 3335 for (;;) { 3336 sigpending = td->td_sigqueue.sq_signals; 3337 SIGSETOR(sigpending, p->p_sigqueue.sq_signals); 3338 SIGSETNAND(sigpending, td->td_sigmask); 3339 3340 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags & 3341 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) 3342 SIG_STOPSIGMASK(sigpending); 3343 if (SIGISEMPTY(sigpending)) /* no signal to send */ 3344 return (0); 3345 3346 /* 3347 * Do fast sigblock if requested by usermode. Since 3348 * we do know that there was a signal pending at this 3349 * point, set the FAST_SIGBLOCK_PEND as indicator for 3350 * usermode to perform a dummy call to 3351 * FAST_SIGBLOCK_UNBLOCK, which causes immediate 3352 * delivery of postponed pending signal. 3353 */ 3354 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) { 3355 if (td->td_sigblock_val != 0) 3356 SIGSETNAND(sigpending, fastblock_mask); 3357 if (SIGISEMPTY(sigpending)) { 3358 td->td_pflags |= TDP_SIGFASTPENDING; 3359 return (0); 3360 } 3361 } 3362 3363 if (!pt_attach_transparent && 3364 (p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED && 3365 (p->p_flag2 & P2_PTRACE_FSTP) != 0 && 3366 SIGISMEMBER(sigpending, SIGSTOP)) { 3367 /* 3368 * If debugger just attached, always consume 3369 * SIGSTOP from ptrace(PT_ATTACH) first, to 3370 * execute the debugger attach ritual in 3371 * order. 3372 */ 3373 td->td_dbgflags |= TDB_FSTP; 3374 SIGEMPTYSET(sigpending); 3375 SIGADDSET(sigpending, SIGSTOP); 3376 } 3377 3378 SIG_FOREACH(sig, &sigpending) { 3379 switch (sigprocess(td, sig)) { 3380 case SIGSTATUS_HANDLE: 3381 return (sig); 3382 case SIGSTATUS_HANDLED: 3383 goto next; 3384 case SIGSTATUS_IGNORE: 3385 sigqueue_delete(&td->td_sigqueue, sig); 3386 sigqueue_delete(&p->p_sigqueue, sig); 3387 break; 3388 case SIGSTATUS_SBDRY_STOP: 3389 return (-1); 3390 } 3391 } 3392 next:; 3393 } 3394 } 3395 3396 void 3397 thread_stopped(struct proc *p) 3398 { 3399 int n; 3400 3401 PROC_LOCK_ASSERT(p, MA_OWNED); 3402 PROC_SLOCK_ASSERT(p, MA_OWNED); 3403 n = p->p_suspcount; 3404 if (p == curproc) 3405 n++; 3406 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) { 3407 PROC_SUNLOCK(p); 3408 p->p_flag &= ~P_WAITED; 3409 PROC_LOCK(p->p_pptr); 3410 childproc_stopped(p, (p->p_flag & P_TRACED) ? 3411 CLD_TRAPPED : CLD_STOPPED); 3412 PROC_UNLOCK(p->p_pptr); 3413 PROC_SLOCK(p); 3414 } 3415 } 3416 3417 /* 3418 * Take the action for the specified signal 3419 * from the current set of pending signals. 
3420 */
3421 int
3422 postsig(int sig)
3423 {
3424 struct thread *td;
3425 struct proc *p;
3426 struct sigacts *ps;
3427 sig_t action;
3428 ksiginfo_t ksi;
3429 sigset_t returnmask;
3430
3431 KASSERT(sig != 0, ("postsig"));
3432
3433 td = curthread;
3434 p = td->td_proc;
3435 PROC_LOCK_ASSERT(p, MA_OWNED);
3436 ps = p->p_sigacts;
3437 mtx_assert(&ps->ps_mtx, MA_OWNED);
3438 ksiginfo_init(&ksi);
3439 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3440 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3441 return (0);
3442 ksi.ksi_signo = sig;
3443 if (ksi.ksi_code == SI_TIMER)
3444 itimer_accept(p, ksi.ksi_timerid, &ksi);
3445 action = ps->ps_sigact[_SIG_IDX(sig)];
3446 #ifdef KTRACE
3447 if (KTRPOINT(td, KTR_PSIG))
3448 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3449 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3450 #endif
3451
3452 if (action == SIG_DFL) {
3453 /*
3454 * Default action, where the default is to kill
3455 * the process. (Other cases were ignored above.)
3456 */
3457 mtx_unlock(&ps->ps_mtx);
3458 proc_td_siginfo_capture(td, &ksi.ksi_info);
3459 sigexit(td, sig);
3460 /* NOTREACHED */
3461 } else {
3462 /*
3463 * If we get here, the signal must be caught.
3464 */
3465 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3466 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3467 ("postsig action: blocked sig %d", sig));
3468
3469 /*
3470 * Set the new mask value and also defer further
3471 * occurrences of this signal.
3472 *
3473 * Special case: the user has done a sigsuspend. Here the
3474 * current mask is not of interest, but rather the
3475 * mask from before the sigsuspend is what we want
3476 * restored after the signal processing is completed.
3477 */
3478 if (td->td_pflags & TDP_OLDMASK) {
3479 returnmask = td->td_oldsigmask;
3480 td->td_pflags &= ~TDP_OLDMASK;
3481 } else
3482 returnmask = td->td_sigmask;
3483
3484 if (p->p_sig == sig) {
3485 p->p_sig = 0;
3486 }
3487 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3488 postsig_done(sig, td, ps);
3489 }
3490 return (1);
3491 }
3492
3493 int
3494 sig_ast_checksusp(struct thread *td)
3495 {
3496 struct proc *p __diagused;
3497 int ret;
3498
3499 p = td->td_proc;
3500 PROC_LOCK_ASSERT(p, MA_OWNED);
3501
3502 if (!td_ast_pending(td, TDA_SUSPEND))
3503 return (0);
3504
3505 ret = thread_suspend_check(1);
3506 MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3507 return (ret);
3508 }
3509
3510 int
3511 sig_ast_needsigchk(struct thread *td)
3512 {
3513 struct proc *p;
3514 struct sigacts *ps;
3515 int ret, sig;
3516
3517 p = td->td_proc;
3518 PROC_LOCK_ASSERT(p, MA_OWNED);
3519
3520 if (!td_ast_pending(td, TDA_SIG))
3521 return (0);
3522
3523 ps = p->p_sigacts;
3524 mtx_lock(&ps->ps_mtx);
3525 sig = cursig(td);
3526 if (sig == -1) {
3527 mtx_unlock(&ps->ps_mtx);
3528 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3529 KASSERT(TD_SBDRY_INTR(td),
3530 ("lost TDF_SERESTART or TDF_SEINTR"));
3531 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3532 (TDF_SEINTR | TDF_SERESTART),
3533 ("both TDF_SEINTR and TDF_SERESTART"));
3534 ret = TD_SBDRY_ERRNO(td);
3535 } else if (sig != 0) {
3536 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3537 mtx_unlock(&ps->ps_mtx);
3538 } else {
3539 mtx_unlock(&ps->ps_mtx);
3540 ret = 0;
3541 }
3542
3543 /*
3544 * Do not go to sleep if this thread was the ptrace(2)
3545 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH,
3546 * but we usually act on the signal by interrupting sleep, and
3547 * should do that here as well.
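 * Returning EINTR below mimics that interruption for the attach
 * leader even when nothing else would have broken the sleep.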
*/
3549 if ((td->td_dbgflags & TDB_FSTP) != 0) {
3550 if (ret == 0)
3551 ret = EINTR;
3552 td->td_dbgflags &= ~TDB_FSTP;
3553 }
3554
3555 return (ret);
3556 }
3557
3558 int
3559 sig_intr(void)
3560 {
3561 struct thread *td;
3562 struct proc *p;
3563 int ret;
3564
3565 td = curthread;
3566 if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
3567 return (0);
3568
3569 p = td->td_proc;
3570
3571 PROC_LOCK(p);
3572 ret = sig_ast_checksusp(td);
3573 if (ret == 0)
3574 ret = sig_ast_needsigchk(td);
3575 PROC_UNLOCK(p);
3576 return (ret);
3577 }
3578
3579 bool
3580 curproc_sigkilled(void)
3581 {
3582 struct thread *td;
3583 struct proc *p;
3584 struct sigacts *ps;
3585 bool res;
3586
3587 td = curthread;
3588 if (!td_ast_pending(td, TDA_SIG))
3589 return (false);
3590
3591 p = td->td_proc;
3592 PROC_LOCK(p);
3593 ps = p->p_sigacts;
3594 mtx_lock(&ps->ps_mtx);
3595 res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) ||
3596 SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL);
3597 mtx_unlock(&ps->ps_mtx);
3598 PROC_UNLOCK(p);
3599 return (res);
3600 }
3601
3602 void
3603 proc_wkilled(struct proc *p)
3604 {
3605
3606 PROC_LOCK_ASSERT(p, MA_OWNED);
3607 if ((p->p_flag & P_WKILLED) == 0)
3608 p->p_flag |= P_WKILLED;
3609 }
3610
3611 /*
3612 * Kill the specified process for the stated reason.
3613 */
3614 void
3615 killproc(struct proc *p, const char *why)
3616 {
3617
3618 PROC_LOCK_ASSERT(p, MA_OWNED);
3619 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3620 p->p_comm);
3621 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3622 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3623 p->p_ucred->cr_uid, why);
3624 proc_wkilled(p);
3625 kern_psignal(p, SIGKILL);
3626 }
3627
3628 /*
3629 * Send a queued SIGCHLD to the parent when the child process's
3630 * state changes.
3631 */
3632 static void
3633 sigparent(struct proc *p, int reason, int status)
3634 {
3635 PROC_LOCK_ASSERT(p, MA_OWNED);
3636 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3637
3638 if (p->p_ksi != NULL) {
3639 p->p_ksi->ksi_signo = SIGCHLD;
3640 p->p_ksi->ksi_code = reason;
3641 p->p_ksi->ksi_status = status;
3642 p->p_ksi->ksi_pid = p->p_pid;
3643 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3644 if (KSI_ONQ(p->p_ksi))
3645 return;
3646 }
3647
3648 /*
3649 * Do not consume p_ksi if the parent is a zombie, since the
3650 * signal is dropped immediately. Instead, keep it since it
3651 * might be useful for the reaper.
3652 */
3653 if (p->p_pptr->p_state != PRS_ZOMBIE)
3654 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3655 }
3656
3657 static void
3658 childproc_jobstate(struct proc *p, int reason, int sig)
3659 {
3660 struct sigacts *ps;
3661
3662 PROC_LOCK_ASSERT(p, MA_OWNED);
3663 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3664
3665 /*
3666 * Wake up the parent sleeping in kern_wait() and also send
3667 * SIGCHLD to it, but SIGCHLD does not guarantee that the
3668 * parent will awake, because the parent may have masked
3669 * the signal.
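 * Setting P_STATCHILD and the wakeup() below ensure that
 * kern_wait() notices the state change even in that case.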
3670 */ 3671 p->p_pptr->p_flag |= P_STATCHILD; 3672 wakeup(p->p_pptr); 3673 3674 ps = p->p_pptr->p_sigacts; 3675 mtx_lock(&ps->ps_mtx); 3676 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) { 3677 mtx_unlock(&ps->ps_mtx); 3678 sigparent(p, reason, sig); 3679 } else 3680 mtx_unlock(&ps->ps_mtx); 3681 } 3682 3683 void 3684 childproc_stopped(struct proc *p, int reason) 3685 { 3686 3687 childproc_jobstate(p, reason, p->p_xsig); 3688 } 3689 3690 void 3691 childproc_continued(struct proc *p) 3692 { 3693 PROC_LOCK_ASSERT(p, MA_OWNED); 3694 p->p_flag |= P_CONTINUED; 3695 p->p_xsig = SIGCONT; 3696 childproc_jobstate(p, CLD_CONTINUED, SIGCONT); 3697 } 3698 3699 void 3700 childproc_exited(struct proc *p) 3701 { 3702 int reason, status; 3703 3704 if (WCOREDUMP(p->p_xsig)) { 3705 reason = CLD_DUMPED; 3706 status = WTERMSIG(p->p_xsig); 3707 } else if (WIFSIGNALED(p->p_xsig)) { 3708 reason = CLD_KILLED; 3709 status = WTERMSIG(p->p_xsig); 3710 } else { 3711 reason = CLD_EXITED; 3712 status = p->p_xexit; 3713 } 3714 /* 3715 * XXX avoid calling wakeup(p->p_pptr), the work is 3716 * done in exit1(). 3717 */ 3718 sigparent(p, reason, status); 3719 } 3720 3721 /* 3722 * Nonexistent system call-- signal process (may want to handle it). Flag 3723 * error in case process won't see signal immediately (blocked or ignored). 3724 */ 3725 #ifndef _SYS_SYSPROTO_H_ 3726 struct nosys_args { 3727 int dummy; 3728 }; 3729 #endif 3730 /* ARGSUSED */ 3731 int 3732 nosys(struct thread *td, struct nosys_args *args) 3733 { 3734 return (kern_nosys(td, args->dummy)); 3735 } 3736 3737 int 3738 kern_nosys(struct thread *td, int dummy) 3739 { 3740 struct proc *p; 3741 3742 p = td->td_proc; 3743 3744 if (SV_PROC_FLAG(p, SV_SIGSYS) != 0 && kern_signosys) { 3745 PROC_LOCK(p); 3746 tdsignal(td, SIGSYS); 3747 PROC_UNLOCK(p); 3748 } 3749 if (kern_lognosys == 1 || kern_lognosys == 3) { 3750 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm, 3751 td->td_sa.code); 3752 } 3753 if (kern_lognosys == 2 || kern_lognosys == 3 || 3754 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) { 3755 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm, 3756 td->td_sa.code); 3757 } 3758 return (ENOSYS); 3759 } 3760 3761 /* 3762 * Send a SIGIO or SIGURG signal to a process or process group using stored 3763 * credentials rather than those of the current process. 
3764 */ 3765 void 3766 pgsigio(struct sigio **sigiop, int sig, int checkctty) 3767 { 3768 ksiginfo_t ksi; 3769 struct sigio *sigio; 3770 3771 ksiginfo_init(&ksi); 3772 ksi.ksi_signo = sig; 3773 ksi.ksi_code = SI_KERNEL; 3774 3775 SIGIO_LOCK(); 3776 sigio = *sigiop; 3777 if (sigio == NULL) { 3778 SIGIO_UNLOCK(); 3779 return; 3780 } 3781 if (sigio->sio_pgid > 0) { 3782 PROC_LOCK(sigio->sio_proc); 3783 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred)) 3784 kern_psignal(sigio->sio_proc, sig); 3785 PROC_UNLOCK(sigio->sio_proc); 3786 } else if (sigio->sio_pgid < 0) { 3787 struct proc *p; 3788 3789 PGRP_LOCK(sigio->sio_pgrp); 3790 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) { 3791 PROC_LOCK(p); 3792 if (p->p_state == PRS_NORMAL && 3793 CANSIGIO(sigio->sio_ucred, p->p_ucred) && 3794 (checkctty == 0 || (p->p_flag & P_CONTROLT))) 3795 kern_psignal(p, sig); 3796 PROC_UNLOCK(p); 3797 } 3798 PGRP_UNLOCK(sigio->sio_pgrp); 3799 } 3800 SIGIO_UNLOCK(); 3801 } 3802 3803 static int 3804 filt_sigattach(struct knote *kn) 3805 { 3806 struct proc *p = curproc; 3807 3808 kn->kn_ptr.p_proc = p; 3809 kn->kn_flags |= EV_CLEAR; /* automatically set */ 3810 3811 knlist_add(p->p_klist, kn, 0); 3812 3813 return (0); 3814 } 3815 3816 static void 3817 filt_sigdetach(struct knote *kn) 3818 { 3819 knlist_remove(kn->kn_knlist, kn, 0); 3820 } 3821 3822 /* 3823 * signal knotes are shared with proc knotes, so we apply a mask to 3824 * the hint in order to differentiate them from process hints. This 3825 * could be avoided by using a signal-specific knote list, but probably 3826 * isn't worth the trouble. 3827 */ 3828 static int 3829 filt_signal(struct knote *kn, long hint) 3830 { 3831 3832 if (hint & NOTE_SIGNAL) { 3833 hint &= ~NOTE_SIGNAL; 3834 3835 if (kn->kn_id == hint) 3836 kn->kn_data++; 3837 } 3838 return (kn->kn_data != 0); 3839 } 3840 3841 struct sigacts * 3842 sigacts_alloc(void) 3843 { 3844 struct sigacts *ps; 3845 3846 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO); 3847 refcount_init(&ps->ps_refcnt, 1); 3848 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF); 3849 return (ps); 3850 } 3851 3852 void 3853 sigacts_free(struct sigacts *ps) 3854 { 3855 3856 if (refcount_release(&ps->ps_refcnt) == 0) 3857 return; 3858 mtx_destroy(&ps->ps_mtx); 3859 free(ps, M_SUBPROC); 3860 } 3861 3862 struct sigacts * 3863 sigacts_hold(struct sigacts *ps) 3864 { 3865 3866 refcount_acquire(&ps->ps_refcnt); 3867 return (ps); 3868 } 3869 3870 void 3871 sigacts_copy(struct sigacts *dest, struct sigacts *src) 3872 { 3873 3874 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest")); 3875 mtx_lock(&src->ps_mtx); 3876 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt)); 3877 mtx_unlock(&src->ps_mtx); 3878 } 3879 3880 int 3881 sigacts_shared(struct sigacts *ps) 3882 { 3883 3884 return (ps->ps_refcnt > 1); 3885 } 3886 3887 void 3888 sig_drop_caught(struct proc *p) 3889 { 3890 int sig; 3891 struct sigacts *ps; 3892 3893 ps = p->p_sigacts; 3894 PROC_LOCK_ASSERT(p, MA_OWNED); 3895 mtx_assert(&ps->ps_mtx, MA_OWNED); 3896 SIG_FOREACH(sig, &ps->ps_sigcatch) { 3897 sigdflt(ps, sig); 3898 if ((sigprop(sig) & SIGPROP_IGNORE) != 0) 3899 sigqueue_delete_proc(p, sig); 3900 } 3901 } 3902 3903 static void 3904 sigfastblock_failed(struct thread *td, bool sendsig, bool write) 3905 { 3906 ksiginfo_t ksi; 3907 3908 /* 3909 * Prevent further fetches and SIGSEGVs, allowing thread to 3910 * issue syscalls despite corruption. 
*/
3912 sigfastblock_clear(td);
3913
3914 if (!sendsig)
3915 return;
3916 ksiginfo_init_trap(&ksi);
3917 ksi.ksi_signo = SIGSEGV;
3918 ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
3919 ksi.ksi_addr = td->td_sigblock_ptr;
3920 trapsignal(td, &ksi);
3921 }
3922
3923 static bool
3924 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
3925 {
3926 uint32_t res;
3927
3928 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
3929 return (true);
3930 if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
3931 sigfastblock_failed(td, sendsig, false);
3932 return (false);
3933 }
3934 *valp = res;
3935 td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
3936 return (true);
3937 }
3938
3939 static void
3940 sigfastblock_resched(struct thread *td, bool resched)
3941 {
3942 struct proc *p;
3943
3944 if (resched) {
3945 p = td->td_proc;
3946 PROC_LOCK(p);
3947 reschedule_signals(p, td->td_sigmask, 0);
3948 PROC_UNLOCK(p);
3949 }
3950 ast_sched(td, TDA_SIG);
3951 }
3952
3953 int
3954 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
3955 {
3956 struct proc *p;
3957 int error, res;
3958 uint32_t oldval;
3959
3960 error = 0;
3961 p = td->td_proc;
3962 switch (uap->cmd) {
3963 case SIGFASTBLOCK_SETPTR:
3964 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
3965 error = EBUSY;
3966 break;
3967 }
3968 if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
3969 error = EINVAL;
3970 break;
3971 }
3972 td->td_pflags |= TDP_SIGFASTBLOCK;
3973 td->td_sigblock_ptr = uap->ptr;
3974 break;
3975
3976 case SIGFASTBLOCK_UNBLOCK:
3977 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
3978 error = EINVAL;
3979 break;
3980 }
3981
3982 for (;;) {
3983 res = casueword32(td->td_sigblock_ptr,
3984 SIGFASTBLOCK_PEND, &oldval, 0);
3985 if (res == -1) {
3986 error = EFAULT;
3987 sigfastblock_failed(td, false, true);
3988 break;
3989 }
3990 if (res == 0)
3991 break;
3992 MPASS(res == 1);
3993 if (oldval != SIGFASTBLOCK_PEND) {
3994 error = EBUSY;
3995 break;
3996 }
3997 error = thread_check_susp(td, false);
3998 if (error != 0)
3999 break;
4000 }
4001 if (error != 0)
4002 break;
4003
4004 /*
4005 * td_sigblock_val is cleared here, but not on a
4006 * syscall exit. The end effect is that a single
4007 * interruptible sleep, while the user sigblock word is
4008 * set, might return EINTR or ERESTART to usermode
4009 * without delivering a signal. All further sleeps,
4010 * until userspace clears the word and does
4011 * sigfastblock(UNBLOCK), observe the current word and
4012 * no longer get interrupted. This is a slight
4013 * non-conformance; the alternative would be to read
4014 * the sigblock word on each syscall entry.
4015 */
4016 td->td_sigblock_val = 0;
4017
4018 /*
4019 * Rely on the normal AST mechanism to deliver pending
4020 * signals to the current thread, but notify other
4021 * threads about the fake unblock.
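 * reschedule_signals() is therefore only needed when the process
 * has other threads that might now be able to take the signals.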
4022 */ 4023 sigfastblock_resched(td, error == 0 && p->p_numthreads != 1); 4024 4025 break; 4026 4027 case SIGFASTBLOCK_UNSETPTR: 4028 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) { 4029 error = EINVAL; 4030 break; 4031 } 4032 if (!sigfastblock_fetch_sig(td, false, &oldval)) { 4033 error = EFAULT; 4034 break; 4035 } 4036 if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) { 4037 error = EBUSY; 4038 break; 4039 } 4040 sigfastblock_clear(td); 4041 break; 4042 4043 default: 4044 error = EINVAL; 4045 break; 4046 } 4047 return (error); 4048 } 4049 4050 void 4051 sigfastblock_clear(struct thread *td) 4052 { 4053 bool resched; 4054 4055 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) 4056 return; 4057 td->td_sigblock_val = 0; 4058 resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 || 4059 SIGPENDING(td); 4060 td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING); 4061 sigfastblock_resched(td, resched); 4062 } 4063 4064 void 4065 sigfastblock_fetch(struct thread *td) 4066 { 4067 uint32_t val; 4068 4069 (void)sigfastblock_fetch_sig(td, true, &val); 4070 } 4071 4072 static void 4073 sigfastblock_setpend1(struct thread *td) 4074 { 4075 int res; 4076 uint32_t oldval; 4077 4078 if ((td->td_pflags & TDP_SIGFASTPENDING) == 0) 4079 return; 4080 res = fueword32((void *)td->td_sigblock_ptr, &oldval); 4081 if (res == -1) { 4082 sigfastblock_failed(td, true, false); 4083 return; 4084 } 4085 for (;;) { 4086 res = casueword32(td->td_sigblock_ptr, oldval, &oldval, 4087 oldval | SIGFASTBLOCK_PEND); 4088 if (res == -1) { 4089 sigfastblock_failed(td, true, true); 4090 return; 4091 } 4092 if (res == 0) { 4093 td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS; 4094 td->td_pflags &= ~TDP_SIGFASTPENDING; 4095 break; 4096 } 4097 MPASS(res == 1); 4098 if (thread_check_susp(td, false) != 0) 4099 break; 4100 } 4101 } 4102 4103 static void 4104 sigfastblock_setpend(struct thread *td, bool resched) 4105 { 4106 struct proc *p; 4107 4108 sigfastblock_setpend1(td); 4109 if (resched) { 4110 p = td->td_proc; 4111 PROC_LOCK(p); 4112 reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK); 4113 PROC_UNLOCK(p); 4114 } 4115 } 4116