/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/condvar.h>
#include <sys/devctl.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/ptrace.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <sys/jail.h>

#include <machine/cpu.h>

#include <security/audit/audit.h>

#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
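
/*
 * The osig* compatibility syscalls below (osigaction(), osigvec())
 * reject any signum >= ONSIG, so signals introduced after 4.3BSD,
 * the POSIX real-time range in particular, are not reachable through
 * the old interfaces.
 */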

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , signal__send,
    "struct thread *", "struct proc *", "int");
SDT_PROBE_DEFINE2(proc, , , signal__clear,
    "int", "ksiginfo_t *");
SDT_PROBE_DEFINE3(proc, , , signal__discard,
    "struct thread *", "struct proc *", "int");

static int	coredump(struct thread *);
static int	killpg1(struct thread *td, int sig, int pgid, int all,
		    ksiginfo_t *ksi);
static int	issignal(struct thread *td);
static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static int	sig_suspend_threads(struct thread *, struct proc *);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
static void	sigqueue_start(void);
static void	sigfastblock_setpend(struct thread *td, bool resched);

static uma_zone_t	ksiginfo_zone = NULL;
struct filterops sig_filtops = {
	.f_isfd = 0,
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
};

static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

static int	kern_forcesigexit = 1;
SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
    &kern_forcesigexit, 0, "Force trap signal to be handled");

static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "POSIX real time signals");

static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	preallocate_siginfo = 1024;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals that overflowed");

static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "Number of signal allocations that failed");

static int	kern_lognosys = 0;
SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
    "Log invalid syscalls");

__read_frequently bool sigfastblock_fetch_always = false;
SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
    &sigfastblock_fetch_always, 0,
    "Fetch sigfastblock word on each syscall entry for proper "
    "blocking semantics");

static bool	kern_sig_discard_ign = true;
SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
    &kern_sig_discard_ign, 0,
    "Discard ignored signals on delivery, otherwise queue them to "
    "the target queue");

SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);

/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define	CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)
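
/*
 * Illustrative use, a sketch only (the real SIGIO delivery path layers
 * a jail check on top of this test): with the target process locked,
 *
 *	if (CANSIGIO(sio_ucred, p->p_ucred))
 *		pksignal(p, SIGIO, NULL);
 *
 * where sio_ucred stands for the credentials saved when the owner of
 * the descriptor was registered.
 */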

static int	sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
    &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");

static int	capmode_coredump;
SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
    &capmode_coredump, 0, "Allow processes in capability mode to dump core");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
    &do_coredump, 0, "Enable/Disable coredumps");

static int	set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
    0, "Enable setting the NODUMP flag on coredump files");

static int	coredump_devctl = 0;
SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
    0, "Generate a devctl notification when processes coredump");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SIGPROP_KILL		0x01	/* terminates process by default */
#define	SIGPROP_CORE		0x02	/* ditto and coredumps */
#define	SIGPROP_STOP		0x04	/* suspend process */
#define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
#define	SIGPROP_IGNORE		0x10	/* ignore by default */
#define	SIGPROP_CONT		0x20	/* continue if suspended */

static int sigproptbl[NSIG] = {
	[SIGHUP] =	SIGPROP_KILL,
	[SIGINT] =	SIGPROP_KILL,
	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGKILL] =	SIGPROP_KILL,
	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGPIPE] =	SIGPROP_KILL,
	[SIGALRM] =	SIGPROP_KILL,
	[SIGTERM] =	SIGPROP_KILL,
	[SIGURG] =	SIGPROP_IGNORE,
	[SIGSTOP] =	SIGPROP_STOP,
	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
	[SIGCHLD] =	SIGPROP_IGNORE,
	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGIO] =	SIGPROP_IGNORE,
	[SIGXCPU] =	SIGPROP_KILL,
	[SIGXFSZ] =	SIGPROP_KILL,
	[SIGVTALRM] =	SIGPROP_KILL,
	[SIGPROF] =	SIGPROP_KILL,
	[SIGWINCH] =	SIGPROP_IGNORE,
	[SIGINFO] =	SIGPROP_IGNORE,
	[SIGUSR1] =	SIGPROP_KILL,
	[SIGUSR2] =	SIGPROP_KILL,
};

#define	_SIG_FOREACH_ADVANCE(i, set) ({					\
	int __found;							\
	for (;;) {							\
		if (__bits != 0) {					\
			int __sig = ffs(__bits);			\
			__bits &= ~(1u << (__sig - 1));			\
			sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
			__found = 1;					\
			break;						\
		}							\
		if (++__i == _SIG_WORDS) {				\
			__found = 0;					\
			break;						\
		}							\
		__bits = (set)->__bits[__i];				\
	}								\
	__found != 0;							\
})

#define	SIG_FOREACH(i, set)						\
	for (int32_t __i = -1, __bits = 0;				\
	    _SIG_FOREACH_ADVANCE(i, set); )
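
/*
 * A minimal usage sketch: iterate over every signal recorded in a
 * sigset_t, lowest-numbered bit first within each word:
 *
 *	int sig;
 *
 *	SIG_FOREACH(sig, &td->td_sigqueue.sq_signals) {
 *		... handle sig ...
 *	}
 *
 * _SIG_FOREACH_ADVANCE() walks the set one word at a time, using
 * ffs() to pull out the lowest set bit on each iteration.
 */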

static sigset_t fastblock_mask;

static void
ast_sig(struct thread *td, int tda)
{
	struct proc *p;
	int sig;
	bool resched_sigs;

	p = td->td_proc;

#ifdef DIAGNOSTIC
	if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
	    TDAI(TDA_AST))) == 0) {
		PROC_LOCK(p);
		thread_lock(td);
		/*
		 * Note that TDA_SIG should be re-read from
		 * td_ast, since a signal might have been delivered
		 * after we cleared td_flags above.  This is one of
		 * the reasons for the looping check for the AST
		 * condition.  See the comment in userret() about
		 * P_PPWAIT.
		 */
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && ((tda | td->td_ast) &
			    (TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
				thread_unlock(td);	/* fix dumps */
				panic(
				    "failed2 to set signal flags for ast p %p "
				    "td %p tda %#x td_ast %#x fl %#x",
				    p, td, tda, td->td_ast, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Check for signals.  Unlocked reads of p_pendingcnt or
	 * p_siglist might cause a process-directed signal to be handled
	 * later.
	 */
	if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		sigfastblock_fetch(td);
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
		resched_sigs = true;
	} else {
		resched_sigs = false;
	}

	/*
	 * Handle deferred update of the fast sigblock value, after
	 * the postsig() loop was performed.
	 */
	sigfastblock_setpend(td, resched_sigs);
}

static void
ast_sigsuspend(struct thread *td, int tda __unused)
{
	MPASS((td->td_pflags & TDP_OLDMASK) != 0);
	td->td_pflags &= ~TDP_OLDMASK;
	kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
}

static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
	SIGFILLSET(fastblock_mask);
	SIG_CANTMASK(fastblock_mask);
	ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);
	ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
	    TDP_OLDMASK, ast_sigsuspend);
}

ksiginfo_t *
ksiginfo_alloc(int mwait)
{
	MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);

	if (ksiginfo_zone == NULL)
		return (NULL);
	return (uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
}

void
ksiginfo_free(ksiginfo_t *ksi)
{
	uma_zfree(ksiginfo_zone, ksi);
}
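
/*
 * Free a ksiginfo only when it was allocated from ksiginfo_zone;
 * KSI_EXT marks caller-owned storage (e.g. a ksiginfo embedded in
 * another structure) that must not be returned to the zone.  Returns
 * true when the ksiginfo was freed here.
 */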

static __inline bool
ksiginfo_tryfree(ksiginfo_t *ksi)
{
	if ((ksi->ksi_flags & KSI_EXT) == 0) {
		uma_zfree(ksiginfo_zone, ksi);
		return (true);
	}
	return (false);
}

void
sigqueue_init(sigqueue_t *list, struct proc *p)
{
	SIGEMPTYSET(list->sq_signals);
	SIGEMPTYSET(list->sq_kill);
	SIGEMPTYSET(list->sq_ptrace);
	TAILQ_INIT(&list->sq_list);
	list->sq_proc = p;
	list->sq_flags = SQ_INIT;
}

/*
 * Get a signal's ksiginfo.
 * Return:
 *	0	- signal not found
 *	others	- signal number
 */
static int
sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi, *next;
	int count = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (!SIGISMEMBER(sq->sq_signals, signo))
		return (0);

	if (SIGISMEMBER(sq->sq_ptrace, signo)) {
		count++;
		SIGDELSET(sq->sq_ptrace, signo);
		si->ksi_flags |= KSI_PTRACE;
	}
	if (SIGISMEMBER(sq->sq_kill, signo)) {
		count++;
		if (count == 1)
			SIGDELSET(sq->sq_kill, signo);
	}

	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (ksi->ksi_signo == signo) {
			if (count == 0) {
				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
				ksi->ksi_sigq = NULL;
				ksiginfo_copy(ksi, si);
				if (ksiginfo_tryfree(ksi) && p != NULL)
					p->p_pendingcnt--;
			}
			if (++count > 1)
				break;
		}
	}

	if (count <= 1)
		SIGDELSET(sq->sq_signals, signo);
	si->ksi_signo = signo;
	return (signo);
}

void
sigqueue_take(ksiginfo_t *ksi)
{
	struct ksiginfo *kp;
	struct proc *p;
	sigqueue_t *sq;

	if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
		return;

	p = sq->sq_proc;
	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
	ksi->ksi_sigq = NULL;
	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
		p->p_pendingcnt--;

	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
	    kp = TAILQ_NEXT(kp, ksi_link)) {
		if (kp->ksi_signo == ksi->ksi_signo)
			break;
	}
	if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
	    !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
}
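
/*
 * Queue a signal on sq.  Storage falls into three tiers: sq_kill and
 * sq_ptrace are plain pending bitmasks for signals carrying no
 * siginfo (or whose siginfo could not be allocated), sq_list holds
 * the queued ksiginfo records, and sq_signals is kept as the union
 * of all of them.
 */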

static int
sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi;
	int ret = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	/*
	 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
	 * for these signals.
	 */
	if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	/* directly insert the ksi, don't copy it */
	if (si->ksi_flags & KSI_INS) {
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
		si->ksi_sigq = sq;
		goto out_set_bit;
	}

	if (__predict_false(ksiginfo_zone == NULL)) {
		SIGADDSET(sq->sq_kill, signo);
		goto out_set_bit;
	}

	if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
		signal_overflow++;
		ret = EAGAIN;
	} else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) {
		signal_alloc_fail++;
		ret = EAGAIN;
	} else {
		if (p != NULL)
			p->p_pendingcnt++;
		ksiginfo_copy(si, ksi);
		ksi->ksi_signo = signo;
		if (si->ksi_flags & KSI_HEAD)
			TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
		else
			TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = sq;
	}

	if (ret != 0) {
		if ((si->ksi_flags & KSI_PTRACE) != 0) {
			SIGADDSET(sq->sq_ptrace, signo);
			ret = 0;
			goto out_set_bit;
		} else if ((si->ksi_flags & KSI_TRAP) != 0 ||
		    (si->ksi_flags & KSI_SIGQ) == 0) {
			SIGADDSET(sq->sq_kill, signo);
			ret = 0;
			goto out_set_bit;
		}
		return (ret);
	}

out_set_bit:
	SIGADDSET(sq->sq_signals, signo);
	return (ret);
}

void
sigqueue_flush(sigqueue_t *sq)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (p != NULL)
		PROC_LOCK_ASSERT(p, MA_OWNED);

	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
		ksi->ksi_sigq = NULL;
		if (ksiginfo_tryfree(ksi) && p != NULL)
			p->p_pendingcnt--;
	}

	SIGEMPTYSET(sq->sq_signals);
	SIGEMPTYSET(sq->sq_kill);
	SIGEMPTYSET(sq->sq_ptrace);
}

static void
sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
{
	sigset_t tmp;
	struct proc *p1, *p2;
	ksiginfo_t *ksi, *next;

	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
	p1 = src->sq_proc;
	p2 = dst->sq_proc;
	/* Move siginfo to target list */
	TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
			if (p1 != NULL)
				p1->p_pendingcnt--;
			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = dst;
			if (p2 != NULL)
				p2->p_pendingcnt++;
		}
	}

	/* Move pending bits to target list */
	tmp = src->sq_kill;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_kill, tmp);
	SIGSETNAND(src->sq_kill, tmp);

	tmp = src->sq_ptrace;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_ptrace, tmp);
	SIGSETNAND(src->sq_ptrace, tmp);

	tmp = src->sq_signals;
	SIGSETAND(tmp, *set);
	SIGSETOR(dst->sq_signals, tmp);
	SIGSETNAND(src->sq_signals, tmp);
}

#if 0
static void
sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_move_set(src, dst, &set);
}
#endif

static void
sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
{
	struct proc *p = sq->sq_proc;
	ksiginfo_t *ksi, *next;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	/* Remove siginfo queue */
	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = NULL;
			if (ksiginfo_tryfree(ksi) && p != NULL)
				p->p_pendingcnt--;
		}
	}
	SIGSETNAND(sq->sq_kill, *set);
	SIGSETNAND(sq->sq_ptrace, *set);
	SIGSETNAND(sq->sq_signals, *set);
}

void
sigqueue_delete(sigqueue_t *sq, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set(sq, &set);
}

/* Remove a set of signals for a process */
static void
sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
{
	sigqueue_t worklist;
	struct thread *td0;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	sigqueue_init(&worklist, NULL);
	sigqueue_move_set(&p->p_sigqueue, &worklist, set);

	FOREACH_THREAD_IN_PROC(p, td0)
		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);

	sigqueue_flush(&worklist);
}

void
sigqueue_delete_proc(struct proc *p, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set_proc(p, &set);
}

static void
sigqueue_delete_stopmask_proc(struct proc *p)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, SIGSTOP);
	SIGADDSET(set, SIGTSTP);
	SIGADDSET(set, SIGTTIN);
	SIGADDSET(set, SIGTTOU);
	sigqueue_delete_set_proc(p, &set);
}

/*
 * Determine the signal that should be delivered to thread td, the
 * current thread; return 0 if none.  If there is a pending stop signal
 * with default action, the process stops in issignal().
 */
int
cursig(struct thread *td)
{
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	return (SIGPENDING(td) ? issignal(td) : 0);
}

/*
 * Arrange for ast() to handle unmasked pending signals on return to user
 * mode.  This must be called whenever a signal is added to td_sigqueue or
 * unmasked in td_sigmask.
 */
void
signotify(struct thread *td)
{

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);

	if (SIGPENDING(td))
		ast_sched(td, TDA_SIG);
}

/*
 * Returns 1 (true) if the altstack is configured for the thread, and the
 * passed stack bottom address falls into the altstack range.  Handles
 * the COMPAT_43 special case where the alt stack size is zero.
 */
int
sigonstack(size_t sp)
{
	struct thread *td;

	td = curthread;
	if ((td->td_pflags & TDP_ALTSTACK) == 0)
		return (0);
#if defined(COMPAT_43)
	if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
		return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
#endif
	return (sp >= (size_t)td->td_sigstk.ss_sp &&
	    sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
}

static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < nitems(sigproptbl))
		return (sigproptbl[sig]);
	return (0);
}
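
/*
 * For example, sigprop(SIGTSTP) is SIGPROP_STOP | SIGPROP_TTYSTOP and
 * sigprop(SIGUSR1) is SIGPROP_KILL, while 0 and out-of-range signal
 * numbers report no properties at all.
 */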

static bool
sigact_flag_test(const struct sigaction *act, int flag)
{

	/*
	 * SA_SIGINFO is reset when signal disposition is set to
	 * ignore or default.  Other flags are kept according to user
	 * settings.
	 */
	return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
	    ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
	    (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
}

/*
 * kern_sigaction
 * sigaction
 * freebsd4_sigaction
 * osigaction
 */
int
kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
    struct sigaction *oact, int flags)
{
	struct sigacts *ps;
	struct proc *p = td->td_proc;

	if (!_SIG_VALID(sig))
		return (EINVAL);
	if (act != NULL && act->sa_handler != SIG_DFL &&
	    act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
	    SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
	    SA_NOCLDWAIT | SA_SIGINFO)) != 0)
		return (EINVAL);

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if (oact) {
		memset(oact, 0, sizeof(*oact));
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig)) {
			oact->sa_flags |= SA_SIGINFO;
			oact->sa_sigaction =
			    (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
		} else
			oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		if ((sig == SIGKILL || sig == SIGSTOP) &&
		    act->sa_handler != SIG_DFL) {
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/*
		 * Change setting atomically.
		 */

		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (sigact_flag_test(act, SA_SIGINFO)) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!sigact_flag_test(act, SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (sigact_flag_test(act, SA_ONSTACK))
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (sigact_flag_test(act, SA_RESETHAND))
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (sigact_flag_test(act, SA_NODEFER))
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				ps->ps_flag |= PS_NOCLDSTOP;
			else
				ps->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trusting it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					ps->ps_flag &= ~PS_NOCLDWAIT;
				else
					ps->ps_flag |= PS_NOCLDWAIT;
			} else
				ps->ps_flag &= ~PS_NOCLDWAIT;
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore.  However, don't put SIGCONT in ps_sigignore, as we
		 * have to restart the process.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SIGPROP_IGNORE &&
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			/* never to be seen again */
			sigqueue_delete_proc(p, sig);
			if (sig != SIGCONT)
				/* easier in psignal */
				SIGADDSET(ps->ps_sigignore, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
		} else {
			SIGDELSET(ps->ps_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(ps->ps_sigcatch, sig);
			else
				SIGADDSET(ps->ps_sigcatch, sig);
		}
#ifdef COMPAT_FREEBSD4
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_FREEBSD4) == 0)
			SIGDELSET(ps->ps_freebsd4, sig);
		else
			SIGADDSET(ps->ps_freebsd4, sig);
#endif
#ifdef COMPAT_43
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
		    (flags & KSA_OSIGSET) == 0)
			SIGDELSET(ps->ps_osigset, sig);
		else
			SIGADDSET(ps->ps_osigset, sig);
#endif
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (0);
}
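
/*
 * A minimal in-kernel sketch (illustrative only; the syscall wrappers
 * below are the real consumers): resetting SIGCHLD to its default
 * disposition for the current process could look like
 *
 *	struct sigaction sa = { .sa_handler = SIG_DFL };
 *	int error;
 *
 *	error = kern_sigaction(curthread, SIGCHLD, &sa, NULL, 0);
 */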

#ifndef _SYS_SYSPROTO_H_
struct sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
int
sys_sigaction(struct thread *td, struct sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}

#ifdef COMPAT_FREEBSD4
#ifndef _SYS_SYSPROTO_H_
struct freebsd4_sigaction_args {
	int	sig;
	struct	sigaction *act;
	struct	sigaction *oact;
};
#endif
int
freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
	if (oactp && !error)
		error = copyout(oactp, uap->oact, sizeof(oact));
	return (error);
}
#endif	/* COMPAT_FREEBSD4 */

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigaction_args {
	int	signum;
	struct	osigaction *nsa;
	struct	osigaction *osa;
};
#endif
int
osigaction(struct thread *td, struct osigaction_args *uap)
{
	struct osigaction sa;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);

	nsap = (uap->nsa != NULL) ? &nsa : NULL;
	osap = (uap->osa != NULL) ? &osa : NULL;

	if (nsap) {
		error = copyin(uap->nsa, &sa, sizeof(sa));
		if (error)
			return (error);
		nsap->sa_handler = sa.sa_handler;
		nsap->sa_flags = sa.sa_flags;
		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		sa.sa_handler = osap->sa_handler;
		sa.sa_flags = osap->sa_flags;
		SIG2OSIG(osap->sa_mask, sa.sa_mask);
		error = copyout(&sa, uap->osa, sizeof(sa));
	}
	return (error);
}

#if !defined(__i386__)
/* Avoid replicating the same stub everywhere */
int
osigreturn(struct thread *td, struct osigreturn_args *uap)
{

	return (nosys(td, (struct nosys_args *)uap));
}
#endif
#endif	/* COMPAT_43 */

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	int i;
	struct sigacts *ps;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	for (i = 1; i <= NSIG; i++) {
		if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
			SIGADDSET(ps->ps_sigignore, i);
		}
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
}

/*
 * Reset specified signal to the default disposition.
 */
static void
sigdflt(struct sigacts *ps, int sig)
{

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIGDELSET(ps->ps_sigcatch, sig);
	if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
		SIGADDSET(ps->ps_sigignore, sig);
	ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	SIGDELSET(ps->ps_siginfo, sig);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig_drop_caught(p);

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td = curthread;
	MPASS(td->td_proc == p);
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset the 'no zombies if child dies' flag, as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}

/*
 * kern_sigprocmask()
 *
 *	Manipulate signal mask.
 */
int
kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
    int flags)
{
	sigset_t new_block, oset1;
	struct proc *p;
	int error;

	p = td->td_proc;
	if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
		PROC_LOCK_ASSERT(p, MA_OWNED);
	else
		PROC_LOCK(p);
	mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
	    ? MA_OWNED : MA_NOTOWNED);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			SIGSETOR(td->td_sigmask, *set);
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			goto out;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			if (flags & SIGPROCMASK_OLD)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			signotify(td);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		/*
		 * The new_block set contains signals that were not previously
		 * blocked, but are blocked now.
		 *
		 * In case we block any signal that was not previously blocked
		 * for td, and the process has the signal pending, try to
		 * schedule signal delivery to some thread that does not block
		 * the signal, possibly waking it up.
		 */
		if (p->p_numthreads != 1)
			reschedule_signals(p, new_block, flags);
	}

out:
	if (!(flags & SIGPROCMASK_PROC_LOCKED))
		PROC_UNLOCK(p);
	return (error);
}
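
/*
 * Illustrative use, a sketch only: block SIGPROF for the current
 * thread around a critical region, then restore the previous mask:
 *
 *	sigset_t set, oset;
 *
 *	SIGEMPTYSET(set);
 *	SIGADDSET(set, SIGPROF);
 *	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
 *	...
 *	kern_sigprocmask(td, SIG_SETMASK, &oset, NULL, 0);
 */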

#ifndef _SYS_SYSPROTO_H_
struct sigprocmask_args {
	int	how;
	const sigset_t *set;
	sigset_t *oset;
};
#endif
int
sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigprocmask_args {
	int	how;
	osigset_t mask;
};
#endif
int
osigprocmask(struct thread *td, struct osigprocmask_args *uap)
{
	sigset_t set, oset;
	int error;

	OSIG2SIG(uap->mask, set);
	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
	SIG2OSIG(oset, td->td_retval[0]);
	return (error);
}
#endif	/* COMPAT_43 */

int
sys_sigwait(struct thread *td, struct sigwait_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error) {
		td->td_retval[0] = error;
		return (0);
	}

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error) {
		/*
		 * The sigwait() function shall not return EINTR, but
		 * the syscall does.  Non-ancient libc provides a
		 * wrapper which hides EINTR.  Otherwise, the EINTR
		 * return is used by libthr to implement the required
		 * cancellation point in sigwait().
		 */
		if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
			return (ERESTART);
		td->td_retval[0] = error;
		return (0);
	}

	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
	td->td_retval[0] = error;
	return (0);
}

int
sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	ksiginfo_t ksi;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		timeout = &ts;
	} else
		timeout = NULL;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, timeout);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

int
sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

static void
proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
{
	struct thread *thr;

	FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
		if (thr == td)
			thr->td_si = *si;
		else
			thr->td_si.si_signo = 0;
	}
}

int
kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
    struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t saved_mask, new_block;
	struct proc *p;
	int error, sig, timevalid = 0;
	sbintime_t sbt, precision, tsbt;
	struct timespec ts;
	bool traced;

	p = td->td_proc;
	error = 0;
	traced = false;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	if (timeout != NULL) {
		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			ts = *timeout;
			if (ts.tv_sec < INT32_MAX / 2) {
				tsbt = tstosbt(ts);
				precision = tsbt;
				precision >>= tc_precexp;
				if (TIMESEL(&sbt, tsbt))
					sbt += tc_tick_sbt;
				sbt += tsbt;
			} else
				precision = sbt = 0;
		}
	} else
		precision = sbt = 0;
	ksiginfo_init(ksi);
	/* Some signals cannot be waited for. */
	SIG_CANTMASK(waitset);
	ps = p->p_sigacts;
	PROC_LOCK(p);
	saved_mask = td->td_sigmask;
	SIGSETNAND(td->td_sigmask, waitset);
	if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
	    !kern_sig_discard_ign) {
		thread_lock(td);
		td->td_flags |= TDF_SIGWAIT;
		thread_unlock(td);
	}
	for (;;) {
		mtx_lock(&ps->ps_mtx);
		sig = cursig(td);
		mtx_unlock(&ps->ps_mtx);
		KASSERT(sig >= 0, ("sig %d", sig));
		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
			    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
				error = 0;
				break;
			}
		}

		if (error != 0)
			break;

		/*
		 * POSIX says this must be checked after looking for pending
		 * signals.
		 */
		if (timeout != NULL && !timevalid) {
			error = EINVAL;
			break;
		}

		if (traced) {
			error = EINTR;
			break;
		}

		error = msleep_sbt(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
		    "sigwait", sbt, precision, C_ABSOLUTE);

		/* The syscalls cannot be restarted. */
		if (error == ERESTART)
			error = EINTR;

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return a spurious
		 * EINTR after the wait was done.  Only do this as a
		 * last resort after rechecking for possible queued
		 * signals and expired timeouts.
		 */
		if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
			traced = true;
	}
	thread_lock(td);
	td->td_flags &= ~TDF_SIGWAIT;
	thread_unlock(td);

	new_block = saved_mask;
	SIGSETNAND(new_block, td->td_sigmask);
	td->td_sigmask = saved_mask;
	/*
	 * Fewer signals can now be delivered to us; reschedule signal
	 * notification.
	 */
	if (p->p_numthreads != 1)
		reschedule_signals(p, new_block, 0);

	if (error == 0) {
		SDT_PROBE2(proc, , , signal__clear, sig, ksi);

		if (ksi->ksi_code == SI_TIMER)
			itimer_accept(p, ksi->ksi_timerid, ksi);

#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG)) {
			sig_t action;

			mtx_lock(&ps->ps_mtx);
			action = ps->ps_sigact[_SIG_IDX(sig)];
			mtx_unlock(&ps->ps_mtx);
			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
		}
#endif
		if (sig == SIGKILL) {
			proc_td_siginfo_capture(td, &ksi->ksi_info);
			sigexit(td, sig);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}
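
/*
 * Illustrative call, a sketch only: wait up to one second for SIGUSR1:
 *
 *	sigset_t set;
 *	ksiginfo_t ksi;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int error;
 *
 *	SIGEMPTYSET(set);
 *	SIGADDSET(set, SIGUSR1);
 *	error = kern_sigtimedwait(td, set, &ksi, &ts);
 *
 * When the timeout expires first, the msleep_sbt() above returns
 * EWOULDBLOCK (== EAGAIN), which becomes the return value.
 */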

#ifndef _SYS_SYSPROTO_H_
struct sigpending_args {
	sigset_t *set;
};
#endif
int
sys_sigpending(struct thread *td, struct sigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	return (copyout(&pending, uap->set, sizeof(sigset_t)));
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
int
osigpending(struct thread *td, struct osigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	SIG2OSIG(pending, td->td_retval[0]);
	return (0);
}
#endif	/* COMPAT_43 */

#if defined(COMPAT_43)
/*
 * Generalized interface signal handler, 4.3-compatible.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigvec_args {
	int	signum;
	struct	sigvec *nsv;
	struct	sigvec *osv;
};
#endif
/* ARGSUSED */
int
osigvec(struct thread *td, struct osigvec_args *uap)
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		vec.sv_flags &= ~SA_NOCLDWAIT;
		vec.sv_flags ^= SA_RESTART;
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct osigblock_args {
	int	mask;
};
#endif
int
osigblock(struct thread *td, struct osigblock_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct osigsetmask_args {
	int	mask;
};
#endif
int
osigsetmask(struct thread *td, struct osigsetmask_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}
#endif	/* COMPAT_43 */

/*
 * Suspend calling thread until signal, providing mask to be set in the
 * meantime.
 */
#ifndef _SYS_SYSPROTO_H_
struct sigsuspend_args {
	const sigset_t *sigmask;
};
#endif
/* ARGSUSED */
int
sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);
	return (kern_sigsuspend(td, mask));
}

int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;
	int has_sig, sig;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	PROC_LOCK(p);
	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
	    SIGPROCMASK_PROC_LOCKED);
	td->td_pflags |= TDP_OLDMASK;
	ast_sched(td, TDA_SIGSUSPEND);

	/*
	 * Process signals now.  Otherwise, we can get a spurious
	 * wakeup due to a signal entering the process queue but being
	 * delivered to another thread; sigsuspend should return only
	 * on signal delivery.
	 */
	(p->p_sysent->sv_set_syscall_retval)(td, EINTR);
	for (has_sig = 0; !has_sig;) {
		while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
		    0) == 0)
			/* void */;
		thread_suspend_check(0);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			has_sig += postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return a spurious
		 * EINTR.
		 */
		if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
			has_sig += 1;
	}
	PROC_UNLOCK(p);
	td->td_errno = EINTR;
	td->td_pflags |= TDP_NERRNO;
	return (EJUSTRETURN);
}
SS_ONSTACK : 0) : SS_DISABLE; 1749 } 1750 1751 if (ss != NULL) { 1752 if (oonstack) 1753 return (EPERM); 1754 if ((ss->ss_flags & ~SS_DISABLE) != 0) 1755 return (EINVAL); 1756 if (!(ss->ss_flags & SS_DISABLE)) { 1757 if (ss->ss_size < p->p_sysent->sv_minsigstksz) 1758 return (ENOMEM); 1759 1760 td->td_sigstk = *ss; 1761 td->td_pflags |= TDP_ALTSTACK; 1762 } else { 1763 td->td_pflags &= ~TDP_ALTSTACK; 1764 } 1765 } 1766 return (0); 1767 } 1768 1769 struct killpg1_ctx { 1770 struct thread *td; 1771 ksiginfo_t *ksi; 1772 int sig; 1773 bool sent; 1774 bool found; 1775 int ret; 1776 }; 1777 1778 static void 1779 killpg1_sendsig_locked(struct proc *p, struct killpg1_ctx *arg) 1780 { 1781 int err; 1782 1783 err = p_cansignal(arg->td, p, arg->sig); 1784 if (err == 0 && arg->sig != 0) 1785 pksignal(p, arg->sig, arg->ksi); 1786 if (err != ESRCH) 1787 arg->found = true; 1788 if (err == 0) 1789 arg->sent = true; 1790 else if (arg->ret == 0 && err != ESRCH && err != EPERM) 1791 arg->ret = err; 1792 } 1793 1794 static void 1795 killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg) 1796 { 1797 1798 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 || 1799 (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW) 1800 return; 1801 1802 PROC_LOCK(p); 1803 killpg1_sendsig_locked(p, arg); 1804 PROC_UNLOCK(p); 1805 } 1806 1807 static void 1808 kill_processes_prison_cb(struct proc *p, void *arg) 1809 { 1810 struct killpg1_ctx *ctx = arg; 1811 1812 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 || 1813 (p == ctx->td->td_proc) || p->p_state == PRS_NEW) 1814 return; 1815 1816 killpg1_sendsig_locked(p, ctx); 1817 } 1818 1819 /* 1820 * Common code for kill process group/broadcast kill. 1821 * cp is calling process. 1822 */ 1823 static int 1824 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi) 1825 { 1826 struct proc *p; 1827 struct pgrp *pgrp; 1828 struct killpg1_ctx arg; 1829 1830 arg.td = td; 1831 arg.ksi = ksi; 1832 arg.sig = sig; 1833 arg.sent = false; 1834 arg.found = false; 1835 arg.ret = 0; 1836 if (all) { 1837 /* 1838 * broadcast 1839 */ 1840 prison_proc_iterate(td->td_ucred->cr_prison, 1841 kill_processes_prison_cb, &arg); 1842 } else { 1843 sx_slock(&proctree_lock); 1844 if (pgid == 0) { 1845 /* 1846 * zero pgid means send to my process group. 1847 */ 1848 pgrp = td->td_proc->p_pgrp; 1849 PGRP_LOCK(pgrp); 1850 } else { 1851 pgrp = pgfind(pgid); 1852 if (pgrp == NULL) { 1853 sx_sunlock(&proctree_lock); 1854 return (ESRCH); 1855 } 1856 } 1857 sx_sunlock(&proctree_lock); 1858 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 1859 killpg1_sendsig(p, false, &arg); 1860 } 1861 PGRP_UNLOCK(pgrp); 1862 } 1863 MPASS(arg.ret != 0 || arg.found || !arg.sent); 1864 if (arg.ret == 0 && !arg.sent) 1865 arg.ret = arg.found ? EPERM : ESRCH; 1866 return (arg.ret); 1867 } 1868 1869 #ifndef _SYS_SYSPROTO_H_ 1870 struct kill_args { 1871 int pid; 1872 int signum; 1873 }; 1874 #endif 1875 /* ARGSUSED */ 1876 int 1877 sys_kill(struct thread *td, struct kill_args *uap) 1878 { 1879 1880 return (kern_kill(td, uap->pid, uap->signum)); 1881 } 1882 1883 int 1884 kern_kill(struct thread *td, pid_t pid, int signum) 1885 { 1886 ksiginfo_t ksi; 1887 struct proc *p; 1888 int error; 1889 1890 /* 1891 * A process in capability mode can send signals only to himself. 1892 * The main rationale behind this is that abort(3) is implemented as 1893 * kill(getpid(), SIGABRT). 
1894 */ 1895 if (IN_CAPABILITY_MODE(td) && pid != td->td_proc->p_pid) 1896 return (ECAPMODE); 1897 1898 AUDIT_ARG_SIGNUM(signum); 1899 AUDIT_ARG_PID(pid); 1900 if ((u_int)signum > _SIG_MAXSIG) 1901 return (EINVAL); 1902 1903 ksiginfo_init(&ksi); 1904 ksi.ksi_signo = signum; 1905 ksi.ksi_code = SI_USER; 1906 ksi.ksi_pid = td->td_proc->p_pid; 1907 ksi.ksi_uid = td->td_ucred->cr_ruid; 1908 1909 if (pid > 0) { 1910 /* kill single process */ 1911 if ((p = pfind_any(pid)) == NULL) 1912 return (ESRCH); 1913 AUDIT_ARG_PROCESS(p); 1914 error = p_cansignal(td, p, signum); 1915 if (error == 0 && signum) 1916 pksignal(p, signum, &ksi); 1917 PROC_UNLOCK(p); 1918 return (error); 1919 } 1920 switch (pid) { 1921 case -1: /* broadcast signal */ 1922 return (killpg1(td, signum, 0, 1, &ksi)); 1923 case 0: /* signal own process group */ 1924 return (killpg1(td, signum, 0, 0, &ksi)); 1925 default: /* negative explicit process group */ 1926 return (killpg1(td, signum, -pid, 0, &ksi)); 1927 } 1928 /* NOTREACHED */ 1929 } 1930 1931 int 1932 sys_pdkill(struct thread *td, struct pdkill_args *uap) 1933 { 1934 struct proc *p; 1935 int error; 1936 1937 AUDIT_ARG_SIGNUM(uap->signum); 1938 AUDIT_ARG_FD(uap->fd); 1939 if ((u_int)uap->signum > _SIG_MAXSIG) 1940 return (EINVAL); 1941 1942 error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p); 1943 if (error) 1944 return (error); 1945 AUDIT_ARG_PROCESS(p); 1946 error = p_cansignal(td, p, uap->signum); 1947 if (error == 0 && uap->signum) 1948 kern_psignal(p, uap->signum); 1949 PROC_UNLOCK(p); 1950 return (error); 1951 } 1952 1953 #if defined(COMPAT_43) 1954 #ifndef _SYS_SYSPROTO_H_ 1955 struct okillpg_args { 1956 int pgid; 1957 int signum; 1958 }; 1959 #endif 1960 /* ARGSUSED */ 1961 int 1962 okillpg(struct thread *td, struct okillpg_args *uap) 1963 { 1964 ksiginfo_t ksi; 1965 1966 AUDIT_ARG_SIGNUM(uap->signum); 1967 AUDIT_ARG_PID(uap->pgid); 1968 if ((u_int)uap->signum > _SIG_MAXSIG) 1969 return (EINVAL); 1970 1971 ksiginfo_init(&ksi); 1972 ksi.ksi_signo = uap->signum; 1973 ksi.ksi_code = SI_USER; 1974 ksi.ksi_pid = td->td_proc->p_pid; 1975 ksi.ksi_uid = td->td_ucred->cr_ruid; 1976 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi)); 1977 } 1978 #endif /* COMPAT_43 */ 1979 1980 #ifndef _SYS_SYSPROTO_H_ 1981 struct sigqueue_args { 1982 pid_t pid; 1983 int signum; 1984 /* union sigval */ void *value; 1985 }; 1986 #endif 1987 int 1988 sys_sigqueue(struct thread *td, struct sigqueue_args *uap) 1989 { 1990 union sigval sv; 1991 1992 sv.sival_ptr = uap->value; 1993 1994 return (kern_sigqueue(td, uap->pid, uap->signum, &sv)); 1995 } 1996 1997 int 1998 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value) 1999 { 2000 ksiginfo_t ksi; 2001 struct proc *p; 2002 int error; 2003 2004 if ((u_int)signum > _SIG_MAXSIG) 2005 return (EINVAL); 2006 2007 /* 2008 * Specification says sigqueue can only send signal to 2009 * single process. 2010 */ 2011 if (pid <= 0) 2012 return (EINVAL); 2013 2014 if ((p = pfind_any(pid)) == NULL) 2015 return (ESRCH); 2016 error = p_cansignal(td, p, signum); 2017 if (error == 0 && signum != 0) { 2018 ksiginfo_init(&ksi); 2019 ksi.ksi_flags = KSI_SIGQ; 2020 ksi.ksi_signo = signum; 2021 ksi.ksi_code = SI_QUEUE; 2022 ksi.ksi_pid = td->td_proc->p_pid; 2023 ksi.ksi_uid = td->td_ucred->cr_ruid; 2024 ksi.ksi_value = *value; 2025 error = pksignal(p, ksi.ksi_signo, &ksi); 2026 } 2027 PROC_UNLOCK(p); 2028 return (error); 2029 } 2030 2031 /* 2032 * Send a signal to a process group. 
2033 */ 2034 void 2035 gsignal(int pgid, int sig, ksiginfo_t *ksi) 2036 { 2037 struct pgrp *pgrp; 2038 2039 if (pgid != 0) { 2040 sx_slock(&proctree_lock); 2041 pgrp = pgfind(pgid); 2042 sx_sunlock(&proctree_lock); 2043 if (pgrp != NULL) { 2044 pgsignal(pgrp, sig, 0, ksi); 2045 PGRP_UNLOCK(pgrp); 2046 } 2047 } 2048 } 2049 2050 /* 2051 * Send a signal to a process group. If checktty is 1, 2052 * limit to members which have a controlling terminal. 2053 */ 2054 void 2055 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi) 2056 { 2057 struct proc *p; 2058 2059 if (pgrp) { 2060 PGRP_LOCK_ASSERT(pgrp, MA_OWNED); 2061 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 2062 PROC_LOCK(p); 2063 if (p->p_state == PRS_NORMAL && 2064 (checkctty == 0 || p->p_flag & P_CONTROLT)) 2065 pksignal(p, sig, ksi); 2066 PROC_UNLOCK(p); 2067 } 2068 } 2069 } 2070 2071 /* 2072 * Recalculate the signal mask and reset the signal disposition after 2073 * usermode frame for delivery is formed. Should be called after 2074 * mach-specific routine, because sysent->sv_sendsig() needs correct 2075 * ps_siginfo and signal mask. 2076 */ 2077 static void 2078 postsig_done(int sig, struct thread *td, struct sigacts *ps) 2079 { 2080 sigset_t mask; 2081 2082 mtx_assert(&ps->ps_mtx, MA_OWNED); 2083 td->td_ru.ru_nsignals++; 2084 mask = ps->ps_catchmask[_SIG_IDX(sig)]; 2085 if (!SIGISMEMBER(ps->ps_signodefer, sig)) 2086 SIGADDSET(mask, sig); 2087 kern_sigprocmask(td, SIG_BLOCK, &mask, NULL, 2088 SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED); 2089 if (SIGISMEMBER(ps->ps_sigreset, sig)) 2090 sigdflt(ps, sig); 2091 } 2092 2093 /* 2094 * Send a signal caused by a trap to the current thread. If it will be 2095 * caught immediately, deliver it with correct code. Otherwise, post it 2096 * normally. 2097 */ 2098 void 2099 trapsignal(struct thread *td, ksiginfo_t *ksi) 2100 { 2101 struct sigacts *ps; 2102 struct proc *p; 2103 sigset_t sigmask; 2104 int sig; 2105 2106 p = td->td_proc; 2107 sig = ksi->ksi_signo; 2108 KASSERT(_SIG_VALID(sig), ("invalid signal")); 2109 2110 sigfastblock_fetch(td); 2111 PROC_LOCK(p); 2112 ps = p->p_sigacts; 2113 mtx_lock(&ps->ps_mtx); 2114 sigmask = td->td_sigmask; 2115 if (td->td_sigblock_val != 0) 2116 SIGSETOR(sigmask, fastblock_mask); 2117 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) && 2118 !SIGISMEMBER(sigmask, sig)) { 2119 #ifdef KTRACE 2120 if (KTRPOINT(curthread, KTR_PSIG)) 2121 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)], 2122 &td->td_sigmask, ksi->ksi_code); 2123 #endif 2124 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], 2125 ksi, &td->td_sigmask); 2126 postsig_done(sig, td, ps); 2127 mtx_unlock(&ps->ps_mtx); 2128 } else { 2129 /* 2130 * Avoid a possible infinite loop if the thread 2131 * masking the signal or process is ignoring the 2132 * signal. 
2133 */ 2134 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) || 2135 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) { 2136 SIGDELSET(td->td_sigmask, sig); 2137 SIGDELSET(ps->ps_sigcatch, sig); 2138 SIGDELSET(ps->ps_sigignore, sig); 2139 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 2140 td->td_pflags &= ~TDP_SIGFASTBLOCK; 2141 td->td_sigblock_val = 0; 2142 } 2143 mtx_unlock(&ps->ps_mtx); 2144 p->p_sig = sig; /* XXX to verify code */ 2145 tdsendsignal(p, td, sig, ksi); 2146 } 2147 PROC_UNLOCK(p); 2148 }
2149 2150 static struct thread * 2151 sigtd(struct proc *p, int sig, bool fast_sigblock) 2152 { 2153 struct thread *td, *signal_td; 2154 2155 PROC_LOCK_ASSERT(p, MA_OWNED); 2156 MPASS(!fast_sigblock || p == curproc); 2157 2158 /* 2159 * Check if the current thread can handle the signal without 2160 * switching context to another thread. 2161 */ 2162 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) && 2163 (!fast_sigblock || curthread->td_sigblock_val == 0)) 2164 return (curthread); 2165 signal_td = NULL; 2166 FOREACH_THREAD_IN_PROC(p, td) { 2167 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock || 2168 td != curthread || td->td_sigblock_val == 0)) { 2169 signal_td = td; 2170 break; 2171 } 2172 } 2173 if (signal_td == NULL) 2174 signal_td = FIRST_THREAD_IN_PROC(p); 2175 return (signal_td); 2176 }
2177 2178 /* 2179 * Send the signal to the process. If the signal has an action, the action 2180 * is usually performed by the target process rather than the caller; we add 2181 * the signal to the set of pending signals for the process. 2182 * 2183 * Exceptions: 2184 * o When a stop signal is sent to a sleeping process that takes the 2185 * default action, the process is stopped without awakening it. 2186 * o SIGCONT restarts stopped processes (or puts them back to sleep) 2187 * regardless of the signal action (e.g., blocked or ignored). 2188 * 2189 * Other ignored signals are discarded immediately. 2190 * 2191 * NB: This function may be entered from the debugger via the "kill" DDB 2192 * command. There is little that can be done to mitigate the possibly messy 2193 * side effects of this unwise possibility. 2194 */ 2195 void 2196 kern_psignal(struct proc *p, int sig) 2197 { 2198 ksiginfo_t ksi; 2199 2200 ksiginfo_init(&ksi); 2201 ksi.ksi_signo = sig; 2202 ksi.ksi_code = SI_KERNEL; 2203 (void) tdsendsignal(p, NULL, sig, &ksi); 2204 }
2205 2206 int 2207 pksignal(struct proc *p, int sig, ksiginfo_t *ksi) 2208 { 2209 2210 return (tdsendsignal(p, NULL, sig, ksi)); 2211 }
2212 2213 /* Utility function for finding a thread to send a signal event to.
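 *
 * A hedged sketch of the SIGEV_THREAD_ID shape consumed here, using
 * the FreeBSD spelling sigev_notify_thread_id from <sys/signal.h>
 * (illustrative only):
 *
 *	struct sigevent sev;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify = SIGEV_THREAD_ID;
 *	sev.sigev_signo = SIGUSR1;
 *	sev.sigev_notify_thread_id = tid;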
*/ 2214 int 2215 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd) 2216 { 2217 struct thread *td; 2218 2219 if (sigev->sigev_notify == SIGEV_THREAD_ID) { 2220 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid); 2221 if (td == NULL) 2222 return (ESRCH); 2223 *ttd = td; 2224 } else { 2225 *ttd = NULL; 2226 PROC_LOCK(p); 2227 } 2228 return (0); 2229 } 2230 2231 void 2232 tdsignal(struct thread *td, int sig) 2233 { 2234 ksiginfo_t ksi; 2235 2236 ksiginfo_init(&ksi); 2237 ksi.ksi_signo = sig; 2238 ksi.ksi_code = SI_KERNEL; 2239 (void) tdsendsignal(td->td_proc, td, sig, &ksi); 2240 } 2241 2242 void 2243 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi) 2244 { 2245 2246 (void) tdsendsignal(td->td_proc, td, sig, ksi); 2247 } 2248 2249 static int 2250 sig_sleepq_abort(struct thread *td, int intrval) 2251 { 2252 THREAD_LOCK_ASSERT(td, MA_OWNED); 2253 2254 if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0) { 2255 thread_unlock(td); 2256 return (0); 2257 } 2258 return (sleepq_abort(td, intrval)); 2259 } 2260 2261 int 2262 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) 2263 { 2264 sig_t action; 2265 sigqueue_t *sigqueue; 2266 int prop; 2267 struct sigacts *ps; 2268 int intrval; 2269 int ret = 0; 2270 int wakeup_swapper; 2271 2272 MPASS(td == NULL || p == td->td_proc); 2273 PROC_LOCK_ASSERT(p, MA_OWNED); 2274 2275 if (!_SIG_VALID(sig)) 2276 panic("%s(): invalid signal %d", __func__, sig); 2277 2278 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__)); 2279 2280 /* 2281 * IEEE Std 1003.1-2001: return success when killing a zombie. 2282 */ 2283 if (p->p_state == PRS_ZOMBIE) { 2284 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2285 ksiginfo_tryfree(ksi); 2286 return (ret); 2287 } 2288 2289 ps = p->p_sigacts; 2290 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig); 2291 prop = sigprop(sig); 2292 2293 if (td == NULL) { 2294 td = sigtd(p, sig, false); 2295 sigqueue = &p->p_sigqueue; 2296 } else 2297 sigqueue = &td->td_sigqueue; 2298 2299 SDT_PROBE3(proc, , , signal__send, td, p, sig); 2300 2301 /* 2302 * If the signal is being ignored, then we forget about it 2303 * immediately, except when the target process executes 2304 * sigwait(). (Note: we don't set SIGCONT in ps_sigignore, 2305 * and if it is set to SIG_IGN, action will be SIG_DFL here.) 2306 */ 2307 mtx_lock(&ps->ps_mtx); 2308 if (SIGISMEMBER(ps->ps_sigignore, sig)) { 2309 if (kern_sig_discard_ign && 2310 (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) { 2311 SDT_PROBE3(proc, , , signal__discard, td, p, sig); 2312 2313 mtx_unlock(&ps->ps_mtx); 2314 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2315 ksiginfo_tryfree(ksi); 2316 return (ret); 2317 } else { 2318 action = SIG_CATCH; 2319 intrval = 0; 2320 } 2321 } else { 2322 if (SIGISMEMBER(td->td_sigmask, sig)) 2323 action = SIG_HOLD; 2324 else if (SIGISMEMBER(ps->ps_sigcatch, sig)) 2325 action = SIG_CATCH; 2326 else 2327 action = SIG_DFL; 2328 if (SIGISMEMBER(ps->ps_sigintr, sig)) 2329 intrval = EINTR; 2330 else 2331 intrval = ERESTART; 2332 } 2333 mtx_unlock(&ps->ps_mtx); 2334 2335 if (prop & SIGPROP_CONT) 2336 sigqueue_delete_stopmask_proc(p); 2337 else if (prop & SIGPROP_STOP) { 2338 /* 2339 * If sending a tty stop signal to a member of an orphaned 2340 * process group, discard the signal here if the action 2341 * is default; don't stop the process below if sleeping, 2342 * and don't clear any pending SIGCONT. 
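 * (Rationale, per POSIX job control: an orphaned process group has no
 * ancestor left in the session to send it SIGCONT, so stopping its
 * members by default would leave them stopped forever.)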
2343 */ 2344 if ((prop & SIGPROP_TTYSTOP) != 0 && 2345 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 && 2346 action == SIG_DFL) { 2347 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2348 ksiginfo_tryfree(ksi); 2349 return (ret); 2350 } 2351 sigqueue_delete_proc(p, SIGCONT); 2352 if (p->p_flag & P_CONTINUED) { 2353 p->p_flag &= ~P_CONTINUED; 2354 PROC_LOCK(p->p_pptr); 2355 sigqueue_take(p->p_ksi); 2356 PROC_UNLOCK(p->p_pptr); 2357 } 2358 } 2359 2360 ret = sigqueue_add(sigqueue, sig, ksi); 2361 if (ret != 0) 2362 return (ret); 2363 signotify(td); 2364 /* 2365 * Defer further processing for signals which are held, 2366 * except that stopped processes must be continued by SIGCONT. 2367 */ 2368 if (action == SIG_HOLD && 2369 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG))) 2370 return (ret); 2371 2372 wakeup_swapper = 0; 2373 2374 /* 2375 * Some signals have a process-wide effect and a per-thread 2376 * component. Most processing occurs when the process next 2377 * tries to cross the user boundary, however there are some 2378 * times when processing needs to be done immediately, such as 2379 * waking up threads so that they can cross the user boundary. 2380 * We try to do the per-process part here. 2381 */ 2382 if (P_SHOULDSTOP(p)) { 2383 KASSERT(!(p->p_flag & P_WEXIT), 2384 ("signal to stopped but exiting process")); 2385 if (sig == SIGKILL) { 2386 /* 2387 * If traced process is already stopped, 2388 * then no further action is necessary. 2389 */ 2390 if (p->p_flag & P_TRACED) 2391 goto out; 2392 /* 2393 * SIGKILL sets process running. 2394 * It will die elsewhere. 2395 * All threads must be restarted. 2396 */ 2397 p->p_flag &= ~P_STOPPED_SIG; 2398 goto runfast; 2399 } 2400 2401 if (prop & SIGPROP_CONT) { 2402 /* 2403 * If traced process is already stopped, 2404 * then no further action is necessary. 2405 */ 2406 if (p->p_flag & P_TRACED) 2407 goto out; 2408 /* 2409 * If SIGCONT is default (or ignored), we continue the 2410 * process but don't leave the signal in sigqueue as 2411 * it has no further action. If SIGCONT is held, we 2412 * continue the process and leave the signal in 2413 * sigqueue. If the process catches SIGCONT, let it 2414 * handle the signal itself. If it isn't waiting on 2415 * an event, it goes back to run state. 2416 * Otherwise, process goes back to sleep state. 2417 */ 2418 p->p_flag &= ~P_STOPPED_SIG; 2419 PROC_SLOCK(p); 2420 if (p->p_numthreads == p->p_suspcount) { 2421 PROC_SUNLOCK(p); 2422 p->p_flag |= P_CONTINUED; 2423 p->p_xsig = SIGCONT; 2424 PROC_LOCK(p->p_pptr); 2425 childproc_continued(p); 2426 PROC_UNLOCK(p->p_pptr); 2427 PROC_SLOCK(p); 2428 } 2429 if (action == SIG_DFL) { 2430 thread_unsuspend(p); 2431 PROC_SUNLOCK(p); 2432 sigqueue_delete(sigqueue, sig); 2433 goto out_cont; 2434 } 2435 if (action == SIG_CATCH) { 2436 /* 2437 * The process wants to catch it so it needs 2438 * to run at least one thread, but which one? 2439 */ 2440 PROC_SUNLOCK(p); 2441 goto runfast; 2442 } 2443 /* 2444 * The signal is not ignored or caught. 2445 */ 2446 thread_unsuspend(p); 2447 PROC_SUNLOCK(p); 2448 goto out_cont; 2449 } 2450 2451 if (prop & SIGPROP_STOP) { 2452 /* 2453 * If traced process is already stopped, 2454 * then no further action is necessary. 2455 */ 2456 if (p->p_flag & P_TRACED) 2457 goto out; 2458 /* 2459 * Already stopped, don't need to stop again 2460 * (If we did the shell could get confused). 2461 * Just make sure the signal STOP bit set. 
2462 */ 2463 p->p_flag |= P_STOPPED_SIG; 2464 sigqueue_delete(sigqueue, sig); 2465 goto out; 2466 } 2467 2468 /* 2469 * All other kinds of signals: 2470 * If a thread is sleeping interruptibly, simulate a 2471 * wakeup so that when it is continued it will be made 2472 * runnable and can look at the signal. However, don't make 2473 * the PROCESS runnable, leave it stopped. 2474 * It may run a bit until it hits a thread_suspend_check(). 2475 */ 2476 PROC_SLOCK(p); 2477 thread_lock(td); 2478 if (TD_CAN_ABORT(td)) 2479 wakeup_swapper = sig_sleepq_abort(td, intrval); 2480 else 2481 thread_unlock(td); 2482 PROC_SUNLOCK(p); 2483 goto out; 2484 /* 2485 * Mutexes are short lived. Threads waiting on them will 2486 * hit thread_suspend_check() soon. 2487 */ 2488 } else if (p->p_state == PRS_NORMAL) { 2489 if (p->p_flag & P_TRACED || action == SIG_CATCH) { 2490 tdsigwakeup(td, sig, action, intrval); 2491 goto out; 2492 } 2493 2494 MPASS(action == SIG_DFL); 2495 2496 if (prop & SIGPROP_STOP) { 2497 if (p->p_flag & (P_PPWAIT|P_WEXIT)) 2498 goto out; 2499 p->p_flag |= P_STOPPED_SIG; 2500 p->p_xsig = sig; 2501 PROC_SLOCK(p); 2502 wakeup_swapper = sig_suspend_threads(td, p); 2503 if (p->p_numthreads == p->p_suspcount) { 2504 /* 2505 * only thread sending signal to another 2506 * process can reach here, if thread is sending 2507 * signal to its process, because thread does 2508 * not suspend itself here, p_numthreads 2509 * should never be equal to p_suspcount. 2510 */ 2511 thread_stopped(p); 2512 PROC_SUNLOCK(p); 2513 sigqueue_delete_proc(p, p->p_xsig); 2514 } else 2515 PROC_SUNLOCK(p); 2516 goto out; 2517 } 2518 } else { 2519 /* Not in "NORMAL" state. discard the signal. */ 2520 sigqueue_delete(sigqueue, sig); 2521 goto out; 2522 } 2523 2524 /* 2525 * The process is not stopped so we need to apply the signal to all the 2526 * running threads. 2527 */ 2528 runfast: 2529 tdsigwakeup(td, sig, action, intrval); 2530 PROC_SLOCK(p); 2531 thread_unsuspend(p); 2532 PROC_SUNLOCK(p); 2533 out_cont: 2534 itimer_proc_continue(p); 2535 kqtimer_proc_continue(p); 2536 out: 2537 /* If we jump here, proc slock should not be owned. */ 2538 PROC_SLOCK_ASSERT(p, MA_NOTOWNED); 2539 if (wakeup_swapper) 2540 kick_proc0(); 2541 2542 return (ret); 2543 } 2544 2545 /* 2546 * The force of a signal has been directed against a single 2547 * thread. We need to see what we can do about knocking it 2548 * out of any sleep it may be in etc. 2549 */ 2550 static void 2551 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval) 2552 { 2553 struct proc *p = td->td_proc; 2554 int prop, wakeup_swapper; 2555 2556 PROC_LOCK_ASSERT(p, MA_OWNED); 2557 prop = sigprop(sig); 2558 2559 PROC_SLOCK(p); 2560 thread_lock(td); 2561 /* 2562 * Bring the priority of a thread up if we want it to get 2563 * killed in this lifetime. Be careful to avoid bumping the 2564 * priority of the idle thread, since we still allow to signal 2565 * kernel processes. 2566 */ 2567 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 && 2568 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td)) 2569 sched_prio(td, PUSER); 2570 if (TD_ON_SLEEPQ(td)) { 2571 /* 2572 * If thread is sleeping uninterruptibly 2573 * we can't interrupt the sleep... the signal will 2574 * be noticed when the process returns through 2575 * trap() or syscall(). 2576 */ 2577 if ((td->td_flags & TDF_SINTR) == 0) 2578 goto out; 2579 /* 2580 * If SIGCONT is default (or ignored) and process is 2581 * asleep, we are finished; the process should not 2582 * be awakened. 
2583 */ 2584 if ((prop & SIGPROP_CONT) && action == SIG_DFL) { 2585 thread_unlock(td); 2586 PROC_SUNLOCK(p); 2587 sigqueue_delete(&p->p_sigqueue, sig); 2588 /* 2589 * It may be on either list in this state. 2590 * Remove from both for now. 2591 */ 2592 sigqueue_delete(&td->td_sigqueue, sig); 2593 return; 2594 } 2595 2596 /* 2597 * Don't awaken a sleeping thread for SIGSTOP if the 2598 * STOP signal is deferred. 2599 */ 2600 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY | 2601 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) 2602 goto out; 2603 2604 /* 2605 * Give low priority threads a better chance to run. 2606 */ 2607 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td)) 2608 sched_prio(td, PUSER); 2609 2610 wakeup_swapper = sig_sleepq_abort(td, intrval); 2611 PROC_SUNLOCK(p); 2612 if (wakeup_swapper) 2613 kick_proc0(); 2614 return; 2615 } 2616 2617 /* 2618 * Other states do nothing with the signal immediately, 2619 * other than kicking ourselves if we are running. 2620 * It will either never be noticed, or noticed very soon. 2621 */ 2622 #ifdef SMP 2623 if (TD_IS_RUNNING(td) && td != curthread) 2624 forward_signal(td); 2625 #endif 2626 2627 out: 2628 PROC_SUNLOCK(p); 2629 thread_unlock(td); 2630 } 2631 2632 static void 2633 ptrace_coredump(struct thread *td) 2634 { 2635 struct proc *p; 2636 struct thr_coredump_req *tcq; 2637 void *rl_cookie; 2638 2639 MPASS(td == curthread); 2640 p = td->td_proc; 2641 PROC_LOCK_ASSERT(p, MA_OWNED); 2642 if ((td->td_dbgflags & TDB_COREDUMPRQ) == 0) 2643 return; 2644 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped")); 2645 2646 tcq = td->td_coredump; 2647 KASSERT(tcq != NULL, ("td_coredump is NULL")); 2648 2649 if (p->p_sysent->sv_coredump == NULL) { 2650 tcq->tc_error = ENOSYS; 2651 goto wake; 2652 } 2653 2654 PROC_UNLOCK(p); 2655 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX); 2656 2657 tcq->tc_error = p->p_sysent->sv_coredump(td, tcq->tc_vp, 2658 tcq->tc_limit, tcq->tc_flags); 2659 2660 vn_rangelock_unlock(tcq->tc_vp, rl_cookie); 2661 PROC_LOCK(p); 2662 wake: 2663 td->td_dbgflags &= ~TDB_COREDUMPRQ; 2664 td->td_coredump = NULL; 2665 wakeup(p); 2666 } 2667 2668 static int 2669 sig_suspend_threads(struct thread *td, struct proc *p) 2670 { 2671 struct thread *td2; 2672 int wakeup_swapper; 2673 2674 PROC_LOCK_ASSERT(p, MA_OWNED); 2675 PROC_SLOCK_ASSERT(p, MA_OWNED); 2676 2677 wakeup_swapper = 0; 2678 FOREACH_THREAD_IN_PROC(p, td2) { 2679 thread_lock(td2); 2680 ast_sched_locked(td2, TDA_SUSPEND); 2681 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) && 2682 (td2->td_flags & TDF_SINTR)) { 2683 if (td2->td_flags & TDF_SBDRY) { 2684 /* 2685 * Once a thread is asleep with 2686 * TDF_SBDRY and without TDF_SERESTART 2687 * or TDF_SEINTR set, it should never 2688 * become suspended due to this check. 2689 */ 2690 KASSERT(!TD_IS_SUSPENDED(td2), 2691 ("thread with deferred stops suspended")); 2692 if (TD_SBDRY_INTR(td2)) { 2693 wakeup_swapper |= sleepq_abort(td2, 2694 TD_SBDRY_ERRNO(td2)); 2695 continue; 2696 } 2697 } else if (!TD_IS_SUSPENDED(td2)) 2698 thread_suspend_one(td2); 2699 } else if (!TD_IS_SUSPENDED(td2)) { 2700 #ifdef SMP 2701 if (TD_IS_RUNNING(td2) && td2 != td) 2702 forward_signal(td2); 2703 #endif 2704 } 2705 thread_unlock(td2); 2706 } 2707 return (wakeup_swapper); 2708 } 2709 2710 /* 2711 * Stop the process for an event deemed interesting to the debugger. If si is 2712 * non-NULL, this is a signal exchange; the new signal requested by the 2713 * debugger will be returned for handling. 
If si is NULL, this is some other 2714 * type of interesting event. The debugger may request a signal be delivered in 2715 * that case as well; however, it will be deferred until it can be handled. 2716 */ 2717 int 2718 ptracestop(struct thread *td, int sig, ksiginfo_t *si) 2719 { 2720 struct proc *p = td->td_proc; 2721 struct thread *td2; 2722 ksiginfo_t ksi; 2723 2724 PROC_LOCK_ASSERT(p, MA_OWNED); 2725 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process")); 2726 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 2727 &p->p_mtx.lock_object, "Stopping for traced signal"); 2728 2729 td->td_xsig = sig;
2730 2731 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) { 2732 td->td_dbgflags |= TDB_XSIG; 2733 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d", 2734 td->td_tid, p->p_pid, td->td_dbgflags, sig); 2735 PROC_SLOCK(p); 2736 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) { 2737 if (P_KILLED(p)) { 2738 /* 2739 * Ensure that, if we've been PT_KILLed, the 2740 * exit status reflects that. Another thread 2741 * may also be in ptracestop(), having just 2742 * received the SIGKILL, but this thread was 2743 * unsuspended first. 2744 */ 2745 td->td_dbgflags &= ~TDB_XSIG; 2746 td->td_xsig = SIGKILL; 2747 p->p_ptevents = 0; 2748 break; 2749 } 2750 if (p->p_flag & P_SINGLE_EXIT && 2751 !(td->td_dbgflags & TDB_EXIT)) { 2752 /* 2753 * Ignore ptrace stops except for thread exit 2754 * events when the process exits. 2755 */ 2756 td->td_dbgflags &= ~TDB_XSIG; 2757 PROC_SUNLOCK(p); 2758 return (0); 2759 }
2760 2761 /* 2762 * Make wait(2) work. Ensure that right after the 2763 * attach, the thread chosen to become the leader 2764 * of the attach gets reported to the waiter. 2765 * Otherwise, just avoid overwriting another thread's 2766 * assignment to p_xthread. If another thread has 2767 * already set p_xthread, the current thread will get 2768 * a chance to report itself upon the next iteration. 2769 */ 2770 if ((td->td_dbgflags & TDB_FSTP) != 0 || 2771 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 && 2772 p->p_xthread == NULL)) { 2773 p->p_xsig = sig; 2774 p->p_xthread = td; 2775 2776 /* 2777 * If we are on the sleepqueue already, 2778 * let the sleepqueue code decide if it 2779 * needs to go to sleep after the attach. 2780 */ 2781 if (td->td_wchan == NULL) 2782 td->td_dbgflags &= ~TDB_FSTP; 2783 2784 p->p_flag2 &= ~P2_PTRACE_FSTP; 2785 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE; 2786 sig_suspend_threads(td, p); 2787 } 2788 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) { 2789 td->td_dbgflags &= ~TDB_STOPATFORK; 2790 } 2791 stopme: 2792 td->td_dbgflags |= TDB_SSWITCH; 2793 thread_suspend_switch(td, p); 2794 td->td_dbgflags &= ~TDB_SSWITCH; 2795 if ((td->td_dbgflags & TDB_COREDUMPRQ) != 0) { 2796 PROC_SUNLOCK(p); 2797 ptrace_coredump(td); 2798 PROC_SLOCK(p); 2799 goto stopme; 2800 } 2801 if (p->p_xthread == td) 2802 p->p_xthread = NULL; 2803 if (!(p->p_flag & P_TRACED)) 2804 break; 2805 if (td->td_dbgflags & TDB_SUSPEND) { 2806 if (p->p_flag & P_SINGLE_EXIT) 2807 break; 2808 goto stopme; 2809 } 2810 } 2811 PROC_SUNLOCK(p); 2812 }
2813 2814 if (si != NULL && sig == td->td_xsig) { 2815 /* Parent wants us to take the original signal unchanged. */ 2816 si->ksi_flags |= KSI_HEAD; 2817 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0) 2818 si->ksi_signo = 0; 2819 } else if (td->td_xsig != 0) { 2820 /* 2821 * If the parent wants us to take a new signal, then it will 2822 * leave it in td->td_xsig; otherwise we just look for signals again.
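 *
 * A hedged debugger-side sketch of this exchange via ptrace(2)
 * (illustrative only):
 *
 *	waitpid(pid, &status, 0);			child stopped here
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);	discard the signal
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, SIGINT);	replace the signal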
2823 */ 2824 ksiginfo_init(&ksi); 2825 ksi.ksi_signo = td->td_xsig; 2826 ksi.ksi_flags |= KSI_PTRACE; 2827 td2 = sigtd(p, td->td_xsig, false); 2828 tdsendsignal(p, td2, td->td_xsig, &ksi); 2829 if (td != td2) 2830 return (0); 2831 } 2832 2833 return (td->td_xsig); 2834 }
2835 2836 static void 2837 reschedule_signals(struct proc *p, sigset_t block, int flags) 2838 { 2839 struct sigacts *ps; 2840 struct thread *td; 2841 int sig; 2842 bool fastblk, pslocked; 2843 2844 PROC_LOCK_ASSERT(p, MA_OWNED); 2845 ps = p->p_sigacts; 2846 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0; 2847 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED); 2848 if (SIGISEMPTY(p->p_siglist)) 2849 return; 2850 SIGSETAND(block, p->p_siglist); 2851 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0; 2852 SIG_FOREACH(sig, &block) { 2853 td = sigtd(p, sig, fastblk); 2854 2855 /* 2856 * If sigtd() selected us even though sigfastblock is 2857 * blocking, do not activate the AST or wake us, to 2858 * avoid a loop in the AST handler. 2859 */ 2860 if (fastblk && td == curthread) 2861 continue; 2862 2863 signotify(td); 2864 if (!pslocked) 2865 mtx_lock(&ps->ps_mtx); 2866 if (p->p_flag & P_TRACED || 2867 (SIGISMEMBER(ps->ps_sigcatch, sig) && 2868 !SIGISMEMBER(td->td_sigmask, sig))) { 2869 tdsigwakeup(td, sig, SIG_CATCH, 2870 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : 2871 ERESTART)); 2872 } 2873 if (!pslocked) 2874 mtx_unlock(&ps->ps_mtx); 2875 } 2876 }
2877 2878 void 2879 tdsigcleanup(struct thread *td) 2880 { 2881 struct proc *p; 2882 sigset_t unblocked; 2883 2884 p = td->td_proc; 2885 PROC_LOCK_ASSERT(p, MA_OWNED); 2886 2887 sigqueue_flush(&td->td_sigqueue); 2888 if (p->p_numthreads == 1) 2889 return; 2890 2891 /* 2892 * Since we cannot handle signals, notify the signal-posting 2893 * code of this by filling the sigmask. 2894 * 2895 * Also, if needed, wake up thread(s) that do not block the 2896 * same signals as the exiting thread, since the thread might 2897 * have been selected for delivery and woken up. 2898 */ 2899 SIGFILLSET(unblocked); 2900 SIGSETNAND(unblocked, td->td_sigmask); 2901 SIGFILLSET(td->td_sigmask); 2902 reschedule_signals(p, unblocked, 0); 2903 2904 }
2905 2906 static int 2907 sigdeferstop_curr_flags(int cflags) 2908 { 2909 2910 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 || 2911 (cflags & TDF_SBDRY) != 0); 2912 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)); 2913 }
2914 2915 /* 2916 * Defer the delivery of SIGSTOP for the current thread, according to 2917 * the requested mode. Returns the previous flags, which must be 2918 * restored by sigallowstop(). 2919 * 2920 * The TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and 2921 * cleared by the current thread, which allows the lock-less read-only 2922 * accesses below.
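 *
 * A minimal in-kernel usage sketch (illustrative only):
 *
 *	int dss;
 *
 *	dss = sigdeferstop(SIGDEFERSTOP_EINTR);
 *	error = tsleep(chan, PCATCH, "wmesg", 0);	stops held off
 *	sigallowstop(dss);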
2923 */ 2924 int 2925 sigdeferstop_impl(int mode) 2926 { 2927 struct thread *td; 2928 int cflags, nflags; 2929 2930 td = curthread; 2931 cflags = sigdeferstop_curr_flags(td->td_flags); 2932 switch (mode) { 2933 case SIGDEFERSTOP_NOP: 2934 nflags = cflags; 2935 break; 2936 case SIGDEFERSTOP_OFF: 2937 nflags = 0; 2938 break; 2939 case SIGDEFERSTOP_SILENT: 2940 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART); 2941 break; 2942 case SIGDEFERSTOP_EINTR: 2943 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART; 2944 break; 2945 case SIGDEFERSTOP_ERESTART: 2946 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR; 2947 break; 2948 default: 2949 panic("sigdeferstop: invalid mode %x", mode); 2950 break; 2951 } 2952 if (cflags == nflags) 2953 return (SIGDEFERSTOP_VAL_NCHG); 2954 thread_lock(td); 2955 td->td_flags = (td->td_flags & ~cflags) | nflags; 2956 thread_unlock(td); 2957 return (cflags); 2958 }
2959 2960 /* 2961 * Restores the STOP handling mode, typically permitting the delivery 2962 * of SIGSTOP for the current thread. This does not immediately 2963 * suspend if a stop was posted. Instead, the thread will suspend 2964 * either via ast() or a subsequent interruptible sleep. 2965 */ 2966 void 2967 sigallowstop_impl(int prev) 2968 { 2969 struct thread *td; 2970 int cflags; 2971 2972 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop")); 2973 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0, 2974 ("sigallowstop: incorrect previous mode %x", prev)); 2975 td = curthread; 2976 cflags = sigdeferstop_curr_flags(td->td_flags); 2977 if (cflags != prev) { 2978 thread_lock(td); 2979 td->td_flags = (td->td_flags & ~cflags) | prev; 2980 thread_unlock(td); 2981 } 2982 }
2983 2984 enum sigstatus { 2985 SIGSTATUS_HANDLE, 2986 SIGSTATUS_HANDLED, 2987 SIGSTATUS_IGNORE, 2988 SIGSTATUS_SBDRY_STOP, 2989 };
2990 2991 /* 2992 * The thread has signal "sig" pending. Figure out what to do with it: 2993 * 2994 * _HANDLE -> the caller should handle the signal 2995 * _HANDLED -> handled internally, reload pending signal set 2996 * _IGNORE -> ignored, remove from the set of pending signals and try the 2997 * next pending signal 2998 * _SBDRY_STOP -> the signal should stop the thread but this is not 2999 * permitted in the current context 3000 */ 3001 static enum sigstatus 3002 sigprocess(struct thread *td, int sig) 3003 { 3004 struct proc *p; 3005 struct sigacts *ps; 3006 struct sigqueue *queue; 3007 ksiginfo_t ksi; 3008 int prop; 3009 3010 KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig)); 3011 3012 p = td->td_proc; 3013 ps = p->p_sigacts; 3014 mtx_assert(&ps->ps_mtx, MA_OWNED); 3015 PROC_LOCK_ASSERT(p, MA_OWNED);
3016 3017 /* 3018 * We should allow pending but ignored signals below 3019 * if sigwait() is active, or if P_TRACED was set when 3020 * they were posted. 3021 */ 3022 if (SIGISMEMBER(ps->ps_sigignore, sig) && 3023 (p->p_flag & P_TRACED) == 0 && 3024 (td->td_flags & TDF_SIGWAIT) == 0) { 3025 return (SIGSTATUS_IGNORE); 3026 }
3027 3028 /* 3029 * If the process is going to single-thread mode to prepare 3030 * for exit, there is no sense in delivering any signal 3031 * to usermode. Another important consequence is that 3032 * msleep(..., PCATCH, ...) now is only interruptible by a 3033 * suspend request. 3034 */ 3035 if ((p->p_flag2 & P2_WEXIT) != 0) 3036 return (SIGSTATUS_IGNORE);
3037 3038 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) { 3039 /* 3040 * If traced, always stop. 3041 * Remove the old signal from the queue before the stop.
3042 * XXX shrug off debugger, it causes siginfo to 3043 * be thrown away. 3044 */ 3045 queue = &td->td_sigqueue; 3046 ksiginfo_init(&ksi); 3047 if (sigqueue_get(queue, sig, &ksi) == 0) { 3048 queue = &p->p_sigqueue; 3049 sigqueue_get(queue, sig, &ksi); 3050 } 3051 td->td_si = ksi.ksi_info; 3052 3053 mtx_unlock(&ps->ps_mtx); 3054 sig = ptracestop(td, sig, &ksi); 3055 mtx_lock(&ps->ps_mtx); 3056 3057 td->td_si.si_signo = 0; 3058 3059 /* 3060 * Keep looking if the debugger discarded or 3061 * replaced the signal. 3062 */ 3063 if (sig == 0) 3064 return (SIGSTATUS_HANDLED); 3065 3066 /* 3067 * If the signal became masked, re-queue it. 3068 */ 3069 if (SIGISMEMBER(td->td_sigmask, sig)) { 3070 ksi.ksi_flags |= KSI_HEAD; 3071 sigqueue_add(&p->p_sigqueue, sig, &ksi); 3072 return (SIGSTATUS_HANDLED); 3073 } 3074 3075 /* 3076 * If the traced bit got turned off, requeue the signal and 3077 * reload the set of pending signals. This ensures that p_sig* 3078 * and p_sigact are consistent. 3079 */ 3080 if ((p->p_flag & P_TRACED) == 0) { 3081 if ((ksi.ksi_flags & KSI_PTRACE) == 0) { 3082 ksi.ksi_flags |= KSI_HEAD; 3083 sigqueue_add(queue, sig, &ksi); 3084 } 3085 return (SIGSTATUS_HANDLED); 3086 } 3087 } 3088 3089 /* 3090 * Decide whether the signal should be returned. 3091 * Return the signal's number, or fall through 3092 * to clear it from the pending mask. 3093 */ 3094 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { 3095 case (intptr_t)SIG_DFL: 3096 /* 3097 * Don't take default actions on system processes. 3098 */ 3099 if (p->p_pid <= 1) { 3100 #ifdef DIAGNOSTIC 3101 /* 3102 * Are you sure you want to ignore SIGSEGV 3103 * in init? XXX 3104 */ 3105 printf("Process (pid %lu) got signal %d\n", 3106 (u_long)p->p_pid, sig); 3107 #endif 3108 return (SIGSTATUS_IGNORE); 3109 } 3110 3111 /* 3112 * If there is a pending stop signal to process with 3113 * default action, stop here, then clear the signal. 3114 * Traced or exiting processes should ignore stops. 3115 * Additionally, a member of an orphaned process group 3116 * should ignore tty stops. 3117 */ 3118 prop = sigprop(sig); 3119 if (prop & SIGPROP_STOP) { 3120 mtx_unlock(&ps->ps_mtx); 3121 if ((p->p_flag & (P_TRACED | P_WEXIT | 3122 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp-> 3123 pg_flags & PGRP_ORPHANED) != 0 && 3124 (prop & SIGPROP_TTYSTOP) != 0)) { 3125 mtx_lock(&ps->ps_mtx); 3126 return (SIGSTATUS_IGNORE); 3127 } 3128 if (TD_SBDRY_INTR(td)) { 3129 KASSERT((td->td_flags & TDF_SBDRY) != 0, 3130 ("lost TDF_SBDRY")); 3131 mtx_lock(&ps->ps_mtx); 3132 return (SIGSTATUS_SBDRY_STOP); 3133 } 3134 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 3135 &p->p_mtx.lock_object, "Catching SIGSTOP"); 3136 sigqueue_delete(&td->td_sigqueue, sig); 3137 sigqueue_delete(&p->p_sigqueue, sig); 3138 p->p_flag |= P_STOPPED_SIG; 3139 p->p_xsig = sig; 3140 PROC_SLOCK(p); 3141 sig_suspend_threads(td, p); 3142 thread_suspend_switch(td, p); 3143 PROC_SUNLOCK(p); 3144 mtx_lock(&ps->ps_mtx); 3145 return (SIGSTATUS_HANDLED); 3146 } else if ((prop & SIGPROP_IGNORE) != 0 && 3147 (td->td_flags & TDF_SIGWAIT) == 0) { 3148 /* 3149 * Default action is to ignore; drop it if 3150 * not in kern_sigtimedwait(). 3151 */ 3152 return (SIGSTATUS_IGNORE); 3153 } else { 3154 return (SIGSTATUS_HANDLE); 3155 } 3156 3157 case (intptr_t)SIG_IGN: 3158 if ((td->td_flags & TDF_SIGWAIT) == 0) 3159 return (SIGSTATUS_IGNORE); 3160 else 3161 return (SIGSTATUS_HANDLE); 3162 3163 default: 3164 /* 3165 * This signal has an action, let postsig() process it. 
3166 */ 3167 return (SIGSTATUS_HANDLE); 3168 } 3169 } 3170 3171 /* 3172 * If the current process has received a signal (should be caught or cause 3173 * termination, should interrupt current syscall), return the signal number. 3174 * Stop signals with default action are processed immediately, then cleared; 3175 * they aren't returned. This is checked after each entry to the system for 3176 * a syscall or trap (though this can usually be done without calling 3177 * issignal by checking the pending signal masks in cursig.) The normal call 3178 * sequence is 3179 * 3180 * while (sig = cursig(curthread)) 3181 * postsig(sig); 3182 */ 3183 static int 3184 issignal(struct thread *td) 3185 { 3186 struct proc *p; 3187 sigset_t sigpending; 3188 int sig; 3189 3190 p = td->td_proc; 3191 PROC_LOCK_ASSERT(p, MA_OWNED); 3192 3193 for (;;) { 3194 sigpending = td->td_sigqueue.sq_signals; 3195 SIGSETOR(sigpending, p->p_sigqueue.sq_signals); 3196 SIGSETNAND(sigpending, td->td_sigmask); 3197 3198 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags & 3199 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) 3200 SIG_STOPSIGMASK(sigpending); 3201 if (SIGISEMPTY(sigpending)) /* no signal to send */ 3202 return (0); 3203 3204 /* 3205 * Do fast sigblock if requested by usermode. Since 3206 * we do know that there was a signal pending at this 3207 * point, set the FAST_SIGBLOCK_PEND as indicator for 3208 * usermode to perform a dummy call to 3209 * FAST_SIGBLOCK_UNBLOCK, which causes immediate 3210 * delivery of postponed pending signal. 3211 */ 3212 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) { 3213 if (td->td_sigblock_val != 0) 3214 SIGSETNAND(sigpending, fastblock_mask); 3215 if (SIGISEMPTY(sigpending)) { 3216 td->td_pflags |= TDP_SIGFASTPENDING; 3217 return (0); 3218 } 3219 } 3220 3221 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED && 3222 (p->p_flag2 & P2_PTRACE_FSTP) != 0 && 3223 SIGISMEMBER(sigpending, SIGSTOP)) { 3224 /* 3225 * If debugger just attached, always consume 3226 * SIGSTOP from ptrace(PT_ATTACH) first, to 3227 * execute the debugger attach ritual in 3228 * order. 3229 */ 3230 td->td_dbgflags |= TDB_FSTP; 3231 SIGEMPTYSET(sigpending); 3232 SIGADDSET(sigpending, SIGSTOP); 3233 } 3234 3235 SIG_FOREACH(sig, &sigpending) { 3236 switch (sigprocess(td, sig)) { 3237 case SIGSTATUS_HANDLE: 3238 return (sig); 3239 case SIGSTATUS_HANDLED: 3240 goto next; 3241 case SIGSTATUS_IGNORE: 3242 sigqueue_delete(&td->td_sigqueue, sig); 3243 sigqueue_delete(&p->p_sigqueue, sig); 3244 break; 3245 case SIGSTATUS_SBDRY_STOP: 3246 return (-1); 3247 } 3248 } 3249 next:; 3250 } 3251 } 3252 3253 void 3254 thread_stopped(struct proc *p) 3255 { 3256 int n; 3257 3258 PROC_LOCK_ASSERT(p, MA_OWNED); 3259 PROC_SLOCK_ASSERT(p, MA_OWNED); 3260 n = p->p_suspcount; 3261 if (p == curproc) 3262 n++; 3263 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) { 3264 PROC_SUNLOCK(p); 3265 p->p_flag &= ~P_WAITED; 3266 PROC_LOCK(p->p_pptr); 3267 childproc_stopped(p, (p->p_flag & P_TRACED) ? 3268 CLD_TRAPPED : CLD_STOPPED); 3269 PROC_UNLOCK(p->p_pptr); 3270 PROC_SLOCK(p); 3271 } 3272 } 3273 3274 /* 3275 * Take the action for the specified signal 3276 * from the current set of pending signals. 
3277 */ 3278 int 3279 postsig(int sig) 3280 { 3281 struct thread *td; 3282 struct proc *p; 3283 struct sigacts *ps; 3284 sig_t action; 3285 ksiginfo_t ksi; 3286 sigset_t returnmask; 3287 3288 KASSERT(sig != 0, ("postsig")); 3289 3290 td = curthread; 3291 p = td->td_proc; 3292 PROC_LOCK_ASSERT(p, MA_OWNED); 3293 ps = p->p_sigacts; 3294 mtx_assert(&ps->ps_mtx, MA_OWNED); 3295 ksiginfo_init(&ksi); 3296 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 && 3297 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0) 3298 return (0); 3299 ksi.ksi_signo = sig; 3300 if (ksi.ksi_code == SI_TIMER) 3301 itimer_accept(p, ksi.ksi_timerid, &ksi); 3302 action = ps->ps_sigact[_SIG_IDX(sig)]; 3303 #ifdef KTRACE 3304 if (KTRPOINT(td, KTR_PSIG)) 3305 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ? 3306 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code); 3307 #endif 3308 3309 if (action == SIG_DFL) { 3310 /* 3311 * Default action, where the default is to kill 3312 * the process. (Other cases were ignored above.) 3313 */ 3314 mtx_unlock(&ps->ps_mtx); 3315 proc_td_siginfo_capture(td, &ksi.ksi_info); 3316 sigexit(td, sig); 3317 /* NOTREACHED */ 3318 } else { 3319 /* 3320 * If we get here, the signal must be caught. 3321 */ 3322 KASSERT(action != SIG_IGN, ("postsig action %p", action)); 3323 KASSERT(!SIGISMEMBER(td->td_sigmask, sig), 3324 ("postsig action: blocked sig %d", sig)); 3325 3326 /* 3327 * Set the new mask value and also defer further 3328 * occurrences of this signal. 3329 * 3330 * Special case: user has done a sigsuspend. Here the 3331 * current mask is not of interest, but rather the 3332 * mask from before the sigsuspend is what we want 3333 * restored after the signal processing is completed. 3334 */ 3335 if (td->td_pflags & TDP_OLDMASK) { 3336 returnmask = td->td_oldsigmask; 3337 td->td_pflags &= ~TDP_OLDMASK; 3338 } else 3339 returnmask = td->td_sigmask; 3340 3341 if (p->p_sig == sig) { 3342 p->p_sig = 0; 3343 } 3344 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask); 3345 postsig_done(sig, td, ps); 3346 } 3347 return (1); 3348 } 3349 3350 int 3351 sig_ast_checksusp(struct thread *td) 3352 { 3353 struct proc *p __diagused; 3354 int ret; 3355 3356 p = td->td_proc; 3357 PROC_LOCK_ASSERT(p, MA_OWNED); 3358 3359 if (!td_ast_pending(td, TDA_SUSPEND)) 3360 return (0); 3361 3362 ret = thread_suspend_check(1); 3363 MPASS(ret == 0 || ret == EINTR || ret == ERESTART); 3364 return (ret); 3365 } 3366 3367 int 3368 sig_ast_needsigchk(struct thread *td) 3369 { 3370 struct proc *p; 3371 struct sigacts *ps; 3372 int ret, sig; 3373 3374 p = td->td_proc; 3375 PROC_LOCK_ASSERT(p, MA_OWNED); 3376 3377 if (!td_ast_pending(td, TDA_SIG)) 3378 return (0); 3379 3380 ps = p->p_sigacts; 3381 mtx_lock(&ps->ps_mtx); 3382 sig = cursig(td); 3383 if (sig == -1) { 3384 mtx_unlock(&ps->ps_mtx); 3385 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY")); 3386 KASSERT(TD_SBDRY_INTR(td), 3387 ("lost TDF_SERESTART of TDF_SEINTR")); 3388 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 3389 (TDF_SEINTR | TDF_SERESTART), 3390 ("both TDF_SEINTR and TDF_SERESTART")); 3391 ret = TD_SBDRY_ERRNO(td); 3392 } else if (sig != 0) { 3393 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART; 3394 mtx_unlock(&ps->ps_mtx); 3395 } else { 3396 mtx_unlock(&ps->ps_mtx); 3397 ret = 0; 3398 } 3399 3400 /* 3401 * Do not go into sleep if this thread was the ptrace(2) 3402 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH, 3403 * but we usually act on the signal by interrupting sleep, and 3404 * should do that here as well. 
3405 */ 3406 if ((td->td_dbgflags & TDB_FSTP) != 0) { 3407 if (ret == 0) 3408 ret = EINTR; 3409 td->td_dbgflags &= ~TDB_FSTP; 3410 } 3411 3412 return (ret); 3413 } 3414 3415 int 3416 sig_intr(void) 3417 { 3418 struct thread *td; 3419 struct proc *p; 3420 int ret; 3421 3422 td = curthread; 3423 if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND)) 3424 return (0); 3425 3426 p = td->td_proc; 3427 3428 PROC_LOCK(p); 3429 ret = sig_ast_checksusp(td); 3430 if (ret == 0) 3431 ret = sig_ast_needsigchk(td); 3432 PROC_UNLOCK(p); 3433 return (ret); 3434 } 3435 3436 bool 3437 curproc_sigkilled(void) 3438 { 3439 struct thread *td; 3440 struct proc *p; 3441 struct sigacts *ps; 3442 bool res; 3443 3444 td = curthread; 3445 if (!td_ast_pending(td, TDA_SIG)) 3446 return (false); 3447 3448 p = td->td_proc; 3449 PROC_LOCK(p); 3450 ps = p->p_sigacts; 3451 mtx_lock(&ps->ps_mtx); 3452 res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) || 3453 SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL); 3454 mtx_unlock(&ps->ps_mtx); 3455 PROC_UNLOCK(p); 3456 return (res); 3457 } 3458 3459 void 3460 proc_wkilled(struct proc *p) 3461 { 3462 3463 PROC_LOCK_ASSERT(p, MA_OWNED); 3464 if ((p->p_flag & P_WKILLED) == 0) { 3465 p->p_flag |= P_WKILLED; 3466 /* 3467 * Notify swapper that there is a process to swap in. 3468 * The notification is racy, at worst it would take 10 3469 * seconds for the swapper process to notice. 3470 */ 3471 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0) 3472 wakeup(&proc0); 3473 } 3474 } 3475 3476 /* 3477 * Kill the current process for stated reason. 3478 */ 3479 void 3480 killproc(struct proc *p, const char *why) 3481 { 3482 3483 PROC_LOCK_ASSERT(p, MA_OWNED); 3484 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid, 3485 p->p_comm); 3486 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n", 3487 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id, 3488 p->p_ucred->cr_uid, why); 3489 proc_wkilled(p); 3490 kern_psignal(p, SIGKILL); 3491 } 3492 3493 /* 3494 * Force the current process to exit with the specified signal, dumping core 3495 * if appropriate. We bypass the normal tests for masked and caught signals, 3496 * allowing unrecoverable failures to terminate the process without changing 3497 * signal state. Mark the accounting record with the signal termination. 3498 * If dumping core, save the signal number for the debugger. Calls exit and 3499 * does not return. 3500 */ 3501 void 3502 sigexit(struct thread *td, int sig) 3503 { 3504 struct proc *p = td->td_proc; 3505 3506 PROC_LOCK_ASSERT(p, MA_OWNED); 3507 proc_set_p2_wexit(p); 3508 3509 p->p_acflag |= AXSIG; 3510 /* 3511 * We must be single-threading to generate a core dump. This 3512 * ensures that the registers in the core file are up-to-date. 3513 * Also, the ELF dump handler assumes that the thread list doesn't 3514 * change out from under it. 3515 * 3516 * XXX If another thread attempts to single-thread before us 3517 * (e.g. via fork()), we won't get a dump at all. 3518 */ 3519 if ((sigprop(sig) & SIGPROP_CORE) && 3520 thread_single(p, SINGLE_NO_EXIT) == 0) { 3521 p->p_sig = sig; 3522 /* 3523 * Log signals which would cause core dumps 3524 * (Log as LOG_INFO to appease those who don't want 3525 * these messages.) 3526 * XXX : Todo, as well as euid, write out ruid too 3527 * Note that coredump() drops proc lock. 
3528 */ 3529 if (coredump(td) == 0) 3530 sig |= WCOREFLAG; 3531 if (kern_logsigexit) 3532 log(LOG_INFO, 3533 "pid %d (%s), jid %d, uid %d: exited on " 3534 "signal %d%s\n", p->p_pid, p->p_comm, 3535 p->p_ucred->cr_prison->pr_id, 3536 td->td_ucred->cr_uid, 3537 sig &~ WCOREFLAG, 3538 sig & WCOREFLAG ? " (core dumped)" : ""); 3539 } else 3540 PROC_UNLOCK(p); 3541 exit1(td, 0, sig); 3542 /* NOTREACHED */ 3543 } 3544 3545 /* 3546 * Send queued SIGCHLD to parent when child process's state 3547 * is changed. 3548 */ 3549 static void 3550 sigparent(struct proc *p, int reason, int status) 3551 { 3552 PROC_LOCK_ASSERT(p, MA_OWNED); 3553 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED); 3554 3555 if (p->p_ksi != NULL) { 3556 p->p_ksi->ksi_signo = SIGCHLD; 3557 p->p_ksi->ksi_code = reason; 3558 p->p_ksi->ksi_status = status; 3559 p->p_ksi->ksi_pid = p->p_pid; 3560 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid; 3561 if (KSI_ONQ(p->p_ksi)) 3562 return; 3563 } 3564 pksignal(p->p_pptr, SIGCHLD, p->p_ksi); 3565 } 3566 3567 static void 3568 childproc_jobstate(struct proc *p, int reason, int sig) 3569 { 3570 struct sigacts *ps; 3571 3572 PROC_LOCK_ASSERT(p, MA_OWNED); 3573 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED); 3574 3575 /* 3576 * Wake up parent sleeping in kern_wait(), also send 3577 * SIGCHLD to parent, but SIGCHLD does not guarantee 3578 * that parent will awake, because parent may masked 3579 * the signal. 3580 */ 3581 p->p_pptr->p_flag |= P_STATCHILD; 3582 wakeup(p->p_pptr); 3583 3584 ps = p->p_pptr->p_sigacts; 3585 mtx_lock(&ps->ps_mtx); 3586 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) { 3587 mtx_unlock(&ps->ps_mtx); 3588 sigparent(p, reason, sig); 3589 } else 3590 mtx_unlock(&ps->ps_mtx); 3591 } 3592 3593 void 3594 childproc_stopped(struct proc *p, int reason) 3595 { 3596 3597 childproc_jobstate(p, reason, p->p_xsig); 3598 } 3599 3600 void 3601 childproc_continued(struct proc *p) 3602 { 3603 childproc_jobstate(p, CLD_CONTINUED, SIGCONT); 3604 } 3605 3606 void 3607 childproc_exited(struct proc *p) 3608 { 3609 int reason, status; 3610 3611 if (WCOREDUMP(p->p_xsig)) { 3612 reason = CLD_DUMPED; 3613 status = WTERMSIG(p->p_xsig); 3614 } else if (WIFSIGNALED(p->p_xsig)) { 3615 reason = CLD_KILLED; 3616 status = WTERMSIG(p->p_xsig); 3617 } else { 3618 reason = CLD_EXITED; 3619 status = p->p_xexit; 3620 } 3621 /* 3622 * XXX avoid calling wakeup(p->p_pptr), the work is 3623 * done in exit1(). 
3624 */ 3625 sigparent(p, reason, status); 3626 } 3627 3628 #define MAX_NUM_CORE_FILES 100000 3629 #ifndef NUM_CORE_FILES 3630 #define NUM_CORE_FILES 5 3631 #endif 3632 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES); 3633 static int num_cores = NUM_CORE_FILES; 3634 3635 static int 3636 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS) 3637 { 3638 int error; 3639 int new_val; 3640 3641 new_val = num_cores; 3642 error = sysctl_handle_int(oidp, &new_val, 0, req); 3643 if (error != 0 || req->newptr == NULL) 3644 return (error); 3645 if (new_val > MAX_NUM_CORE_FILES) 3646 new_val = MAX_NUM_CORE_FILES; 3647 if (new_val < 0) 3648 new_val = 0; 3649 num_cores = new_val; 3650 return (0); 3651 } 3652 SYSCTL_PROC(_debug, OID_AUTO, ncores, 3653 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int), 3654 sysctl_debug_num_cores_check, "I", 3655 "Maximum number of generated process corefiles while using index format"); 3656 3657 #define GZIP_SUFFIX ".gz" 3658 #define ZSTD_SUFFIX ".zst" 3659 3660 int compress_user_cores = 0; 3661 3662 static int 3663 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS) 3664 { 3665 int error, val; 3666 3667 val = compress_user_cores; 3668 error = sysctl_handle_int(oidp, &val, 0, req); 3669 if (error != 0 || req->newptr == NULL) 3670 return (error); 3671 if (val != 0 && !compressor_avail(val)) 3672 return (EINVAL); 3673 compress_user_cores = val; 3674 return (error); 3675 } 3676 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores, 3677 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int), 3678 sysctl_compress_user_cores, "I", 3679 "Enable compression of user corefiles (" 3680 __XSTRING(COMPRESS_GZIP) " = gzip, " 3681 __XSTRING(COMPRESS_ZSTD) " = zstd)"); 3682 3683 int compress_user_cores_level = 6; 3684 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN, 3685 &compress_user_cores_level, 0, 3686 "Corefile compression level"); 3687 3688 /* 3689 * Protect the access to corefilename[] by allproc_lock. 3690 */ 3691 #define corefilename_lock allproc_lock 3692 3693 static char corefilename[MAXPATHLEN] = {"%N.core"}; 3694 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename)); 3695 3696 static int 3697 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS) 3698 { 3699 int error; 3700 3701 sx_xlock(&corefilename_lock); 3702 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename), 3703 req); 3704 sx_xunlock(&corefilename_lock); 3705 3706 return (error); 3707 } 3708 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW | 3709 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A", 3710 "Process corefile name format string"); 3711 3712 static void 3713 vnode_close_locked(struct thread *td, struct vnode *vp) 3714 { 3715 3716 VOP_UNLOCK(vp); 3717 vn_close(vp, FWRITE, td->td_ucred, td); 3718 } 3719 3720 /* 3721 * If the core format has a %I in it, then we need to check 3722 * for existing corefiles before defining a name. 3723 * To do this we iterate over 0..ncores to find a 3724 * non-existing core file name to use. If all core files are 3725 * already used we choose the oldest one. 
3726 */ 3727 static int 3728 corefile_open_last(struct thread *td, char *name, int indexpos, 3729 int indexlen, int ncores, struct vnode **vpp) 3730 { 3731 struct vnode *oldvp, *nextvp, *vp; 3732 struct vattr vattr; 3733 struct nameidata nd; 3734 int error, i, flags, oflags, cmode; 3735 char ch; 3736 struct timespec lasttime; 3737 3738 nextvp = oldvp = NULL; 3739 cmode = S_IRUSR | S_IWUSR; 3740 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE | 3741 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0); 3742 3743 for (i = 0; i < ncores; i++) { 3744 flags = O_CREAT | FWRITE | O_NOFOLLOW; 3745 3746 ch = name[indexpos + indexlen]; 3747 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen, 3748 i); 3749 name[indexpos + indexlen] = ch; 3750 3751 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name); 3752 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred, 3753 NULL); 3754 if (error != 0) 3755 break; 3756 3757 vp = nd.ni_vp; 3758 NDFREE_PNBUF(&nd); 3759 if ((flags & O_CREAT) == O_CREAT) { 3760 nextvp = vp; 3761 break; 3762 } 3763 3764 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3765 if (error != 0) { 3766 vnode_close_locked(td, vp); 3767 break; 3768 } 3769 3770 if (oldvp == NULL || 3771 lasttime.tv_sec > vattr.va_mtime.tv_sec || 3772 (lasttime.tv_sec == vattr.va_mtime.tv_sec && 3773 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) { 3774 if (oldvp != NULL) 3775 vn_close(oldvp, FWRITE, td->td_ucred, td); 3776 oldvp = vp; 3777 VOP_UNLOCK(oldvp); 3778 lasttime = vattr.va_mtime; 3779 } else { 3780 vnode_close_locked(td, vp); 3781 } 3782 } 3783 3784 if (oldvp != NULL) { 3785 if (nextvp == NULL) { 3786 if ((td->td_proc->p_flag & P_SUGID) != 0) { 3787 error = EFAULT; 3788 vn_close(oldvp, FWRITE, td->td_ucred, td); 3789 } else { 3790 nextvp = oldvp; 3791 error = vn_lock(nextvp, LK_EXCLUSIVE); 3792 if (error != 0) { 3793 vn_close(nextvp, FWRITE, td->td_ucred, 3794 td); 3795 nextvp = NULL; 3796 } 3797 } 3798 } else { 3799 vn_close(oldvp, FWRITE, td->td_ucred, td); 3800 } 3801 } 3802 if (error != 0) { 3803 if (nextvp != NULL) 3804 vnode_close_locked(td, oldvp); 3805 } else { 3806 *vpp = nextvp; 3807 } 3808 3809 return (error); 3810 } 3811 3812 /* 3813 * corefile_open(comm, uid, pid, td, compress, vpp, namep) 3814 * Expand the name described in corefilename, using name, uid, and pid 3815 * and open/create core file. 3816 * corefilename is a printf-like string, with three format specifiers: 3817 * %N name of process ("name") 3818 * %P process id (pid) 3819 * %U user id (uid) 3820 * For example, "%N.core" is the default; they can be disabled completely 3821 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P". 3822 * This is controlled by the sysctl variable kern.corefile (see above). 
3823 */ 3824 static int 3825 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td, 3826 int compress, int signum, struct vnode **vpp, char **namep) 3827 { 3828 struct sbuf sb; 3829 struct nameidata nd; 3830 const char *format; 3831 char *hostname, *name; 3832 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores; 3833 3834 hostname = NULL; 3835 format = corefilename; 3836 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO); 3837 indexlen = 0; 3838 indexpos = -1; 3839 ncores = num_cores; 3840 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN); 3841 sx_slock(&corefilename_lock); 3842 for (i = 0; format[i] != '\0'; i++) { 3843 switch (format[i]) { 3844 case '%': /* Format character */ 3845 i++; 3846 switch (format[i]) { 3847 case '%': 3848 sbuf_putc(&sb, '%'); 3849 break; 3850 case 'H': /* hostname */ 3851 if (hostname == NULL) { 3852 hostname = malloc(MAXHOSTNAMELEN, 3853 M_TEMP, M_WAITOK); 3854 } 3855 getcredhostname(td->td_ucred, hostname, 3856 MAXHOSTNAMELEN); 3857 sbuf_printf(&sb, "%s", hostname); 3858 break; 3859 case 'I': /* autoincrementing index */ 3860 if (indexpos != -1) { 3861 sbuf_printf(&sb, "%%I"); 3862 break; 3863 } 3864 3865 indexpos = sbuf_len(&sb); 3866 sbuf_printf(&sb, "%u", ncores - 1); 3867 indexlen = sbuf_len(&sb) - indexpos; 3868 break; 3869 case 'N': /* process name */ 3870 sbuf_printf(&sb, "%s", comm); 3871 break; 3872 case 'P': /* process id */ 3873 sbuf_printf(&sb, "%u", pid); 3874 break; 3875 case 'S': /* signal number */ 3876 sbuf_printf(&sb, "%i", signum); 3877 break; 3878 case 'U': /* user id */ 3879 sbuf_printf(&sb, "%u", uid); 3880 break; 3881 default: 3882 log(LOG_ERR, 3883 "Unknown format character %c in " 3884 "corename `%s'\n", format[i], format); 3885 break; 3886 } 3887 break; 3888 default: 3889 sbuf_putc(&sb, format[i]); 3890 break; 3891 } 3892 } 3893 sx_sunlock(&corefilename_lock); 3894 free(hostname, M_TEMP); 3895 if (compress == COMPRESS_GZIP) 3896 sbuf_printf(&sb, GZIP_SUFFIX); 3897 else if (compress == COMPRESS_ZSTD) 3898 sbuf_printf(&sb, ZSTD_SUFFIX); 3899 if (sbuf_error(&sb) != 0) { 3900 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too " 3901 "long\n", (long)pid, comm, (u_long)uid); 3902 sbuf_delete(&sb); 3903 free(name, M_TEMP); 3904 return (ENOMEM); 3905 } 3906 sbuf_finish(&sb); 3907 sbuf_delete(&sb); 3908 3909 if (indexpos != -1) { 3910 error = corefile_open_last(td, name, indexpos, indexlen, ncores, 3911 vpp); 3912 if (error != 0) { 3913 log(LOG_ERR, 3914 "pid %d (%s), uid (%u): Path `%s' failed " 3915 "on initial open test, error = %d\n", 3916 pid, comm, uid, name, error); 3917 } 3918 } else { 3919 cmode = S_IRUSR | S_IWUSR; 3920 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE | 3921 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0); 3922 flags = O_CREAT | FWRITE | O_NOFOLLOW; 3923 if ((td->td_proc->p_flag & P_SUGID) != 0) 3924 flags |= O_EXCL; 3925 3926 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name); 3927 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred, 3928 NULL); 3929 if (error == 0) { 3930 *vpp = nd.ni_vp; 3931 NDFREE_PNBUF(&nd); 3932 } 3933 } 3934 3935 if (error != 0) { 3936 #ifdef AUDIT 3937 audit_proc_coredump(td, name, error); 3938 #endif 3939 free(name, M_TEMP); 3940 return (error); 3941 } 3942 *namep = name; 3943 return (0); 3944 } 3945 3946 /* 3947 * Dump a process' core. 
The main routine does some 3948 * policy checking, and creates the name of the coredump; 3949 * then it passes on a vnode and a size limit to the process-specific 3950 * coredump routine if there is one; if there _is not_ one, it returns 3951 * ENOSYS; otherwise it returns the error from the process-specific routine. 3952 */ 3953 3954 static int 3955 coredump(struct thread *td) 3956 { 3957 struct proc *p = td->td_proc; 3958 struct ucred *cred = td->td_ucred; 3959 struct vnode *vp; 3960 struct flock lf; 3961 struct vattr vattr; 3962 size_t fullpathsize; 3963 int error, error1, locked; 3964 char *name; /* name of corefile */ 3965 void *rl_cookie; 3966 off_t limit; 3967 char *fullpath, *freepath = NULL; 3968 struct sbuf *sb; 3969 3970 PROC_LOCK_ASSERT(p, MA_OWNED); 3971 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td); 3972 3973 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) || 3974 (p->p_flag2 & P2_NOTRACE) != 0) { 3975 PROC_UNLOCK(p); 3976 return (EFAULT); 3977 } 3978 3979 /* 3980 * Note that the bulk of limit checking is done after 3981 * the corefile is created. The exception is if the limit 3982 * for corefiles is 0, in which case we don't bother 3983 * creating the corefile at all. This layout means that 3984 * a corefile is truncated instead of not being created, 3985 * if it is larger than the limit. 3986 */ 3987 limit = (off_t)lim_cur(td, RLIMIT_CORE); 3988 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) { 3989 PROC_UNLOCK(p); 3990 return (EFBIG); 3991 } 3992 PROC_UNLOCK(p); 3993 3994 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td, 3995 compress_user_cores, p->p_sig, &vp, &name); 3996 if (error != 0) 3997 return (error); 3998 3999 /* 4000 * Don't dump to non-regular files or files with links. 4001 * Do not dump into system files. Effective user must own the corefile. 4002 */ 4003 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 || 4004 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 || 4005 vattr.va_uid != cred->cr_uid) { 4006 VOP_UNLOCK(vp); 4007 error = EFAULT; 4008 goto out; 4009 } 4010 4011 VOP_UNLOCK(vp); 4012 4013 /* Postpone other writers, including core dumps of other processes. */ 4014 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX); 4015 4016 lf.l_whence = SEEK_SET; 4017 lf.l_start = 0; 4018 lf.l_len = 0; 4019 lf.l_type = F_WRLCK; 4020 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0); 4021 4022 VATTR_NULL(&vattr); 4023 vattr.va_size = 0; 4024 if (set_core_nodump_flag) 4025 vattr.va_flags = UF_NODUMP; 4026 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4027 VOP_SETATTR(vp, &vattr, cred); 4028 VOP_UNLOCK(vp); 4029 PROC_LOCK(p); 4030 p->p_acflag |= ACORE; 4031 PROC_UNLOCK(p); 4032 4033 if (p->p_sysent->sv_coredump != NULL) { 4034 error = p->p_sysent->sv_coredump(td, vp, limit, 0); 4035 } else { 4036 error = ENOSYS; 4037 } 4038 4039 if (locked) { 4040 lf.l_type = F_UNLCK; 4041 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); 4042 } 4043 vn_rangelock_unlock(vp, rl_cookie); 4044 4045 /* 4046 * Notify the userland helper that a process triggered a core dump. 4047 * This allows the helper to run an automated debugging session. 
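 *
 * A hedged devd.conf(5) sketch that could consume this event
 * (variable names follow the comm=/core= pairs built below;
 * illustrative only):
 *
 *	notify 10 {
 *		match "system"		"kernel";
 *		match "subsystem"	"signal";
 *		match "type"		"coredump";
 *		action "logger core dumped: $comm -> $core";
 *	};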
4048 */ 4049 if (error != 0 || coredump_devctl == 0) 4050 goto out; 4051 sb = sbuf_new_auto(); 4052 if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0) 4053 goto out2; 4054 sbuf_printf(sb, "comm=\""); 4055 devctl_safe_quote_sb(sb, fullpath); 4056 free(freepath, M_TEMP); 4057 sbuf_printf(sb, "\" core=\""); 4058 4059 /* 4060 * We can't lookup core file vp directly. When we're replacing a core, and 4061 * other random times, we flush the name cache, so it will fail. Instead, 4062 * if the path of the core is relative, add the current dir in front if it. 4063 */ 4064 if (name[0] != '/') { 4065 fullpathsize = MAXPATHLEN; 4066 freepath = malloc(fullpathsize, M_TEMP, M_WAITOK); 4067 if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) { 4068 free(freepath, M_TEMP); 4069 goto out2; 4070 } 4071 devctl_safe_quote_sb(sb, fullpath); 4072 free(freepath, M_TEMP); 4073 sbuf_putc(sb, '/'); 4074 } 4075 devctl_safe_quote_sb(sb, name); 4076 sbuf_printf(sb, "\""); 4077 if (sbuf_finish(sb) == 0) 4078 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb)); 4079 out2: 4080 sbuf_delete(sb); 4081 out: 4082 error1 = vn_close(vp, FWRITE, cred, td); 4083 if (error == 0) 4084 error = error1; 4085 #ifdef AUDIT 4086 audit_proc_coredump(td, name, error); 4087 #endif 4088 free(name, M_TEMP); 4089 return (error); 4090 } 4091 4092 /* 4093 * Nonexistent system call-- signal process (may want to handle it). Flag 4094 * error in case process won't see signal immediately (blocked or ignored). 4095 */ 4096 #ifndef _SYS_SYSPROTO_H_ 4097 struct nosys_args { 4098 int dummy; 4099 }; 4100 #endif 4101 /* ARGSUSED */ 4102 int 4103 nosys(struct thread *td, struct nosys_args *args) 4104 { 4105 struct proc *p; 4106 4107 p = td->td_proc; 4108 4109 PROC_LOCK(p); 4110 tdsignal(td, SIGSYS); 4111 PROC_UNLOCK(p); 4112 if (kern_lognosys == 1 || kern_lognosys == 3) { 4113 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm, 4114 td->td_sa.code); 4115 } 4116 if (kern_lognosys == 2 || kern_lognosys == 3 || 4117 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) { 4118 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm, 4119 td->td_sa.code); 4120 } 4121 return (ENOSYS); 4122 } 4123 4124 /* 4125 * Send a SIGIO or SIGURG signal to a process or process group using stored 4126 * credentials rather than those of the current process. 
/*
 * Send a SIGIO or SIGURG signal to a process or process group using
 * stored credentials rather than those of the current process.
 */
void
pgsigio(struct sigio **sigiop, int sig, int checkctty)
{
	ksiginfo_t ksi;
	struct sigio *sigio;

	ksiginfo_init(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = SI_KERNEL;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	if (sigio->sio_pgid > 0) {
		PROC_LOCK(sigio->sio_proc);
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			kern_psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				kern_psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	knlist_add(p->p_klist, kn, 0);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	knlist_remove(p->p_klist, kn, 0);
}

/*
 * Signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but it is
 * probably not worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

struct sigacts *
sigacts_alloc(void)
{
	struct sigacts *ps;

	ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
	refcount_init(&ps->ps_refcnt, 1);
	mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
	return (ps);
}

void
sigacts_free(struct sigacts *ps)
{

	if (refcount_release(&ps->ps_refcnt) == 0)
		return;
	mtx_destroy(&ps->ps_mtx);
	free(ps, M_SUBPROC);
}

struct sigacts *
sigacts_hold(struct sigacts *ps)
{

	refcount_acquire(&ps->ps_refcnt);
	return (ps);
}

void
sigacts_copy(struct sigacts *dest, struct sigacts *src)
{

	KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
	mtx_lock(&src->ps_mtx);
	bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
	mtx_unlock(&src->ps_mtx);
}

int
sigacts_shared(struct sigacts *ps)
{

	return (ps->ps_refcnt > 1);
}

/*
 * Reset all caught signals to their default disposition, deleting
 * any now-ignored pending instances from the process queue.
 */
void
sig_drop_caught(struct proc *p)
{
	int sig;
	struct sigacts *ps;

	ps = p->p_sigacts;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIG_FOREACH(sig, &ps->ps_sigcatch) {
		sigdflt(ps, sig);
		if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
			sigqueue_delete_proc(p, sig);
	}
}
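/*
 * Overview of the sigfastblock facility implemented below (a summary,
 * assuming the word layout implied by the SIGFASTBLOCK_* constants):
 * a thread registers a 32-bit word in its own address space; the low
 * bits (SIGFASTBLOCK_FLAGS) are reserved for kernel-owned flags such
 * as SIGFASTBLOCK_PEND, while the remaining bits act as a block
 * counter that userspace can adjust without entering the kernel.
 * While the counter is nonzero, the kernel treats blockable signals
 * as masked; delivery is requested by setting the PEND flag and is
 * performed once userspace drops the count and issues
 * sigfastblock(SIGFASTBLOCK_UNBLOCK).
 */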
static void
sigfastblock_failed(struct thread *td, bool sendsig, bool write)
{
	ksiginfo_t ksi;

	/*
	 * Prevent further fetches and SIGSEGVs, allowing thread to
	 * issue syscalls despite corruption.
	 */
	sigfastblock_clear(td);

	if (!sendsig)
		return;
	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = SIGSEGV;
	ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
	ksi.ksi_addr = td->td_sigblock_ptr;
	trapsignal(td, &ksi);
}

static bool
sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
{
	uint32_t res;

	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
		return (true);
	if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
		sigfastblock_failed(td, sendsig, false);
		return (false);
	}
	*valp = res;
	td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
	return (true);
}

static void
sigfastblock_resched(struct thread *td, bool resched)
{
	struct proc *p;

	if (resched) {
		p = td->td_proc;
		PROC_LOCK(p);
		reschedule_signals(p, td->td_sigmask, 0);
		PROC_UNLOCK(p);
	}
	ast_sched(td, TDA_SIG);
}
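/*
 * Expected userspace protocol (a sketch, assuming a threading library
 * owns the word; see sigfastblock(2) for the authoritative contract):
 *
 *	static _Thread_local uint32_t fb;
 *
 *	sigfastblock(SIGFASTBLOCK_SETPTR, &fb);	register the word
 *	fb += SIGFASTBLOCK_INC;			block, no syscall needed
 *	...					critical section
 *	fb -= SIGFASTBLOCK_INC;			unblock
 *	if (fb == SIGFASTBLOCK_PEND)		kernel flagged a pending
 *		sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);	signal
 */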
int
sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
{
	struct proc *p;
	int error, res;
	uint32_t oldval;

	error = 0;
	p = td->td_proc;
	switch (uap->cmd) {
	case SIGFASTBLOCK_SETPTR:
		/* Claim ownership of a naturally aligned user word. */
		if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
			error = EBUSY;
			break;
		}
		if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
			error = EINVAL;
			break;
		}
		td->td_pflags |= TDP_SIGFASTBLOCK;
		td->td_sigblock_ptr = uap->ptr;
		break;

	case SIGFASTBLOCK_UNBLOCK:
		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			error = EINVAL;
			break;
		}

		for (;;) {
			res = casueword32(td->td_sigblock_ptr,
			    SIGFASTBLOCK_PEND, &oldval, 0);
			if (res == -1) {
				error = EFAULT;
				sigfastblock_failed(td, false, true);
				break;
			}
			if (res == 0)
				break;
			MPASS(res == 1);
			if (oldval != SIGFASTBLOCK_PEND) {
				error = EBUSY;
				break;
			}
			error = thread_check_susp(td, false);
			if (error != 0)
				break;
		}
		if (error != 0)
			break;

		/*
		 * td_sigblock_val is cleared here, but not on a
		 * syscall exit.  The net effect is that a single
		 * interruptible sleep, performed while the user
		 * sigblock word is set, might return EINTR or
		 * ERESTART to usermode without delivering a signal.
		 * All further sleeps, until userspace clears the word
		 * and does sigfastblock(UNBLOCK), observe the current
		 * word and are no longer interrupted.  This is a
		 * slight non-conformance; the alternative would be to
		 * read the sigblock word on each syscall entry.
		 */
		td->td_sigblock_val = 0;

		/*
		 * Rely on the normal ast mechanism to deliver pending
		 * signals to the current thread, but notify other
		 * threads about the fake unblock.
		 */
		sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);

		break;

	case SIGFASTBLOCK_UNSETPTR:
		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			error = EINVAL;
			break;
		}
		if (!sigfastblock_fetch_sig(td, false, &oldval)) {
			error = EFAULT;
			break;
		}
		if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
			error = EBUSY;
			break;
		}
		sigfastblock_clear(td);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}

void
sigfastblock_clear(struct thread *td)
{
	bool resched;

	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
		return;
	td->td_sigblock_val = 0;
	resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
	    SIGPENDING(td);
	td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
	sigfastblock_resched(td, resched);
}

void
sigfastblock_fetch(struct thread *td)
{
	uint32_t val;

	(void)sigfastblock_fetch_sig(td, true, &val);
}

static void
sigfastblock_setpend1(struct thread *td)
{
	int res;
	uint32_t oldval;

	if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
		return;
	res = fueword32((void *)td->td_sigblock_ptr, &oldval);
	if (res == -1) {
		sigfastblock_failed(td, true, false);
		return;
	}
	for (;;) {
		res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
		    oldval | SIGFASTBLOCK_PEND);
		if (res == -1) {
			sigfastblock_failed(td, true, true);
			return;
		}
		if (res == 0) {
			td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
			td->td_pflags &= ~TDP_SIGFASTPENDING;
			break;
		}
		MPASS(res == 1);
		if (thread_check_susp(td, false) != 0)
			break;
	}
}

static void
sigfastblock_setpend(struct thread *td, bool resched)
{
	struct proc *p;

	sigfastblock_setpend1(td);
	if (resched) {
		p = td->td_proc;
		PROC_LOCK(p);
		reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);
		PROC_UNLOCK(p);
	}
}
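/*
 * Taken together, the routines above implement the kernel half of the
 * handshake: when a signal must be delivered while the block count is
 * raised, sigfastblock_setpend() sets SIGFASTBLOCK_PEND in the user
 * word instead of delivering; once userspace drops the count to zero
 * it observes the flag and calls sigfastblock(SIGFASTBLOCK_UNBLOCK),
 * which clears the flag and lets the AST machinery deliver the queued
 * signals.
 */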