/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/condvar.h>
#include <sys/devctl.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/ptrace.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <sys/jail.h>

#include <machine/cpu.h>

#include <security/audit/audit.h>

#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , signal__send,
    "struct thread *", "struct proc *", "int");
SDT_PROBE_DEFINE2(proc, , , signal__clear,
    "int", "ksiginfo_t *");
SDT_PROBE_DEFINE3(proc, , , signal__discard,
    "struct thread *", "struct proc *", "int");

static int	coredump(struct thread *);
static int	killpg1(struct thread *td, int sig, int pgid, int all,
		    ksiginfo_t *ksi);
static int	issignal(struct thread *td);
static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static int	sig_suspend_threads(struct thread *, struct proc *);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
static void	sigqueue_start(void);
static void	sigfastblock_setpend(struct thread *td, bool resched);

static uma_zone_t	ksiginfo_zone = NULL;
struct filterops sig_filtops = {
	.f_isfd = 0,
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
};

static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

static int	kern_forcesigexit = 1;
SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
    &kern_forcesigexit, 0, "Force trap signal to be handled");

static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "POSIX real time signal");

static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	preallocate_siginfo = 1024;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals that overflowed");

static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "Number of signals that failed allocation");

static int	kern_lognosys = 0;
SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
    "Log invalid syscalls");

__read_frequently bool sigfastblock_fetch_always = false;
SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
    &sigfastblock_fetch_always, 0,
    "Fetch sigfastblock word on each syscall entry for proper "
    "blocking semantic");

static bool	kern_sig_discard_ign = true;
SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
    &kern_sig_discard_ign, 0,
    "Discard ignored signals on delivery, otherwise queue them to "
    "the target queue");

SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);

/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define	CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)

static int	sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
    &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");

static int	capmode_coredump;
SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
    &capmode_coredump, 0, "Allow processes in capability mode to dump core");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
    &do_coredump, 0, "Enable/Disable coredumps");

static int	set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
    0, "Enable setting the NODUMP flag on coredump files");

static int	coredump_devctl = 0;
SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
    0, "Generate a devctl notification when processes coredump");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SIGPROP_KILL		0x01	/* terminates process by default */
#define	SIGPROP_CORE		0x02	/* ditto and coredumps */
#define	SIGPROP_STOP		0x04	/* suspend process */
#define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
#define	SIGPROP_IGNORE		0x10	/* ignore by default */
#define	SIGPROP_CONT		0x20	/* continue if suspended */

static int sigproptbl[NSIG] = {
	[SIGHUP] =	SIGPROP_KILL,
	[SIGINT] =	SIGPROP_KILL,
	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGKILL] =	SIGPROP_KILL,
	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGPIPE] =	SIGPROP_KILL,
	[SIGALRM] =	SIGPROP_KILL,
	[SIGTERM] =	SIGPROP_KILL,
	[SIGURG] =	SIGPROP_IGNORE,
	[SIGSTOP] =	SIGPROP_STOP,
	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
	[SIGCHLD] =	SIGPROP_IGNORE,
	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGIO] =	SIGPROP_IGNORE,
	[SIGXCPU] =	SIGPROP_KILL,
	[SIGXFSZ] =	SIGPROP_KILL,
	[SIGVTALRM] =	SIGPROP_KILL,
	[SIGPROF] =	SIGPROP_KILL,
	[SIGWINCH] =	SIGPROP_IGNORE,
	[SIGINFO] =	SIGPROP_IGNORE,
	[SIGUSR1] =	SIGPROP_KILL,
	[SIGUSR2] =	SIGPROP_KILL,
};

#define	_SIG_FOREACH_ADVANCE(i, set) ({					\
	int __found;							\
	for (;;) {							\
		if (__bits != 0) {					\
			int __sig = ffs(__bits);			\
			__bits &= ~(1u << (__sig - 1));			\
			sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
			__found = 1;					\
			break;						\
		}							\
		if (++__i == _SIG_WORDS) {				\
			__found = 0;					\
			break;						\
		}							\
		__bits = (set)->__bits[__i];				\
	}								\
	__found != 0;							\
})

#define	SIG_FOREACH(i, set)						\
	for (int32_t __i = -1, __bits = 0;				\
	    _SIG_FOREACH_ADVANCE(i, set); )

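/*
 * Usage sketch (illustrative, not part of the original file): SIG_FOREACH
 * binds each member of a sigset_t to the iteration variable in turn,
 * lowest-numbered signal first:
 *
 *	int sig;
 *
 *	SIG_FOREACH(sig, &set) {
 *		... act on sig ...
 *	}
 */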
static sigset_t fastblock_mask;

static void
ast_sig(struct thread *td, int tda)
{
	struct proc *p;
	int sig;
	bool resched_sigs;

	p = td->td_proc;

#ifdef DIAGNOSTIC
	if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
	    TDAI(TDA_AST))) == 0) {
		PROC_LOCK(p);
		thread_lock(td);
		/*
		 * Note that TDA_SIG should be re-read from
		 * td_ast, since signal might have been delivered
		 * after we cleared td_flags above.  This is one of
		 * the reasons for the looping check for the AST
		 * condition.  See comment in userret() about P_PPWAIT.
		 */
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && ((tda | td->td_ast) &
			    (TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
				thread_unlock(td); /* fix dumps */
				panic(
				    "failed2 to set signal flags for ast p %p "
				    "td %p tda %#x td_ast %#x fl %#x",
				    p, td, tda, td->td_ast, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Check for signals.  Unlocked reads of p_pendingcnt or
	 * p_siglist might cause process-directed signal to be handled
	 * later.
	 */
	if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		sigfastblock_fetch(td);
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
		resched_sigs = true;
	} else {
		resched_sigs = false;
	}

	/*
	 * Handle deferred update of the fast sigblock value, after
	 * the postsig() loop was performed.
	 */
	sigfastblock_setpend(td, resched_sigs);
}

static void
ast_sigsuspend(struct thread *td, int tda __unused)
{
	MPASS((td->td_pflags & TDP_OLDMASK) != 0);
	td->td_pflags &= ~TDP_OLDMASK;
	kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
}

static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
	SIGFILLSET(fastblock_mask);
	SIG_CANTMASK(fastblock_mask);
	ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);
	ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
	    TDP_OLDMASK, ast_sigsuspend);
}

ksiginfo_t *
ksiginfo_alloc(int mwait)
{
	MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);

	if (ksiginfo_zone == NULL)
		return (NULL);
	return (uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
}

void
ksiginfo_free(ksiginfo_t *ksi)
{
	uma_zfree(ksiginfo_zone, ksi);
}

static __inline bool
ksiginfo_tryfree(ksiginfo_t *ksi)
{
	if ((ksi->ksi_flags & KSI_EXT) == 0) {
		uma_zfree(ksiginfo_zone, ksi);
		return (true);
	}
	return (false);
}

void
sigqueue_init(sigqueue_t *list, struct proc *p)
{
	SIGEMPTYSET(list->sq_signals);
	SIGEMPTYSET(list->sq_kill);
	SIGEMPTYSET(list->sq_ptrace);
	TAILQ_INIT(&list->sq_list);
	list->sq_proc = p;
	list->sq_flags = SQ_INIT;
}

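/*
 * Lifecycle sketch (illustrative): a sigqueue is prepared with
 * sigqueue_init(), signals enter it through sigqueue_add(), are consumed
 * with sigqueue_get() or sigqueue_take(), and leftovers are discarded with
 * sigqueue_flush().  sigqueue_delete_set_proc() later in this file follows
 * this pattern with a stack-local queue:
 *
 *	sigqueue_t worklist;
 *
 *	sigqueue_init(&worklist, NULL);
 *	... move signals into worklist ...
 *	sigqueue_flush(&worklist);
 */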
/*
 * Get a signal's ksiginfo.
 * Return:
 *	0	-	signal not found
 *	others	-	signal number
 */
static int
sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
	struct proc *p = sq->sq_proc;
	struct ksiginfo *ksi, *next;
	int count = 0;

	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

	if (!SIGISMEMBER(sq->sq_signals, signo))
		return (0);

	if (SIGISMEMBER(sq->sq_ptrace, signo)) {
		count++;
		SIGDELSET(sq->sq_ptrace, signo);
		si->ksi_flags |= KSI_PTRACE;
	}
	if (SIGISMEMBER(sq->sq_kill, signo)) {
		count++;
		if (count == 1)
			SIGDELSET(sq->sq_kill, signo);
	}

	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (ksi->ksi_signo == signo) {
			if (count == 0) {
				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
				ksi->ksi_sigq = NULL;
				ksiginfo_copy(ksi, si);
				if (ksiginfo_tryfree(ksi) && p != NULL)
					p->p_pendingcnt--;
			}
			if (++count > 1)
				break;
		}
	}

	if (count <= 1)
		SIGDELSET(sq->sq_signals, signo);
	si->ksi_signo = signo;
	return (signo);
}

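/*
 * Callers that drain a pending signal typically consult the thread-local
 * queue before the process-wide one; kern_sigtimedwait() later in this
 * file does exactly that (sketch):
 *
 *	if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
 *	    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0)
 *		... siginfo captured in *ksi ...
 */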
491 */ 492 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) { 493 SIGADDSET(sq->sq_kill, signo); 494 goto out_set_bit; 495 } 496 497 /* directly insert the ksi, don't copy it */ 498 if (si->ksi_flags & KSI_INS) { 499 if (si->ksi_flags & KSI_HEAD) 500 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link); 501 else 502 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link); 503 si->ksi_sigq = sq; 504 goto out_set_bit; 505 } 506 507 if (__predict_false(ksiginfo_zone == NULL)) { 508 SIGADDSET(sq->sq_kill, signo); 509 goto out_set_bit; 510 } 511 512 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) { 513 signal_overflow++; 514 ret = EAGAIN; 515 } else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) { 516 signal_alloc_fail++; 517 ret = EAGAIN; 518 } else { 519 if (p != NULL) 520 p->p_pendingcnt++; 521 ksiginfo_copy(si, ksi); 522 ksi->ksi_signo = signo; 523 if (si->ksi_flags & KSI_HEAD) 524 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link); 525 else 526 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link); 527 ksi->ksi_sigq = sq; 528 } 529 530 if (ret != 0) { 531 if ((si->ksi_flags & KSI_PTRACE) != 0) { 532 SIGADDSET(sq->sq_ptrace, signo); 533 ret = 0; 534 goto out_set_bit; 535 } else if ((si->ksi_flags & KSI_TRAP) != 0 || 536 (si->ksi_flags & KSI_SIGQ) == 0) { 537 SIGADDSET(sq->sq_kill, signo); 538 ret = 0; 539 goto out_set_bit; 540 } 541 return (ret); 542 } 543 544 out_set_bit: 545 SIGADDSET(sq->sq_signals, signo); 546 return (ret); 547 } 548 549 void 550 sigqueue_flush(sigqueue_t *sq) 551 { 552 struct proc *p = sq->sq_proc; 553 ksiginfo_t *ksi; 554 555 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited")); 556 557 if (p != NULL) 558 PROC_LOCK_ASSERT(p, MA_OWNED); 559 560 while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) { 561 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link); 562 ksi->ksi_sigq = NULL; 563 if (ksiginfo_tryfree(ksi) && p != NULL) 564 p->p_pendingcnt--; 565 } 566 567 SIGEMPTYSET(sq->sq_signals); 568 SIGEMPTYSET(sq->sq_kill); 569 SIGEMPTYSET(sq->sq_ptrace); 570 } 571 572 static void 573 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set) 574 { 575 sigset_t tmp; 576 struct proc *p1, *p2; 577 ksiginfo_t *ksi, *next; 578 579 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited")); 580 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited")); 581 p1 = src->sq_proc; 582 p2 = dst->sq_proc; 583 /* Move siginfo to target list */ 584 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) { 585 if (SIGISMEMBER(*set, ksi->ksi_signo)) { 586 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link); 587 if (p1 != NULL) 588 p1->p_pendingcnt--; 589 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link); 590 ksi->ksi_sigq = dst; 591 if (p2 != NULL) 592 p2->p_pendingcnt++; 593 } 594 } 595 596 /* Move pending bits to target list */ 597 tmp = src->sq_kill; 598 SIGSETAND(tmp, *set); 599 SIGSETOR(dst->sq_kill, tmp); 600 SIGSETNAND(src->sq_kill, tmp); 601 602 tmp = src->sq_ptrace; 603 SIGSETAND(tmp, *set); 604 SIGSETOR(dst->sq_ptrace, tmp); 605 SIGSETNAND(src->sq_ptrace, tmp); 606 607 tmp = src->sq_signals; 608 SIGSETAND(tmp, *set); 609 SIGSETOR(dst->sq_signals, tmp); 610 SIGSETNAND(src->sq_signals, tmp); 611 } 612 613 #if 0 614 static void 615 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo) 616 { 617 sigset_t set; 618 619 SIGEMPTYSET(set); 620 SIGADDSET(set, signo); 621 sigqueue_move_set(src, dst, &set); 622 } 623 #endif 624 625 static void 626 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set) 627 { 628 struct proc *p = sq->sq_proc; 629 ksiginfo_t *ksi, *next; 630 631 KASSERT(sq->sq_flags & 

	/* Remove siginfo queue */
	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
			ksi->ksi_sigq = NULL;
			if (ksiginfo_tryfree(ksi) && p != NULL)
				p->p_pendingcnt--;
		}
	}
	SIGSETNAND(sq->sq_kill, *set);
	SIGSETNAND(sq->sq_ptrace, *set);
	SIGSETNAND(sq->sq_signals, *set);
}

void
sigqueue_delete(sigqueue_t *sq, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set(sq, &set);
}

/* Remove a set of signals for a process */
static void
sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
{
	sigqueue_t worklist;
	struct thread *td0;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	sigqueue_init(&worklist, NULL);
	sigqueue_move_set(&p->p_sigqueue, &worklist, set);

	FOREACH_THREAD_IN_PROC(p, td0)
		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);

	sigqueue_flush(&worklist);
}

void
sigqueue_delete_proc(struct proc *p, int signo)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, signo);
	sigqueue_delete_set_proc(p, &set);
}

static void
sigqueue_delete_stopmask_proc(struct proc *p)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, SIGSTOP);
	SIGADDSET(set, SIGTSTP);
	SIGADDSET(set, SIGTTIN);
	SIGADDSET(set, SIGTTOU);
	sigqueue_delete_set_proc(p, &set);
}

/*
 * Determine the signal that should be delivered to thread td, the current
 * thread; return 0 if none.  If there is a pending stop signal with default
 * action, the process stops in issignal().
 */
int
cursig(struct thread *td)
{
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	return (SIGPENDING(td) ? issignal(td) : 0);
}

/*
 * Arrange for ast() to handle unmasked pending signals on return to user
 * mode.  This must be called whenever a signal is added to td_sigqueue or
 * unmasked in td_sigmask.
 */
void
signotify(struct thread *td)
{

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);

	if (SIGPENDING(td))
		ast_sched(td, TDA_SIG);
}

/*
 * Returns 1 (true) if altstack is configured for the thread, and the
 * passed stack bottom address falls into the altstack range.  Handles
 * the COMPAT_43 special case where the alt stack size is zero.
 */
int
sigonstack(size_t sp)
{
	struct thread *td;

	td = curthread;
	if ((td->td_pflags & TDP_ALTSTACK) == 0)
		return (0);
#if defined(COMPAT_43)
	if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
		return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
#endif
	return (sp >= (size_t)td->td_sigstk.ss_sp &&
	    sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
}

static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < nitems(sigproptbl))
		return (sigproptbl[sig]);
	return (0);
}

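/*
 * Illustrative use of the property table (mirroring checks made later in
 * this file): test a property bit to decide on the default action, e.g.
 *
 *	if ((sigprop(sig) & SIGPROP_STOP) != 0)
 *		... default action suspends the process ...
 */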
765 */ 766 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO || 767 ((__sighandler_t *)act->sa_sigaction != SIG_IGN && 768 (__sighandler_t *)act->sa_sigaction != SIG_DFL))); 769 } 770 771 /* 772 * kern_sigaction 773 * sigaction 774 * freebsd4_sigaction 775 * osigaction 776 */ 777 int 778 kern_sigaction(struct thread *td, int sig, const struct sigaction *act, 779 struct sigaction *oact, int flags) 780 { 781 struct sigacts *ps; 782 struct proc *p = td->td_proc; 783 784 if (!_SIG_VALID(sig)) 785 return (EINVAL); 786 if (act != NULL && act->sa_handler != SIG_DFL && 787 act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK | 788 SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER | 789 SA_NOCLDWAIT | SA_SIGINFO)) != 0) 790 return (EINVAL); 791 792 PROC_LOCK(p); 793 ps = p->p_sigacts; 794 mtx_lock(&ps->ps_mtx); 795 if (oact) { 796 memset(oact, 0, sizeof(*oact)); 797 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)]; 798 if (SIGISMEMBER(ps->ps_sigonstack, sig)) 799 oact->sa_flags |= SA_ONSTACK; 800 if (!SIGISMEMBER(ps->ps_sigintr, sig)) 801 oact->sa_flags |= SA_RESTART; 802 if (SIGISMEMBER(ps->ps_sigreset, sig)) 803 oact->sa_flags |= SA_RESETHAND; 804 if (SIGISMEMBER(ps->ps_signodefer, sig)) 805 oact->sa_flags |= SA_NODEFER; 806 if (SIGISMEMBER(ps->ps_siginfo, sig)) { 807 oact->sa_flags |= SA_SIGINFO; 808 oact->sa_sigaction = 809 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)]; 810 } else 811 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)]; 812 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP) 813 oact->sa_flags |= SA_NOCLDSTOP; 814 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT) 815 oact->sa_flags |= SA_NOCLDWAIT; 816 } 817 if (act) { 818 if ((sig == SIGKILL || sig == SIGSTOP) && 819 act->sa_handler != SIG_DFL) { 820 mtx_unlock(&ps->ps_mtx); 821 PROC_UNLOCK(p); 822 return (EINVAL); 823 } 824 825 /* 826 * Change setting atomically. 827 */ 828 829 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask; 830 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]); 831 if (sigact_flag_test(act, SA_SIGINFO)) { 832 ps->ps_sigact[_SIG_IDX(sig)] = 833 (__sighandler_t *)act->sa_sigaction; 834 SIGADDSET(ps->ps_siginfo, sig); 835 } else { 836 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler; 837 SIGDELSET(ps->ps_siginfo, sig); 838 } 839 if (!sigact_flag_test(act, SA_RESTART)) 840 SIGADDSET(ps->ps_sigintr, sig); 841 else 842 SIGDELSET(ps->ps_sigintr, sig); 843 if (sigact_flag_test(act, SA_ONSTACK)) 844 SIGADDSET(ps->ps_sigonstack, sig); 845 else 846 SIGDELSET(ps->ps_sigonstack, sig); 847 if (sigact_flag_test(act, SA_RESETHAND)) 848 SIGADDSET(ps->ps_sigreset, sig); 849 else 850 SIGDELSET(ps->ps_sigreset, sig); 851 if (sigact_flag_test(act, SA_NODEFER)) 852 SIGADDSET(ps->ps_signodefer, sig); 853 else 854 SIGDELSET(ps->ps_signodefer, sig); 855 if (sig == SIGCHLD) { 856 if (act->sa_flags & SA_NOCLDSTOP) 857 ps->ps_flag |= PS_NOCLDSTOP; 858 else 859 ps->ps_flag &= ~PS_NOCLDSTOP; 860 if (act->sa_flags & SA_NOCLDWAIT) { 861 /* 862 * Paranoia: since SA_NOCLDWAIT is implemented 863 * by reparenting the dying child to PID 1 (and 864 * trust it to reap the zombie), PID 1 itself 865 * is forbidden to set SA_NOCLDWAIT. 
866 */ 867 if (p->p_pid == 1) 868 ps->ps_flag &= ~PS_NOCLDWAIT; 869 else 870 ps->ps_flag |= PS_NOCLDWAIT; 871 } else 872 ps->ps_flag &= ~PS_NOCLDWAIT; 873 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) 874 ps->ps_flag |= PS_CLDSIGIGN; 875 else 876 ps->ps_flag &= ~PS_CLDSIGIGN; 877 } 878 /* 879 * Set bit in ps_sigignore for signals that are set to SIG_IGN, 880 * and for signals set to SIG_DFL where the default is to 881 * ignore. However, don't put SIGCONT in ps_sigignore, as we 882 * have to restart the process. 883 */ 884 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 885 (sigprop(sig) & SIGPROP_IGNORE && 886 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) { 887 /* never to be seen again */ 888 sigqueue_delete_proc(p, sig); 889 if (sig != SIGCONT) 890 /* easier in psignal */ 891 SIGADDSET(ps->ps_sigignore, sig); 892 SIGDELSET(ps->ps_sigcatch, sig); 893 } else { 894 SIGDELSET(ps->ps_sigignore, sig); 895 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL) 896 SIGDELSET(ps->ps_sigcatch, sig); 897 else 898 SIGADDSET(ps->ps_sigcatch, sig); 899 } 900 #ifdef COMPAT_FREEBSD4 901 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 902 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || 903 (flags & KSA_FREEBSD4) == 0) 904 SIGDELSET(ps->ps_freebsd4, sig); 905 else 906 SIGADDSET(ps->ps_freebsd4, sig); 907 #endif 908 #ifdef COMPAT_43 909 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 910 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || 911 (flags & KSA_OSIGSET) == 0) 912 SIGDELSET(ps->ps_osigset, sig); 913 else 914 SIGADDSET(ps->ps_osigset, sig); 915 #endif 916 } 917 mtx_unlock(&ps->ps_mtx); 918 PROC_UNLOCK(p); 919 return (0); 920 } 921 922 #ifndef _SYS_SYSPROTO_H_ 923 struct sigaction_args { 924 int sig; 925 struct sigaction *act; 926 struct sigaction *oact; 927 }; 928 #endif 929 int 930 sys_sigaction(struct thread *td, struct sigaction_args *uap) 931 { 932 struct sigaction act, oact; 933 struct sigaction *actp, *oactp; 934 int error; 935 936 actp = (uap->act != NULL) ? &act : NULL; 937 oactp = (uap->oact != NULL) ? &oact : NULL; 938 if (actp) { 939 error = copyin(uap->act, actp, sizeof(act)); 940 if (error) 941 return (error); 942 } 943 error = kern_sigaction(td, uap->sig, actp, oactp, 0); 944 if (oactp && !error) 945 error = copyout(oactp, uap->oact, sizeof(oact)); 946 return (error); 947 } 948 949 #ifdef COMPAT_FREEBSD4 950 #ifndef _SYS_SYSPROTO_H_ 951 struct freebsd4_sigaction_args { 952 int sig; 953 struct sigaction *act; 954 struct sigaction *oact; 955 }; 956 #endif 957 int 958 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap) 959 { 960 struct sigaction act, oact; 961 struct sigaction *actp, *oactp; 962 int error; 963 964 actp = (uap->act != NULL) ? &act : NULL; 965 oactp = (uap->oact != NULL) ? &oact : NULL; 966 if (actp) { 967 error = copyin(uap->act, actp, sizeof(act)); 968 if (error) 969 return (error); 970 } 971 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4); 972 if (oactp && !error) 973 error = copyout(oactp, uap->oact, sizeof(oact)); 974 return (error); 975 } 976 #endif /* COMAPT_FREEBSD4 */ 977 978 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ 979 #ifndef _SYS_SYSPROTO_H_ 980 struct osigaction_args { 981 int signum; 982 struct osigaction *nsa; 983 struct osigaction *osa; 984 }; 985 #endif 986 int 987 osigaction(struct thread *td, struct osigaction_args *uap) 988 { 989 struct osigaction sa; 990 struct sigaction nsa, osa; 991 struct sigaction *nsap, *osap; 992 int error; 993 994 if (uap->signum <= 0 || uap->signum >= ONSIG) 995 return (EINVAL); 996 997 nsap = (uap->nsa != NULL) ? 
#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigaction_args {
	int	signum;
	struct	osigaction *nsa;
	struct	osigaction *osa;
};
#endif
int
osigaction(struct thread *td, struct osigaction_args *uap)
{
	struct osigaction sa;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);

	nsap = (uap->nsa != NULL) ? &nsa : NULL;
	osap = (uap->osa != NULL) ? &osa : NULL;

	if (nsap) {
		error = copyin(uap->nsa, &sa, sizeof(sa));
		if (error)
			return (error);
		nsap->sa_handler = sa.sa_handler;
		nsap->sa_flags = sa.sa_flags;
		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		sa.sa_handler = osap->sa_handler;
		sa.sa_flags = osap->sa_flags;
		SIG2OSIG(osap->sa_mask, sa.sa_mask);
		error = copyout(&sa, uap->osa, sizeof(sa));
	}
	return (error);
}

#if !defined(__i386__)
/* Avoid replicating the same stub everywhere */
int
osigreturn(struct thread *td, struct osigreturn_args *uap)
{

	return (nosys(td, (struct nosys_args *)uap));
}
#endif
#endif /* COMPAT_43 */

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	int i;
	struct sigacts *ps;

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	for (i = 1; i <= NSIG; i++) {
		if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
			SIGADDSET(ps->ps_sigignore, i);
		}
	}
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
}

/*
 * Reset specified signal to the default disposition.
 */
static void
sigdflt(struct sigacts *ps, int sig)
{

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIGDELSET(ps->ps_sigcatch, sig);
	if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
		SIGADDSET(ps->ps_sigignore, sig);
	ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	SIGDELSET(ps->ps_siginfo, sig);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	struct thread *td;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through td_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig_drop_caught(p);

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	td = curthread;
	MPASS(td->td_proc == p);
	td->td_sigstk.ss_flags = SS_DISABLE;
	td->td_sigstk.ss_size = 0;
	td->td_sigstk.ss_sp = 0;
	td->td_pflags &= ~TDP_ALTSTACK;
	/*
	 * Reset no zombies if child dies flag as Solaris does.
	 */
	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
	mtx_unlock(&ps->ps_mtx);
}

/*
 * kern_sigprocmask()
 *
 *	Manipulate signal mask.
 */
int
kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
    int flags)
{
	sigset_t new_block, oset1;
	struct proc *p;
	int error;

	p = td->td_proc;
	if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
		PROC_LOCK_ASSERT(p, MA_OWNED);
	else
		PROC_LOCK(p);
	mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
	    ? MA_OWNED : MA_NOTOWNED);
	if (oset != NULL)
		*oset = td->td_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			SIGSETOR(td->td_sigmask, *set);
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(td->td_sigmask, *set);
			signotify(td);
			goto out;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			oset1 = td->td_sigmask;
			if (flags & SIGPROCMASK_OLD)
				SIGSETLO(td->td_sigmask, *set);
			else
				td->td_sigmask = *set;
			new_block = td->td_sigmask;
			SIGSETNAND(new_block, oset1);
			signotify(td);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		/*
		 * The new_block set contains signals that were not previously
		 * blocked, but are blocked now.
		 *
		 * In case we block any signal that was not previously blocked
		 * for td, and process has the signal pending, try to schedule
		 * signal delivery to some thread that does not block the
		 * signal, possibly waking it up.
		 */
		if (p->p_numthreads != 1)
			reschedule_signals(p, new_block, flags);
	}

out:
	if (!(flags & SIGPROCMASK_PROC_LOCKED))
		PROC_UNLOCK(p);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sigprocmask_args {
	int	how;
	const sigset_t *set;
	sigset_t *oset;
};
#endif
int
sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigprocmask_args {
	int	how;
	osigset_t mask;
};
#endif
int
osigprocmask(struct thread *td, struct osigprocmask_args *uap)
{
	sigset_t set, oset;
	int error;

	OSIG2SIG(uap->mask, set);
	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
	SIG2OSIG(oset, td->td_retval[0]);
	return (error);
}
#endif /* COMPAT_43 */

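/*
 * In-kernel callers manipulate a thread's mask directly through
 * kern_sigprocmask(); e.g. kern_sigsuspend() later in this file installs a
 * temporary mask while saving the old one (sketch):
 *
 *	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
 *	    SIGPROCMASK_PROC_LOCKED);
 */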
int
sys_sigwait(struct thread *td, struct sigwait_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error) {
		td->td_retval[0] = error;
		return (0);
	}

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error) {
		/*
		 * sigwait() function shall not return EINTR, but
		 * the syscall does.  Non-ancient libc provides the
		 * wrapper which hides EINTR.  Otherwise, EINTR return
		 * is used by libthr to handle required cancellation
		 * point in the sigwait().
		 */
		if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
			return (ERESTART);
		td->td_retval[0] = error;
		return (0);
	}

	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
	td->td_retval[0] = error;
	return (0);
}

int
sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	ksiginfo_t ksi;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		timeout = &ts;
	} else
		timeout = NULL;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, timeout);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

int
sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
{
	ksiginfo_t ksi;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);

	error = kern_sigtimedwait(td, set, &ksi, NULL);
	if (error)
		return (error);

	if (uap->info)
		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));

	if (error == 0)
		td->td_retval[0] = ksi.ksi_signo;
	return (error);
}

static void
proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
{
	struct thread *thr;

	FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
		if (thr == td)
			thr->td_si = *si;
		else
			thr->td_si.si_signo = 0;
	}
}

int
kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
    struct timespec *timeout)
{
	struct sigacts *ps;
	sigset_t saved_mask, new_block;
	struct proc *p;
	int error, sig, timevalid = 0;
	sbintime_t sbt, precision, tsbt;
	struct timespec ts;
	bool traced;

	p = td->td_proc;
	error = 0;
	traced = false;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	if (timeout != NULL) {
		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			ts = *timeout;
			if (ts.tv_sec < INT32_MAX / 2) {
				tsbt = tstosbt(ts);
				precision = tsbt;
				precision >>= tc_precexp;
				if (TIMESEL(&sbt, tsbt))
					sbt += tc_tick_sbt;
				sbt += tsbt;
			} else
				precision = sbt = 0;
		}
	} else
		precision = sbt = 0;
	ksiginfo_init(ksi);
	/* Some signals can not be waited for. */
	SIG_CANTMASK(waitset);
	ps = p->p_sigacts;
	PROC_LOCK(p);
	saved_mask = td->td_sigmask;
	SIGSETNAND(td->td_sigmask, waitset);
	if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
	    !kern_sig_discard_ign) {
		thread_lock(td);
		td->td_flags |= TDF_SIGWAIT;
		thread_unlock(td);
	}
	for (;;) {
		mtx_lock(&ps->ps_mtx);
		sig = cursig(td);
		mtx_unlock(&ps->ps_mtx);
		KASSERT(sig >= 0, ("sig %d", sig));
		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
			    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
				error = 0;
				break;
			}
		}

		if (error != 0)
			break;

		/*
		 * POSIX says this must be checked after looking for pending
		 * signals.
		 */
		if (timeout != NULL && !timevalid) {
			error = EINVAL;
			break;
		}

		if (traced) {
			error = EINTR;
			break;
		}

		error = msleep_sbt(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
		    "sigwait", sbt, precision, C_ABSOLUTE);

		/* The syscalls can not be restarted. */
		if (error == ERESTART)
			error = EINTR;

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return spurious
		 * EINTR after wait was done.  Only do this as last
		 * resort after rechecking for possible queued signals
		 * and expired timeouts.
		 */
		if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
			traced = true;
	}
	thread_lock(td);
	td->td_flags &= ~TDF_SIGWAIT;
	thread_unlock(td);

	new_block = saved_mask;
	SIGSETNAND(new_block, td->td_sigmask);
	td->td_sigmask = saved_mask;
	/*
	 * Fewer signals can be delivered to us, reschedule signal
	 * notification.
	 */
	if (p->p_numthreads != 1)
		reschedule_signals(p, new_block, 0);

	if (error == 0) {
		SDT_PROBE2(proc, , , signal__clear, sig, ksi);

		if (ksi->ksi_code == SI_TIMER)
			itimer_accept(p, ksi->ksi_timerid, ksi);

#ifdef KTRACE
		if (KTRPOINT(td, KTR_PSIG)) {
			sig_t action;

			mtx_lock(&ps->ps_mtx);
			action = ps->ps_sigact[_SIG_IDX(sig)];
			mtx_unlock(&ps->ps_mtx);
			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
		}
#endif
		if (sig == SIGKILL) {
			proc_td_siginfo_capture(td, &ksi->ksi_info);
			sigexit(td, sig);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}

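/*
 * The sigwait(2) family above reduces to kern_sigtimedwait() with
 * different timeout and siginfo handling; sigwaitinfo(), for instance, is
 * simply (sketch):
 *
 *	error = kern_sigtimedwait(td, set, &ksi, NULL);
 */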
#ifndef _SYS_SYSPROTO_H_
struct sigpending_args {
	sigset_t *set;
};
#endif
int
sys_sigpending(struct thread *td, struct sigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	return (copyout(&pending, uap->set, sizeof(sigset_t)));
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
#ifndef _SYS_SYSPROTO_H_
struct osigpending_args {
	int	dummy;
};
#endif
int
osigpending(struct thread *td, struct osigpending_args *uap)
{
	struct proc *p = td->td_proc;
	sigset_t pending;

	PROC_LOCK(p);
	pending = p->p_sigqueue.sq_signals;
	SIGSETOR(pending, td->td_sigqueue.sq_signals);
	PROC_UNLOCK(p);
	SIG2OSIG(pending, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
/*
 * Generalized interface signal handler, 4.3-compatible.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigvec_args {
	int	signum;
	struct	sigvec *nsv;
	struct	sigvec *osv;
};
#endif
/* ARGSUSED */
int
osigvec(struct thread *td, struct osigvec_args *uap)
{
	struct sigvec vec;
	struct sigaction nsa, osa;
	struct sigaction *nsap, *osap;
	int error;

	if (uap->signum <= 0 || uap->signum >= ONSIG)
		return (EINVAL);
	nsap = (uap->nsv != NULL) ? &nsa : NULL;
	osap = (uap->osv != NULL) ? &osa : NULL;
	if (nsap) {
		error = copyin(uap->nsv, &vec, sizeof(vec));
		if (error)
			return (error);
		nsap->sa_handler = vec.sv_handler;
		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
		nsap->sa_flags = vec.sv_flags;
		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
	}
	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
	if (osap && !error) {
		vec.sv_handler = osap->sa_handler;
		SIG2OSIG(osap->sa_mask, vec.sv_mask);
		vec.sv_flags = osap->sa_flags;
		vec.sv_flags &= ~SA_NOCLDWAIT;
		vec.sv_flags ^= SA_RESTART;
		error = copyout(&vec, uap->osv, sizeof(vec));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct osigblock_args {
	int	mask;
};
#endif
int
osigblock(struct thread *td, struct osigblock_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct osigsetmask_args {
	int	mask;
};
#endif
int
osigsetmask(struct thread *td, struct osigsetmask_args *uap)
{
	sigset_t set, oset;

	OSIG2SIG(uap->mask, set);
	kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
	SIG2OSIG(oset, td->td_retval[0]);
	return (0);
}
#endif /* COMPAT_43 */

/*
 * Suspend calling thread until signal, providing mask to be set in the
 * meantime.
 */
#ifndef _SYS_SYSPROTO_H_
struct sigsuspend_args {
	const sigset_t *sigmask;
};
#endif
/* ARGSUSED */
int
sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);
	return (kern_sigsuspend(td, mask));
}

int
kern_sigsuspend(struct thread *td, sigset_t mask)
{
	struct proc *p = td->td_proc;
	int has_sig, sig;

	/* Ensure the sigfastblock value is up to date. */
	sigfastblock_fetch(td);

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	PROC_LOCK(p);
	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
	    SIGPROCMASK_PROC_LOCKED);
	td->td_pflags |= TDP_OLDMASK;
	ast_sched(td, TDA_SIGSUSPEND);

	/*
	 * Process signals now.  Otherwise, we can get a spurious wakeup
	 * when a signal enters the process queue but is delivered to
	 * another thread.  But sigsuspend should return only on signal
	 * delivery.
	 */
	(p->p_sysent->sv_set_syscall_retval)(td, EINTR);
	for (has_sig = 0; !has_sig;) {
		while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
		    0) == 0)
			/* void */;
		thread_suspend_check(0);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			has_sig += postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);

		/*
		 * If PTRACE_SCE or PTRACE_SCX were set after
		 * userspace entered the syscall, return spurious
		 * EINTR.
		 */
		if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
			has_sig += 1;
	}
	PROC_UNLOCK(p);
	td->td_errno = EINTR;
	td->td_pflags |= TDP_NERRNO;
	return (EJUSTRETURN);
}

#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
/*
 * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
 * convention: libc stub passes mask, not pointer, to save a copyin.
 */
#ifndef _SYS_SYSPROTO_H_
struct osigsuspend_args {
	osigset_t mask;
};
#endif
/* ARGSUSED */
int
osigsuspend(struct thread *td, struct osigsuspend_args *uap)
{
	sigset_t mask;

	OSIG2SIG(uap->mask, mask);
	return (kern_sigsuspend(td, mask));
}
#endif /* COMPAT_43 */

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct	sigstack *nss;
	struct	sigstack *oss;
};
#endif
/* ARGSUSED */
int
osigstack(struct thread *td, struct osigstack_args *uap)
{
	struct sigstack nss, oss;
	int error = 0;

	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	oss.ss_sp = td->td_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		td->td_sigstk.ss_sp = nss.ss_sp;
		td->td_sigstk.ss_size = 0;
		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		td->td_pflags |= TDP_ALTSTACK;
	}
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t	*ss;
	stack_t	*oss;
};
#endif
/* ARGSUSED */
int
sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}

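/*
 * Summary of the rules kern_sigaltstack() below enforces: the alternate
 * stack may not be changed while the thread is executing on it (EPERM),
 * only SS_DISABLE is accepted in ss_flags (EINVAL), and an enabled stack
 * must be at least sv_minsigstksz bytes (ENOMEM).
 */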
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
				return (ENOMEM);

			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}

struct killpg1_ctx {
	struct thread *td;
	ksiginfo_t *ksi;
	int sig;
	bool sent;
	bool found;
	int ret;
};

static void
killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
{
	int err;

	if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
	    (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
		return;
	PROC_LOCK(p);
	err = p_cansignal(arg->td, p, arg->sig);
	if (err == 0 && arg->sig != 0)
		pksignal(p, arg->sig, arg->ksi);
	PROC_UNLOCK(p);
	if (err != ESRCH)
		arg->found = true;
	if (err == 0)
		arg->sent = true;
	else if (arg->ret == 0 && err != ESRCH && err != EPERM)
		arg->ret = err;
}

/*
 * Common code for kill process group/broadcast kill.
 * td is the calling thread.
 */
static int
killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
{
	struct proc *p;
	struct pgrp *pgrp;
	struct killpg1_ctx arg;

	arg.td = td;
	arg.ksi = ksi;
	arg.sig = sig;
	arg.sent = false;
	arg.found = false;
	arg.ret = 0;
	if (all) {
		/*
		 * broadcast
		 */
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			killpg1_sendsig(p, true, &arg);
		}
		sx_sunlock(&allproc_lock);
	} else {
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			killpg1_sendsig(p, false, &arg);
		}
		PGRP_UNLOCK(pgrp);
	}
	MPASS(arg.ret != 0 || arg.found || !arg.sent);
	if (arg.ret == 0 && !arg.sent)
		arg.ret = arg.found ? EPERM : ESRCH;
	return (arg.ret);
}

#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int	pid;
	int	signum;
};
#endif
/* ARGSUSED */
int
sys_kill(struct thread *td, struct kill_args *uap)
{

	return (kern_kill(td, uap->pid, uap->signum));
}

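/*
 * Sketch of the pid encoding handled by kern_kill() below (matching the
 * switch at its end): pid > 0 names a single process, pid == 0 the
 * caller's own process group, pid == -1 a broadcast, and any other
 * negative pid the process group -pid:
 *
 *	kern_kill(td, 1234, SIGTERM);	signal process 1234
 *	kern_kill(td, -1234, SIGTERM);	signal process group 1234
 */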
int
kern_kill(struct thread *td, pid_t pid, int signum)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	/*
	 * A process in capability mode can send signals only to itself.
	 * The main rationale behind this is that abort(3) is implemented as
	 * kill(getpid(), SIGABRT).
	 */
	if (IN_CAPABILITY_MODE(td) && pid != td->td_proc->p_pid)
		return (ECAPMODE);

	AUDIT_ARG_SIGNUM(signum);
	AUDIT_ARG_PID(pid);
	if ((u_int)signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;

	if (pid > 0) {
		/* kill single process */
		if ((p = pfind_any(pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, signum);
		if (error == 0 && signum)
			pksignal(p, signum, &ksi);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(td, signum, 0, 1, &ksi));
	case 0:			/* signal own process group */
		return (killpg1(td, signum, 0, 0, &ksi));
	default:		/* negative explicit process group */
		return (killpg1(td, signum, -pid, 0, &ksi));
	}
	/* NOTREACHED */
}

int
sys_pdkill(struct thread *td, struct pdkill_args *uap)
{
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_FD(uap->fd);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
	if (error)
		return (error);
	AUDIT_ARG_PROCESS(p);
	error = p_cansignal(td, p, uap->signum);
	if (error == 0 && uap->signum)
		kern_psignal(p, uap->signum);
	PROC_UNLOCK(p);
	return (error);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct okillpg_args {
	int	pgid;
	int	signum;
};
#endif
/* ARGSUSED */
int
okillpg(struct thread *td, struct okillpg_args *uap)
{
	ksiginfo_t ksi;

	AUDIT_ARG_SIGNUM(uap->signum);
	AUDIT_ARG_PID(uap->pgid);
	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct sigqueue_args {
	pid_t pid;
	int signum;
	/* union sigval */ void *value;
};
#endif
int
sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
{
	union sigval sv;

	sv.sival_ptr = uap->value;

	return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
}

int
kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	if ((u_int)signum > _SIG_MAXSIG)
		return (EINVAL);

	/*
	 * Specification says sigqueue can only send signal to
	 * single process.
	 */
	if (pid <= 0)
		return (EINVAL);

	if ((p = pfind_any(pid)) == NULL)
		return (ESRCH);
	error = p_cansignal(td, p, signum);
	if (error == 0 && signum != 0) {
		ksiginfo_init(&ksi);
		ksi.ksi_flags = KSI_SIGQ;
		ksi.ksi_signo = signum;
		ksi.ksi_code = SI_QUEUE;
		ksi.ksi_pid = td->td_proc->p_pid;
		ksi.ksi_uid = td->td_ucred->cr_ruid;
		ksi.ksi_value = *value;
		error = pksignal(p, ksi.ksi_signo, &ksi);
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int sig, ksiginfo_t *ksi)
{
	struct pgrp *pgrp;

	if (pgid != 0) {
		sx_slock(&proctree_lock);
		pgrp = pgfind(pgid);
		sx_sunlock(&proctree_lock);
		if (pgrp != NULL) {
			pgsignal(pgrp, sig, 0, ksi);
			PGRP_UNLOCK(pgrp);
		}
	}
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
{
	struct proc *p;

	if (pgrp) {
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    (checkctty == 0 || p->p_flag & P_CONTROLT))
				pksignal(p, sig, ksi);
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Recalculate the signal mask and reset the signal disposition after
 * usermode frame for delivery is formed.  Should be called after
 * mach-specific routine, because sysent->sv_sendsig() needs correct
 * ps_siginfo and signal mask.
 */
static void
postsig_done(int sig, struct thread *td, struct sigacts *ps)
{
	sigset_t mask;

	mtx_assert(&ps->ps_mtx, MA_OWNED);
	td->td_ru.ru_nsignals++;
	mask = ps->ps_catchmask[_SIG_IDX(sig)];
	if (!SIGISMEMBER(ps->ps_signodefer, sig))
		SIGADDSET(mask, sig);
	kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
	    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
	if (SIGISMEMBER(ps->ps_sigreset, sig))
		sigdflt(ps, sig);
}

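/*
 * Net effect of the above (sketch): while a caught handler runs, the
 * thread's mask is extended by the handler's sa_mask plus, unless
 * SA_NODEFER was set, the delivered signal itself; with SA_RESETHAND the
 * disposition also reverts to SIG_DFL before the handler starts.
 */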
2116 */ 2117 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) || 2118 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) { 2119 SIGDELSET(td->td_sigmask, sig); 2120 SIGDELSET(ps->ps_sigcatch, sig); 2121 SIGDELSET(ps->ps_sigignore, sig); 2122 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 2123 td->td_pflags &= ~TDP_SIGFASTBLOCK; 2124 td->td_sigblock_val = 0; 2125 } 2126 mtx_unlock(&ps->ps_mtx); 2127 p->p_sig = sig; /* XXX to verify code */ 2128 tdsendsignal(p, td, sig, ksi); 2129 } 2130 PROC_UNLOCK(p); 2131 } 2132 2133 static struct thread * 2134 sigtd(struct proc *p, int sig, bool fast_sigblock) 2135 { 2136 struct thread *td, *signal_td; 2137 2138 PROC_LOCK_ASSERT(p, MA_OWNED); 2139 MPASS(!fast_sigblock || p == curproc); 2140 2141 /* 2142 * Check if current thread can handle the signal without 2143 * switching context to another thread. 2144 */ 2145 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) && 2146 (!fast_sigblock || curthread->td_sigblock_val == 0)) 2147 return (curthread); 2148 signal_td = NULL; 2149 FOREACH_THREAD_IN_PROC(p, td) { 2150 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock || 2151 td != curthread || td->td_sigblock_val == 0)) { 2152 signal_td = td; 2153 break; 2154 } 2155 } 2156 if (signal_td == NULL) 2157 signal_td = FIRST_THREAD_IN_PROC(p); 2158 return (signal_td); 2159 } 2160 2161 /* 2162 * Send the signal to the process. If the signal has an action, the action 2163 * is usually performed by the target process rather than the caller; we add 2164 * the signal to the set of pending signals for the process. 2165 * 2166 * Exceptions: 2167 * o When a stop signal is sent to a sleeping process that takes the 2168 * default action, the process is stopped without awakening it. 2169 * o SIGCONT restarts stopped processes (or puts them back to sleep) 2170 * regardless of the signal action (eg, blocked or ignored). 2171 * 2172 * Other ignored signals are discarded immediately. 2173 * 2174 * NB: This function may be entered from the debugger via the "kill" DDB 2175 * command. There is little that can be done to mitigate the possibly messy 2176 * side effects of this unwise possibility. 2177 */ 2178 void 2179 kern_psignal(struct proc *p, int sig) 2180 { 2181 ksiginfo_t ksi; 2182 2183 ksiginfo_init(&ksi); 2184 ksi.ksi_signo = sig; 2185 ksi.ksi_code = SI_KERNEL; 2186 (void) tdsendsignal(p, NULL, sig, &ksi); 2187 } 2188 2189 int 2190 pksignal(struct proc *p, int sig, ksiginfo_t *ksi) 2191 { 2192 2193 return (tdsendsignal(p, NULL, sig, ksi)); 2194 } 2195 2196 /* Utility function for finding a thread to send signal event to. 
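 *
 * Example (userspace sketch, illustrative only): a sigevent that
 * targets a specific thread, e.g. as passed to timer_create(2):
 *
 *	struct sigevent sev;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify = SIGEV_THREAD_ID;
 *	sev.sigev_notify_thread_id = tid;	-- e.g. from thr_self(2)
 *	sev.sigev_signo = SIGUSR1;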
*/ 2197 int 2198 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd) 2199 { 2200 struct thread *td; 2201 2202 if (sigev->sigev_notify == SIGEV_THREAD_ID) { 2203 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid); 2204 if (td == NULL) 2205 return (ESRCH); 2206 *ttd = td; 2207 } else { 2208 *ttd = NULL; 2209 PROC_LOCK(p); 2210 } 2211 return (0); 2212 } 2213 2214 void 2215 tdsignal(struct thread *td, int sig) 2216 { 2217 ksiginfo_t ksi; 2218 2219 ksiginfo_init(&ksi); 2220 ksi.ksi_signo = sig; 2221 ksi.ksi_code = SI_KERNEL; 2222 (void) tdsendsignal(td->td_proc, td, sig, &ksi); 2223 } 2224 2225 void 2226 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi) 2227 { 2228 2229 (void) tdsendsignal(td->td_proc, td, sig, ksi); 2230 } 2231 2232 static int 2233 sig_sleepq_abort(struct thread *td, int intrval) 2234 { 2235 THREAD_LOCK_ASSERT(td, MA_OWNED); 2236 2237 if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0) { 2238 thread_unlock(td); 2239 return (0); 2240 } 2241 return (sleepq_abort(td, intrval)); 2242 } 2243 2244 int 2245 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) 2246 { 2247 sig_t action; 2248 sigqueue_t *sigqueue; 2249 int prop; 2250 struct sigacts *ps; 2251 int intrval; 2252 int ret = 0; 2253 int wakeup_swapper; 2254 2255 MPASS(td == NULL || p == td->td_proc); 2256 PROC_LOCK_ASSERT(p, MA_OWNED); 2257 2258 if (!_SIG_VALID(sig)) 2259 panic("%s(): invalid signal %d", __func__, sig); 2260 2261 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__)); 2262 2263 /* 2264 * IEEE Std 1003.1-2001: return success when killing a zombie. 2265 */ 2266 if (p->p_state == PRS_ZOMBIE) { 2267 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2268 ksiginfo_tryfree(ksi); 2269 return (ret); 2270 } 2271 2272 ps = p->p_sigacts; 2273 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig); 2274 prop = sigprop(sig); 2275 2276 if (td == NULL) { 2277 td = sigtd(p, sig, false); 2278 sigqueue = &p->p_sigqueue; 2279 } else 2280 sigqueue = &td->td_sigqueue; 2281 2282 SDT_PROBE3(proc, , , signal__send, td, p, sig); 2283 2284 /* 2285 * If the signal is being ignored, then we forget about it 2286 * immediately, except when the target process executes 2287 * sigwait(). (Note: we don't set SIGCONT in ps_sigignore, 2288 * and if it is set to SIG_IGN, action will be SIG_DFL here.) 2289 */ 2290 mtx_lock(&ps->ps_mtx); 2291 if (SIGISMEMBER(ps->ps_sigignore, sig)) { 2292 if (kern_sig_discard_ign && 2293 (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) { 2294 SDT_PROBE3(proc, , , signal__discard, td, p, sig); 2295 2296 mtx_unlock(&ps->ps_mtx); 2297 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0) 2298 ksiginfo_tryfree(ksi); 2299 return (ret); 2300 } else { 2301 action = SIG_CATCH; 2302 intrval = 0; 2303 } 2304 } else { 2305 if (SIGISMEMBER(td->td_sigmask, sig)) 2306 action = SIG_HOLD; 2307 else if (SIGISMEMBER(ps->ps_sigcatch, sig)) 2308 action = SIG_CATCH; 2309 else 2310 action = SIG_DFL; 2311 if (SIGISMEMBER(ps->ps_sigintr, sig)) 2312 intrval = EINTR; 2313 else 2314 intrval = ERESTART; 2315 } 2316 mtx_unlock(&ps->ps_mtx); 2317 2318 if (prop & SIGPROP_CONT) 2319 sigqueue_delete_stopmask_proc(p); 2320 else if (prop & SIGPROP_STOP) { 2321 /* 2322 * If sending a tty stop signal to a member of an orphaned 2323 * process group, discard the signal here if the action 2324 * is default; don't stop the process below if sleeping, 2325 * and don't clear any pending SIGCONT. 
2326 */
2327 if ((prop & SIGPROP_TTYSTOP) != 0 &&
2328 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2329 action == SIG_DFL) {
2330 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2331 ksiginfo_tryfree(ksi);
2332 return (ret);
2333 }
2334 sigqueue_delete_proc(p, SIGCONT);
2335 if (p->p_flag & P_CONTINUED) {
2336 p->p_flag &= ~P_CONTINUED;
2337 PROC_LOCK(p->p_pptr);
2338 sigqueue_take(p->p_ksi);
2339 PROC_UNLOCK(p->p_pptr);
2340 }
2341 }
2342
2343 ret = sigqueue_add(sigqueue, sig, ksi);
2344 if (ret != 0)
2345 return (ret);
2346 signotify(td);
2347 /*
2348 * Defer further processing for signals which are held,
2349 * except that stopped processes must be continued by SIGCONT.
2350 */
2351 if (action == SIG_HOLD &&
2352 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2353 return (ret);
2354
2355 wakeup_swapper = 0;
2356
2357 /*
2358 * Some signals have a process-wide effect and a per-thread
2359 * component. Most processing occurs when the process next
2360 * tries to cross the user boundary; however, there are some
2361 * times when processing needs to be done immediately, such as
2362 * waking up threads so that they can cross the user boundary.
2363 * We try to do the per-process part here.
2364 */
2365 if (P_SHOULDSTOP(p)) {
2366 KASSERT(!(p->p_flag & P_WEXIT),
2367 ("signal to stopped but exiting process"));
2368 if (sig == SIGKILL) {
2369 /*
2370 * If traced process is already stopped,
2371 * then no further action is necessary.
2372 */
2373 if (p->p_flag & P_TRACED)
2374 goto out;
2375 /*
2376 * SIGKILL sets process running.
2377 * It will die elsewhere.
2378 * All threads must be restarted.
2379 */
2380 p->p_flag &= ~P_STOPPED_SIG;
2381 goto runfast;
2382 }
2383
2384 if (prop & SIGPROP_CONT) {
2385 /*
2386 * If traced process is already stopped,
2387 * then no further action is necessary.
2388 */
2389 if (p->p_flag & P_TRACED)
2390 goto out;
2391 /*
2392 * If SIGCONT is default (or ignored), we continue the
2393 * process but don't leave the signal in sigqueue as
2394 * it has no further action. If SIGCONT is held, we
2395 * continue the process and leave the signal in
2396 * sigqueue. If the process catches SIGCONT, let it
2397 * handle the signal itself. If it isn't waiting on
2398 * an event, it goes back to run state.
2399 * Otherwise, process goes back to sleep state.
2400 */
2401 p->p_flag &= ~P_STOPPED_SIG;
2402 PROC_SLOCK(p);
2403 if (p->p_numthreads == p->p_suspcount) {
2404 PROC_SUNLOCK(p);
2405 p->p_flag |= P_CONTINUED;
2406 p->p_xsig = SIGCONT;
2407 PROC_LOCK(p->p_pptr);
2408 childproc_continued(p);
2409 PROC_UNLOCK(p->p_pptr);
2410 PROC_SLOCK(p);
2411 }
2412 if (action == SIG_DFL) {
2413 thread_unsuspend(p);
2414 PROC_SUNLOCK(p);
2415 sigqueue_delete(sigqueue, sig);
2416 goto out_cont;
2417 }
2418 if (action == SIG_CATCH) {
2419 /*
2420 * The process wants to catch it so it needs
2421 * to run at least one thread, but which one?
2422 */
2423 PROC_SUNLOCK(p);
2424 goto runfast;
2425 }
2426 /*
2427 * The signal is not ignored or caught.
2428 */
2429 thread_unsuspend(p);
2430 PROC_SUNLOCK(p);
2431 goto out_cont;
2432 }
2433
2434 if (prop & SIGPROP_STOP) {
2435 /*
2436 * If traced process is already stopped,
2437 * then no further action is necessary.
2438 */
2439 if (p->p_flag & P_TRACED)
2440 goto out;
2441 /*
2442 * Already stopped, no need to stop again
2443 * (if we did, the shell could get confused).
2444 * Just make sure the signal STOP bit is set.
2445 */
2446 p->p_flag |= P_STOPPED_SIG;
2447 sigqueue_delete(sigqueue, sig);
2448 goto out;
2449 }
2450
2451 /*
2452 * All other kinds of signals:
2453 * If a thread is sleeping interruptibly, simulate a
2454 * wakeup so that when it is continued it will be made
2455 * runnable and can look at the signal. However, don't make
2456 * the PROCESS runnable; leave it stopped.
2457 * It may run a bit until it hits a thread_suspend_check().
2458 */
2459 PROC_SLOCK(p);
2460 thread_lock(td);
2461 if (TD_CAN_ABORT(td))
2462 wakeup_swapper = sig_sleepq_abort(td, intrval);
2463 else
2464 thread_unlock(td);
2465 PROC_SUNLOCK(p);
2466 goto out;
2467 /*
2468 * Mutexes are short lived. Threads waiting on them will
2469 * hit thread_suspend_check() soon.
2470 */
2471 } else if (p->p_state == PRS_NORMAL) {
2472 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2473 tdsigwakeup(td, sig, action, intrval);
2474 goto out;
2475 }
2476
2477 MPASS(action == SIG_DFL);
2478
2479 if (prop & SIGPROP_STOP) {
2480 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2481 goto out;
2482 p->p_flag |= P_STOPPED_SIG;
2483 p->p_xsig = sig;
2484 PROC_SLOCK(p);
2485 wakeup_swapper = sig_suspend_threads(td, p);
2486 if (p->p_numthreads == p->p_suspcount) {
2487 /*
2488 * Only a thread sending a signal to another
2489 * process can reach here. A thread signalling
2490 * its own process does not suspend itself
2491 * here, so p_numthreads can never equal
2492 * p_suspcount in that case.
2493 */
2494 thread_stopped(p);
2495 PROC_SUNLOCK(p);
2496 sigqueue_delete_proc(p, p->p_xsig);
2497 } else
2498 PROC_SUNLOCK(p);
2499 goto out;
2500 }
2501 } else {
2502 /* Not in "NORMAL" state. Discard the signal. */
2503 sigqueue_delete(sigqueue, sig);
2504 goto out;
2505 }
2506
2507 /*
2508 * The process is not stopped so we need to apply the signal to all the
2509 * running threads.
2510 */
2511 runfast:
2512 tdsigwakeup(td, sig, action, intrval);
2513 PROC_SLOCK(p);
2514 thread_unsuspend(p);
2515 PROC_SUNLOCK(p);
2516 out_cont:
2517 itimer_proc_continue(p);
2518 kqtimer_proc_continue(p);
2519 out:
2520 /* If we jump here, proc slock should not be owned. */
2521 PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2522 if (wakeup_swapper)
2523 kick_proc0();
2524
2525 return (ret);
2526 }
2527
2528 /*
2529 * The force of a signal has been directed against a single
2530 * thread. We need to see what we can do about knocking it
2531 * out of any sleep it may be in, etc.
2532 */
2533 static void
2534 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2535 {
2536 struct proc *p = td->td_proc;
2537 int prop, wakeup_swapper;
2538
2539 PROC_LOCK_ASSERT(p, MA_OWNED);
2540 prop = sigprop(sig);
2541
2542 PROC_SLOCK(p);
2543 thread_lock(td);
2544 /*
2545 * Bring the priority of a thread up if we want it to get
2546 * killed in this lifetime. Be careful to avoid bumping the
2547 * priority of the idle thread, since we still allow signalling
2548 * kernel processes.
2549 */
2550 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2551 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2552 sched_prio(td, PUSER);
2553 if (TD_ON_SLEEPQ(td)) {
2554 /*
2555 * If thread is sleeping uninterruptibly
2556 * we can't interrupt the sleep... the signal will
2557 * be noticed when the process returns through
2558 * trap() or syscall().
2559 */
2560 if ((td->td_flags & TDF_SINTR) == 0)
2561 goto out;
2562 /*
2563 * If SIGCONT is default (or ignored) and process is
2564 * asleep, we are finished; the process should not
2565 * be awakened.
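 *
 * For instance (illustrative): kill(pid, SIGCONT) sent to a
 * process sleeping in nanosleep(2), with SIGCONT at SIG_DFL,
 * clears pending stop signals but does not interrupt the sleep.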
2566 */ 2567 if ((prop & SIGPROP_CONT) && action == SIG_DFL) { 2568 thread_unlock(td); 2569 PROC_SUNLOCK(p); 2570 sigqueue_delete(&p->p_sigqueue, sig); 2571 /* 2572 * It may be on either list in this state. 2573 * Remove from both for now. 2574 */ 2575 sigqueue_delete(&td->td_sigqueue, sig); 2576 return; 2577 } 2578 2579 /* 2580 * Don't awaken a sleeping thread for SIGSTOP if the 2581 * STOP signal is deferred. 2582 */ 2583 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY | 2584 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) 2585 goto out; 2586 2587 /* 2588 * Give low priority threads a better chance to run. 2589 */ 2590 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td)) 2591 sched_prio(td, PUSER); 2592 2593 wakeup_swapper = sig_sleepq_abort(td, intrval); 2594 PROC_SUNLOCK(p); 2595 if (wakeup_swapper) 2596 kick_proc0(); 2597 return; 2598 } 2599 2600 /* 2601 * Other states do nothing with the signal immediately, 2602 * other than kicking ourselves if we are running. 2603 * It will either never be noticed, or noticed very soon. 2604 */ 2605 #ifdef SMP 2606 if (TD_IS_RUNNING(td) && td != curthread) 2607 forward_signal(td); 2608 #endif 2609 2610 out: 2611 PROC_SUNLOCK(p); 2612 thread_unlock(td); 2613 } 2614 2615 static void 2616 ptrace_coredump(struct thread *td) 2617 { 2618 struct proc *p; 2619 struct thr_coredump_req *tcq; 2620 void *rl_cookie; 2621 2622 MPASS(td == curthread); 2623 p = td->td_proc; 2624 PROC_LOCK_ASSERT(p, MA_OWNED); 2625 if ((td->td_dbgflags & TDB_COREDUMPRQ) == 0) 2626 return; 2627 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped")); 2628 2629 tcq = td->td_coredump; 2630 KASSERT(tcq != NULL, ("td_coredump is NULL")); 2631 2632 if (p->p_sysent->sv_coredump == NULL) { 2633 tcq->tc_error = ENOSYS; 2634 goto wake; 2635 } 2636 2637 PROC_UNLOCK(p); 2638 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX); 2639 2640 tcq->tc_error = p->p_sysent->sv_coredump(td, tcq->tc_vp, 2641 tcq->tc_limit, tcq->tc_flags); 2642 2643 vn_rangelock_unlock(tcq->tc_vp, rl_cookie); 2644 PROC_LOCK(p); 2645 wake: 2646 td->td_dbgflags &= ~TDB_COREDUMPRQ; 2647 td->td_coredump = NULL; 2648 wakeup(p); 2649 } 2650 2651 static int 2652 sig_suspend_threads(struct thread *td, struct proc *p) 2653 { 2654 struct thread *td2; 2655 int wakeup_swapper; 2656 2657 PROC_LOCK_ASSERT(p, MA_OWNED); 2658 PROC_SLOCK_ASSERT(p, MA_OWNED); 2659 2660 wakeup_swapper = 0; 2661 FOREACH_THREAD_IN_PROC(p, td2) { 2662 thread_lock(td2); 2663 ast_sched_locked(td2, TDA_SUSPEND); 2664 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) && 2665 (td2->td_flags & TDF_SINTR)) { 2666 if (td2->td_flags & TDF_SBDRY) { 2667 /* 2668 * Once a thread is asleep with 2669 * TDF_SBDRY and without TDF_SERESTART 2670 * or TDF_SEINTR set, it should never 2671 * become suspended due to this check. 2672 */ 2673 KASSERT(!TD_IS_SUSPENDED(td2), 2674 ("thread with deferred stops suspended")); 2675 if (TD_SBDRY_INTR(td2)) { 2676 wakeup_swapper |= sleepq_abort(td2, 2677 TD_SBDRY_ERRNO(td2)); 2678 continue; 2679 } 2680 } else if (!TD_IS_SUSPENDED(td2)) 2681 thread_suspend_one(td2); 2682 } else if (!TD_IS_SUSPENDED(td2)) { 2683 #ifdef SMP 2684 if (TD_IS_RUNNING(td2) && td2 != td) 2685 forward_signal(td2); 2686 #endif 2687 } 2688 thread_unlock(td2); 2689 } 2690 return (wakeup_swapper); 2691 } 2692 2693 /* 2694 * Stop the process for an event deemed interesting to the debugger. If si is 2695 * non-NULL, this is a signal exchange; the new signal requested by the 2696 * debugger will be returned for handling. 
If si is NULL, this is some other 2697 * type of interesting event. The debugger may request a signal be delivered in 2698 * that case as well, however it will be deferred until it can be handled. 2699 */ 2700 int 2701 ptracestop(struct thread *td, int sig, ksiginfo_t *si) 2702 { 2703 struct proc *p = td->td_proc; 2704 struct thread *td2; 2705 ksiginfo_t ksi; 2706 2707 PROC_LOCK_ASSERT(p, MA_OWNED); 2708 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process")); 2709 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 2710 &p->p_mtx.lock_object, "Stopping for traced signal"); 2711 2712 td->td_xsig = sig; 2713 2714 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) { 2715 td->td_dbgflags |= TDB_XSIG; 2716 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d", 2717 td->td_tid, p->p_pid, td->td_dbgflags, sig); 2718 PROC_SLOCK(p); 2719 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) { 2720 if (P_KILLED(p)) { 2721 /* 2722 * Ensure that, if we've been PT_KILLed, the 2723 * exit status reflects that. Another thread 2724 * may also be in ptracestop(), having just 2725 * received the SIGKILL, but this thread was 2726 * unsuspended first. 2727 */ 2728 td->td_dbgflags &= ~TDB_XSIG; 2729 td->td_xsig = SIGKILL; 2730 p->p_ptevents = 0; 2731 break; 2732 } 2733 if (p->p_flag & P_SINGLE_EXIT && 2734 !(td->td_dbgflags & TDB_EXIT)) { 2735 /* 2736 * Ignore ptrace stops except for thread exit 2737 * events when the process exits. 2738 */ 2739 td->td_dbgflags &= ~TDB_XSIG; 2740 PROC_SUNLOCK(p); 2741 return (0); 2742 } 2743 2744 /* 2745 * Make wait(2) work. Ensure that right after the 2746 * attach, the thread which was decided to become the 2747 * leader of attach gets reported to the waiter. 2748 * Otherwise, just avoid overwriting another thread's 2749 * assignment to p_xthread. If another thread has 2750 * already set p_xthread, the current thread will get 2751 * a chance to report itself upon the next iteration. 2752 */ 2753 if ((td->td_dbgflags & TDB_FSTP) != 0 || 2754 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 && 2755 p->p_xthread == NULL)) { 2756 p->p_xsig = sig; 2757 p->p_xthread = td; 2758 2759 /* 2760 * If we are on sleepqueue already, 2761 * let sleepqueue code decide if it 2762 * needs to go sleep after attach. 2763 */ 2764 if (td->td_wchan == NULL) 2765 td->td_dbgflags &= ~TDB_FSTP; 2766 2767 p->p_flag2 &= ~P2_PTRACE_FSTP; 2768 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE; 2769 sig_suspend_threads(td, p); 2770 } 2771 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) { 2772 td->td_dbgflags &= ~TDB_STOPATFORK; 2773 } 2774 stopme: 2775 td->td_dbgflags |= TDB_SSWITCH; 2776 thread_suspend_switch(td, p); 2777 td->td_dbgflags &= ~TDB_SSWITCH; 2778 if ((td->td_dbgflags & TDB_COREDUMPRQ) != 0) { 2779 PROC_SUNLOCK(p); 2780 ptrace_coredump(td); 2781 PROC_SLOCK(p); 2782 goto stopme; 2783 } 2784 if (p->p_xthread == td) 2785 p->p_xthread = NULL; 2786 if (!(p->p_flag & P_TRACED)) 2787 break; 2788 if (td->td_dbgflags & TDB_SUSPEND) { 2789 if (p->p_flag & P_SINGLE_EXIT) 2790 break; 2791 goto stopme; 2792 } 2793 } 2794 PROC_SUNLOCK(p); 2795 } 2796 2797 if (si != NULL && sig == td->td_xsig) { 2798 /* Parent wants us to take the original signal unchanged. */ 2799 si->ksi_flags |= KSI_HEAD; 2800 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0) 2801 si->ksi_signo = 0; 2802 } else if (td->td_xsig != 0) { 2803 /* 2804 * If parent wants us to take a new signal, then it will leave 2805 * it in td->td_xsig; otherwise we just look for signals again. 
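 *
 * Example (debugger side, illustrative only): the replacement
 * signal is whatever the tracer passes when resuming the child:
 *
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, newsig);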
2806 */
2807 ksiginfo_init(&ksi);
2808 ksi.ksi_signo = td->td_xsig;
2809 ksi.ksi_flags |= KSI_PTRACE;
2810 td2 = sigtd(p, td->td_xsig, false);
2811 tdsendsignal(p, td2, td->td_xsig, &ksi);
2812 if (td != td2)
2813 return (0);
2814 }
2815
2816 return (td->td_xsig);
2817 }
2818
2819 static void
2820 reschedule_signals(struct proc *p, sigset_t block, int flags)
2821 {
2822 struct sigacts *ps;
2823 struct thread *td;
2824 int sig;
2825 bool fastblk, pslocked;
2826
2827 PROC_LOCK_ASSERT(p, MA_OWNED);
2828 ps = p->p_sigacts;
2829 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2830 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2831 if (SIGISEMPTY(p->p_siglist))
2832 return;
2833 SIGSETAND(block, p->p_siglist);
2834 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2835 SIG_FOREACH(sig, &block) {
2836 td = sigtd(p, sig, fastblk);
2837
2838 /*
2839 * If sigtd() selected us even though sigfastblock
2840 * is blocking, do not activate the AST or wake us,
2841 * to avoid a loop in the AST handler.
2842 */
2843 if (fastblk && td == curthread)
2844 continue;
2845
2846 signotify(td);
2847 if (!pslocked)
2848 mtx_lock(&ps->ps_mtx);
2849 if (p->p_flag & P_TRACED ||
2850 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2851 !SIGISMEMBER(td->td_sigmask, sig))) {
2852 tdsigwakeup(td, sig, SIG_CATCH,
2853 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2854 ERESTART));
2855 }
2856 if (!pslocked)
2857 mtx_unlock(&ps->ps_mtx);
2858 }
2859 }
2860
2861 void
2862 tdsigcleanup(struct thread *td)
2863 {
2864 struct proc *p;
2865 sigset_t unblocked;
2866
2867 p = td->td_proc;
2868 PROC_LOCK_ASSERT(p, MA_OWNED);
2869
2870 sigqueue_flush(&td->td_sigqueue);
2871 if (p->p_numthreads == 1)
2872 return;
2873
2874 /*
2875 * Since we cannot handle signals, notify signal post code
2876 * about this by filling the sigmask.
2877 *
2878 * Also, if needed, wake up thread(s) that do not block the
2879 * same signals as the exiting thread, since the thread might
2880 * have been selected for delivery and woken up.
2881 */
2882 SIGFILLSET(unblocked);
2883 SIGSETNAND(unblocked, td->td_sigmask);
2884 SIGFILLSET(td->td_sigmask);
2885 reschedule_signals(p, unblocked, 0);
2886
2887 }
2888
2889 static int
2890 sigdeferstop_curr_flags(int cflags)
2891 {
2892
2893 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2894 (cflags & TDF_SBDRY) != 0);
2895 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2896 }
2897
2898 /*
2899 * Defer the delivery of SIGSTOP for the current thread, according to
2900 * the requested mode. Returns previous flags, which must be restored
2901 * by sigallowstop().
2902 *
2903 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2904 * cleared by the current thread, which allows the lock-less read-only
2905 * accesses below.
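 *
 * Typical in-kernel usage, via the sigdeferstop()/sigallowstop()
 * wrappers around these _impl routines (sketch; the blocking
 * operation is a placeholder):
 *
 *	int stop;
 *
 *	stop = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = some_blocking_vnode_op();
 *	sigallowstop(stop);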
2906 */ 2907 int 2908 sigdeferstop_impl(int mode) 2909 { 2910 struct thread *td; 2911 int cflags, nflags; 2912 2913 td = curthread; 2914 cflags = sigdeferstop_curr_flags(td->td_flags); 2915 switch (mode) { 2916 case SIGDEFERSTOP_NOP: 2917 nflags = cflags; 2918 break; 2919 case SIGDEFERSTOP_OFF: 2920 nflags = 0; 2921 break; 2922 case SIGDEFERSTOP_SILENT: 2923 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART); 2924 break; 2925 case SIGDEFERSTOP_EINTR: 2926 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART; 2927 break; 2928 case SIGDEFERSTOP_ERESTART: 2929 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR; 2930 break; 2931 default: 2932 panic("sigdeferstop: invalid mode %x", mode); 2933 break; 2934 } 2935 if (cflags == nflags) 2936 return (SIGDEFERSTOP_VAL_NCHG); 2937 thread_lock(td); 2938 td->td_flags = (td->td_flags & ~cflags) | nflags; 2939 thread_unlock(td); 2940 return (cflags); 2941 } 2942 2943 /* 2944 * Restores the STOP handling mode, typically permitting the delivery 2945 * of SIGSTOP for the current thread. This does not immediately 2946 * suspend if a stop was posted. Instead, the thread will suspend 2947 * either via ast() or a subsequent interruptible sleep. 2948 */ 2949 void 2950 sigallowstop_impl(int prev) 2951 { 2952 struct thread *td; 2953 int cflags; 2954 2955 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop")); 2956 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0, 2957 ("sigallowstop: incorrect previous mode %x", prev)); 2958 td = curthread; 2959 cflags = sigdeferstop_curr_flags(td->td_flags); 2960 if (cflags != prev) { 2961 thread_lock(td); 2962 td->td_flags = (td->td_flags & ~cflags) | prev; 2963 thread_unlock(td); 2964 } 2965 } 2966 2967 enum sigstatus { 2968 SIGSTATUS_HANDLE, 2969 SIGSTATUS_HANDLED, 2970 SIGSTATUS_IGNORE, 2971 SIGSTATUS_SBDRY_STOP, 2972 }; 2973 2974 /* 2975 * The thread has signal "sig" pending. Figure out what to do with it: 2976 * 2977 * _HANDLE -> the caller should handle the signal 2978 * _HANDLED -> handled internally, reload pending signal set 2979 * _IGNORE -> ignored, remove from the set of pending signals and try the 2980 * next pending signal 2981 * _SBDRY_STOP -> the signal should stop the thread but this is not 2982 * permitted in the current context 2983 */ 2984 static enum sigstatus 2985 sigprocess(struct thread *td, int sig) 2986 { 2987 struct proc *p; 2988 struct sigacts *ps; 2989 struct sigqueue *queue; 2990 ksiginfo_t ksi; 2991 int prop; 2992 2993 KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig)); 2994 2995 p = td->td_proc; 2996 ps = p->p_sigacts; 2997 mtx_assert(&ps->ps_mtx, MA_OWNED); 2998 PROC_LOCK_ASSERT(p, MA_OWNED); 2999 3000 /* 3001 * We should allow pending but ignored signals below 3002 * if there is sigwait() active, or P_TRACED was 3003 * on when they were posted. 3004 */ 3005 if (SIGISMEMBER(ps->ps_sigignore, sig) && 3006 (p->p_flag & P_TRACED) == 0 && 3007 (td->td_flags & TDF_SIGWAIT) == 0) { 3008 return (SIGSTATUS_IGNORE); 3009 } 3010 3011 /* 3012 * If the process is going to single-thread mode to prepare 3013 * for exit, there is no sense in delivering any signal 3014 * to usermode. Another important consequence is that 3015 * msleep(..., PCATCH, ...) now is only interruptible by a 3016 * suspend request. 3017 */ 3018 if ((p->p_flag2 & P2_WEXIT) != 0) 3019 return (SIGSTATUS_IGNORE); 3020 3021 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) { 3022 /* 3023 * If traced, always stop. 3024 * Remove old signal from queue before the stop. 
3025 * XXX shrug off debugger, it causes siginfo to 3026 * be thrown away. 3027 */ 3028 queue = &td->td_sigqueue; 3029 ksiginfo_init(&ksi); 3030 if (sigqueue_get(queue, sig, &ksi) == 0) { 3031 queue = &p->p_sigqueue; 3032 sigqueue_get(queue, sig, &ksi); 3033 } 3034 td->td_si = ksi.ksi_info; 3035 3036 mtx_unlock(&ps->ps_mtx); 3037 sig = ptracestop(td, sig, &ksi); 3038 mtx_lock(&ps->ps_mtx); 3039 3040 td->td_si.si_signo = 0; 3041 3042 /* 3043 * Keep looking if the debugger discarded or 3044 * replaced the signal. 3045 */ 3046 if (sig == 0) 3047 return (SIGSTATUS_HANDLED); 3048 3049 /* 3050 * If the signal became masked, re-queue it. 3051 */ 3052 if (SIGISMEMBER(td->td_sigmask, sig)) { 3053 ksi.ksi_flags |= KSI_HEAD; 3054 sigqueue_add(&p->p_sigqueue, sig, &ksi); 3055 return (SIGSTATUS_HANDLED); 3056 } 3057 3058 /* 3059 * If the traced bit got turned off, requeue the signal and 3060 * reload the set of pending signals. This ensures that p_sig* 3061 * and p_sigact are consistent. 3062 */ 3063 if ((p->p_flag & P_TRACED) == 0) { 3064 if ((ksi.ksi_flags & KSI_PTRACE) == 0) { 3065 ksi.ksi_flags |= KSI_HEAD; 3066 sigqueue_add(queue, sig, &ksi); 3067 } 3068 return (SIGSTATUS_HANDLED); 3069 } 3070 } 3071 3072 /* 3073 * Decide whether the signal should be returned. 3074 * Return the signal's number, or fall through 3075 * to clear it from the pending mask. 3076 */ 3077 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { 3078 case (intptr_t)SIG_DFL: 3079 /* 3080 * Don't take default actions on system processes. 3081 */ 3082 if (p->p_pid <= 1) { 3083 #ifdef DIAGNOSTIC 3084 /* 3085 * Are you sure you want to ignore SIGSEGV 3086 * in init? XXX 3087 */ 3088 printf("Process (pid %lu) got signal %d\n", 3089 (u_long)p->p_pid, sig); 3090 #endif 3091 return (SIGSTATUS_IGNORE); 3092 } 3093 3094 /* 3095 * If there is a pending stop signal to process with 3096 * default action, stop here, then clear the signal. 3097 * Traced or exiting processes should ignore stops. 3098 * Additionally, a member of an orphaned process group 3099 * should ignore tty stops. 3100 */ 3101 prop = sigprop(sig); 3102 if (prop & SIGPROP_STOP) { 3103 mtx_unlock(&ps->ps_mtx); 3104 if ((p->p_flag & (P_TRACED | P_WEXIT | 3105 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp-> 3106 pg_flags & PGRP_ORPHANED) != 0 && 3107 (prop & SIGPROP_TTYSTOP) != 0)) { 3108 mtx_lock(&ps->ps_mtx); 3109 return (SIGSTATUS_IGNORE); 3110 } 3111 if (TD_SBDRY_INTR(td)) { 3112 KASSERT((td->td_flags & TDF_SBDRY) != 0, 3113 ("lost TDF_SBDRY")); 3114 mtx_lock(&ps->ps_mtx); 3115 return (SIGSTATUS_SBDRY_STOP); 3116 } 3117 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 3118 &p->p_mtx.lock_object, "Catching SIGSTOP"); 3119 sigqueue_delete(&td->td_sigqueue, sig); 3120 sigqueue_delete(&p->p_sigqueue, sig); 3121 p->p_flag |= P_STOPPED_SIG; 3122 p->p_xsig = sig; 3123 PROC_SLOCK(p); 3124 sig_suspend_threads(td, p); 3125 thread_suspend_switch(td, p); 3126 PROC_SUNLOCK(p); 3127 mtx_lock(&ps->ps_mtx); 3128 return (SIGSTATUS_HANDLED); 3129 } else if ((prop & SIGPROP_IGNORE) != 0 && 3130 (td->td_flags & TDF_SIGWAIT) == 0) { 3131 /* 3132 * Default action is to ignore; drop it if 3133 * not in kern_sigtimedwait(). 3134 */ 3135 return (SIGSTATUS_IGNORE); 3136 } else { 3137 return (SIGSTATUS_HANDLE); 3138 } 3139 3140 case (intptr_t)SIG_IGN: 3141 if ((td->td_flags & TDF_SIGWAIT) == 0) 3142 return (SIGSTATUS_IGNORE); 3143 else 3144 return (SIGSTATUS_HANDLE); 3145 3146 default: 3147 /* 3148 * This signal has an action, let postsig() process it. 
3149 */
3150 return (SIGSTATUS_HANDLE);
3151 }
3152 }
3153
3154 /*
3155 * If the current process has received a signal that should be caught,
3156 * cause termination, or interrupt the current syscall, return the signal number.
3157 * Stop signals with default action are processed immediately, then cleared;
3158 * they aren't returned. This is checked after each entry to the system for
3159 * a syscall or trap (though this can usually be done without calling
3160 * issignal() by checking the pending signal masks in cursig()). The normal call
3161 * sequence is
3162 *
3163 * while (sig = cursig(curthread))
3164 * postsig(sig);
3165 */
3166 static int
3167 issignal(struct thread *td)
3168 {
3169 struct proc *p;
3170 sigset_t sigpending;
3171 int sig;
3172
3173 p = td->td_proc;
3174 PROC_LOCK_ASSERT(p, MA_OWNED);
3175
3176 for (;;) {
3177 sigpending = td->td_sigqueue.sq_signals;
3178 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
3179 SIGSETNAND(sigpending, td->td_sigmask);
3180
3181 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
3182 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
3183 SIG_STOPSIGMASK(sigpending);
3184 if (SIGISEMPTY(sigpending)) /* no signal to send */
3185 return (0);
3186
3187 /*
3188 * Do fast sigblock if requested by usermode. Since
3189 * we do know that there was a signal pending at this
3190 * point, set SIGFASTBLOCK_PEND as an indicator for
3191 * usermode to perform a dummy call to
3192 * sigfastblock(SIGFASTBLOCK_UNBLOCK), which causes immediate
3193 * delivery of the postponed pending signal.
3194 */
3195 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
3196 if (td->td_sigblock_val != 0)
3197 SIGSETNAND(sigpending, fastblock_mask);
3198 if (SIGISEMPTY(sigpending)) {
3199 td->td_pflags |= TDP_SIGFASTPENDING;
3200 return (0);
3201 }
3202 }
3203
3204 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
3205 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
3206 SIGISMEMBER(sigpending, SIGSTOP)) {
3207 /*
3208 * If debugger just attached, always consume
3209 * SIGSTOP from ptrace(PT_ATTACH) first, to
3210 * execute the debugger attach ritual in
3211 * order.
3212 */
3213 td->td_dbgflags |= TDB_FSTP;
3214 SIGEMPTYSET(sigpending);
3215 SIGADDSET(sigpending, SIGSTOP);
3216 }
3217
3218 SIG_FOREACH(sig, &sigpending) {
3219 switch (sigprocess(td, sig)) {
3220 case SIGSTATUS_HANDLE:
3221 return (sig);
3222 case SIGSTATUS_HANDLED:
3223 goto next;
3224 case SIGSTATUS_IGNORE:
3225 sigqueue_delete(&td->td_sigqueue, sig);
3226 sigqueue_delete(&p->p_sigqueue, sig);
3227 break;
3228 case SIGSTATUS_SBDRY_STOP:
3229 return (-1);
3230 }
3231 }
3232 next:;
3233 }
3234 }
3235
3236 void
3237 thread_stopped(struct proc *p)
3238 {
3239 int n;
3240
3241 PROC_LOCK_ASSERT(p, MA_OWNED);
3242 PROC_SLOCK_ASSERT(p, MA_OWNED);
3243 n = p->p_suspcount;
3244 if (p == curproc)
3245 n++;
3246 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3247 PROC_SUNLOCK(p);
3248 p->p_flag &= ~P_WAITED;
3249 PROC_LOCK(p->p_pptr);
3250 childproc_stopped(p, (p->p_flag & P_TRACED) ?
3251 CLD_TRAPPED : CLD_STOPPED);
3252 PROC_UNLOCK(p->p_pptr);
3253 PROC_SLOCK(p);
3254 }
3255 }
3256
3257 /*
3258 * Take the action for the specified signal
3259 * from the current set of pending signals.
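 *
 * Illustrative caller sketch (in the spirit of the AST handler,
 * not a verbatim copy): the proc lock and ps_mtx are held around
 * the cursig()/postsig() loop, and the -1 SBDRY case is omitted:
 *
 *	PROC_LOCK(p);
 *	mtx_lock(&p->p_sigacts->ps_mtx);
 *	while ((sig = cursig(td)) > 0)
 *		postsig(sig);
 *	mtx_unlock(&p->p_sigacts->ps_mtx);
 *	PROC_UNLOCK(p);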
3260 */
3261 int
3262 postsig(int sig)
3263 {
3264 struct thread *td;
3265 struct proc *p;
3266 struct sigacts *ps;
3267 sig_t action;
3268 ksiginfo_t ksi;
3269 sigset_t returnmask;
3270
3271 KASSERT(sig != 0, ("postsig"));
3272
3273 td = curthread;
3274 p = td->td_proc;
3275 PROC_LOCK_ASSERT(p, MA_OWNED);
3276 ps = p->p_sigacts;
3277 mtx_assert(&ps->ps_mtx, MA_OWNED);
3278 ksiginfo_init(&ksi);
3279 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3280 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3281 return (0);
3282 ksi.ksi_signo = sig;
3283 if (ksi.ksi_code == SI_TIMER)
3284 itimer_accept(p, ksi.ksi_timerid, &ksi);
3285 action = ps->ps_sigact[_SIG_IDX(sig)];
3286 #ifdef KTRACE
3287 if (KTRPOINT(td, KTR_PSIG))
3288 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3289 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3290 #endif
3291
3292 if (action == SIG_DFL) {
3293 /*
3294 * Default action, where the default is to kill
3295 * the process. (Other cases were ignored above.)
3296 */
3297 mtx_unlock(&ps->ps_mtx);
3298 proc_td_siginfo_capture(td, &ksi.ksi_info);
3299 sigexit(td, sig);
3300 /* NOTREACHED */
3301 } else {
3302 /*
3303 * If we get here, the signal must be caught.
3304 */
3305 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3306 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3307 ("postsig action: blocked sig %d", sig));
3308
3309 /*
3310 * Set the new mask value and also defer further
3311 * occurrences of this signal.
3312 *
3313 * Special case: the user has done a sigsuspend. Here the
3314 * current mask is not of interest, but rather the
3315 * mask from before the sigsuspend is what we want
3316 * restored after the signal processing is completed.
3317 */
3318 if (td->td_pflags & TDP_OLDMASK) {
3319 returnmask = td->td_oldsigmask;
3320 td->td_pflags &= ~TDP_OLDMASK;
3321 } else
3322 returnmask = td->td_sigmask;
3323
3324 if (p->p_sig == sig) {
3325 p->p_sig = 0;
3326 }
3327 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3328 postsig_done(sig, td, ps);
3329 }
3330 return (1);
3331 }
3332
3333 int
3334 sig_ast_checksusp(struct thread *td)
3335 {
3336 struct proc *p __diagused;
3337 int ret;
3338
3339 p = td->td_proc;
3340 PROC_LOCK_ASSERT(p, MA_OWNED);
3341
3342 if (!td_ast_pending(td, TDA_SUSPEND))
3343 return (0);
3344
3345 ret = thread_suspend_check(1);
3346 MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3347 return (ret);
3348 }
3349
3350 int
3351 sig_ast_needsigchk(struct thread *td)
3352 {
3353 struct proc *p;
3354 struct sigacts *ps;
3355 int ret, sig;
3356
3357 p = td->td_proc;
3358 PROC_LOCK_ASSERT(p, MA_OWNED);
3359
3360 if (!td_ast_pending(td, TDA_SIG))
3361 return (0);
3362
3363 ps = p->p_sigacts;
3364 mtx_lock(&ps->ps_mtx);
3365 sig = cursig(td);
3366 if (sig == -1) {
3367 mtx_unlock(&ps->ps_mtx);
3368 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3369 KASSERT(TD_SBDRY_INTR(td),
3370 ("lost TDF_SERESTART or TDF_SEINTR"));
3371 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3372 (TDF_SEINTR | TDF_SERESTART),
3373 ("both TDF_SEINTR and TDF_SERESTART"));
3374 ret = TD_SBDRY_ERRNO(td);
3375 } else if (sig != 0) {
3376 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3377 mtx_unlock(&ps->ps_mtx);
3378 } else {
3379 mtx_unlock(&ps->ps_mtx);
3380 ret = 0;
3381 }
3382
3383 /*
3384 * Do not go to sleep if this thread was the ptrace(2)
3385 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH,
3386 * but we usually act on the signal by interrupting sleep, and
3387 * should do that here as well.
3388 */ 3389 if ((td->td_dbgflags & TDB_FSTP) != 0) { 3390 if (ret == 0) 3391 ret = EINTR; 3392 td->td_dbgflags &= ~TDB_FSTP; 3393 } 3394 3395 return (ret); 3396 } 3397 3398 int 3399 sig_intr(void) 3400 { 3401 struct thread *td; 3402 struct proc *p; 3403 int ret; 3404 3405 td = curthread; 3406 if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND)) 3407 return (0); 3408 3409 p = td->td_proc; 3410 3411 PROC_LOCK(p); 3412 ret = sig_ast_checksusp(td); 3413 if (ret == 0) 3414 ret = sig_ast_needsigchk(td); 3415 PROC_UNLOCK(p); 3416 return (ret); 3417 } 3418 3419 bool 3420 curproc_sigkilled(void) 3421 { 3422 struct thread *td; 3423 struct proc *p; 3424 struct sigacts *ps; 3425 bool res; 3426 3427 td = curthread; 3428 if (!td_ast_pending(td, TDA_SIG)) 3429 return (false); 3430 3431 p = td->td_proc; 3432 PROC_LOCK(p); 3433 ps = p->p_sigacts; 3434 mtx_lock(&ps->ps_mtx); 3435 res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) || 3436 SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL); 3437 mtx_unlock(&ps->ps_mtx); 3438 PROC_UNLOCK(p); 3439 return (res); 3440 } 3441 3442 void 3443 proc_wkilled(struct proc *p) 3444 { 3445 3446 PROC_LOCK_ASSERT(p, MA_OWNED); 3447 if ((p->p_flag & P_WKILLED) == 0) { 3448 p->p_flag |= P_WKILLED; 3449 /* 3450 * Notify swapper that there is a process to swap in. 3451 * The notification is racy, at worst it would take 10 3452 * seconds for the swapper process to notice. 3453 */ 3454 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0) 3455 wakeup(&proc0); 3456 } 3457 } 3458 3459 /* 3460 * Kill the current process for stated reason. 3461 */ 3462 void 3463 killproc(struct proc *p, const char *why) 3464 { 3465 3466 PROC_LOCK_ASSERT(p, MA_OWNED); 3467 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid, 3468 p->p_comm); 3469 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n", 3470 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id, 3471 p->p_ucred->cr_uid, why); 3472 proc_wkilled(p); 3473 kern_psignal(p, SIGKILL); 3474 } 3475 3476 /* 3477 * Force the current process to exit with the specified signal, dumping core 3478 * if appropriate. We bypass the normal tests for masked and caught signals, 3479 * allowing unrecoverable failures to terminate the process without changing 3480 * signal state. Mark the accounting record with the signal termination. 3481 * If dumping core, save the signal number for the debugger. Calls exit and 3482 * does not return. 3483 */ 3484 void 3485 sigexit(struct thread *td, int sig) 3486 { 3487 struct proc *p = td->td_proc; 3488 3489 PROC_LOCK_ASSERT(p, MA_OWNED); 3490 proc_set_p2_wexit(p); 3491 3492 p->p_acflag |= AXSIG; 3493 /* 3494 * We must be single-threading to generate a core dump. This 3495 * ensures that the registers in the core file are up-to-date. 3496 * Also, the ELF dump handler assumes that the thread list doesn't 3497 * change out from under it. 3498 * 3499 * XXX If another thread attempts to single-thread before us 3500 * (e.g. via fork()), we won't get a dump at all. 3501 */ 3502 if ((sigprop(sig) & SIGPROP_CORE) && 3503 thread_single(p, SINGLE_NO_EXIT) == 0) { 3504 p->p_sig = sig; 3505 /* 3506 * Log signals which would cause core dumps 3507 * (Log as LOG_INFO to appease those who don't want 3508 * these messages.) 3509 * XXX : Todo, as well as euid, write out ruid too 3510 * Note that coredump() drops proc lock. 
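 *
 * With kern.logsigexit enabled, the log() below emits, for
 * example (illustrative values):
 *
 *	pid 1234 (myprog), jid 0, uid 1001: exited on signal 11 (core dumped)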
3511 */
3512 if (coredump(td) == 0)
3513 sig |= WCOREFLAG;
3514 if (kern_logsigexit)
3515 log(LOG_INFO,
3516 "pid %d (%s), jid %d, uid %d: exited on "
3517 "signal %d%s\n", p->p_pid, p->p_comm,
3518 p->p_ucred->cr_prison->pr_id,
3519 td->td_ucred->cr_uid,
3520 sig &~ WCOREFLAG,
3521 sig & WCOREFLAG ? " (core dumped)" : "");
3522 } else
3523 PROC_UNLOCK(p);
3524 exit1(td, 0, sig);
3525 /* NOTREACHED */
3526 }
3527
3528 /*
3529 * Send a queued SIGCHLD to the parent when the child process's
3530 * state changes.
3531 */
3532 static void
3533 sigparent(struct proc *p, int reason, int status)
3534 {
3535 PROC_LOCK_ASSERT(p, MA_OWNED);
3536 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3537
3538 if (p->p_ksi != NULL) {
3539 p->p_ksi->ksi_signo = SIGCHLD;
3540 p->p_ksi->ksi_code = reason;
3541 p->p_ksi->ksi_status = status;
3542 p->p_ksi->ksi_pid = p->p_pid;
3543 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3544 if (KSI_ONQ(p->p_ksi))
3545 return;
3546 }
3547 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3548 }
3549
3550 static void
3551 childproc_jobstate(struct proc *p, int reason, int sig)
3552 {
3553 struct sigacts *ps;
3554
3555 PROC_LOCK_ASSERT(p, MA_OWNED);
3556 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3557
3558 /*
3559 * Wake up the parent sleeping in kern_wait() and also send
3560 * SIGCHLD to the parent; SIGCHLD alone does not guarantee
3561 * that the parent will wake up, since the parent may have
3562 * masked the signal.
3563 */
3564 p->p_pptr->p_flag |= P_STATCHILD;
3565 wakeup(p->p_pptr);
3566
3567 ps = p->p_pptr->p_sigacts;
3568 mtx_lock(&ps->ps_mtx);
3569 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3570 mtx_unlock(&ps->ps_mtx);
3571 sigparent(p, reason, sig);
3572 } else
3573 mtx_unlock(&ps->ps_mtx);
3574 }
3575
3576 void
3577 childproc_stopped(struct proc *p, int reason)
3578 {
3579
3580 childproc_jobstate(p, reason, p->p_xsig);
3581 }
3582
3583 void
3584 childproc_continued(struct proc *p)
3585 {
3586 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3587 }
3588
3589 void
3590 childproc_exited(struct proc *p)
3591 {
3592 int reason, status;
3593
3594 if (WCOREDUMP(p->p_xsig)) {
3595 reason = CLD_DUMPED;
3596 status = WTERMSIG(p->p_xsig);
3597 } else if (WIFSIGNALED(p->p_xsig)) {
3598 reason = CLD_KILLED;
3599 status = WTERMSIG(p->p_xsig);
3600 } else {
3601 reason = CLD_EXITED;
3602 status = p->p_xexit;
3603 }
3604 /*
3605 * XXX avoid calling wakeup(p->p_pptr), the work is
3606 * done in exit1().
3607 */ 3608 sigparent(p, reason, status); 3609 } 3610 3611 #define MAX_NUM_CORE_FILES 100000 3612 #ifndef NUM_CORE_FILES 3613 #define NUM_CORE_FILES 5 3614 #endif 3615 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES); 3616 static int num_cores = NUM_CORE_FILES; 3617 3618 static int 3619 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS) 3620 { 3621 int error; 3622 int new_val; 3623 3624 new_val = num_cores; 3625 error = sysctl_handle_int(oidp, &new_val, 0, req); 3626 if (error != 0 || req->newptr == NULL) 3627 return (error); 3628 if (new_val > MAX_NUM_CORE_FILES) 3629 new_val = MAX_NUM_CORE_FILES; 3630 if (new_val < 0) 3631 new_val = 0; 3632 num_cores = new_val; 3633 return (0); 3634 } 3635 SYSCTL_PROC(_debug, OID_AUTO, ncores, 3636 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int), 3637 sysctl_debug_num_cores_check, "I", 3638 "Maximum number of generated process corefiles while using index format"); 3639 3640 #define GZIP_SUFFIX ".gz" 3641 #define ZSTD_SUFFIX ".zst" 3642 3643 int compress_user_cores = 0; 3644 3645 static int 3646 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS) 3647 { 3648 int error, val; 3649 3650 val = compress_user_cores; 3651 error = sysctl_handle_int(oidp, &val, 0, req); 3652 if (error != 0 || req->newptr == NULL) 3653 return (error); 3654 if (val != 0 && !compressor_avail(val)) 3655 return (EINVAL); 3656 compress_user_cores = val; 3657 return (error); 3658 } 3659 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores, 3660 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int), 3661 sysctl_compress_user_cores, "I", 3662 "Enable compression of user corefiles (" 3663 __XSTRING(COMPRESS_GZIP) " = gzip, " 3664 __XSTRING(COMPRESS_ZSTD) " = zstd)"); 3665 3666 int compress_user_cores_level = 6; 3667 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN, 3668 &compress_user_cores_level, 0, 3669 "Corefile compression level"); 3670 3671 /* 3672 * Protect the access to corefilename[] by allproc_lock. 3673 */ 3674 #define corefilename_lock allproc_lock 3675 3676 static char corefilename[MAXPATHLEN] = {"%N.core"}; 3677 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename)); 3678 3679 static int 3680 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS) 3681 { 3682 int error; 3683 3684 sx_xlock(&corefilename_lock); 3685 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename), 3686 req); 3687 sx_xunlock(&corefilename_lock); 3688 3689 return (error); 3690 } 3691 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW | 3692 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A", 3693 "Process corefile name format string"); 3694 3695 static void 3696 vnode_close_locked(struct thread *td, struct vnode *vp) 3697 { 3698 3699 VOP_UNLOCK(vp); 3700 vn_close(vp, FWRITE, td->td_ucred, td); 3701 } 3702 3703 /* 3704 * If the core format has a %I in it, then we need to check 3705 * for existing corefiles before defining a name. 3706 * To do this we iterate over 0..ncores to find a 3707 * non-existing core file name to use. If all core files are 3708 * already used we choose the oldest one. 
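 *
 * Example (illustrative): with the settings below, dumps rotate
 * through prog.0.core .. prog.4.core, reusing the oldest slot once
 * all five names exist:
 *
 *	sysctl kern.corefile='%N.%I.core'
 *	sysctl debug.ncores=5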
3709 */
3710 static int
3711 corefile_open_last(struct thread *td, char *name, int indexpos,
3712 int indexlen, int ncores, struct vnode **vpp)
3713 {
3714 struct vnode *oldvp, *nextvp, *vp;
3715 struct vattr vattr;
3716 struct nameidata nd;
3717 int error, i, flags, oflags, cmode;
3718 char ch;
3719 struct timespec lasttime;
3720
3721 nextvp = oldvp = NULL;
3722 cmode = S_IRUSR | S_IWUSR;
3723 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3724 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3725
3726 for (i = 0; i < ncores; i++) {
3727 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3728
3729 ch = name[indexpos + indexlen];
3730 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3731 i);
3732 name[indexpos + indexlen] = ch;
3733
3734 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
3735 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3736 NULL);
3737 if (error != 0)
3738 break;
3739
3740 vp = nd.ni_vp;
3741 NDFREE_PNBUF(&nd);
3742 if ((flags & O_CREAT) == O_CREAT) {
3743 nextvp = vp;
3744 break;
3745 }
3746
3747 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3748 if (error != 0) {
3749 vnode_close_locked(td, vp);
3750 break;
3751 }
3752
3753 if (oldvp == NULL ||
3754 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3755 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3756 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3757 if (oldvp != NULL)
3758 vn_close(oldvp, FWRITE, td->td_ucred, td);
3759 oldvp = vp;
3760 VOP_UNLOCK(oldvp);
3761 lasttime = vattr.va_mtime;
3762 } else {
3763 vnode_close_locked(td, vp);
3764 }
3765 }
3766
3767 if (oldvp != NULL) {
3768 if (nextvp == NULL) {
3769 if ((td->td_proc->p_flag & P_SUGID) != 0) {
3770 error = EFAULT;
3771 vn_close(oldvp, FWRITE, td->td_ucred, td);
3772 } else {
3773 nextvp = oldvp;
3774 error = vn_lock(nextvp, LK_EXCLUSIVE);
3775 if (error != 0) {
3776 vn_close(nextvp, FWRITE, td->td_ucred,
3777 td);
3778 nextvp = NULL;
3779 }
3780 }
3781 } else {
3782 vn_close(oldvp, FWRITE, td->td_ucred, td);
3783 }
3784 }
3785 if (error != 0) {
3786 if (nextvp != NULL)
3787 vnode_close_locked(td, oldvp);
3788 } else {
3789 *vpp = nextvp;
3790 }
3791
3792 return (error);
3793 }
3794
3795 /*
3796 * corefile_open(comm, uid, pid, td, compress, signum, vpp, namep)
3797 * Expand the name described in corefilename, using name, uid, and pid
3798 * and open/create the core file.
3799 * corefilename is a printf-like string, with these format specifiers:
3800 * %H hostname, %I autoincrementing index, %N name of process ("name"),
3801 * %P process id (pid), %S signal number, %U user id (uid).
3802 * For example, "%N.core" is the default; core files can be disabled
3803 * completely by using "/dev/null", or all core files can be stored in
3804 * "/cores/%U/%N-%P".
3805 * This is controlled by the sysctl variable kern.corefile (see above).
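 *
 * For instance (illustrative values): with kern.corefile set to
 * "/cores/%U/%N-%P.core", a dump of pid 71 ("sh") running as uid
 * 1001 is written to "/cores/1001/sh-71.core".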
3806 */ 3807 static int 3808 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td, 3809 int compress, int signum, struct vnode **vpp, char **namep) 3810 { 3811 struct sbuf sb; 3812 struct nameidata nd; 3813 const char *format; 3814 char *hostname, *name; 3815 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores; 3816 3817 hostname = NULL; 3818 format = corefilename; 3819 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO); 3820 indexlen = 0; 3821 indexpos = -1; 3822 ncores = num_cores; 3823 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN); 3824 sx_slock(&corefilename_lock); 3825 for (i = 0; format[i] != '\0'; i++) { 3826 switch (format[i]) { 3827 case '%': /* Format character */ 3828 i++; 3829 switch (format[i]) { 3830 case '%': 3831 sbuf_putc(&sb, '%'); 3832 break; 3833 case 'H': /* hostname */ 3834 if (hostname == NULL) { 3835 hostname = malloc(MAXHOSTNAMELEN, 3836 M_TEMP, M_WAITOK); 3837 } 3838 getcredhostname(td->td_ucred, hostname, 3839 MAXHOSTNAMELEN); 3840 sbuf_printf(&sb, "%s", hostname); 3841 break; 3842 case 'I': /* autoincrementing index */ 3843 if (indexpos != -1) { 3844 sbuf_printf(&sb, "%%I"); 3845 break; 3846 } 3847 3848 indexpos = sbuf_len(&sb); 3849 sbuf_printf(&sb, "%u", ncores - 1); 3850 indexlen = sbuf_len(&sb) - indexpos; 3851 break; 3852 case 'N': /* process name */ 3853 sbuf_printf(&sb, "%s", comm); 3854 break; 3855 case 'P': /* process id */ 3856 sbuf_printf(&sb, "%u", pid); 3857 break; 3858 case 'S': /* signal number */ 3859 sbuf_printf(&sb, "%i", signum); 3860 break; 3861 case 'U': /* user id */ 3862 sbuf_printf(&sb, "%u", uid); 3863 break; 3864 default: 3865 log(LOG_ERR, 3866 "Unknown format character %c in " 3867 "corename `%s'\n", format[i], format); 3868 break; 3869 } 3870 break; 3871 default: 3872 sbuf_putc(&sb, format[i]); 3873 break; 3874 } 3875 } 3876 sx_sunlock(&corefilename_lock); 3877 free(hostname, M_TEMP); 3878 if (compress == COMPRESS_GZIP) 3879 sbuf_printf(&sb, GZIP_SUFFIX); 3880 else if (compress == COMPRESS_ZSTD) 3881 sbuf_printf(&sb, ZSTD_SUFFIX); 3882 if (sbuf_error(&sb) != 0) { 3883 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too " 3884 "long\n", (long)pid, comm, (u_long)uid); 3885 sbuf_delete(&sb); 3886 free(name, M_TEMP); 3887 return (ENOMEM); 3888 } 3889 sbuf_finish(&sb); 3890 sbuf_delete(&sb); 3891 3892 if (indexpos != -1) { 3893 error = corefile_open_last(td, name, indexpos, indexlen, ncores, 3894 vpp); 3895 if (error != 0) { 3896 log(LOG_ERR, 3897 "pid %d (%s), uid (%u): Path `%s' failed " 3898 "on initial open test, error = %d\n", 3899 pid, comm, uid, name, error); 3900 } 3901 } else { 3902 cmode = S_IRUSR | S_IWUSR; 3903 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE | 3904 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0); 3905 flags = O_CREAT | FWRITE | O_NOFOLLOW; 3906 if ((td->td_proc->p_flag & P_SUGID) != 0) 3907 flags |= O_EXCL; 3908 3909 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name); 3910 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred, 3911 NULL); 3912 if (error == 0) { 3913 *vpp = nd.ni_vp; 3914 NDFREE_PNBUF(&nd); 3915 } 3916 } 3917 3918 if (error != 0) { 3919 #ifdef AUDIT 3920 audit_proc_coredump(td, name, error); 3921 #endif 3922 free(name, M_TEMP); 3923 return (error); 3924 } 3925 *namep = name; 3926 return (0); 3927 } 3928 3929 /* 3930 * Dump a process' core. 
The main routine does some 3931 * policy checking, and creates the name of the coredump; 3932 * then it passes on a vnode and a size limit to the process-specific 3933 * coredump routine if there is one; if there _is not_ one, it returns 3934 * ENOSYS; otherwise it returns the error from the process-specific routine. 3935 */ 3936 3937 static int 3938 coredump(struct thread *td) 3939 { 3940 struct proc *p = td->td_proc; 3941 struct ucred *cred = td->td_ucred; 3942 struct vnode *vp; 3943 struct flock lf; 3944 struct vattr vattr; 3945 size_t fullpathsize; 3946 int error, error1, locked; 3947 char *name; /* name of corefile */ 3948 void *rl_cookie; 3949 off_t limit; 3950 char *fullpath, *freepath = NULL; 3951 struct sbuf *sb; 3952 3953 PROC_LOCK_ASSERT(p, MA_OWNED); 3954 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td); 3955 3956 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) || 3957 (p->p_flag2 & P2_NOTRACE) != 0) { 3958 PROC_UNLOCK(p); 3959 return (EFAULT); 3960 } 3961 3962 /* 3963 * Note that the bulk of limit checking is done after 3964 * the corefile is created. The exception is if the limit 3965 * for corefiles is 0, in which case we don't bother 3966 * creating the corefile at all. This layout means that 3967 * a corefile is truncated instead of not being created, 3968 * if it is larger than the limit. 3969 */ 3970 limit = (off_t)lim_cur(td, RLIMIT_CORE); 3971 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) { 3972 PROC_UNLOCK(p); 3973 return (EFBIG); 3974 } 3975 PROC_UNLOCK(p); 3976 3977 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td, 3978 compress_user_cores, p->p_sig, &vp, &name); 3979 if (error != 0) 3980 return (error); 3981 3982 /* 3983 * Don't dump to non-regular files or files with links. 3984 * Do not dump into system files. Effective user must own the corefile. 3985 */ 3986 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 || 3987 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 || 3988 vattr.va_uid != cred->cr_uid) { 3989 VOP_UNLOCK(vp); 3990 error = EFAULT; 3991 goto out; 3992 } 3993 3994 VOP_UNLOCK(vp); 3995 3996 /* Postpone other writers, including core dumps of other processes. */ 3997 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX); 3998 3999 lf.l_whence = SEEK_SET; 4000 lf.l_start = 0; 4001 lf.l_len = 0; 4002 lf.l_type = F_WRLCK; 4003 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0); 4004 4005 VATTR_NULL(&vattr); 4006 vattr.va_size = 0; 4007 if (set_core_nodump_flag) 4008 vattr.va_flags = UF_NODUMP; 4009 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4010 VOP_SETATTR(vp, &vattr, cred); 4011 VOP_UNLOCK(vp); 4012 PROC_LOCK(p); 4013 p->p_acflag |= ACORE; 4014 PROC_UNLOCK(p); 4015 4016 if (p->p_sysent->sv_coredump != NULL) { 4017 error = p->p_sysent->sv_coredump(td, vp, limit, 0); 4018 } else { 4019 error = ENOSYS; 4020 } 4021 4022 if (locked) { 4023 lf.l_type = F_UNLCK; 4024 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); 4025 } 4026 vn_rangelock_unlock(vp, rl_cookie); 4027 4028 /* 4029 * Notify the userland helper that a process triggered a core dump. 4030 * This allows the helper to run an automated debugging session. 
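 *
 * A devd(8) rule can consume this notification; minimal sketch
 * (the action command is an arbitrary example):
 *
 *	notify 10 {
 *		match "system"		"kernel";
 *		match "subsystem"	"signal";
 *		match "type"		"coredump";
 *		action "logger coredump: $comm dumped to $core";
 *	};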
4031 */
4032 if (error != 0 || coredump_devctl == 0)
4033 goto out;
4034 sb = sbuf_new_auto();
4035 if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0)
4036 goto out2;
4037 sbuf_printf(sb, "comm=\"");
4038 devctl_safe_quote_sb(sb, fullpath);
4039 free(freepath, M_TEMP);
4040 sbuf_printf(sb, "\" core=\"");
4041
4042 /*
4043 * We can't look up the core file vp directly. When we're replacing a core,
4044 * and at other random times, we flush the name cache, so the lookup will fail.
4045 * Instead, if the path of the core is relative, prepend the current directory.
4046 */
4047 if (name[0] != '/') {
4048 fullpathsize = MAXPATHLEN;
4049 freepath = malloc(fullpathsize, M_TEMP, M_WAITOK);
4050 if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) {
4051 free(freepath, M_TEMP);
4052 goto out2;
4053 }
4054 devctl_safe_quote_sb(sb, fullpath);
4055 free(freepath, M_TEMP);
4056 sbuf_putc(sb, '/');
4057 }
4058 devctl_safe_quote_sb(sb, name);
4059 sbuf_printf(sb, "\"");
4060 if (sbuf_finish(sb) == 0)
4061 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
4062 out2:
4063 sbuf_delete(sb);
4064 out:
4065 error1 = vn_close(vp, FWRITE, cred, td);
4066 if (error == 0)
4067 error = error1;
4068 #ifdef AUDIT
4069 audit_proc_coredump(td, name, error);
4070 #endif
4071 free(name, M_TEMP);
4072 return (error);
4073 }
4074
4075 /*
4076 * Nonexistent system call -- signal the process (it may want to handle it).
4077 * Flag an error in case the process won't see the signal immediately (blocked or ignored).
4078 */
4079 #ifndef _SYS_SYSPROTO_H_
4080 struct nosys_args {
4081 int dummy;
4082 };
4083 #endif
4084 /* ARGSUSED */
4085 int
4086 nosys(struct thread *td, struct nosys_args *args)
4087 {
4088 struct proc *p;
4089
4090 p = td->td_proc;
4091
4092 PROC_LOCK(p);
4093 tdsignal(td, SIGSYS);
4094 PROC_UNLOCK(p);
4095 if (kern_lognosys == 1 || kern_lognosys == 3) {
4096 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4097 td->td_sa.code);
4098 }
4099 if (kern_lognosys == 2 || kern_lognosys == 3 ||
4100 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
4101 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4102 td->td_sa.code);
4103 }
4104 return (ENOSYS);
4105 }
4106
4107 /*
4108 * Send a SIGIO or SIGURG signal to a process or process group using stored
4109 * credentials rather than those of the current process.
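 *
 * The sigio reference is established when a process asks for async
 * I/O notification; a userspace sketch (illustrative only):
 *
 *	fcntl(fd, F_SETOWN, getpid());		-- who receives SIGIO
 *	fcntl(fd, F_SETFL, O_ASYNC);		-- enable SIGIO delivery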
/*
 * Send a SIGIO or SIGURG signal to a process or process group using stored
 * credentials rather than those of the current process.
 */
void
pgsigio(struct sigio **sigiop, int sig, int checkctty)
{
	ksiginfo_t ksi;
	struct sigio *sigio;

	ksiginfo_init(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = SI_KERNEL;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	if (sigio->sio_pgid > 0) {
		PROC_LOCK(sigio->sio_proc);
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			kern_psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				kern_psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	knlist_add(p->p_klist, kn, 0);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	knlist_remove(p->p_klist, kn, 0);
}

/*
 * Signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

struct sigacts *
sigacts_alloc(void)
{
	struct sigacts *ps;

	ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
	refcount_init(&ps->ps_refcnt, 1);
	mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
	return (ps);
}

void
sigacts_free(struct sigacts *ps)
{

	if (refcount_release(&ps->ps_refcnt) == 0)
		return;
	mtx_destroy(&ps->ps_mtx);
	free(ps, M_SUBPROC);
}

struct sigacts *
sigacts_hold(struct sigacts *ps)
{

	refcount_acquire(&ps->ps_refcnt);
	return (ps);
}

void
sigacts_copy(struct sigacts *dest, struct sigacts *src)
{

	KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
	mtx_lock(&src->ps_mtx);
	bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
	mtx_unlock(&src->ps_mtx);
}

int
sigacts_shared(struct sigacts *ps)
{

	return (ps->ps_refcnt > 1);
}

void
sig_drop_caught(struct proc *p)
{
	int sig;
	struct sigacts *ps;

	ps = p->p_sigacts;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	SIG_FOREACH(sig, &ps->ps_sigcatch) {
		sigdflt(ps, sig);
		if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
			sigqueue_delete_proc(p, sig);
	}
}
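/*
 * The EVFILT_SIGNAL filter implemented above can be exercised from
 * userland roughly as follows (a minimal sketch, not part of this file):
 *
 *	struct kevent kev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * After this, each delivery of SIGUSR1 bumps kn_data; EV_CLEAR, set
 * automatically in filt_sigattach(), resets the count once the event
 * is retrieved.
 */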
static void
sigfastblock_failed(struct thread *td, bool sendsig, bool write)
{
	ksiginfo_t ksi;

	/*
	 * Prevent further fetches and SIGSEGVs, allowing the thread to
	 * issue syscalls despite the corruption.
	 */
	sigfastblock_clear(td);

	if (!sendsig)
		return;
	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = SIGSEGV;
	ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
	ksi.ksi_addr = td->td_sigblock_ptr;
	trapsignal(td, &ksi);
}

static bool
sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
{
	uint32_t res;

	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
		return (true);
	if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
		sigfastblock_failed(td, sendsig, false);
		return (false);
	}
	*valp = res;
	td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
	return (true);
}

static void
sigfastblock_resched(struct thread *td, bool resched)
{
	struct proc *p;

	if (resched) {
		p = td->td_proc;
		PROC_LOCK(p);
		reschedule_signals(p, td->td_sigmask, 0);
		PROC_UNLOCK(p);
	}
	ast_sched(td, TDA_SIG);
}

int
sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
{
	struct proc *p;
	int error, res;
	uint32_t oldval;

	error = 0;
	p = td->td_proc;
	switch (uap->cmd) {
	case SIGFASTBLOCK_SETPTR:
		if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
			error = EBUSY;
			break;
		}
		if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
			error = EINVAL;
			break;
		}
		td->td_pflags |= TDP_SIGFASTBLOCK;
		td->td_sigblock_ptr = uap->ptr;
		break;

	case SIGFASTBLOCK_UNBLOCK:
		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			error = EINVAL;
			break;
		}

		for (;;) {
			res = casueword32(td->td_sigblock_ptr,
			    SIGFASTBLOCK_PEND, &oldval, 0);
			if (res == -1) {
				error = EFAULT;
				sigfastblock_failed(td, false, true);
				break;
			}
			if (res == 0)
				break;
			MPASS(res == 1);
			if (oldval != SIGFASTBLOCK_PEND) {
				error = EBUSY;
				break;
			}
			error = thread_check_susp(td, false);
			if (error != 0)
				break;
		}
		if (error != 0)
			break;

		/*
		 * td_sigblock_val is cleared here, but not on a
		 * syscall exit.  The net effect is that a single
		 * interruptible sleep, while the user sigblock word is
		 * set, might return EINTR or ERESTART to usermode
		 * without delivering a signal.  All further sleeps,
		 * until userspace clears the word and does
		 * sigfastblock(UNBLOCK), observe the current word and
		 * are no longer interrupted.  This is a slight
		 * non-conformance; the alternative would be to read
		 * the sigblock word on each syscall entry.
		 */
		td->td_sigblock_val = 0;

		/*
		 * Rely on the normal ast mechanism to deliver pending
		 * signals to the current thread.  But notify others
		 * about the fake unblock.
		 */
		sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);

		break;

	case SIGFASTBLOCK_UNSETPTR:
		if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			error = EINVAL;
			break;
		}
		if (!sigfastblock_fetch_sig(td, false, &oldval)) {
			error = EFAULT;
			break;
		}
		if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
			error = EBUSY;
			break;
		}
		sigfastblock_clear(td);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
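/*
 * A minimal userland sketch of the protocol implemented by
 * sys_sigfastblock() above; see sigfastblock(2) for the authoritative
 * contract.  SIGFASTBLOCK_INC is the per-block increment defined in
 * <sys/signalvar.h>:
 *
 *	static uint32_t sigblock_word;
 *
 *	sigfastblock(SIGFASTBLOCK_SETPTR, &sigblock_word);
 *	sigblock_word += SIGFASTBLOCK_INC;	(enter deferred region)
 *	...
 *	sigblock_word -= SIGFASTBLOCK_INC;	(leave deferred region)
 *	if ((sigblock_word & SIGFASTBLOCK_PEND) != 0)
 *		(void)sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
 */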
void
sigfastblock_clear(struct thread *td)
{
	bool resched;

	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
		return;
	td->td_sigblock_val = 0;
	resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
	    SIGPENDING(td);
	td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
	sigfastblock_resched(td, resched);
}

void
sigfastblock_fetch(struct thread *td)
{
	uint32_t val;

	(void)sigfastblock_fetch_sig(td, true, &val);
}

static void
sigfastblock_setpend1(struct thread *td)
{
	int res;
	uint32_t oldval;

	if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
		return;
	res = fueword32((void *)td->td_sigblock_ptr, &oldval);
	if (res == -1) {
		sigfastblock_failed(td, true, false);
		return;
	}
	for (;;) {
		res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
		    oldval | SIGFASTBLOCK_PEND);
		if (res == -1) {
			sigfastblock_failed(td, true, true);
			return;
		}
		if (res == 0) {
			td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
			td->td_pflags &= ~TDP_SIGFASTPENDING;
			break;
		}
		MPASS(res == 1);
		if (thread_check_susp(td, false) != 0)
			break;
	}
}

static void
sigfastblock_setpend(struct thread *td, bool resched)
{
	struct proc *p;

	sigfastblock_setpend1(td);
	if (resched) {
		p = td->td_proc;
		PROC_LOCK(p);
		reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);
		PROC_UNLOCK(p);
	}
}
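/*
 * A note on the interplay above: when a signal arrives while the user
 * word holds a non-zero block count, TDP_SIGFASTPENDING is set by the
 * signal-posting paths, and sigfastblock_setpend() then advertises the
 * pending work by setting SIGFASTBLOCK_PEND in the user word.  Userspace
 * is expected to notice the bit when it drops its last block reference
 * and to call sigfastblock(SIGFASTBLOCK_UNBLOCK) so the queued signals
 * are delivered.
 */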