/*-
 * Copyright (c) 2014 John Baldwin
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

static int
protect_set(struct thread *td, struct proc *p, int flags)
{
	int error, ret;

	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}

static int
reap_acquire(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if ((p->p_treeflag & P_TREE_REAPER) != 0)
		return (EBUSY);
	p->p_treeflag |= P_TREE_REAPER;
	/*
	 * We do not reattach existing children and the whole tree
	 * under them to us, since p->p_reaper has already seen them.
	 */
	return (0);
}

static int
reap_release(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if (p == initproc)
		return (EINVAL);
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return (EINVAL);
	reaper_abandon_children(p, false);
	return (0);
}

static int
reap_status(struct thread *td, struct proc *p,
    struct procctl_reaper_status *rs)
{
	struct proc *reap, *p2, *first_p;

	sx_assert(&proctree_lock, SX_LOCKED);
	bzero(rs, sizeof(*rs));
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		rs->rs_pid = -1;
	}
	return (0);
}

static int
reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	u_int i, n;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ?
	    p->p_reaper : p;
	n = i = 0;
	error = 0;
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
			pip->pi_flags |= REAPER_PIDINFO_REAPER;
		i++;
	}
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}

static void
reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
    struct procctl_reaper_kill *rk, int *error)
{
	int error1;

	PROC_LOCK(p2);
	error1 = p_cansignal(td, p2, rk->rk_sig);
	if (error1 == 0) {
		pksignal(p2, rk->rk_sig, ksi);
		rk->rk_killed++;
		*error = error1;
	} else if (*error == ESRCH) {
		rk->rk_fpid = p2->p_pid;
		*error = error1;
	}
	PROC_UNLOCK(p2);
}

struct reap_kill_tracker {
	struct proc *parent;
	TAILQ_ENTRY(reap_kill_tracker) link;
};

TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);

static void
reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
{
	struct reap_kill_tracker *t;

	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
	t->parent = p2;
	TAILQ_INSERT_TAIL(tracker, t, link);
}

static int
reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
{
	struct proc *reap, *p2;
	ksiginfo_t ksi;
	struct reap_kill_tracker_head tracker;
	struct reap_kill_tracker *t;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
		return (EINVAL);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
		for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
		    p2 = LIST_NEXT(p2, p_sibling)) {
			reap_kill_proc(td, p2, &ksi, rk, &error);
			/*
			 * Do not end the loop on error, signal
			 * everything we can.
			 */
		}
	} else {
		TAILQ_INIT(&tracker);
		reap_kill_sched(&tracker, reap);
		while ((t = TAILQ_FIRST(&tracker)) != NULL) {
			MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
			TAILQ_REMOVE(&tracker, t, link);
			for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
			    p2 = LIST_NEXT(p2, p_reapsibling)) {
				if (t->parent == reap &&
				    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
				    p2->p_reapsubtree != rk->rk_subtree)
					continue;
				if ((p2->p_treeflag & P_TREE_REAPER) != 0)
					reap_kill_sched(&tracker, p2);
				reap_kill_proc(td, p2, &ksi, rk, &error);
			}
			free(t, M_TEMP);
		}
	}
	PROC_LOCK(p);
	return (error);
}

static int
trace_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Ktrace changes p_traceflag from or to zero under the
	 * process lock, so the test does not need to acquire the
	 * ktrace mutex.
	 */
	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
		return (EBUSY);

	switch (state) {
	case PROC_TRACE_CTL_ENABLE:
		if (td->td_proc != p)
			return (EPERM);
		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
		break;
	case PROC_TRACE_CTL_DISABLE_EXEC:
		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
		break;
	case PROC_TRACE_CTL_DISABLE:
		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
			    ("dangling P2_NOTRACE_EXEC"));
			if (td->td_proc != p)
				return (EPERM);
			p->p_flag2 &= ~P2_NOTRACE_EXEC;
		} else {
			p->p_flag2 |= P2_NOTRACE;
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trace_status(struct thread *td, struct proc *p, int *data)
{

	if ((p->p_flag2 & P2_NOTRACE) != 0) {
		KASSERT((p->p_flag & P_TRACED) == 0,
		    ("%d traced but tracing disabled", p->p_pid));
		*data = -1;
	} else if ((p->p_flag & P_TRACED) != 0) {
		*data = p->p_pptr->p_pid;
	} else {
		*data = 0;
	}
	return (0);
}

static int
trapcap_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_TRAPCAP_CTL_ENABLE:
		p->p_flag2 |= P2_TRAPCAP;
		break;
	case PROC_TRAPCAP_CTL_DISABLE:
		p->p_flag2 &= ~P2_TRAPCAP;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trapcap_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_TRAPCAP) != 0 ?
	    PROC_TRAPCAP_CTL_ENABLE : PROC_TRAPCAP_CTL_DISABLE;
	return (0);
}

static int
protmax_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_PROTMAX_FORCE_ENABLE:
		p->p_flag2 &= ~P2_PROTMAX_DISABLE;
		p->p_flag2 |= P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_FORCE_DISABLE:
		p->p_flag2 |= P2_PROTMAX_DISABLE;
		p->p_flag2 &= ~P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_NOFORCE:
		p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
protmax_status(struct thread *td, struct proc *p, int *data)
{
	int d;

	switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
	case 0:
		d = PROC_PROTMAX_NOFORCE;
		break;
	case P2_PROTMAX_ENABLE:
		d = PROC_PROTMAX_FORCE_ENABLE;
		break;
	case P2_PROTMAX_DISABLE:
		d = PROC_PROTMAX_FORCE_DISABLE;
		break;
	}
	if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
		d |= PROC_PROTMAX_ACTIVE;
	*data = d;
	return (0);
}

static int
aslr_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_ASLR_FORCE_ENABLE:
		p->p_flag2 &= ~P2_ASLR_DISABLE;
		p->p_flag2 |= P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_FORCE_DISABLE:
		p->p_flag2 |= P2_ASLR_DISABLE;
		p->p_flag2 &= ~P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_NOFORCE:
		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
aslr_status(struct thread *td, struct proc *p, int *data)
{
	struct vmspace *vm;
	int d;

	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
	case 0:
		d = PROC_ASLR_NOFORCE;
		break;
	case P2_ASLR_ENABLE:
		d = PROC_ASLR_FORCE_ENABLE;
		break;
	case P2_ASLR_DISABLE:
		d = PROC_ASLR_FORCE_DISABLE;
		break;
	}
	if ((p->p_flag & P_WEXIT) == 0) {
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			if ((vm->vm_map.flags & MAP_ASLR) != 0)
				d |= PROC_ASLR_ACTIVE;
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
	}
	*data = d;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct procctl_args {
	idtype_t idtype;
	id_t	id;
	int	com;
	void	*data;
};
#endif
/* ARGSUSED */
int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	void *data;
	union {
		struct procctl_reaper_status rs;
		struct procctl_reaper_pids rp;
		struct procctl_reaper_kill rk;
	} x;
	int error, error1, flags, signum;

	if (uap->com >= PROC_PROCCTL_MD_MIN)
		return (cpu_procctl(td, uap->idtype, uap->id,
		    uap->com, uap->data));

	switch (uap->com) {
	case PROC_ASLR_CTL:
	case PROC_PROTMAX_CTL:
	case PROC_SPROTECT:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		error = copyin(uap->data, &flags, sizeof(flags));
		if (error != 0)
			return (error);
		data = &flags;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		if (uap->data != NULL)
			return (EINVAL);
		data = NULL;
		break;
	case PROC_REAP_STATUS:
		data = &x.rs;
		break;
	case PROC_REAP_GETPIDS:
		error = copyin(uap->data, &x.rp, sizeof(x.rp));
		if (error != 0)
			return (error);
		data = &x.rp;
		break;
	case PROC_REAP_KILL:
		error =
		    copyin(uap->data, &x.rk, sizeof(x.rk));
		if (error != 0)
			return (error);
		data = &x.rk;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		data = &flags;
		break;
	case PROC_PDEATHSIG_CTL:
		error = copyin(uap->data, &signum, sizeof(signum));
		if (error != 0)
			return (error);
		data = &signum;
		break;
	case PROC_PDEATHSIG_STATUS:
		data = &signum;
		break;
	default:
		return (EINVAL);
	}
	error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
	switch (uap->com) {
	case PROC_REAP_STATUS:
		if (error == 0)
			error = copyout(&x.rs, uap->data, sizeof(x.rs));
		break;
	case PROC_REAP_KILL:
		error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
		if (error == 0)
			error = error1;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		if (error == 0)
			error = copyout(&flags, uap->data, sizeof(flags));
		break;
	case PROC_PDEATHSIG_STATUS:
		if (error == 0)
			error = copyout(&signum, uap->data, sizeof(signum));
		break;
	}
	return (error);
}

static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	switch (com) {
	case PROC_ASLR_CTL:
		return (aslr_ctl(td, p, *(int *)data));
	case PROC_ASLR_STATUS:
		return (aslr_status(td, p, data));
	case PROC_SPROTECT:
		return (protect_set(td, p, *(int *)data));
	case PROC_PROTMAX_CTL:
		return (protmax_ctl(td, p, *(int *)data));
	case PROC_PROTMAX_STATUS:
		return (protmax_status(td, p, data));
	case PROC_REAP_ACQUIRE:
		return (reap_acquire(td, p));
	case PROC_REAP_RELEASE:
		return (reap_release(td, p));
	case PROC_REAP_STATUS:
		return (reap_status(td, p, data));
	case PROC_REAP_GETPIDS:
		return (reap_getpids(td, p, data));
	case PROC_REAP_KILL:
		return (reap_kill(td, p, data));
	case PROC_TRACE_CTL:
		return (trace_ctl(td, p, *(int *)data));
	case PROC_TRACE_STATUS:
		return (trace_status(td, p, data));
	case PROC_TRAPCAP_CTL:
		return (trapcap_ctl(td, p, *(int *)data));
	case PROC_TRAPCAP_STATUS:
		return (trapcap_status(td, p, data));
	default:
		return (EINVAL);
	}
}

int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	int error, first_error, ok;
	int signum;
	bool tree_locked;

	switch (com) {
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_PDEATHSIG_CTL:
	case PROC_PDEATHSIG_STATUS:
		if (idtype != P_PID)
			return (EINVAL);
	}

	switch (com) {
	case PROC_PDEATHSIG_CTL:
		signum = *(int *)data;
		p = td->td_proc;
		if ((id != 0 && id != p->p_pid) ||
		    (signum != 0 && !_SIG_VALID(signum)))
			return (EINVAL);
		PROC_LOCK(p);
		p->p_pdeathsig = signum;
		PROC_UNLOCK(p);
		return (0);
	case PROC_PDEATHSIG_STATUS:
		p = td->td_proc;
		if (id != 0 && id != p->p_pid)
			return (EINVAL);
		PROC_LOCK(p);
		*(int *)data = p->p_pdeathsig;
		PROC_UNLOCK(p);
		return (0);
	}

	switch (com) {
	case PROC_SPROTECT:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		sx_slock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		sx_xlock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		tree_locked = false;
		break;
	default:
		return (EINVAL);
	}

	switch (idtype) {
	case P_PID:
		p = pfind(id);
		if (p == NULL) {
			error = ESRCH;
			break;
		}
		error = p_cansee(td, p);
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (tree_locked)
		sx_unlock(&proctree_lock);
	return (error);
}
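
/*
 * Usage sketch (userland, illustrative only; not compiled as part of this
 * file): a service manager could mark itself as the reaper of its
 * descendants, query the subtree, and later signal it.  The commands and
 * structures are the ones handled above and declared in <sys/procctl.h>;
 * error handling is omitted for brevity.
 *
 *	#include <sys/procctl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	struct procctl_reaper_status rs;
 *	struct procctl_reaper_kill rk = { .rk_sig = SIGTERM };
 *
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 *	procctl(P_PID, getpid(), PROC_REAP_STATUS, &rs);
 *	procctl(P_PID, getpid(), PROC_REAP_KILL, &rk);
 */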