/*-
 * Copyright (c) 2014 John Baldwin
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

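/*
 * Validate the PROC_SPROTECT request, check for the
 * PRIV_VM_MADV_PROTECT privilege, and set or clear P_PROTECTED
 * (protection from OOM kills) on the process or, with PPROT_DESCEND,
 * on its entire subtree.
 */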
static int
protect_set(struct thread *td, struct proc *p, int flags)
{
	int error, ret;

	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}

static int
reap_acquire(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if ((p->p_treeflag & P_TREE_REAPER) != 0)
		return (EBUSY);
	p->p_treeflag |= P_TREE_REAPER;
	/*
	 * We do not reattach existing children and the whole tree
	 * under them to us, since p->p_reaper has already seen them.
	 */
	return (0);
}

static int
reap_release(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if (p == initproc)
		return (EINVAL);
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return (EINVAL);
	reaper_abandon_children(p, false);
	return (0);
}

static int
reap_status(struct thread *td, struct proc *p,
    struct procctl_reaper_status *rs)
{
	struct proc *reap, *p2, *first_p;

	sx_assert(&proctree_lock, SX_LOCKED);
	bzero(rs, sizeof(*rs));
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		rs->rs_pid = -1;
	}
	return (0);
}

static int
reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	u_int i, n;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	n = i = 0;
	error = 0;
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
			pip->pi_flags |= REAPER_PIDINFO_REAPER;
		i++;
	}
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}

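/*
 * Deliver the requested signal to a single process if the caller is
 * allowed to signal it.  Successful deliveries are counted in
 * rk_killed; the first process that could not be signalled is
 * recorded in rk_fpid.
 */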
static void
reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
    struct procctl_reaper_kill *rk, int *error)
{
	int error1;

	PROC_LOCK(p2);
	error1 = p_cansignal(td, p2, rk->rk_sig);
	if (error1 == 0) {
		pksignal(p2, rk->rk_sig, ksi);
		rk->rk_killed++;
		*error = error1;
	} else if (*error == ESRCH) {
		rk->rk_fpid = p2->p_pid;
		*error = error1;
	}
	PROC_UNLOCK(p2);
}

struct reap_kill_tracker {
	struct proc *parent;
	TAILQ_ENTRY(reap_kill_tracker) link;
};

TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);

static void
reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
{
	struct reap_kill_tracker *t;

	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
	t->parent = p2;
	TAILQ_INSERT_TAIL(tracker, t, link);
}

static int
reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
{
	struct proc *reap, *p2;
	ksiginfo_t ksi;
	struct reap_kill_tracker_head tracker;
	struct reap_kill_tracker *t;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
		return (EINVAL);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
		for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
		    p2 = LIST_NEXT(p2, p_sibling)) {
			reap_kill_proc(td, p2, &ksi, rk, &error);
			/*
			 * Do not end the loop on error, signal
			 * everything we can.
			 */
		}
	} else {
		TAILQ_INIT(&tracker);
		reap_kill_sched(&tracker, reap);
		while ((t = TAILQ_FIRST(&tracker)) != NULL) {
			MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
			TAILQ_REMOVE(&tracker, t, link);
			for (p2 = LIST_FIRST(&t->parent->p_reaplist);
			    p2 != NULL; p2 = LIST_NEXT(p2, p_reapsibling)) {
				if (t->parent == reap &&
				    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
				    p2->p_reapsubtree != rk->rk_subtree)
					continue;
				if ((p2->p_treeflag & P_TREE_REAPER) != 0)
					reap_kill_sched(&tracker, p2);
				reap_kill_proc(td, p2, &ksi, rk, &error);
			}
			free(t, M_TEMP);
		}
	}
	PROC_LOCK(p);
	return (error);
}

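/*
 * Control whether tracing of the process is permitted by adjusting the
 * P2_NOTRACE and P2_NOTRACE_EXEC flags.  Re-enabling tracing is
 * restricted to the process itself, and the request fails with EBUSY
 * while the process is already being traced.
 */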
static int
trace_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Ktrace changes p_traceflag from or to zero under the
	 * process lock, so the test does not need to acquire ktrace
	 * mutex.
	 */
	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
		return (EBUSY);

	switch (state) {
	case PROC_TRACE_CTL_ENABLE:
		if (td->td_proc != p)
			return (EPERM);
		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
		break;
	case PROC_TRACE_CTL_DISABLE_EXEC:
		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
		break;
	case PROC_TRACE_CTL_DISABLE:
		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
			    ("dangling P2_NOTRACE_EXEC"));
			if (td->td_proc != p)
				return (EPERM);
			p->p_flag2 &= ~P2_NOTRACE_EXEC;
		} else {
			p->p_flag2 |= P2_NOTRACE;
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trace_status(struct thread *td, struct proc *p, int *data)
{

	if ((p->p_flag2 & P2_NOTRACE) != 0) {
		KASSERT((p->p_flag & P_TRACED) == 0,
		    ("%d traced but tracing disabled", p->p_pid));
		*data = -1;
	} else if ((p->p_flag & P_TRACED) != 0) {
		*data = p->p_pptr->p_pid;
	} else {
		*data = 0;
	}
	return (0);
}

static int
trapcap_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_TRAPCAP_CTL_ENABLE:
		p->p_flag2 |= P2_TRAPCAP;
		break;
	case PROC_TRAPCAP_CTL_DISABLE:
		p->p_flag2 &= ~P2_TRAPCAP;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trapcap_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
	    PROC_TRAPCAP_CTL_DISABLE;
	return (0);
}

static int
protmax_ctl(struct thread *td, struct proc *p, int state)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_PROTMAX_FORCE_ENABLE:
		p->p_flag2 &= ~P2_PROTMAX_DISABLE;
		p->p_flag2 |= P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_FORCE_DISABLE:
		p->p_flag2 |= P2_PROTMAX_DISABLE;
		p->p_flag2 &= ~P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_NOFORCE:
		p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
protmax_status(struct thread *td, struct proc *p, int *data)
{
	int d;

	switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
	case 0:
		d = PROC_PROTMAX_NOFORCE;
		break;
	case P2_PROTMAX_ENABLE:
		d = PROC_PROTMAX_FORCE_ENABLE;
		break;
	case P2_PROTMAX_DISABLE:
		d = PROC_PROTMAX_FORCE_DISABLE;
		break;
	}
	if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
		d |= PROC_PROTMAX_ACTIVE;
	*data = d;
	return (0);
}

static int
aslr_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_ASLR_FORCE_ENABLE:
		p->p_flag2 &= ~P2_ASLR_DISABLE;
		p->p_flag2 |= P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_FORCE_DISABLE:
		p->p_flag2 |= P2_ASLR_DISABLE;
		p->p_flag2 &= ~P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_NOFORCE:
		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
aslr_status(struct thread *td, struct proc *p, int *data)
{
	struct vmspace *vm;
	int d;

	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
	case 0:
		d = PROC_ASLR_NOFORCE;
		break;
	case P2_ASLR_ENABLE:
		d = PROC_ASLR_FORCE_ENABLE;
		break;
	case P2_ASLR_DISABLE:
		d = PROC_ASLR_FORCE_DISABLE;
		break;
	}
	if ((p->p_flag & P_WEXIT) == 0) {
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			if ((vm->vm_map.flags & MAP_ASLR) != 0)
				d |= PROC_ASLR_ACTIVE;
			/* Drop the reference regardless of the ASLR state. */
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
	}
	*data = d;
	return (0);
}

static int
stackgap_ctl(struct thread *td, struct proc *p, int state)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
	    PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
		return (EINVAL);
	switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
	case PROC_STACKGAP_ENABLE:
		if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
			return (EINVAL);
		break;
	case PROC_STACKGAP_DISABLE:
		p->p_flag2 |= P2_STKGAP_DISABLE;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	switch (state & (PROC_STACKGAP_ENABLE_EXEC |
	    PROC_STACKGAP_DISABLE_EXEC)) {
	case PROC_STACKGAP_ENABLE_EXEC:
		p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
		break;
	case PROC_STACKGAP_DISABLE_EXEC:
		p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
stackgap_status(struct thread *td, struct proc *p, int *data)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	*data = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
	    PROC_STACKGAP_ENABLE;
	*data |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
	    PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct procctl_args {
	idtype_t idtype;
	id_t id;
	int com;
	void *data;
};
#endif

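/*
 * procctl(2) system call entry point: copy in the command argument,
 * dispatch to kern_procctl(), and copy any status result back out to
 * userspace.
 */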
/* ARGSUSED */
int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	void *data;
	union {
		struct procctl_reaper_status rs;
		struct procctl_reaper_pids rp;
		struct procctl_reaper_kill rk;
	} x;
	int error, error1, flags, signum;

	if (uap->com >= PROC_PROCCTL_MD_MIN)
		return (cpu_procctl(td, uap->idtype, uap->id,
		    uap->com, uap->data));

	switch (uap->com) {
	case PROC_ASLR_CTL:
	case PROC_PROTMAX_CTL:
	case PROC_SPROTECT:
	case PROC_STACKGAP_CTL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		error = copyin(uap->data, &flags, sizeof(flags));
		if (error != 0)
			return (error);
		data = &flags;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		if (uap->data != NULL)
			return (EINVAL);
		data = NULL;
		break;
	case PROC_REAP_STATUS:
		data = &x.rs;
		break;
	case PROC_REAP_GETPIDS:
		error = copyin(uap->data, &x.rp, sizeof(x.rp));
		if (error != 0)
			return (error);
		data = &x.rp;
		break;
	case PROC_REAP_KILL:
		error = copyin(uap->data, &x.rk, sizeof(x.rk));
		if (error != 0)
			return (error);
		data = &x.rk;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		data = &flags;
		break;
	case PROC_PDEATHSIG_CTL:
		error = copyin(uap->data, &signum, sizeof(signum));
		if (error != 0)
			return (error);
		data = &signum;
		break;
	case PROC_PDEATHSIG_STATUS:
		data = &signum;
		break;
	default:
		return (EINVAL);
	}
	error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
	switch (uap->com) {
	case PROC_REAP_STATUS:
		if (error == 0)
			error = copyout(&x.rs, uap->data, sizeof(x.rs));
		break;
	case PROC_REAP_KILL:
		error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
		if (error == 0)
			error = error1;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		if (error == 0)
			error = copyout(&flags, uap->data, sizeof(flags));
		break;
	case PROC_PDEATHSIG_STATUS:
		if (error == 0)
			error = copyout(&signum, uap->data, sizeof(signum));
		break;
	}
	return (error);
}

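/*
 * Apply a single procctl request to one process.  The process lock is
 * held by the caller, and any proctree_lock the command requires has
 * already been taken in kern_procctl().
 */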
static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	switch (com) {
	case PROC_ASLR_CTL:
		return (aslr_ctl(td, p, *(int *)data));
	case PROC_ASLR_STATUS:
		return (aslr_status(td, p, data));
	case PROC_SPROTECT:
		return (protect_set(td, p, *(int *)data));
	case PROC_PROTMAX_CTL:
		return (protmax_ctl(td, p, *(int *)data));
	case PROC_PROTMAX_STATUS:
		return (protmax_status(td, p, data));
	case PROC_STACKGAP_CTL:
		return (stackgap_ctl(td, p, *(int *)data));
	case PROC_STACKGAP_STATUS:
		return (stackgap_status(td, p, data));
	case PROC_REAP_ACQUIRE:
		return (reap_acquire(td, p));
	case PROC_REAP_RELEASE:
		return (reap_release(td, p));
	case PROC_REAP_STATUS:
		return (reap_status(td, p, data));
	case PROC_REAP_GETPIDS:
		return (reap_getpids(td, p, data));
	case PROC_REAP_KILL:
		return (reap_kill(td, p, data));
	case PROC_TRACE_CTL:
		return (trace_ctl(td, p, *(int *)data));
	case PROC_TRACE_STATUS:
		return (trace_status(td, p, data));
	case PROC_TRAPCAP_CTL:
		return (trapcap_ctl(td, p, *(int *)data));
	case PROC_TRAPCAP_STATUS:
		return (trapcap_status(td, p, data));
	default:
		return (EINVAL);
	}
}

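/*
 * Validate the id type for the command, take proctree_lock in the mode
 * the command requires, and apply the request to a single process
 * (P_PID) or to every visible member of a process group (P_PGID).
 */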
int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	int error, first_error, ok;
	int signum;
	bool tree_locked;

	switch (com) {
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_STACKGAP_CTL:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_PDEATHSIG_CTL:
	case PROC_PDEATHSIG_STATUS:
		if (idtype != P_PID)
			return (EINVAL);
	}

	switch (com) {
	case PROC_PDEATHSIG_CTL:
		signum = *(int *)data;
		p = td->td_proc;
		if ((id != 0 && id != p->p_pid) ||
		    (signum != 0 && !_SIG_VALID(signum)))
			return (EINVAL);
		PROC_LOCK(p);
		p->p_pdeathsig = signum;
		PROC_UNLOCK(p);
		return (0);
	case PROC_PDEATHSIG_STATUS:
		p = td->td_proc;
		if (id != 0 && id != p->p_pid)
			return (EINVAL);
		PROC_LOCK(p);
		*(int *)data = p->p_pdeathsig;
		PROC_UNLOCK(p);
		return (0);
	}

	switch (com) {
	case PROC_SPROTECT:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		sx_slock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		sx_xlock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_CTL:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		tree_locked = false;
		break;
	default:
		return (EINVAL);
	}

	switch (idtype) {
	case P_PID:
		p = pfind(id);
		if (p == NULL) {
			error = ESRCH;
			break;
		}
		error = p_cansee(td, p);
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (tree_locked)
		sx_unlock(&proctree_lock);
	return (error);
}