/*-
 * Copyright (c) 2014 John Baldwin
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

static int
protect_set(struct thread *td, struct proc *p, int flags)
{
	int error, ret;

	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}

static int
reap_acquire(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if ((p->p_treeflag & P_TREE_REAPER) != 0)
		return (EBUSY);
	p->p_treeflag |= P_TREE_REAPER;
	/*
	 * We do not reattach existing children and the whole tree
	 * under them to us, since p->p_reaper has already seen them.
	 */
	return (0);
}

static int
reap_release(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if (p == initproc)
		return (EINVAL);
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return (EINVAL);
	reaper_abandon_children(p, false);
	return (0);
}

static int
reap_status(struct thread *td, struct proc *p,
    struct procctl_reaper_status *rs)
{
	struct proc *reap, *p2, *first_p;

	sx_assert(&proctree_lock, SX_LOCKED);
	bzero(rs, sizeof(*rs));
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		rs->rs_pid = -1;
	}
	return (0);
}

static int
reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	u_int i, n;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	n = i = 0;
	error = 0;
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
			pip->pi_flags |= REAPER_PIDINFO_REAPER;
		i++;
	}
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}

static void
reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
    struct procctl_reaper_kill *rk, int *error)
{
	int error1;

	PROC_LOCK(p2);
	error1 = p_cansignal(td, p2, rk->rk_sig);
	if (error1 == 0) {
		pksignal(p2, rk->rk_sig, ksi);
		rk->rk_killed++;
		*error = error1;
	} else if (*error == ESRCH) {
		rk->rk_fpid = p2->p_pid;
		*error = error1;
	}
	PROC_UNLOCK(p2);
}

struct reap_kill_tracker {
	struct proc *parent;
	TAILQ_ENTRY(reap_kill_tracker) link;
};

TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);

static void
reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
{
	struct reap_kill_tracker *t;

	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
	t->parent = p2;
	TAILQ_INSERT_TAIL(tracker, t, link);
}

static int
reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
{
	struct proc *reap, *p2;
	ksiginfo_t ksi;
	struct reap_kill_tracker_head tracker;
	struct reap_kill_tracker *t;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
		return (EINVAL);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
		for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
		    p2 = LIST_NEXT(p2, p_sibling)) {
			reap_kill_proc(td, p2, &ksi, rk, &error);
			/*
			 * Do not end the loop on error, signal
			 * everything we can.
			 */
		}
	} else {
		TAILQ_INIT(&tracker);
		reap_kill_sched(&tracker, reap);
		while ((t = TAILQ_FIRST(&tracker)) != NULL) {
			MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
			TAILQ_REMOVE(&tracker, t, link);
			for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
			    p2 = LIST_NEXT(p2, p_reapsibling)) {
				if (t->parent == reap &&
				    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
				    p2->p_reapsubtree != rk->rk_subtree)
					continue;
				if ((p2->p_treeflag & P_TREE_REAPER) != 0)
					reap_kill_sched(&tracker, p2);
				reap_kill_proc(td, p2, &ksi, rk, &error);
			}
			free(t, M_TEMP);
		}
	}
	PROC_LOCK(p);
	return (error);
}

static int
trace_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Ktrace changes p_traceflag from or to zero under the
	 * process lock, so the test does not need to acquire ktrace
	 * mutex.
	 */
	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
		return (EBUSY);

	switch (state) {
	case PROC_TRACE_CTL_ENABLE:
		if (td->td_proc != p)
			return (EPERM);
		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
		break;
	case PROC_TRACE_CTL_DISABLE_EXEC:
		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
		break;
	case PROC_TRACE_CTL_DISABLE:
		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
			    ("dangling P2_NOTRACE_EXEC"));
			if (td->td_proc != p)
				return (EPERM);
			p->p_flag2 &= ~P2_NOTRACE_EXEC;
		} else {
			p->p_flag2 |= P2_NOTRACE;
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trace_status(struct thread *td, struct proc *p, int *data)
{

	if ((p->p_flag2 & P2_NOTRACE) != 0) {
		KASSERT((p->p_flag & P_TRACED) == 0,
		    ("%d traced but tracing disabled", p->p_pid));
		*data = -1;
	} else if ((p->p_flag & P_TRACED) != 0) {
		*data = p->p_pptr->p_pid;
	} else {
		*data = 0;
	}
	return (0);
}

static int
trapcap_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_TRAPCAP_CTL_ENABLE:
		p->p_flag2 |= P2_TRAPCAP;
		break;
	case PROC_TRAPCAP_CTL_DISABLE:
		p->p_flag2 &= ~P2_TRAPCAP;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trapcap_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
	    PROC_TRAPCAP_CTL_DISABLE;
	return (0);
}

static int
no_new_privs_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (state != PROC_NO_NEW_PRIVS_ENABLE)
		return (EINVAL);
	p->p_flag2 |= P2_NO_NEW_PRIVS;
	return (0);
}

static int
no_new_privs_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
	    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
	return (0);
}

static int
protmax_ctl(struct thread *td, struct proc *p, int state)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_PROTMAX_FORCE_ENABLE:
		p->p_flag2 &= ~P2_PROTMAX_DISABLE;
		p->p_flag2 |= P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_FORCE_DISABLE:
		p->p_flag2 |= P2_PROTMAX_DISABLE;
		p->p_flag2 &= ~P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_NOFORCE:
		p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
protmax_status(struct thread *td, struct proc *p, int *data)
{
	int d;

	switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
	case 0:
		d = PROC_PROTMAX_NOFORCE;
		break;
	case P2_PROTMAX_ENABLE:
		d = PROC_PROTMAX_FORCE_ENABLE;
		break;
	case P2_PROTMAX_DISABLE:
		d = PROC_PROTMAX_FORCE_DISABLE;
		break;
	}
	if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
		d |= PROC_PROTMAX_ACTIVE;
	*data = d;
	return (0);
}

static int
aslr_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_ASLR_FORCE_ENABLE:
		p->p_flag2 &= ~P2_ASLR_DISABLE;
		p->p_flag2 |= P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_FORCE_DISABLE:
		p->p_flag2 |= P2_ASLR_DISABLE;
		p->p_flag2 &= ~P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_NOFORCE:
		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
aslr_status(struct thread *td, struct proc *p, int *data)
{
	struct vmspace *vm;
	int d;

	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
	case 0:
		d = PROC_ASLR_NOFORCE;
		break;
	case P2_ASLR_ENABLE:
		d = PROC_ASLR_FORCE_ENABLE;
		break;
	case P2_ASLR_DISABLE:
		d = PROC_ASLR_FORCE_DISABLE;
		break;
	}
	if ((p->p_flag & P_WEXIT) == 0) {
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			if ((vm->vm_map.flags & MAP_ASLR) != 0)
				d |= PROC_ASLR_ACTIVE;
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
	}
	*data = d;
	return (0);
}

static int
stackgap_ctl(struct thread *td, struct proc *p, int state)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
	    PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
		return (EINVAL);
	switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
	case PROC_STACKGAP_ENABLE:
		if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
			return (EINVAL);
		break;
	case PROC_STACKGAP_DISABLE:
		p->p_flag2 |= P2_STKGAP_DISABLE;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	switch (state & (PROC_STACKGAP_ENABLE_EXEC |
	    PROC_STACKGAP_DISABLE_EXEC)) {
	case PROC_STACKGAP_ENABLE_EXEC:
		p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
		break;
	case PROC_STACKGAP_DISABLE_EXEC:
		p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
stackgap_status(struct thread *td, struct proc *p, int *data)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	*data = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ?
	    PROC_STACKGAP_DISABLE : PROC_STACKGAP_ENABLE;
	*data |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
	    PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
	return (0);
}

static int
wxmap_ctl(struct thread *td, struct proc *p, int state)
{
	struct vmspace *vm;
	vm_map_t map;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_WEXIT) != 0)
		return (ESRCH);

	switch (state) {
	case PROC_WX_MAPPINGS_PERMIT:
		p->p_flag2 |= P2_WXORX_DISABLE;
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			map = &vm->vm_map;
			vm_map_lock(map);
			map->flags &= ~MAP_WXORX;
			vm_map_unlock(map);
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
		break;
	case PROC_WX_MAPPINGS_DISALLOW_EXEC:
		p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

static int
wxmap_status(struct thread *td, struct proc *p, int *data)
{
	struct vmspace *vm;
	int d;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_WEXIT) != 0)
		return (ESRCH);

	d = 0;
	if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
		d |= PROC_WX_MAPPINGS_PERMIT;
	if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
		d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm != NULL) {
		if ((vm->vm_map.flags & MAP_WXORX) != 0)
			d |= PROC_WXORX_ENFORCE;
		vmspace_free(vm);
	}
	PROC_LOCK(p);
	_PRELE(p);
	*data = d;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct procctl_args {
	idtype_t idtype;
	id_t id;
	int com;
	void *data;
};
#endif
/* ARGSUSED */
int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	void *data;
	union {
		struct procctl_reaper_status rs;
		struct procctl_reaper_pids rp;
		struct procctl_reaper_kill rk;
	} x;
	int error, error1, flags, signum;

	if (uap->com >= PROC_PROCCTL_MD_MIN)
		return (cpu_procctl(td, uap->idtype, uap->id,
		    uap->com, uap->data));

	switch (uap->com) {
	case PROC_ASLR_CTL:
	case PROC_PROTMAX_CTL:
	case PROC_SPROTECT:
	case PROC_STACKGAP_CTL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
	case PROC_NO_NEW_PRIVS_CTL:
	case PROC_WXMAP_CTL:
		error = copyin(uap->data, &flags, sizeof(flags));
		if (error != 0)
			return (error);
		data = &flags;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		if (uap->data != NULL)
			return (EINVAL);
		data = NULL;
		break;
	case PROC_REAP_STATUS:
		data = &x.rs;
		break;
	case PROC_REAP_GETPIDS:
		error = copyin(uap->data, &x.rp, sizeof(x.rp));
		if (error != 0)
			return (error);
		data = &x.rp;
		break;
	case PROC_REAP_KILL:
		error = copyin(uap->data, &x.rk, sizeof(x.rk));
		if (error != 0)
			return (error);
		data = &x.rk;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_NO_NEW_PRIVS_STATUS:
	case PROC_WXMAP_STATUS:
		data = &flags;
		break;
	case PROC_PDEATHSIG_CTL:
		error = copyin(uap->data, &signum, sizeof(signum));
		if (error != 0)
			return (error);
		data = &signum;
		break;
	case PROC_PDEATHSIG_STATUS:
		data = &signum;
		break;
	default:
		return (EINVAL);
	}
	error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
	switch (uap->com) {
	case PROC_REAP_STATUS:
		if (error == 0)
			error = copyout(&x.rs, uap->data, sizeof(x.rs));
		break;
	case PROC_REAP_KILL:
		error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
		if (error == 0)
			error = error1;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_NO_NEW_PRIVS_STATUS:
	case PROC_WXMAP_STATUS:
		if (error == 0)
			error = copyout(&flags, uap->data, sizeof(flags));
		break;
	case PROC_PDEATHSIG_STATUS:
		if (error == 0)
			error = copyout(&signum, uap->data, sizeof(signum));
		break;
	}
	return (error);
}

static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	switch (com) {
	case PROC_ASLR_CTL:
		return (aslr_ctl(td, p, *(int *)data));
	case PROC_ASLR_STATUS:
		return (aslr_status(td, p, data));
	case PROC_SPROTECT:
		return (protect_set(td, p, *(int *)data));
	case PROC_PROTMAX_CTL:
		return (protmax_ctl(td, p, *(int *)data));
	case PROC_PROTMAX_STATUS:
		return (protmax_status(td, p, data));
	case PROC_STACKGAP_CTL:
		return (stackgap_ctl(td, p, *(int *)data));
	case PROC_STACKGAP_STATUS:
		return (stackgap_status(td, p, data));
	case PROC_REAP_ACQUIRE:
		return (reap_acquire(td, p));
	case PROC_REAP_RELEASE:
		return (reap_release(td, p));
	case PROC_REAP_STATUS:
		return (reap_status(td, p, data));
	case PROC_REAP_GETPIDS:
		return (reap_getpids(td, p, data));
	case PROC_REAP_KILL:
		return (reap_kill(td, p, data));
	case PROC_TRACE_CTL:
		return (trace_ctl(td, p, *(int *)data));
	case PROC_TRACE_STATUS:
		return (trace_status(td, p, data));
	case PROC_TRAPCAP_CTL:
		return (trapcap_ctl(td, p, *(int *)data));
	case PROC_TRAPCAP_STATUS:
		return (trapcap_status(td, p, data));
	case PROC_NO_NEW_PRIVS_CTL:
		return (no_new_privs_ctl(td, p, *(int *)data));
	case PROC_NO_NEW_PRIVS_STATUS:
		return (no_new_privs_status(td, p, data));
	case PROC_WXMAP_CTL:
		return (wxmap_ctl(td, p, *(int *)data));
	case PROC_WXMAP_STATUS:
		return (wxmap_status(td, p, data));
	default:
		return (EINVAL);
	}
}

int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	int error, first_error, ok;
	int signum;
	bool tree_locked;

	switch (com) {
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_STACKGAP_CTL:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_PDEATHSIG_CTL:
	case PROC_PDEATHSIG_STATUS:
	case PROC_NO_NEW_PRIVS_CTL:
	case PROC_NO_NEW_PRIVS_STATUS:
	case PROC_WXMAP_CTL:
	case PROC_WXMAP_STATUS:
		if (idtype != P_PID)
			return (EINVAL);
	}

	switch (com) {
	case PROC_PDEATHSIG_CTL:
		signum = *(int *)data;
		p = td->td_proc;
		if ((id != 0 && id != p->p_pid) ||
		    (signum != 0 && !_SIG_VALID(signum)))
			return (EINVAL);
		PROC_LOCK(p);
		p->p_pdeathsig = signum;
		PROC_UNLOCK(p);
		return (0);
	case PROC_PDEATHSIG_STATUS:
		p = td->td_proc;
		if (id != 0 && id != p->p_pid)
			return (EINVAL);
		PROC_LOCK(p);
		*(int *)data = p->p_pdeathsig;
		PROC_UNLOCK(p);
		return (0);
	}

	switch (com) {
	case PROC_SPROTECT:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
	case PROC_NO_NEW_PRIVS_CTL:
		sx_slock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		sx_xlock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_CTL:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_NO_NEW_PRIVS_STATUS:
	case PROC_WXMAP_CTL:
	case PROC_WXMAP_STATUS:
		tree_locked = false;
		break;
	default:
		return (EINVAL);
	}

	switch (idtype) {
	case P_PID:
		p = pfind(id);
		if (p == NULL) {
			error = ESRCH;
			break;
		}
		error = p_cansee(td, p);
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (tree_locked)
		sx_unlock(&proctree_lock);
	return (error);
}
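
/*
 * Usage note (illustrative sketch only, not part of the kernel source):
 * the reaper facility implemented above is reached from userspace through
 * the procctl(2) interface.  A process might, for example, acquire reaper
 * status for itself and then query its descendants roughly as follows;
 * error handling is abbreviated.
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	struct procctl_reaper_status rs;
 *
 *	if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1)
 *		err(1, "PROC_REAP_ACQUIRE");
 *	if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &rs) == -1)
 *		err(1, "PROC_REAP_STATUS");
 *	printf("reaper pid %d, %u descendants\n", (int)rs.rs_reaper,
 *	    rs.rs_descendants);
 */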