xref: /freebsd/sys/kern/kern_procctl.c (revision c1d255d3ffdbe447de3ab875bf4e7d7accc5bfc5)
/*-
 * Copyright (c) 2014 John Baldwin
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

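/*
 * Set or clear P_PROTECTED (protection from being killed when the
 * system runs short of memory) on a single process, together with the
 * P2_INHERIT_PROTECTED inheritance flag.  Returns 1 if the process was
 * updated and 0 if it was skipped because it is a system process or
 * the caller may not change its scheduling.
 */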
static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

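/*
 * Apply protect_setchild() to "top" and to every descendant of it,
 * walking the process tree iteratively under the proctree lock.
 * Returns non-zero if at least one process was updated.
 */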
static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

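/*
 * Implement PROC_SPROTECT: validate the requested operation and flags,
 * check for the required privilege, and apply the change either to the
 * single process or, with PPROT_DESCEND, to its entire subtree.
 */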
static int
protect_set(struct thread *td, struct proc *p, int flags)
{
	int error, ret;

	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}

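/*
 * Implement PROC_REAP_ACQUIRE: make the calling process the reaper of
 * its future orphaned descendants.  Only the current process may be
 * made a reaper, and only if it is not one already.
 */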
static int
reap_acquire(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if ((p->p_treeflag & P_TREE_REAPER) != 0)
		return (EBUSY);
	p->p_treeflag |= P_TREE_REAPER;
	/*
	 * We do not reattach existing children and the whole tree
	 * under them to us, since p->p_reaper has already seen them.
	 */
	return (0);
}

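/*
 * Implement PROC_REAP_RELEASE: drop the reaper role acquired earlier
 * and let the processes on the reap list be picked up by the previous
 * reaper.  The real init(8) cannot release its role.
 */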
static int
reap_release(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if (p == initproc)
		return (EINVAL);
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return (EINVAL);
	reaper_abandon_children(p, false);
	return (0);
}

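/*
 * Implement PROC_REAP_STATUS: report which reaper owns the process and
 * how many direct children and total descendants are on that reaper's
 * reap list.
 */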
static int
reap_status(struct thread *td, struct proc *p,
    struct procctl_reaper_status *rs)
{
	struct proc *reap, *p2, *first_p;

	sx_assert(&proctree_lock, SX_LOCKED);
	bzero(rs, sizeof(*rs));
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		rs->rs_pid = -1;
	}
	return (0);
}

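/*
 * Implement PROC_REAP_GETPIDS: copy out a procctl_reaper_pidinfo entry
 * for each process on the reap list, up to the rp_count limit supplied
 * by the caller.  The proctree lock is dropped around the allocation
 * and the copyout.
 */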
static int
reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	u_int i, n;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	n = i = 0;
	error = 0;
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
			pip->pi_flags |= REAPER_PIDINFO_REAPER;
		i++;
	}
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}

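/*
 * Send the requested signal to one descendant if the caller is allowed
 * to signal it.  On success the kill count is bumped; on failure the
 * pid and error of the first process that could not be signalled are
 * recorded, unless some earlier process was signalled successfully.
 */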
static void
reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
    struct procctl_reaper_kill *rk, int *error)
{
	int error1;

	PROC_LOCK(p2);
	error1 = p_cansignal(td, p2, rk->rk_sig);
	if (error1 == 0) {
		pksignal(p2, rk->rk_sig, ksi);
		rk->rk_killed++;
		*error = error1;
	} else if (*error == ESRCH) {
		rk->rk_fpid = p2->p_pid;
		*error = error1;
	}
	PROC_UNLOCK(p2);
}

struct reap_kill_tracker {
	struct proc *parent;
	TAILQ_ENTRY(reap_kill_tracker) link;
};

TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);

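/*
 * Remember a sub-reaper so that reap_kill() visits its reap list later.
 */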
static void
reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
{
	struct reap_kill_tracker *t;

	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
	t->parent = p2;
	TAILQ_INSERT_TAIL(tracker, t, link);
}

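/*
 * Implement PROC_REAP_KILL: signal either the direct children of the
 * reaper (REAPER_KILL_CHILDREN) or all descendants on the reap lists,
 * optionally limited to one subtree (REAPER_KILL_SUBTREE).  Sub-reapers
 * encountered during the walk are queued and processed in turn.
 */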
static int
reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
{
	struct proc *reap, *p2;
	ksiginfo_t ksi;
	struct reap_kill_tracker_head tracker;
	struct reap_kill_tracker *t;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
		return (EINVAL);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
		for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
		    p2 = LIST_NEXT(p2, p_sibling)) {
			reap_kill_proc(td, p2, &ksi, rk, &error);
			/*
			 * Do not end the loop on error; signal
			 * everything we can.
			 */
		}
	} else {
		TAILQ_INIT(&tracker);
		reap_kill_sched(&tracker, reap);
		while ((t = TAILQ_FIRST(&tracker)) != NULL) {
			MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
			TAILQ_REMOVE(&tracker, t, link);
			for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
			    p2 = LIST_NEXT(p2, p_reapsibling)) {
				if (t->parent == reap &&
				    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
				    p2->p_reapsubtree != rk->rk_subtree)
					continue;
				if ((p2->p_treeflag & P_TREE_REAPER) != 0)
					reap_kill_sched(&tracker, p2);
				reap_kill_proc(td, p2, &ksi, rk, &error);
			}
			free(t, M_TEMP);
		}
	}
	PROC_LOCK(p);
	return (error);
}

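/*
 * Implement PROC_TRACE_CTL: permit or forbid debugging and tracing of
 * the process by manipulating the P2_NOTRACE and P2_NOTRACE_EXEC flags.
 * The request is rejected while the process is being traced or ktraced.
 */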
static int
trace_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Ktrace changes p_traceflag from or to zero under the
	 * process lock, so the test does not need to acquire the
	 * ktrace mutex.
	 */
	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
		return (EBUSY);

	switch (state) {
	case PROC_TRACE_CTL_ENABLE:
		if (td->td_proc != p)
			return (EPERM);
		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
		break;
	case PROC_TRACE_CTL_DISABLE_EXEC:
		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
		break;
	case PROC_TRACE_CTL_DISABLE:
		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
			    ("dangling P2_NOTRACE_EXEC"));
			if (td->td_proc != p)
				return (EPERM);
			p->p_flag2 &= ~P2_NOTRACE_EXEC;
		} else {
			p->p_flag2 |= P2_NOTRACE;
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

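/*
 * Implement PROC_TRACE_STATUS: report -1 if tracing of the process is
 * forbidden, the pid of the tracing process if it is currently traced,
 * and 0 otherwise.
 */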
static int
trace_status(struct thread *td, struct proc *p, int *data)
{

	if ((p->p_flag2 & P2_NOTRACE) != 0) {
		KASSERT((p->p_flag & P_TRACED) == 0,
		    ("%d traced but tracing disabled", p->p_pid));
		*data = -1;
	} else if ((p->p_flag & P_TRACED) != 0) {
		*data = p->p_pptr->p_pid;
	} else {
		*data = 0;
	}
	return (0);
}

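/*
 * Implement PROC_TRAPCAP_CTL: control whether capability-mode
 * violations (ENOTCAPABLE and ECAPMODE syscall errors) also deliver
 * SIGTRAP to the process.
 */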
static int
trapcap_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_TRAPCAP_CTL_ENABLE:
		p->p_flag2 |= P2_TRAPCAP;
		break;
	case PROC_TRAPCAP_CTL_DISABLE:
		p->p_flag2 &= ~P2_TRAPCAP;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trapcap_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
	    PROC_TRAPCAP_CTL_DISABLE;
	return (0);
}

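/*
 * Implement PROC_NO_NEW_PRIVS_CTL: permanently prevent the process from
 * gaining privileges through set-user-ID and set-group-ID execs.  The
 * flag can only be turned on, never cleared.
 */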
static int
no_new_privs_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (state != PROC_NO_NEW_PRIVS_ENABLE)
		return (EINVAL);
	p->p_flag2 |= P2_NO_NEW_PRIVS;
	return (0);
}

static int
no_new_privs_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
	    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
	return (0);
}

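/*
 * Implement PROC_PROTMAX_CTL: force the implicit PROT_MAX limit for
 * mmap(2) on or off for this process, or fall back to the system-wide
 * default.
 */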
static int
protmax_ctl(struct thread *td, struct proc *p, int state)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_PROTMAX_FORCE_ENABLE:
		p->p_flag2 &= ~P2_PROTMAX_DISABLE;
		p->p_flag2 |= P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_FORCE_DISABLE:
		p->p_flag2 |= P2_PROTMAX_DISABLE;
		p->p_flag2 &= ~P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_NOFORCE:
		p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
protmax_status(struct thread *td, struct proc *p, int *data)
{
	int d;

	switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
	case 0:
		d = PROC_PROTMAX_NOFORCE;
		break;
	case P2_PROTMAX_ENABLE:
		d = PROC_PROTMAX_FORCE_ENABLE;
		break;
	case P2_PROTMAX_DISABLE:
		d = PROC_PROTMAX_FORCE_DISABLE;
		break;
	}
	if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
		d |= PROC_PROTMAX_ACTIVE;
	*data = d;
	return (0);
}

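/*
 * Implement PROC_ASLR_CTL: force address space layout randomization on
 * or off for the process, or fall back to the system-wide default.  The
 * setting is applied at the next exec.
 */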
static int
aslr_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_ASLR_FORCE_ENABLE:
		p->p_flag2 &= ~P2_ASLR_DISABLE;
		p->p_flag2 |= P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_FORCE_DISABLE:
		p->p_flag2 |= P2_ASLR_DISABLE;
		p->p_flag2 &= ~P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_NOFORCE:
		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
aslr_status(struct thread *td, struct proc *p, int *data)
{
	struct vmspace *vm;
	int d;

	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
	case 0:
		d = PROC_ASLR_NOFORCE;
		break;
	case P2_ASLR_ENABLE:
		d = PROC_ASLR_FORCE_ENABLE;
		break;
	case P2_ASLR_DISABLE:
		d = PROC_ASLR_FORCE_DISABLE;
		break;
	}
	if ((p->p_flag & P_WEXIT) == 0) {
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			if ((vm->vm_map.flags & MAP_ASLR) != 0)
				d |= PROC_ASLR_ACTIVE;
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
	}
	*data = d;
	return (0);
}

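/*
 * Implement PROC_STACKGAP_CTL: control the random stack gap for the
 * current address space and for future execs.  Re-enabling the gap
 * once it has been disabled for the current image is not supported,
 * hence PROC_STACKGAP_ENABLE is rejected in that case.
 */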
static int
stackgap_ctl(struct thread *td, struct proc *p, int state)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
	    PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
		return (EINVAL);
	switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
	case PROC_STACKGAP_ENABLE:
		if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
			return (EINVAL);
		break;
	case PROC_STACKGAP_DISABLE:
		p->p_flag2 |= P2_STKGAP_DISABLE;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	switch (state & (PROC_STACKGAP_ENABLE_EXEC |
	    PROC_STACKGAP_DISABLE_EXEC)) {
	case PROC_STACKGAP_ENABLE_EXEC:
		p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
		break;
	case PROC_STACKGAP_DISABLE_EXEC:
		p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
stackgap_status(struct thread *td, struct proc *p, int *data)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	*data = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
	    PROC_STACKGAP_ENABLE;
	*data |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
	    PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
	return (0);
}

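/*
 * System call glue: copy the command-specific argument in from user
 * space, dispatch to kern_procctl(), and copy any result back out.
 * From user space the facility is reached through the procctl(2)
 * wrapper; a minimal, purely illustrative use of the reaper commands
 * could look like this:
 *
 *	struct procctl_reaper_status rs;
 *
 *	(void)procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 *	if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &rs) == 0)
 *		printf("my reaper is pid %d\n", (int)rs.rs_reaper);
 */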
#ifndef _SYS_SYSPROTO_H_
struct procctl_args {
	idtype_t idtype;
	id_t	id;
	int	com;
	void	*data;
};
#endif
/* ARGSUSED */
int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	void *data;
	union {
		struct procctl_reaper_status rs;
		struct procctl_reaper_pids rp;
		struct procctl_reaper_kill rk;
	} x;
	int error, error1, flags, signum;

	if (uap->com >= PROC_PROCCTL_MD_MIN)
		return (cpu_procctl(td, uap->idtype, uap->id,
		    uap->com, uap->data));

	switch (uap->com) {
	case PROC_ASLR_CTL:
	case PROC_PROTMAX_CTL:
	case PROC_SPROTECT:
	case PROC_STACKGAP_CTL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
	case PROC_NO_NEW_PRIVS_CTL:
		error = copyin(uap->data, &flags, sizeof(flags));
		if (error != 0)
			return (error);
		data = &flags;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		if (uap->data != NULL)
			return (EINVAL);
		data = NULL;
		break;
	case PROC_REAP_STATUS:
		data = &x.rs;
		break;
	case PROC_REAP_GETPIDS:
		error = copyin(uap->data, &x.rp, sizeof(x.rp));
		if (error != 0)
			return (error);
		data = &x.rp;
		break;
	case PROC_REAP_KILL:
		error = copyin(uap->data, &x.rk, sizeof(x.rk));
		if (error != 0)
			return (error);
		data = &x.rk;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_NO_NEW_PRIVS_STATUS:
		data = &flags;
		break;
	case PROC_PDEATHSIG_CTL:
		error = copyin(uap->data, &signum, sizeof(signum));
		if (error != 0)
			return (error);
		data = &signum;
		break;
	case PROC_PDEATHSIG_STATUS:
		data = &signum;
		break;
	default:
		return (EINVAL);
	}
	error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
	switch (uap->com) {
	case PROC_REAP_STATUS:
		if (error == 0)
			error = copyout(&x.rs, uap->data, sizeof(x.rs));
		break;
	case PROC_REAP_KILL:
		error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
		if (error == 0)
			error = error1;
		break;
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_NO_NEW_PRIVS_STATUS:
		if (error == 0)
			error = copyout(&flags, uap->data, sizeof(flags));
		break;
	case PROC_PDEATHSIG_STATUS:
		if (error == 0)
			error = copyout(&signum, uap->data, sizeof(signum));
		break;
	}
	return (error);
}

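/*
 * Dispatch a request that has been resolved to a single, locked process
 * to the handler for the given command.
 */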
static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	switch (com) {
	case PROC_ASLR_CTL:
		return (aslr_ctl(td, p, *(int *)data));
	case PROC_ASLR_STATUS:
		return (aslr_status(td, p, data));
	case PROC_SPROTECT:
		return (protect_set(td, p, *(int *)data));
	case PROC_PROTMAX_CTL:
		return (protmax_ctl(td, p, *(int *)data));
	case PROC_PROTMAX_STATUS:
		return (protmax_status(td, p, data));
	case PROC_STACKGAP_CTL:
		return (stackgap_ctl(td, p, *(int *)data));
	case PROC_STACKGAP_STATUS:
		return (stackgap_status(td, p, data));
	case PROC_REAP_ACQUIRE:
		return (reap_acquire(td, p));
	case PROC_REAP_RELEASE:
		return (reap_release(td, p));
	case PROC_REAP_STATUS:
		return (reap_status(td, p, data));
	case PROC_REAP_GETPIDS:
		return (reap_getpids(td, p, data));
	case PROC_REAP_KILL:
		return (reap_kill(td, p, data));
	case PROC_TRACE_CTL:
		return (trace_ctl(td, p, *(int *)data));
	case PROC_TRACE_STATUS:
		return (trace_status(td, p, data));
	case PROC_TRAPCAP_CTL:
		return (trapcap_ctl(td, p, *(int *)data));
	case PROC_TRAPCAP_STATUS:
		return (trapcap_status(td, p, data));
	case PROC_NO_NEW_PRIVS_CTL:
		return (no_new_privs_ctl(td, p, *(int *)data));
	case PROC_NO_NEW_PRIVS_STATUS:
		return (no_new_privs_status(td, p, data));
	default:
		return (EINVAL);
	}
}

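/*
 * Common implementation of procctl(2).  Commands that only make sense
 * for a single process require P_PID; the PDEATHSIG commands are
 * handled directly on the current process.  The proctree lock is taken
 * as required by the command, and the request is then applied either
 * to one process (P_PID) or to every visible member of a process group
 * (P_PGID).
 */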
int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	int error, first_error, ok;
	int signum;
	bool tree_locked;

	switch (com) {
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_STACKGAP_CTL:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_PDEATHSIG_CTL:
	case PROC_PDEATHSIG_STATUS:
	case PROC_NO_NEW_PRIVS_CTL:
	case PROC_NO_NEW_PRIVS_STATUS:
		if (idtype != P_PID)
			return (EINVAL);
	}

	switch (com) {
	case PROC_PDEATHSIG_CTL:
		signum = *(int *)data;
		p = td->td_proc;
		if ((id != 0 && id != p->p_pid) ||
		    (signum != 0 && !_SIG_VALID(signum)))
			return (EINVAL);
		PROC_LOCK(p);
		p->p_pdeathsig = signum;
		PROC_UNLOCK(p);
		return (0);
	case PROC_PDEATHSIG_STATUS:
		p = td->td_proc;
		if (id != 0 && id != p->p_pid)
			return (EINVAL);
		PROC_LOCK(p);
		*(int *)data = p->p_pdeathsig;
		PROC_UNLOCK(p);
		return (0);
	}

	switch (com) {
	case PROC_SPROTECT:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
	case PROC_NO_NEW_PRIVS_CTL:
		sx_slock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		sx_xlock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_PROTMAX_CTL:
	case PROC_PROTMAX_STATUS:
	case PROC_STACKGAP_CTL:
	case PROC_STACKGAP_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_NO_NEW_PRIVS_STATUS:
		tree_locked = false;
		break;
	default:
		return (EINVAL);
	}

	switch (idtype) {
	case P_PID:
		p = pfind(id);
		if (p == NULL) {
			error = ESRCH;
			break;
		}
		error = p_cansee(td, p);
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * We were not able to see any processes in
			 * the process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (tree_locked)
		sx_unlock(&proctree_lock);
	return (error);
}
889