xref: /freebsd/sys/kern/kern_procctl.c (revision 517904de5ccac643589c71ac0d2751797f89e4f9)
1 /*-
2  * Copyright (c) 2014 John Baldwin
3  * Copyright (c) 2014, 2016 The FreeBSD Foundation
4  *
5  * Portions of this software were developed by Konstantin Belousov
6  * under sponsorship from the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/capsicum.h>
36 #include <sys/lock.h>
37 #include <sys/mman.h>
38 #include <sys/mutex.h>
39 #include <sys/priv.h>
40 #include <sys/proc.h>
41 #include <sys/procctl.h>
42 #include <sys/sx.h>
43 #include <sys/syscallsubr.h>
44 #include <sys/sysproto.h>
45 #include <sys/wait.h>
46 
47 #include <vm/vm.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_extern.h>
51 
52 static int
53 protect_setchild(struct thread *td, struct proc *p, int flags)
54 {
55 
56 	PROC_LOCK_ASSERT(p, MA_OWNED);
57 	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
58 		return (0);
59 	if (flags & PPROT_SET) {
60 		p->p_flag |= P_PROTECTED;
61 		if (flags & PPROT_INHERIT)
62 			p->p_flag2 |= P2_INHERIT_PROTECTED;
63 	} else {
64 		p->p_flag &= ~P_PROTECTED;
65 		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
66 	}
67 	return (1);
68 }
69 
70 static int
71 protect_setchildren(struct thread *td, struct proc *top, int flags)
72 {
73 	struct proc *p;
74 	int ret;
75 
76 	p = top;
77 	ret = 0;
78 	sx_assert(&proctree_lock, SX_LOCKED);
79 	for (;;) {
80 		ret |= protect_setchild(td, p, flags);
81 		PROC_UNLOCK(p);
82 		/*
83 		 * If this process has children, descend to them next,
84 		 * otherwise do any siblings, and if done with this level,
85 		 * follow back up the tree (but not past top).
86 		 */
87 		if (!LIST_EMPTY(&p->p_children))
88 			p = LIST_FIRST(&p->p_children);
89 		else for (;;) {
90 			if (p == top) {
91 				PROC_LOCK(p);
92 				return (ret);
93 			}
94 			if (LIST_NEXT(p, p_sibling)) {
95 				p = LIST_NEXT(p, p_sibling);
96 				break;
97 			}
98 			p = p->p_pptr;
99 		}
100 		PROC_LOCK(p);
101 	}
102 }
103 
104 static int
105 protect_set(struct thread *td, struct proc *p, int flags)
106 {
107 	int error, ret;
108 
109 	switch (PPROT_OP(flags)) {
110 	case PPROT_SET:
111 	case PPROT_CLEAR:
112 		break;
113 	default:
114 		return (EINVAL);
115 	}
116 
117 	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
118 		return (EINVAL);
119 
120 	error = priv_check(td, PRIV_VM_MADV_PROTECT);
121 	if (error)
122 		return (error);
123 
124 	if (flags & PPROT_DESCEND)
125 		ret = protect_setchildren(td, p, flags);
126 	else
127 		ret = protect_setchild(td, p, flags);
128 	if (ret == 0)
129 		return (EPERM);
130 	return (0);
131 }
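
/*
 * Illustrative userspace sketch (not part of this file; assumes
 * <sys/procctl.h>, <unistd.h> and <err.h>): PROC_SPROTECT drives
 * protect_set() above.  It requires the PRIV_VM_MADV_PROTECT privilege
 * (normally root) and is meant to exempt the targeted processes from
 * being killed when the system runs out of memory, the same mark that
 * madvise(MADV_PROTECT) sets.
 *
 *	int op = PPROT_SET | PPROT_INHERIT;
 *
 *	if (procctl(P_PID, (id_t)getpid(), PROC_SPROTECT, &op) == -1)
 *		err(1, "PROC_SPROTECT");
 *
 * PPROT_INHERIT makes future children inherit the protection, while
 * PPROT_DESCEND applies the request to existing descendants via
 * protect_setchildren().
 */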
132 
133 static int
134 reap_acquire(struct thread *td, struct proc *p)
135 {
136 
137 	sx_assert(&proctree_lock, SX_XLOCKED);
138 	if (p != curproc)
139 		return (EPERM);
140 	if ((p->p_treeflag & P_TREE_REAPER) != 0)
141 		return (EBUSY);
142 	p->p_treeflag |= P_TREE_REAPER;
143 	/*
144 	 * We do not reattach existing children and the whole tree
145 	 * under them to us, since p->p_reaper has already seen them.
146 	 */
147 	return (0);
148 }
149 
150 static int
151 reap_release(struct thread *td, struct proc *p)
152 {
153 
154 	sx_assert(&proctree_lock, SX_XLOCKED);
155 	if (p != curproc)
156 		return (EPERM);
157 	if (p == initproc)
158 		return (EINVAL);
159 	if ((p->p_treeflag & P_TREE_REAPER) == 0)
160 		return (EINVAL);
161 	reaper_abandon_children(p, false);
162 	return (0);
163 }
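
/*
 * Illustrative userspace sketch (not part of this file): a process
 * acquires the reaper role for its subtree, so that descendants
 * orphaned afterwards are reparented to it rather than to init, and
 * may later give the role back.  Both requests must target the calling
 * process itself and carry no argument data:
 *
 *	if (procctl(P_PID, (id_t)getpid(), PROC_REAP_ACQUIRE, NULL) == -1)
 *		err(1, "PROC_REAP_ACQUIRE");
 *	...
 *	if (procctl(P_PID, (id_t)getpid(), PROC_REAP_RELEASE, NULL) == -1)
 *		err(1, "PROC_REAP_RELEASE");
 */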
164 
165 static int
166 reap_status(struct thread *td, struct proc *p,
167     struct procctl_reaper_status *rs)
168 {
169 	struct proc *reap, *p2, *first_p;
170 
171 	sx_assert(&proctree_lock, SX_LOCKED);
172 	bzero(rs, sizeof(*rs));
173 	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
174 		reap = p->p_reaper;
175 	} else {
176 		reap = p;
177 		rs->rs_flags |= REAPER_STATUS_OWNED;
178 	}
179 	if (reap == initproc)
180 		rs->rs_flags |= REAPER_STATUS_REALINIT;
181 	rs->rs_reaper = reap->p_pid;
182 	rs->rs_descendants = 0;
183 	rs->rs_children = 0;
184 	if (!LIST_EMPTY(&reap->p_reaplist)) {
185 		first_p = LIST_FIRST(&reap->p_children);
186 		if (first_p == NULL)
187 			first_p = LIST_FIRST(&reap->p_reaplist);
188 		rs->rs_pid = first_p->p_pid;
189 		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
190 			if (proc_realparent(p2) == reap)
191 				rs->rs_children++;
192 			rs->rs_descendants++;
193 		}
194 	} else {
195 		rs->rs_pid = -1;
196 	}
197 	return (0);
198 }
199 
200 static int
201 reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
202 {
203 	struct proc *reap, *p2;
204 	struct procctl_reaper_pidinfo *pi, *pip;
205 	u_int i, n;
206 	int error;
207 
208 	sx_assert(&proctree_lock, SX_LOCKED);
209 	PROC_UNLOCK(p);
210 	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
211 	n = i = 0;
212 	error = 0;
213 	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
214 		n++;
215 	sx_unlock(&proctree_lock);
216 	if (rp->rp_count < n)
217 		n = rp->rp_count;
218 	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
219 	sx_slock(&proctree_lock);
220 	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
221 		if (i == n)
222 			break;
223 		pip = &pi[i];
224 		bzero(pip, sizeof(*pip));
225 		pip->pi_pid = p2->p_pid;
226 		pip->pi_subtree = p2->p_reapsubtree;
227 		pip->pi_flags = REAPER_PIDINFO_VALID;
228 		if (proc_realparent(p2) == reap)
229 			pip->pi_flags |= REAPER_PIDINFO_CHILD;
230 		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
231 			pip->pi_flags |= REAPER_PIDINFO_REAPER;
232 		i++;
233 	}
234 	sx_sunlock(&proctree_lock);
235 	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
236 	free(pi, M_TEMP);
237 	sx_slock(&proctree_lock);
238 	PROC_LOCK(p);
239 	return (error);
240 }
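
/*
 * Illustrative userspace sketch (not part of this file): listing the
 * reaper's descendants usually pairs PROC_REAP_STATUS, to size the
 * buffer, with PROC_REAP_GETPIDS:
 *
 *	struct procctl_reaper_status rs;
 *	struct procctl_reaper_pids rp;
 *
 *	if (procctl(P_PID, (id_t)getpid(), PROC_REAP_STATUS, &rs) == -1)
 *		err(1, "PROC_REAP_STATUS");
 *	rp.rp_count = rs.rs_descendants;
 *	rp.rp_pids = calloc(rp.rp_count, sizeof(*rp.rp_pids));
 *	if (rp.rp_pids == NULL)
 *		err(1, "calloc");
 *	if (procctl(P_PID, (id_t)getpid(), PROC_REAP_GETPIDS, &rp) == -1)
 *		err(1, "PROC_REAP_GETPIDS");
 *
 * Only entries with REAPER_PIDINFO_VALID set in pi_flags were filled
 * in, and the descendant set may change between the two calls, so
 * rs_descendants is just a sizing hint.
 */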
241 
242 static void
243 reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
244     struct procctl_reaper_kill *rk, int *error)
245 {
246 	int error1;
247 
248 	PROC_LOCK(p2);
249 	error1 = p_cansignal(td, p2, rk->rk_sig);
250 	if (error1 == 0) {
251 		pksignal(p2, rk->rk_sig, ksi);
252 		rk->rk_killed++;
253 		*error = error1;
254 	} else if (*error == ESRCH) {
255 		rk->rk_fpid = p2->p_pid;
256 		*error = error1;
257 	}
258 	PROC_UNLOCK(p2);
259 }
260 
261 struct reap_kill_tracker {
262 	struct proc *parent;
263 	TAILQ_ENTRY(reap_kill_tracker) link;
264 };
265 
266 TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
267 
268 static void
269 reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
270 {
271 	struct reap_kill_tracker *t;
272 
273 	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
274 	t->parent = p2;
275 	TAILQ_INSERT_TAIL(tracker, t, link);
276 }
277 
278 static int
279 reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
280 {
281 	struct proc *reap, *p2;
282 	ksiginfo_t ksi;
283 	struct reap_kill_tracker_head tracker;
284 	struct reap_kill_tracker *t;
285 	int error;
286 
287 	sx_assert(&proctree_lock, SX_LOCKED);
288 	if (IN_CAPABILITY_MODE(td))
289 		return (ECAPMODE);
290 	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
291 	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
292 	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
293 	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
294 	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
295 		return (EINVAL);
296 	PROC_UNLOCK(p);
297 	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
298 	ksiginfo_init(&ksi);
299 	ksi.ksi_signo = rk->rk_sig;
300 	ksi.ksi_code = SI_USER;
301 	ksi.ksi_pid = td->td_proc->p_pid;
302 	ksi.ksi_uid = td->td_ucred->cr_ruid;
303 	error = ESRCH;
304 	rk->rk_killed = 0;
305 	rk->rk_fpid = -1;
306 	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
307 		for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
308 		    p2 = LIST_NEXT(p2, p_sibling)) {
309 			reap_kill_proc(td, p2, &ksi, rk, &error);
310 			/*
311 			 * Do not end the loop on error, signal
312 			 * everything we can.
313 			 */
314 		}
315 	} else {
316 		TAILQ_INIT(&tracker);
317 		reap_kill_sched(&tracker, reap);
318 		while ((t = TAILQ_FIRST(&tracker)) != NULL) {
319 			MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
320 			TAILQ_REMOVE(&tracker, t, link);
321 			for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
322 			    p2 = LIST_NEXT(p2, p_reapsibling)) {
323 				if (t->parent == reap &&
324 				    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
325 				    p2->p_reapsubtree != rk->rk_subtree)
326 					continue;
327 				if ((p2->p_treeflag & P_TREE_REAPER) != 0)
328 					reap_kill_sched(&tracker, p2);
329 				reap_kill_proc(td, p2, &ksi, rk, &error);
330 			}
331 			free(t, M_TEMP);
332 		}
333 	}
334 	PROC_LOCK(p);
335 	return (error);
336 }
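
/*
 * Illustrative userspace sketch (not part of this file): a reaper can
 * signal its whole subtree (or, with REAPER_KILL_CHILDREN, only its
 * direct children) in a single request.  Errors for individual
 * processes do not stop the walk; rk_killed reports how many were
 * signalled and rk_fpid the first pid that could not be signalled:
 *
 *	struct procctl_reaper_kill rk;
 *
 *	memset(&rk, 0, sizeof(rk));
 *	rk.rk_sig = SIGTERM;
 *	if (procctl(P_PID, (id_t)getpid(), PROC_REAP_KILL, &rk) == -1)
 *		warn("PROC_REAP_KILL");
 *	printf("signalled %u processes\n", rk.rk_killed);
 */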
337 
338 static int
339 trace_ctl(struct thread *td, struct proc *p, int state)
340 {
341 
342 	PROC_LOCK_ASSERT(p, MA_OWNED);
343 
344 	/*
345 	 * Ktrace changes p_traceflag from or to zero under the
346 	 * process lock, so the test does not need to acquire ktrace
347 	 * mutex.
348 	 */
349 	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
350 		return (EBUSY);
351 
352 	switch (state) {
353 	case PROC_TRACE_CTL_ENABLE:
354 		if (td->td_proc != p)
355 			return (EPERM);
356 		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
357 		break;
358 	case PROC_TRACE_CTL_DISABLE_EXEC:
359 		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
360 		break;
361 	case PROC_TRACE_CTL_DISABLE:
362 		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
363 			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
364 			    ("dangling P2_NOTRACE_EXEC"));
365 			if (td->td_proc != p)
366 				return (EPERM);
367 			p->p_flag2 &= ~P2_NOTRACE_EXEC;
368 		} else {
369 			p->p_flag2 |= P2_NOTRACE;
370 		}
371 		break;
372 	default:
373 		return (EINVAL);
374 	}
375 	return (0);
376 }
377 
378 static int
379 trace_status(struct thread *td, struct proc *p, int *data)
380 {
381 
382 	if ((p->p_flag2 & P2_NOTRACE) != 0) {
383 		KASSERT((p->p_flag & P_TRACED) == 0,
384 		    ("%d traced but tracing disabled", p->p_pid));
385 		*data = -1;
386 	} else if ((p->p_flag & P_TRACED) != 0) {
387 		*data = p->p_pptr->p_pid;
388 	} else {
389 		*data = 0;
390 	}
391 	return (0);
392 }
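
/*
 * Illustrative userspace sketch (not part of this file; target is a
 * hypothetical pid variable): a process can opt out of tracing
 * (ptrace(2) attach, ktrace(2)) for itself, and any process allowed to
 * see the target may query the state.  A status of -1 means tracing is
 * disabled, 0 means the process is not being traced, and a positive
 * value is the pid of the tracer (its current parent while traced):
 *
 *	int op = PROC_TRACE_CTL_DISABLE;
 *	int st;
 *
 *	if (procctl(P_PID, (id_t)getpid(), PROC_TRACE_CTL, &op) == -1)
 *		err(1, "PROC_TRACE_CTL");
 *	if (procctl(P_PID, (id_t)target, PROC_TRACE_STATUS, &st) == -1)
 *		err(1, "PROC_TRACE_STATUS");
 */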
393 
394 static int
395 trapcap_ctl(struct thread *td, struct proc *p, int state)
396 {
397 
398 	PROC_LOCK_ASSERT(p, MA_OWNED);
399 
400 	switch (state) {
401 	case PROC_TRAPCAP_CTL_ENABLE:
402 		p->p_flag2 |= P2_TRAPCAP;
403 		break;
404 	case PROC_TRAPCAP_CTL_DISABLE:
405 		p->p_flag2 &= ~P2_TRAPCAP;
406 		break;
407 	default:
408 		return (EINVAL);
409 	}
410 	return (0);
411 }
412 
413 static int
414 trapcap_status(struct thread *td, struct proc *p, int *data)
415 {
416 
417 	*data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
418 	    PROC_TRAPCAP_CTL_DISABLE;
419 	return (0);
420 }
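
/*
 * Illustrative userspace sketch (not part of this file): the P2_TRAPCAP
 * flag toggled above is consumed elsewhere in the kernel; with it
 * enabled, a Capsicum capability-mode violation is expected to deliver
 * SIGTRAP instead of merely failing with ENOTCAPABLE/ECAPMODE, which
 * helps when hunting down missing rights in a sandbox:
 *
 *	int op = PROC_TRAPCAP_CTL_ENABLE;
 *
 *	if (procctl(P_PID, (id_t)getpid(), PROC_TRAPCAP_CTL, &op) == -1)
 *		err(1, "PROC_TRAPCAP_CTL");
 */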
421 
422 static int
423 no_new_privs_ctl(struct thread *td, struct proc *p, int state)
424 {
425 
426 	PROC_LOCK_ASSERT(p, MA_OWNED);
427 
428 	if (state != PROC_NO_NEW_PRIVS_ENABLE)
429 		return (EINVAL);
430 	p->p_flag2 |= P2_NO_NEW_PRIVS;
431 	return (0);
432 }
433 
434 static int
435 no_new_privs_status(struct thread *td, struct proc *p, int *data)
436 {
437 
438 	*data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
439 	    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
440 	return (0);
441 }
442 
443 static int
444 protmax_ctl(struct thread *td, struct proc *p, int state)
445 {
446 	PROC_LOCK_ASSERT(p, MA_OWNED);
447 
448 	switch (state) {
449 	case PROC_PROTMAX_FORCE_ENABLE:
450 		p->p_flag2 &= ~P2_PROTMAX_DISABLE;
451 		p->p_flag2 |= P2_PROTMAX_ENABLE;
452 		break;
453 	case PROC_PROTMAX_FORCE_DISABLE:
454 		p->p_flag2 |= P2_PROTMAX_DISABLE;
455 		p->p_flag2 &= ~P2_PROTMAX_ENABLE;
456 		break;
457 	case PROC_PROTMAX_NOFORCE:
458 		p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
459 		break;
460 	default:
461 		return (EINVAL);
462 	}
463 	return (0);
464 }
465 
466 static int
467 protmax_status(struct thread *td, struct proc *p, int *data)
468 {
469 	int d;
470 
471 	switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
472 	case 0:
473 		d = PROC_PROTMAX_NOFORCE;
474 		break;
475 	case P2_PROTMAX_ENABLE:
476 		d = PROC_PROTMAX_FORCE_ENABLE;
477 		break;
478 	case P2_PROTMAX_DISABLE:
479 		d = PROC_PROTMAX_FORCE_DISABLE;
480 		break;
481 	}
482 	if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
483 		d |= PROC_PROTMAX_ACTIVE;
484 	*data = d;
485 	return (0);
486 }
487 
488 static int
489 aslr_ctl(struct thread *td, struct proc *p, int state)
490 {
491 
492 	PROC_LOCK_ASSERT(p, MA_OWNED);
493 
494 	switch (state) {
495 	case PROC_ASLR_FORCE_ENABLE:
496 		p->p_flag2 &= ~P2_ASLR_DISABLE;
497 		p->p_flag2 |= P2_ASLR_ENABLE;
498 		break;
499 	case PROC_ASLR_FORCE_DISABLE:
500 		p->p_flag2 |= P2_ASLR_DISABLE;
501 		p->p_flag2 &= ~P2_ASLR_ENABLE;
502 		break;
503 	case PROC_ASLR_NOFORCE:
504 		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
505 		break;
506 	default:
507 		return (EINVAL);
508 	}
509 	return (0);
510 }
511 
512 static int
513 aslr_status(struct thread *td, struct proc *p, int *data)
514 {
515 	struct vmspace *vm;
516 	int d;
517 
518 	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
519 	case 0:
520 		d = PROC_ASLR_NOFORCE;
521 		break;
522 	case P2_ASLR_ENABLE:
523 		d = PROC_ASLR_FORCE_ENABLE;
524 		break;
525 	case P2_ASLR_DISABLE:
526 		d = PROC_ASLR_FORCE_DISABLE;
527 		break;
528 	}
529 	if ((p->p_flag & P_WEXIT) == 0) {
530 		_PHOLD(p);
531 		PROC_UNLOCK(p);
532 		vm = vmspace_acquire_ref(p);
533 		if (vm != NULL && (vm->vm_map.flags & MAP_ASLR) != 0)
534 			d |= PROC_ASLR_ACTIVE;
535 		if (vm != NULL)
536 			vmspace_free(vm);
537 		PROC_LOCK(p);
538 		_PRELE(p);
539 	}
540 	*data = d;
541 	return (0);
542 }
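
/*
 * Illustrative userspace sketch (not part of this file): the ASLR
 * override recorded here is consulted when a new image is activated
 * (outside this file), so forcing it on or off affects subsequently
 * exec'ed images; PROC_ASLR_STATUS additionally reports, via
 * PROC_ASLR_ACTIVE, whether the current address space was actually
 * randomized:
 *
 *	int op = PROC_ASLR_FORCE_ENABLE;
 *	int st;
 *
 *	if (procctl(P_PID, (id_t)getpid(), PROC_ASLR_CTL, &op) == -1)
 *		err(1, "PROC_ASLR_CTL");
 *	if (procctl(P_PID, (id_t)getpid(), PROC_ASLR_STATUS, &st) == 0 &&
 *	    (st & PROC_ASLR_ACTIVE) != 0)
 *		printf("ASLR active in the current address space\n");
 */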
543 
544 static int
545 stackgap_ctl(struct thread *td, struct proc *p, int state)
546 {
547 	PROC_LOCK_ASSERT(p, MA_OWNED);
548 
549 	if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
550 	    PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
551 		return (EINVAL);
552 	switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
553 	case PROC_STACKGAP_ENABLE:
554 		if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
555 			return (EINVAL);
556 		break;
557 	case PROC_STACKGAP_DISABLE:
558 		p->p_flag2 |= P2_STKGAP_DISABLE;
559 		break;
560 	case 0:
561 		break;
562 	default:
563 		return (EINVAL);
564 	}
565 	switch (state & (PROC_STACKGAP_ENABLE_EXEC |
566 	    PROC_STACKGAP_DISABLE_EXEC)) {
567 	case PROC_STACKGAP_ENABLE_EXEC:
568 		p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
569 		break;
570 	case PROC_STACKGAP_DISABLE_EXEC:
571 		p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
572 		break;
573 	case 0:
574 		break;
575 	default:
576 		return (EINVAL);
577 	}
578 	return (0);
579 }
580 
581 static int
582 stackgap_status(struct thread *td, struct proc *p, int *data)
583 {
584 	PROC_LOCK_ASSERT(p, MA_OWNED);
585 
586 	*data = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
587 	    PROC_STACKGAP_ENABLE;
588 	*data |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
589 	    PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
590 	return (0);
591 }
592 
593 #ifndef _SYS_SYSPROTO_H_
594 struct procctl_args {
595 	idtype_t idtype;
596 	id_t	id;
597 	int	com;
598 	void	*data;
599 };
600 #endif
601 /* ARGSUSED */
602 int
603 sys_procctl(struct thread *td, struct procctl_args *uap)
604 {
605 	void *data;
606 	union {
607 		struct procctl_reaper_status rs;
608 		struct procctl_reaper_pids rp;
609 		struct procctl_reaper_kill rk;
610 	} x;
611 	int error, error1, flags, signum;
612 
613 	if (uap->com >= PROC_PROCCTL_MD_MIN)
614 		return (cpu_procctl(td, uap->idtype, uap->id,
615 		    uap->com, uap->data));
616 
617 	switch (uap->com) {
618 	case PROC_ASLR_CTL:
619 	case PROC_PROTMAX_CTL:
620 	case PROC_SPROTECT:
621 	case PROC_STACKGAP_CTL:
622 	case PROC_TRACE_CTL:
623 	case PROC_TRAPCAP_CTL:
624 	case PROC_NO_NEW_PRIVS_CTL:
625 		error = copyin(uap->data, &flags, sizeof(flags));
626 		if (error != 0)
627 			return (error);
628 		data = &flags;
629 		break;
630 	case PROC_REAP_ACQUIRE:
631 	case PROC_REAP_RELEASE:
632 		if (uap->data != NULL)
633 			return (EINVAL);
634 		data = NULL;
635 		break;
636 	case PROC_REAP_STATUS:
637 		data = &x.rs;
638 		break;
639 	case PROC_REAP_GETPIDS:
640 		error = copyin(uap->data, &x.rp, sizeof(x.rp));
641 		if (error != 0)
642 			return (error);
643 		data = &x.rp;
644 		break;
645 	case PROC_REAP_KILL:
646 		error = copyin(uap->data, &x.rk, sizeof(x.rk));
647 		if (error != 0)
648 			return (error);
649 		data = &x.rk;
650 		break;
651 	case PROC_ASLR_STATUS:
652 	case PROC_PROTMAX_STATUS:
653 	case PROC_STACKGAP_STATUS:
654 	case PROC_TRACE_STATUS:
655 	case PROC_TRAPCAP_STATUS:
656 	case PROC_NO_NEW_PRIVS_STATUS:
657 		data = &flags;
658 		break;
659 	case PROC_PDEATHSIG_CTL:
660 		error = copyin(uap->data, &signum, sizeof(signum));
661 		if (error != 0)
662 			return (error);
663 		data = &signum;
664 		break;
665 	case PROC_PDEATHSIG_STATUS:
666 		data = &signum;
667 		break;
668 	default:
669 		return (EINVAL);
670 	}
671 	error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
672 	switch (uap->com) {
673 	case PROC_REAP_STATUS:
674 		if (error == 0)
675 			error = copyout(&x.rs, uap->data, sizeof(x.rs));
676 		break;
677 	case PROC_REAP_KILL:
678 		error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
679 		if (error == 0)
680 			error = error1;
681 		break;
682 	case PROC_ASLR_STATUS:
683 	case PROC_PROTMAX_STATUS:
684 	case PROC_STACKGAP_STATUS:
685 	case PROC_TRACE_STATUS:
686 	case PROC_TRAPCAP_STATUS:
687 	case PROC_NO_NEW_PRIVS_STATUS:
688 		if (error == 0)
689 			error = copyout(&flags, uap->data, sizeof(flags));
690 		break;
691 	case PROC_PDEATHSIG_STATUS:
692 		if (error == 0)
693 			error = copyout(&signum, uap->data, sizeof(signum));
694 		break;
695 	}
696 	return (error);
697 }
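
/*
 * Illustrative userspace sketch (not part of this file):
 * PROC_PDEATHSIG_CTL, handled directly in kern_procctl() below, may
 * only target the calling process (id 0 or the caller's own pid); the
 * recorded signal is delivered to the caller when its parent exits
 * (the delivery itself happens outside this file):
 *
 *	int sig = SIGTERM;
 *	int cur;
 *
 *	if (procctl(P_PID, 0, PROC_PDEATHSIG_CTL, &sig) == -1)
 *		err(1, "PROC_PDEATHSIG_CTL");
 *	if (procctl(P_PID, 0, PROC_PDEATHSIG_STATUS, &cur) == 0)
 *		printf("parent-death signal: %d\n", cur);
 */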
698 
699 static int
700 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
701 {
702 
703 	PROC_LOCK_ASSERT(p, MA_OWNED);
704 	switch (com) {
705 	case PROC_ASLR_CTL:
706 		return (aslr_ctl(td, p, *(int *)data));
707 	case PROC_ASLR_STATUS:
708 		return (aslr_status(td, p, data));
709 	case PROC_SPROTECT:
710 		return (protect_set(td, p, *(int *)data));
711 	case PROC_PROTMAX_CTL:
712 		return (protmax_ctl(td, p, *(int *)data));
713 	case PROC_PROTMAX_STATUS:
714 		return (protmax_status(td, p, data));
715 	case PROC_STACKGAP_CTL:
716 		return (stackgap_ctl(td, p, *(int *)data));
717 	case PROC_STACKGAP_STATUS:
718 		return (stackgap_status(td, p, data));
719 	case PROC_REAP_ACQUIRE:
720 		return (reap_acquire(td, p));
721 	case PROC_REAP_RELEASE:
722 		return (reap_release(td, p));
723 	case PROC_REAP_STATUS:
724 		return (reap_status(td, p, data));
725 	case PROC_REAP_GETPIDS:
726 		return (reap_getpids(td, p, data));
727 	case PROC_REAP_KILL:
728 		return (reap_kill(td, p, data));
729 	case PROC_TRACE_CTL:
730 		return (trace_ctl(td, p, *(int *)data));
731 	case PROC_TRACE_STATUS:
732 		return (trace_status(td, p, data));
733 	case PROC_TRAPCAP_CTL:
734 		return (trapcap_ctl(td, p, *(int *)data));
735 	case PROC_TRAPCAP_STATUS:
736 		return (trapcap_status(td, p, data));
737 	case PROC_NO_NEW_PRIVS_CTL:
738 		return (no_new_privs_ctl(td, p, *(int *)data));
739 	case PROC_NO_NEW_PRIVS_STATUS:
740 		return (no_new_privs_status(td, p, data));
741 	default:
742 		return (EINVAL);
743 	}
744 }
745 
746 int
747 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
748 {
749 	struct pgrp *pg;
750 	struct proc *p;
751 	int error, first_error, ok;
752 	int signum;
753 	bool tree_locked;
754 
755 	switch (com) {
756 	case PROC_ASLR_CTL:
757 	case PROC_ASLR_STATUS:
758 	case PROC_PROTMAX_CTL:
759 	case PROC_PROTMAX_STATUS:
760 	case PROC_REAP_ACQUIRE:
761 	case PROC_REAP_RELEASE:
762 	case PROC_REAP_STATUS:
763 	case PROC_REAP_GETPIDS:
764 	case PROC_REAP_KILL:
765 	case PROC_STACKGAP_CTL:
766 	case PROC_STACKGAP_STATUS:
767 	case PROC_TRACE_STATUS:
768 	case PROC_TRAPCAP_STATUS:
769 	case PROC_PDEATHSIG_CTL:
770 	case PROC_PDEATHSIG_STATUS:
771 	case PROC_NO_NEW_PRIVS_CTL:
772 	case PROC_NO_NEW_PRIVS_STATUS:
773 		if (idtype != P_PID)
774 			return (EINVAL);
775 	}
776 
777 	switch (com) {
778 	case PROC_PDEATHSIG_CTL:
779 		signum = *(int *)data;
780 		p = td->td_proc;
781 		if ((id != 0 && id != p->p_pid) ||
782 		    (signum != 0 && !_SIG_VALID(signum)))
783 			return (EINVAL);
784 		PROC_LOCK(p);
785 		p->p_pdeathsig = signum;
786 		PROC_UNLOCK(p);
787 		return (0);
788 	case PROC_PDEATHSIG_STATUS:
789 		p = td->td_proc;
790 		if (id != 0 && id != p->p_pid)
791 			return (EINVAL);
792 		PROC_LOCK(p);
793 		*(int *)data = p->p_pdeathsig;
794 		PROC_UNLOCK(p);
795 		return (0);
796 	}
797 
798 	switch (com) {
799 	case PROC_SPROTECT:
800 	case PROC_REAP_STATUS:
801 	case PROC_REAP_GETPIDS:
802 	case PROC_REAP_KILL:
803 	case PROC_TRACE_CTL:
804 	case PROC_TRAPCAP_CTL:
805 	case PROC_NO_NEW_PRIVS_CTL:
806 		sx_slock(&proctree_lock);
807 		tree_locked = true;
808 		break;
809 	case PROC_REAP_ACQUIRE:
810 	case PROC_REAP_RELEASE:
811 		sx_xlock(&proctree_lock);
812 		tree_locked = true;
813 		break;
814 	case PROC_ASLR_CTL:
815 	case PROC_ASLR_STATUS:
816 	case PROC_PROTMAX_CTL:
817 	case PROC_PROTMAX_STATUS:
818 	case PROC_STACKGAP_CTL:
819 	case PROC_STACKGAP_STATUS:
820 	case PROC_TRACE_STATUS:
821 	case PROC_TRAPCAP_STATUS:
822 	case PROC_NO_NEW_PRIVS_STATUS:
823 		tree_locked = false;
824 		break;
825 	default:
826 		return (EINVAL);
827 	}
828 
829 	switch (idtype) {
830 	case P_PID:
831 		p = pfind(id);
832 		if (p == NULL) {
833 			error = ESRCH;
834 			break;
835 		}
836 		error = p_cansee(td, p);
837 		if (error == 0)
838 			error = kern_procctl_single(td, p, com, data);
839 		PROC_UNLOCK(p);
840 		break;
841 	case P_PGID:
842 		/*
843 		 * Attempt to apply the operation to all members of the
844 		 * group.  Ignore processes in the group that can't be
845 		 * seen.  Ignore errors so long as at least one process is
846 		 * able to complete the request successfully.
847 		 */
848 		pg = pgfind(id);
849 		if (pg == NULL) {
850 			error = ESRCH;
851 			break;
852 		}
853 		PGRP_UNLOCK(pg);
854 		ok = 0;
855 		first_error = 0;
856 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
857 			PROC_LOCK(p);
858 			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
859 				PROC_UNLOCK(p);
860 				continue;
861 			}
862 			error = kern_procctl_single(td, p, com, data);
863 			PROC_UNLOCK(p);
864 			if (error == 0)
865 				ok = 1;
866 			else if (first_error == 0)
867 				first_error = error;
868 		}
869 		if (ok)
870 			error = 0;
871 		else if (first_error != 0)
872 			error = first_error;
873 		else
874 			/*
875 			 * Was not able to see any processes in the
876 			 * process group.
877 			 */
878 			error = ESRCH;
879 		break;
880 	default:
881 		error = EINVAL;
882 		break;
883 	}
884 	if (tree_locked)
885 		sx_unlock(&proctree_lock);
886 	return (error);
887 }
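
/*
 * Illustrative userspace sketch (not part of this file): the P_PGID
 * branch above applies a request to every visible member of a process
 * group and succeeds if at least one member accepted it, e.g.:
 *
 *	int op = PROC_TRACE_CTL_DISABLE;
 *
 *	if (procctl(P_PGID, (id_t)getpgrp(), PROC_TRACE_CTL, &op) == -1)
 *		err(1, "PROC_TRACE_CTL");
 */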
888