xref: /freebsd/sys/kern/kern_procctl.c (revision ec0ea6efa1ad229d75c394c1a9b9cac33af2b1d3)
1 /*-
2  * Copyright (c) 2014 John Baldwin
3  * Copyright (c) 2014, 2016 The FreeBSD Foundation
4  *
5  * Portions of this software were developed by Konstantin Belousov
6  * under sponsorship from the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/capsicum.h>
36 #include <sys/lock.h>
37 #include <sys/mman.h>
38 #include <sys/mutex.h>
39 #include <sys/priv.h>
40 #include <sys/proc.h>
41 #include <sys/procctl.h>
42 #include <sys/sx.h>
43 #include <sys/syscallsubr.h>
44 #include <sys/sysproto.h>
45 #include <sys/wait.h>
46 
47 #include <vm/vm.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_extern.h>
51 
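/*
 * Apply a PROC_SPROTECT request to a single process: set or clear
 * P_PROTECTED (and P2_INHERIT_PROTECTED when PPROT_INHERIT is given).
 * Returns 0 without changing anything for system processes or when the
 * caller lacks scheduling privilege over p, and 1 otherwise.
 */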
52 static int
53 protect_setchild(struct thread *td, struct proc *p, int flags)
54 {
55 
56 	PROC_LOCK_ASSERT(p, MA_OWNED);
57 	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
58 		return (0);
59 	if (flags & PPROT_SET) {
60 		p->p_flag |= P_PROTECTED;
61 		if (flags & PPROT_INHERIT)
62 			p->p_flag2 |= P2_INHERIT_PROTECTED;
63 	} else {
64 		p->p_flag &= ~P_PROTECTED;
65 		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
66 	}
67 	return (1);
68 }
69 
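/*
 * Apply protect_setchild() to top and all of its descendants using an
 * iterative pre-order walk of the process tree.  Returns non-zero if
 * at least one process was updated.
 */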
70 static int
71 protect_setchildren(struct thread *td, struct proc *top, int flags)
72 {
73 	struct proc *p;
74 	int ret;
75 
76 	p = top;
77 	ret = 0;
78 	sx_assert(&proctree_lock, SX_LOCKED);
79 	for (;;) {
80 		ret |= protect_setchild(td, p, flags);
81 		PROC_UNLOCK(p);
82 		/*
83 		 * If this process has children, descend to them next,
84 		 * otherwise visit its siblings, and when this level is done,
85 		 * walk back up the tree (but not past top).
86 		 */
87 		if (!LIST_EMPTY(&p->p_children))
88 			p = LIST_FIRST(&p->p_children);
89 		else for (;;) {
90 			if (p == top) {
91 				PROC_LOCK(p);
92 				return (ret);
93 			}
94 			if (LIST_NEXT(p, p_sibling)) {
95 				p = LIST_NEXT(p, p_sibling);
96 				break;
97 			}
98 			p = p->p_pptr;
99 		}
100 		PROC_LOCK(p);
101 	}
102 }
103 
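/*
 * PROC_SPROTECT handler: validate the operation and flag bits, check
 * for PRIV_VM_MADV_PROTECT, and apply the protection to the target
 * process or, with PPROT_DESCEND, to its whole subtree.  An
 * illustrative userland call (pid is a hypothetical target) would be:
 *
 *	int flags = PPROT_SET | PPROT_DESCEND;
 *	procctl(P_PID, pid, PROC_SPROTECT, &flags);
 */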
104 static int
105 protect_set(struct thread *td, struct proc *p, void *data)
106 {
107 	int error, flags, ret;
108 
109 	flags = *(int *)data;
110 	switch (PPROT_OP(flags)) {
111 	case PPROT_SET:
112 	case PPROT_CLEAR:
113 		break;
114 	default:
115 		return (EINVAL);
116 	}
117 
118 	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
119 		return (EINVAL);
120 
121 	error = priv_check(td, PRIV_VM_MADV_PROTECT);
122 	if (error)
123 		return (error);
124 
125 	if (flags & PPROT_DESCEND)
126 		ret = protect_setchildren(td, p, flags);
127 	else
128 		ret = protect_setchild(td, p, flags);
129 	if (ret == 0)
130 		return (EPERM);
131 	return (0);
132 }
133 
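/*
 * PROC_REAP_ACQUIRE: mark the calling process as the reaper for its
 * future descendants.  Only the process itself may acquire reaper
 * status, and only if it is not a reaper already.
 */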
134 static int
135 reap_acquire(struct thread *td, struct proc *p, void *data __unused)
136 {
137 
138 	sx_assert(&proctree_lock, SX_XLOCKED);
139 	if (p != td->td_proc)
140 		return (EPERM);
141 	if ((p->p_treeflag & P_TREE_REAPER) != 0)
142 		return (EBUSY);
143 	p->p_treeflag |= P_TREE_REAPER;
144 	/*
145 	 * We do not reattach existing children and the whole tree
146 	 * under them to us, since their p->p_reaper is already set.
147 	 */
148 	return (0);
149 }
150 
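/*
 * PROC_REAP_RELEASE: drop reaper status.  Existing processes on the
 * reap list are handed off via reaper_abandon_children().  The real
 * init process can never release its reaper role.
 */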
151 static int
152 reap_release(struct thread *td, struct proc *p, void *data __unused)
153 {
154 
155 	sx_assert(&proctree_lock, SX_XLOCKED);
156 	if (p != td->td_proc)
157 		return (EPERM);
158 	if (p == initproc)
159 		return (EINVAL);
160 	if ((p->p_treeflag & P_TREE_REAPER) == 0)
161 		return (EINVAL);
162 	reaper_abandon_children(p, false);
163 	return (0);
164 }
165 
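/*
 * PROC_REAP_STATUS: describe the reaper owning p (or p itself when it
 * is a reaper), counting direct children and all descendants currently
 * on the reap list.
 */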
166 static int
167 reap_status(struct thread *td, struct proc *p, void *data)
168 {
169 	struct proc *reap, *p2, *first_p;
170 	struct procctl_reaper_status *rs;
171 
172 	rs = data;
173 	sx_assert(&proctree_lock, SX_LOCKED);
174 	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
175 		reap = p->p_reaper;
176 	} else {
177 		reap = p;
178 		rs->rs_flags |= REAPER_STATUS_OWNED;
179 	}
180 	if (reap == initproc)
181 		rs->rs_flags |= REAPER_STATUS_REALINIT;
182 	rs->rs_reaper = reap->p_pid;
183 	rs->rs_descendants = 0;
184 	rs->rs_children = 0;
185 	if (!LIST_EMPTY(&reap->p_reaplist)) {
186 		first_p = LIST_FIRST(&reap->p_children);
187 		if (first_p == NULL)
188 			first_p = LIST_FIRST(&reap->p_reaplist);
189 		rs->rs_pid = first_p->p_pid;
190 		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
191 			if (proc_realparent(p2) == reap)
192 				rs->rs_children++;
193 			rs->rs_descendants++;
194 		}
195 	} else {
196 		rs->rs_pid = -1;
197 	}
198 	return (0);
199 }
200 
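/*
 * PROC_REAP_GETPIDS: copy out one procctl_reaper_pidinfo entry per
 * process on the reap list, up to the caller-supplied rp_count.  The
 * proctree lock is dropped around the M_WAITOK allocation and the
 * copyout, so the returned snapshot may be slightly stale.
 */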
201 static int
202 reap_getpids(struct thread *td, struct proc *p, void *data)
203 {
204 	struct proc *reap, *p2;
205 	struct procctl_reaper_pidinfo *pi, *pip;
206 	struct procctl_reaper_pids *rp;
207 	u_int i, n;
208 	int error;
209 
210 	rp = data;
211 	sx_assert(&proctree_lock, SX_LOCKED);
212 	PROC_UNLOCK(p);
213 	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
214 	n = i = 0;
215 	error = 0;
216 	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
217 		n++;
218 	sx_unlock(&proctree_lock);
219 	if (rp->rp_count < n)
220 		n = rp->rp_count;
221 	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
222 	sx_slock(&proctree_lock);
223 	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
224 		if (i == n)
225 			break;
226 		pip = &pi[i];
227 		bzero(pip, sizeof(*pip));
228 		pip->pi_pid = p2->p_pid;
229 		pip->pi_subtree = p2->p_reapsubtree;
230 		pip->pi_flags = REAPER_PIDINFO_VALID;
231 		if (proc_realparent(p2) == reap)
232 			pip->pi_flags |= REAPER_PIDINFO_CHILD;
233 		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
234 			pip->pi_flags |= REAPER_PIDINFO_REAPER;
235 		i++;
236 	}
237 	sx_sunlock(&proctree_lock);
238 	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
239 	free(pi, M_TEMP);
240 	sx_slock(&proctree_lock);
241 	PROC_LOCK(p);
242 	return (error);
243 }
244 
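/*
 * Deliver the requested signal to a single process on behalf of
 * PROC_REAP_KILL.  On success bump rk_killed and clear the sticky
 * error; on the first failure record the offending pid in rk_fpid.
 */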
245 static void
246 reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
247     struct procctl_reaper_kill *rk, int *error)
248 {
249 	int error1;
250 
251 	PROC_LOCK(p2);
252 	error1 = p_cansignal(td, p2, rk->rk_sig);
253 	if (error1 == 0) {
254 		pksignal(p2, rk->rk_sig, ksi);
255 		rk->rk_killed++;
256 		*error = error1;
257 	} else if (*error == ESRCH) {
258 		rk->rk_fpid = p2->p_pid;
259 		*error = error1;
260 	}
261 	PROC_UNLOCK(p2);
262 }
263 
264 struct reap_kill_tracker {
265 	struct proc *parent;
266 	TAILQ_ENTRY(reap_kill_tracker) link;
267 };
268 
269 TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
270 
271 static void
272 reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
273 {
274 	struct reap_kill_tracker *t;
275 
276 	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
277 	t->parent = p2;
278 	TAILQ_INSERT_TAIL(tracker, t, link);
279 }
280 
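/*
 * PROC_REAP_KILL: signal either the direct children of the reaper
 * (REAPER_KILL_CHILDREN) or every descendant on its reap lists,
 * optionally limited to one subtree (REAPER_KILL_SUBTREE).  Nested
 * reapers are walked iteratively through a tracker queue.
 */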
281 static int
282 reap_kill(struct thread *td, struct proc *p, void *data)
283 {
284 	struct proc *reap, *p2;
285 	ksiginfo_t ksi;
286 	struct reap_kill_tracker_head tracker;
287 	struct reap_kill_tracker *t;
288 	struct procctl_reaper_kill *rk;
289 	int error;
290 
291 	rk = data;
292 	sx_assert(&proctree_lock, SX_LOCKED);
293 	if (IN_CAPABILITY_MODE(td))
294 		return (ECAPMODE);
295 	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
296 	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
297 	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
298 	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
299 	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
300 		return (EINVAL);
301 	PROC_UNLOCK(p);
302 	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
303 	ksiginfo_init(&ksi);
304 	ksi.ksi_signo = rk->rk_sig;
305 	ksi.ksi_code = SI_USER;
306 	ksi.ksi_pid = td->td_proc->p_pid;
307 	ksi.ksi_uid = td->td_ucred->cr_ruid;
308 	error = ESRCH;
309 	rk->rk_killed = 0;
310 	rk->rk_fpid = -1;
311 	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
312 		for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
313 		    p2 = LIST_NEXT(p2, p_sibling)) {
314 			reap_kill_proc(td, p2, &ksi, rk, &error);
315 			/*
316 			 * Do not end the loop on error; signal
317 			 * everything we can.
318 			 */
319 		}
320 	} else {
321 		TAILQ_INIT(&tracker);
322 		reap_kill_sched(&tracker, reap);
323 		while ((t = TAILQ_FIRST(&tracker)) != NULL) {
324 			MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
325 			TAILQ_REMOVE(&tracker, t, link);
326 			for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
327 			    p2 = LIST_NEXT(p2, p_reapsibling)) {
328 				if (t->parent == reap &&
329 				    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
330 				    p2->p_reapsubtree != rk->rk_subtree)
331 					continue;
332 				if ((p2->p_treeflag & P_TREE_REAPER) != 0)
333 					reap_kill_sched(&tracker, p2);
334 				reap_kill_proc(td, p2, &ksi, rk, &error);
335 			}
336 			free(t, M_TEMP);
337 		}
338 	}
339 	PROC_LOCK(p);
340 	return (error);
341 }
342 
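/*
 * PROC_TRACE_CTL: manipulate the P2_NOTRACE and P2_NOTRACE_EXEC flags
 * that control whether the process may be traced.  Nothing may be
 * changed while the process is already being traced or ktraced, and
 * re-enabling tracing is only allowed for the process itself.
 */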
343 static int
344 trace_ctl(struct thread *td, struct proc *p, void *data)
345 {
346 	int state;
347 
348 	PROC_LOCK_ASSERT(p, MA_OWNED);
349 	state = *(int *)data;
350 
351 	/*
352 	 * Ktrace changes p_traceflag from or to zero under the
353 	 * process lock, so the test does not need to acquire the
354 	 * ktrace mutex.
355 	 */
356 	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
357 		return (EBUSY);
358 
359 	switch (state) {
360 	case PROC_TRACE_CTL_ENABLE:
361 		if (td->td_proc != p)
362 			return (EPERM);
363 		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
364 		break;
365 	case PROC_TRACE_CTL_DISABLE_EXEC:
366 		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
367 		break;
368 	case PROC_TRACE_CTL_DISABLE:
369 		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
370 			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
371 			    ("dangling P2_NOTRACE_EXEC"));
372 			if (td->td_proc != p)
373 				return (EPERM);
374 			p->p_flag2 &= ~P2_NOTRACE_EXEC;
375 		} else {
376 			p->p_flag2 |= P2_NOTRACE;
377 		}
378 		break;
379 	default:
380 		return (EINVAL);
381 	}
382 	return (0);
383 }
384 
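/*
 * PROC_TRACE_STATUS: report -1 when tracing is disabled, the pid of
 * the tracing process (the current parent) when the process is being
 * traced, and 0 otherwise.
 */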
385 static int
386 trace_status(struct thread *td, struct proc *p, void *data)
387 {
388 	int *status;
389 
390 	status = data;
391 	if ((p->p_flag2 & P2_NOTRACE) != 0) {
392 		KASSERT((p->p_flag & P_TRACED) == 0,
393 		    ("%d traced but tracing disabled", p->p_pid));
394 		*status = -1;
395 	} else if ((p->p_flag & P_TRACED) != 0) {
396 		*status = p->p_pptr->p_pid;
397 	} else {
398 		*status = 0;
399 	}
400 	return (0);
401 }
402 
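/*
 * PROC_TRAPCAP_CTL: set or clear P2_TRAPCAP, which makes capability
 * violations in capability mode deliver SIGTRAP instead of returning
 * ENOTCAPABLE or ECAPMODE.
 */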
403 static int
404 trapcap_ctl(struct thread *td, struct proc *p, void *data)
405 {
406 	int state;
407 
408 	PROC_LOCK_ASSERT(p, MA_OWNED);
409 	state = *(int *)data;
410 
411 	switch (state) {
412 	case PROC_TRAPCAP_CTL_ENABLE:
413 		p->p_flag2 |= P2_TRAPCAP;
414 		break;
415 	case PROC_TRAPCAP_CTL_DISABLE:
416 		p->p_flag2 &= ~P2_TRAPCAP;
417 		break;
418 	default:
419 		return (EINVAL);
420 	}
421 	return (0);
422 }
423 
424 static int
425 trapcap_status(struct thread *td, struct proc *p, void *data)
426 {
427 	int *status;
428 
429 	status = data;
430 	*status = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
431 	    PROC_TRAPCAP_CTL_DISABLE;
432 	return (0);
433 }
434 
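/*
 * PROC_NO_NEW_PRIVS_CTL: a one-way switch; P2_NO_NEW_PRIVS can only be
 * enabled, never cleared, so any value other than ENABLE is rejected.
 */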
435 static int
436 no_new_privs_ctl(struct thread *td, struct proc *p, void *data)
437 {
438 	int state;
439 
440 	PROC_LOCK_ASSERT(p, MA_OWNED);
441 	state = *(int *)data;
442 
443 	if (state != PROC_NO_NEW_PRIVS_ENABLE)
444 		return (EINVAL);
445 	p->p_flag2 |= P2_NO_NEW_PRIVS;
446 	return (0);
447 }
448 
449 static int
450 no_new_privs_status(struct thread *td, struct proc *p, void *data)
451 {
452 
453 	*(int *)data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
454 	    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
455 	return (0);
456 }
457 
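/*
 * PROC_PROTMAX_CTL: choose whether the implicit PROT_MAX limit for new
 * mappings is forced on, forced off, or left to the global default for
 * images activated by later execs.
 */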
458 static int
459 protmax_ctl(struct thread *td, struct proc *p, void *data)
460 {
461 	int state;
462 
463 	PROC_LOCK_ASSERT(p, MA_OWNED);
464 	state = *(int *)data;
465 
466 	switch (state) {
467 	case PROC_PROTMAX_FORCE_ENABLE:
468 		p->p_flag2 &= ~P2_PROTMAX_DISABLE;
469 		p->p_flag2 |= P2_PROTMAX_ENABLE;
470 		break;
471 	case PROC_PROTMAX_FORCE_DISABLE:
472 		p->p_flag2 |= P2_PROTMAX_DISABLE;
473 		p->p_flag2 &= ~P2_PROTMAX_ENABLE;
474 		break;
475 	case PROC_PROTMAX_NOFORCE:
476 		p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
477 		break;
478 	default:
479 		return (EINVAL);
480 	}
481 	return (0);
482 }
483 
484 static int
485 protmax_status(struct thread *td, struct proc *p, void *data)
486 {
487 	int d;
488 
489 	switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
490 	case 0:
491 		d = PROC_PROTMAX_NOFORCE;
492 		break;
493 	case P2_PROTMAX_ENABLE:
494 		d = PROC_PROTMAX_FORCE_ENABLE;
495 		break;
496 	case P2_PROTMAX_DISABLE:
497 		d = PROC_PROTMAX_FORCE_DISABLE;
498 		break;
499 	}
500 	if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
501 		d |= PROC_PROTMAX_ACTIVE;
502 	*(int *)data = d;
503 	return (0);
504 }
505 
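/*
 * PROC_ASLR_CTL: force address space layout randomization on or off
 * for images activated by subsequent execs, or revert to the global
 * ASLR policy.
 */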
506 static int
507 aslr_ctl(struct thread *td, struct proc *p, void *data)
508 {
509 	int state;
510 
511 	PROC_LOCK_ASSERT(p, MA_OWNED);
512 	state = *(int *)data;
513 
514 	switch (state) {
515 	case PROC_ASLR_FORCE_ENABLE:
516 		p->p_flag2 &= ~P2_ASLR_DISABLE;
517 		p->p_flag2 |= P2_ASLR_ENABLE;
518 		break;
519 	case PROC_ASLR_FORCE_DISABLE:
520 		p->p_flag2 |= P2_ASLR_DISABLE;
521 		p->p_flag2 &= ~P2_ASLR_ENABLE;
522 		break;
523 	case PROC_ASLR_NOFORCE:
524 		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
525 		break;
526 	default:
527 		return (EINVAL);
528 	}
529 	return (0);
530 }
531 
532 static int
533 aslr_status(struct thread *td, struct proc *p, void *data)
534 {
535 	struct vmspace *vm;
536 	int d;
537 
538 	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
539 	case 0:
540 		d = PROC_ASLR_NOFORCE;
541 		break;
542 	case P2_ASLR_ENABLE:
543 		d = PROC_ASLR_FORCE_ENABLE;
544 		break;
545 	case P2_ASLR_DISABLE:
546 		d = PROC_ASLR_FORCE_DISABLE;
547 		break;
548 	}
549 	if ((p->p_flag & P_WEXIT) == 0) {
550 		_PHOLD(p);
551 		PROC_UNLOCK(p);
552 		vm = vmspace_acquire_ref(p);
553 		if (vm != NULL) {
554 			if ((vm->vm_map.flags & MAP_ASLR) != 0)
555 				d |= PROC_ASLR_ACTIVE;
556 			vmspace_free(vm);
557 		}
558 		PROC_LOCK(p);
559 		_PRELE(p);
560 	}
561 	*(int *)data = d;
562 	return (0);
563 }
564 
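/*
 * PROC_STACKGAP_CTL: control the stack gap for the current address
 * space and, with the *_EXEC variants, for images started by later
 * execs.  Once the gap has been disabled it cannot be re-enabled.
 */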
565 static int
566 stackgap_ctl(struct thread *td, struct proc *p, void *data)
567 {
568 	int state;
569 
570 	PROC_LOCK_ASSERT(p, MA_OWNED);
571 	state = *(int *)data;
572 
573 	if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
574 	    PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
575 		return (EINVAL);
576 	switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
577 	case PROC_STACKGAP_ENABLE:
578 		if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
579 			return (EINVAL);
580 		break;
581 	case PROC_STACKGAP_DISABLE:
582 		p->p_flag2 |= P2_STKGAP_DISABLE;
583 		break;
584 	case 0:
585 		break;
586 	default:
587 		return (EINVAL);
588 	}
589 	switch (state & (PROC_STACKGAP_ENABLE_EXEC |
590 	    PROC_STACKGAP_DISABLE_EXEC)) {
591 	case PROC_STACKGAP_ENABLE_EXEC:
592 		p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
593 		break;
594 	case PROC_STACKGAP_DISABLE_EXEC:
595 		p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
596 		break;
597 	case 0:
598 		break;
599 	default:
600 		return (EINVAL);
601 	}
602 	return (0);
603 }
604 
605 static int
606 stackgap_status(struct thread *td, struct proc *p, void *data)
607 {
608 	int d;
609 
610 	PROC_LOCK_ASSERT(p, MA_OWNED);
611 
612 	d = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
613 	    PROC_STACKGAP_ENABLE;
614 	d |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
615 	    PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
616 	*(int *)data = d;
617 	return (0);
618 }
619 
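/*
 * PROC_WXMAP_CTL: PROC_WX_MAPPINGS_PERMIT drops the W^X restriction
 * (MAP_WXORX) from the current vmspace, while
 * PROC_WX_MAPPINGS_DISALLOW_EXEC requests that W^X be enforced for
 * images activated by later execs.
 */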
620 static int
621 wxmap_ctl(struct thread *td, struct proc *p, void *data)
622 {
623 	struct vmspace *vm;
624 	vm_map_t map;
625 	int state;
626 
627 	PROC_LOCK_ASSERT(p, MA_OWNED);
628 	if ((p->p_flag & P_WEXIT) != 0)
629 		return (ESRCH);
630 	state = *(int *)data;
631 
632 	switch (state) {
633 	case PROC_WX_MAPPINGS_PERMIT:
634 		p->p_flag2 |= P2_WXORX_DISABLE;
635 		_PHOLD(p);
636 		PROC_UNLOCK(p);
637 		vm = vmspace_acquire_ref(p);
638 		if (vm != NULL) {
639 			map = &vm->vm_map;
640 			vm_map_lock(map);
641 			map->flags &= ~MAP_WXORX;
642 			vm_map_unlock(map);
643 			vmspace_free(vm);
644 		}
645 		PROC_LOCK(p);
646 		_PRELE(p);
647 		break;
648 	case PROC_WX_MAPPINGS_DISALLOW_EXEC:
649 		p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
650 		break;
651 	default:
652 		return (EINVAL);
653 	}
654 
655 	return (0);
656 }
657 
658 static int
659 wxmap_status(struct thread *td, struct proc *p, void *data)
660 {
661 	struct vmspace *vm;
662 	int d;
663 
664 	PROC_LOCK_ASSERT(p, MA_OWNED);
665 	if ((p->p_flag & P_WEXIT) != 0)
666 		return (ESRCH);
667 
668 	d = 0;
669 	if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
670 		d |= PROC_WX_MAPPINGS_PERMIT;
671 	if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
672 		d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
673 	_PHOLD(p);
674 	PROC_UNLOCK(p);
675 	vm = vmspace_acquire_ref(p);
676 	if (vm != NULL) {
677 		if ((vm->vm_map.flags & MAP_WXORX) != 0)
678 			d |= PROC_WXORX_ENFORCE;
679 		vmspace_free(vm);
680 	}
681 	PROC_LOCK(p);
682 	_PRELE(p);
683 	*(int *)data = d;
684 	return (0);
685 }
686 
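/*
 * PROC_PDEATHSIG_CTL and PROC_PDEATHSIG_STATUS: set or query the
 * signal delivered to this process when its parent exits.  Both are
 * restricted to the calling process itself.
 */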
687 static int
688 pdeathsig_ctl(struct thread *td, struct proc *p, void *data)
689 {
690 	int signum;
691 
692 	signum = *(int *)data;
693 	if (p != td->td_proc || (signum != 0 && !_SIG_VALID(signum)))
694 		return (EINVAL);
695 	p->p_pdeathsig = signum;
696 	return (0);
697 }
698 
699 static int
700 pdeathsig_status(struct thread *td, struct proc *p, void *data)
701 {
702 	if (p != td->td_proc)
703 		return (EINVAL);
704 	*(int *)data = p->p_pdeathsig;
705 	return (0);
706 }
707 
708 enum {
709 	PCTL_SLOCKED,
710 	PCTL_XLOCKED,
711 	PCTL_UNLOCKED,
712 };
713 
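/*
 * Per-command dispatch information: how proctree_lock must be held
 * while the handler runs, whether the command may only target a single
 * process, copyin/copyout sizes for the user data, and the handler
 * itself.
 */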
714 struct procctl_cmd_info {
715 	int lock_tree;
716 	bool one_proc : 1;
717 	bool esrch_is_einval : 1;
718 	bool copyout_on_error : 1;
719 	bool no_nonnull_data : 1;
720 	bool need_candebug : 1;
721 	int copyin_sz;
722 	int copyout_sz;
723 	int (*exec)(struct thread *, struct proc *, void *);
724 };
725 static const struct procctl_cmd_info procctl_cmds_info[] = {
726 	[PROC_SPROTECT] =
727 	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
728 	      .esrch_is_einval = false, .no_nonnull_data = false,
729 	      .need_candebug = false,
730 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
731 	      .exec = protect_set, .copyout_on_error = false, },
732 	[PROC_REAP_ACQUIRE] =
733 	    { .lock_tree = PCTL_XLOCKED, .one_proc = true,
734 	      .esrch_is_einval = false, .no_nonnull_data = true,
735 	      .need_candebug = false,
736 	      .copyin_sz = 0, .copyout_sz = 0,
737 	      .exec = reap_acquire, .copyout_on_error = false, },
738 	[PROC_REAP_RELEASE] =
739 	    { .lock_tree = PCTL_XLOCKED, .one_proc = true,
740 	      .esrch_is_einval = false, .no_nonnull_data = true,
741 	      .need_candebug = false,
742 	      .copyin_sz = 0, .copyout_sz = 0,
743 	      .exec = reap_release, .copyout_on_error = false, },
744 	[PROC_REAP_STATUS] =
745 	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
746 	      .esrch_is_einval = false, .no_nonnull_data = false,
747 	      .need_candebug = false,
748 	      .copyin_sz = 0,
749 	      .copyout_sz = sizeof(struct procctl_reaper_status),
750 	      .exec = reap_status, .copyout_on_error = false, },
751 	[PROC_REAP_GETPIDS] =
752 	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
753 	      .esrch_is_einval = false, .no_nonnull_data = false,
754 	      .need_candebug = false,
755 	      .copyin_sz = sizeof(struct procctl_reaper_pids),
756 	      .copyout_sz = 0,
757 	      .exec = reap_getpids, .copyout_on_error = false, },
758 	[PROC_REAP_KILL] =
759 	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
760 	      .esrch_is_einval = false, .no_nonnull_data = false,
761 	      .need_candebug = false,
762 	      .copyin_sz = sizeof(struct procctl_reaper_kill),
763 	      .copyout_sz = sizeof(struct procctl_reaper_kill),
764 	      .exec = reap_kill, .copyout_on_error = true, },
765 	[PROC_TRACE_CTL] =
766 	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
767 	      .esrch_is_einval = false, .no_nonnull_data = false,
768 	      .need_candebug = true,
769 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
770 	      .exec = trace_ctl, .copyout_on_error = false, },
771 	[PROC_TRACE_STATUS] =
772 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
773 	      .esrch_is_einval = false, .no_nonnull_data = false,
774 	      .need_candebug = false,
775 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
776 	      .exec = trace_status, .copyout_on_error = false, },
777 	[PROC_TRAPCAP_CTL] =
778 	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
779 	      .esrch_is_einval = false, .no_nonnull_data = false,
780 	      .need_candebug = true,
781 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
782 	      .exec = trapcap_ctl, .copyout_on_error = false, },
783 	[PROC_TRAPCAP_STATUS] =
784 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
785 	      .esrch_is_einval = false, .no_nonnull_data = false,
786 	      .need_candebug = false,
787 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
788 	      .exec = trapcap_status, .copyout_on_error = false, },
789 	[PROC_PDEATHSIG_CTL] =
790 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
791 	      .esrch_is_einval = true, .no_nonnull_data = false,
792 	      .need_candebug = false,
793 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
794 	      .exec = pdeathsig_ctl, .copyout_on_error = false, },
795 	[PROC_PDEATHSIG_STATUS] =
796 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
797 	      .esrch_is_einval = true, .no_nonnull_data = false,
798 	      .need_candebug = false,
799 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
800 	      .exec = pdeathsig_status, .copyout_on_error = false, },
801 	[PROC_ASLR_CTL] =
802 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
803 	      .esrch_is_einval = false, .no_nonnull_data = false,
804 	      .need_candebug = true,
805 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
806 	      .exec = aslr_ctl, .copyout_on_error = false, },
807 	[PROC_ASLR_STATUS] =
808 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
809 	      .esrch_is_einval = false, .no_nonnull_data = false,
810 	      .need_candebug = false,
811 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
812 	      .exec = aslr_status, .copyout_on_error = false, },
813 	[PROC_PROTMAX_CTL] =
814 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
815 	      .esrch_is_einval = false, .no_nonnull_data = false,
816 	      .need_candebug = true,
817 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
818 	      .exec = protmax_ctl, .copyout_on_error = false, },
819 	[PROC_PROTMAX_STATUS] =
820 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
821 	      .esrch_is_einval = false, .no_nonnull_data = false,
822 	      .need_candebug = false,
823 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
824 	      .exec = protmax_status, .copyout_on_error = false, },
825 	[PROC_STACKGAP_CTL] =
826 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
827 	      .esrch_is_einval = false, .no_nonnull_data = false,
828 	      .need_candebug = true,
829 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
830 	      .exec = stackgap_ctl, .copyout_on_error = false, },
831 	[PROC_STACKGAP_STATUS] =
832 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
833 	      .esrch_is_einval = false, .no_nonnull_data = false,
834 	      .need_candebug = false,
835 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
836 	      .exec = stackgap_status, .copyout_on_error = false, },
837 	[PROC_NO_NEW_PRIVS_CTL] =
838 	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
839 	      .esrch_is_einval = false, .no_nonnull_data = false,
840 	      .need_candebug = true,
841 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
842 	      .exec = no_new_privs_ctl, .copyout_on_error = false, },
843 	[PROC_NO_NEW_PRIVS_STATUS] =
844 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
845 	      .esrch_is_einval = false, .no_nonnull_data = false,
846 	      .need_candebug = false,
847 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
848 	      .exec = no_new_privs_status, .copyout_on_error = false, },
849 	[PROC_WXMAP_CTL] =
850 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
851 	      .esrch_is_einval = false, .no_nonnull_data = false,
852 	      .need_candebug = true,
853 	      .copyin_sz = sizeof(int), .copyout_sz = 0,
854 	      .exec = wxmap_ctl, .copyout_on_error = false, },
855 	[PROC_WXMAP_STATUS] =
856 	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
857 	      .esrch_is_einval = false, .no_nonnull_data = false,
858 	      .need_candebug = false,
859 	      .copyin_sz = 0, .copyout_sz = sizeof(int),
860 	      .exec = wxmap_status, .copyout_on_error = false, },
861 };
862 
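/*
 * procctl(2) system call entry point: copy the per-command argument
 * in, dispatch through kern_procctl(), and copy the result back out.
 * An illustrative userland use, making the calling process a reaper:
 *
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 */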
863 int
864 sys_procctl(struct thread *td, struct procctl_args *uap)
865 {
866 	union {
867 		struct procctl_reaper_status rs;
868 		struct procctl_reaper_pids rp;
869 		struct procctl_reaper_kill rk;
870 		int flags;
871 	} x;
872 	const struct procctl_cmd_info *cmd_info;
873 	int error, error1;
874 
875 	if (uap->com >= PROC_PROCCTL_MD_MIN)
876 		return (cpu_procctl(td, uap->idtype, uap->id,
877 		    uap->com, uap->data));
878 	if (uap->com == 0 || uap->com >= nitems(procctl_cmds_info))
879 		return (EINVAL);
880 	cmd_info = &procctl_cmds_info[uap->com];
881 	bzero(&x, sizeof(x));
882 
883 	if (cmd_info->copyin_sz > 0) {
884 		error = copyin(uap->data, &x, cmd_info->copyin_sz);
885 		if (error != 0)
886 			return (error);
887 	} else if (cmd_info->no_nonnull_data && uap->data != NULL) {
888 		return (EINVAL);
889 	}
890 
891 	error = kern_procctl(td, uap->idtype, uap->id, uap->com, &x);
892 
893 	if (cmd_info->copyout_sz > 0 && (error == 0 ||
894 	    cmd_info->copyout_on_error)) {
895 		error1 = copyout(&x, uap->data, cmd_info->copyout_sz);
896 		if (error == 0)
897 			error = error1;
898 	}
899 	return (error);
900 }
901 
902 static int
903 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
904 {
905 
906 	PROC_LOCK_ASSERT(p, MA_OWNED);
907 	return (procctl_cmds_info[com].exec(td, p, data));
908 }
909 
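/*
 * Common procctl implementation: acquire proctree_lock in the mode the
 * command requires, resolve the id (a single process for P_PID, every
 * visible member of the group for P_PGID), perform the p_candebug() or
 * p_cansee() check, and run the command handler for each target.
 */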
910 int
911 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
912 {
913 	struct pgrp *pg;
914 	struct proc *p;
915 	const struct procctl_cmd_info *cmd_info;
916 	int error, first_error, ok;
917 
918 	MPASS(com > 0 && com < nitems(procctl_cmds_info));
919 	cmd_info = &procctl_cmds_info[com];
920 	if (idtype != P_PID && cmd_info->one_proc)
921 		return (EINVAL);
922 
923 	switch (cmd_info->lock_tree) {
924 	case PCTL_XLOCKED:
925 		sx_xlock(&proctree_lock);
926 		break;
927 	case PCTL_SLOCKED:
928 		sx_slock(&proctree_lock);
929 		break;
930 	default:
931 		break;
932 	}
933 
934 	switch (idtype) {
935 	case P_PID:
936 		if (id == 0) {
937 			p = td->td_proc;
938 			error = 0;
939 			PROC_LOCK(p);
940 		} else {
941 			p = pfind(id);
942 			if (p == NULL) {
943 				error = cmd_info->esrch_is_einval ?
944 				    EINVAL : ESRCH;
945 				break;
946 			}
947 			error = cmd_info->need_candebug ? p_candebug(td, p) :
948 			    p_cansee(td, p);
949 		}
950 		if (error == 0)
951 			error = kern_procctl_single(td, p, com, data);
952 		PROC_UNLOCK(p);
953 		break;
954 	case P_PGID:
955 		/*
956 		 * Attempt to apply the operation to all members of the
957 		 * group.  Ignore processes in the group that can't be
958 		 * seen.  Ignore errors so long as at least one process is
959 		 * able to complete the request successfully.
960 		 */
961 		pg = pgfind(id);
962 		if (pg == NULL) {
963 			error = ESRCH;
964 			break;
965 		}
966 		PGRP_UNLOCK(pg);
967 		ok = 0;
968 		first_error = 0;
969 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
970 			PROC_LOCK(p);
971 			if (p->p_state == PRS_NEW ||
972 			    p->p_state == PRS_ZOMBIE ||
973 			    (cmd_info->need_candebug ? p_candebug(td, p) :
974 			    p_cansee(td, p)) != 0) {
975 				PROC_UNLOCK(p);
976 				continue;
977 			}
978 			error = kern_procctl_single(td, p, com, data);
979 			PROC_UNLOCK(p);
980 			if (error == 0)
981 				ok = 1;
982 			else if (first_error == 0)
983 				first_error = error;
984 		}
985 		if (ok)
986 			error = 0;
987 		else if (first_error != 0)
988 			error = first_error;
989 		else
990 			/*
991 			 * The caller was not able to see any processes
992 			 * in the process group.
993 			 */
994 			error = ESRCH;
995 		break;
996 	default:
997 		error = EINVAL;
998 		break;
999 	}
1000 
1001 	switch (cmd_info->lock_tree) {
1002 	case PCTL_XLOCKED:
1003 		sx_xunlock(&proctree_lock);
1004 		break;
1005 	case PCTL_SLOCKED:
1006 		sx_sunlock(&proctree_lock);
1007 		break;
1008 	default:
1009 		break;
1010 	}
1011 	return (error);
1012 }
1013