xref: /freebsd/sys/kern/kern_procctl.c (revision e95923a2288fb2845c7be4822b1b92b2fc106d18)
/*-
 * Copyright (c) 2014 John Baldwin
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

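/*
 * Set or clear the P_PROTECTED flag on a single process.  Returns 1 if
 * the change was applied and 0 if the process was skipped because it is
 * a system process or the caller lacks scheduling control over it.
 */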
static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

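/*
 * PROC_SPROTECT handler: validate the operation and flags, check for
 * the PRIV_VM_MADV_PROTECT privilege, and apply the protection change
 * to the process or, with PPROT_DESCEND, to its whole subtree.  EPERM
 * is returned when no process could be updated.
 */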
static int
protect_set(struct thread *td, struct proc *p, int flags)
{
	int error, ret;

	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}

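/*
 * PROC_REAP_ACQUIRE: mark the calling process as a reaper of its future
 * orphaned descendants.  Only the current process may acquire reaper
 * status, and acquiring it twice fails with EBUSY.
 */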
static int
reap_acquire(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if ((p->p_treeflag & P_TREE_REAPER) != 0)
		return (EBUSY);
	p->p_treeflag |= P_TREE_REAPER;
	/*
	 * We do not reattach existing children and the whole tree
	 * under them to us, since p->p_reaper has already seen them.
	 */
	return (0);
}

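/*
 * PROC_REAP_RELEASE: drop reaper status and hand the processes on the
 * reap list back to our own reaper.  The system init process cannot
 * give up its role.
 */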
static int
reap_release(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if (p == initproc)
		return (EINVAL);
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return (EINVAL);
	reaper_abandon_children(p, false);
	return (0);
}

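/*
 * PROC_REAP_STATUS: report the reaper of this process, whether the
 * process is itself a reaper, and counts of its direct children and of
 * all descendants on the reap list.
 */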
static int
reap_status(struct thread *td, struct proc *p,
    struct procctl_reaper_status *rs)
{
	struct proc *reap, *p2, *first_p;

	sx_assert(&proctree_lock, SX_LOCKED);
	bzero(rs, sizeof(*rs));
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		rs->rs_pid = -1;
	}
	return (0);
}

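/*
 * PROC_REAP_GETPIDS: copy out one pidinfo record per descendant on the
 * reap list, up to the caller-supplied rp_count.  The proctree lock is
 * dropped around the allocation and the copyout, so the snapshot is not
 * guaranteed to be atomic with respect to tree changes.
 */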
static int
reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	u_int i, n;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	n = i = 0;
	error = 0;
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
			pip->pi_flags |= REAPER_PIDINFO_REAPER;
		i++;
	}
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}

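/*
 * Deliver the requested signal to a single process if the caller is
 * permitted to signal it, counting successful deliveries in rk_killed.
 * rk_fpid records the first pid for which permission was denied before
 * any delivery succeeded.
 */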
static void
reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
    struct procctl_reaper_kill *rk, int *error)
{
	int error1;

	PROC_LOCK(p2);
	error1 = p_cansignal(td, p2, rk->rk_sig);
	if (error1 == 0) {
		pksignal(p2, rk->rk_sig, ksi);
		rk->rk_killed++;
		*error = error1;
	} else if (*error == ESRCH) {
		rk->rk_fpid = p2->p_pid;
		*error = error1;
	}
	PROC_UNLOCK(p2);
}

struct reap_kill_tracker {
	struct proc *parent;
	TAILQ_ENTRY(reap_kill_tracker) link;
};

TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);

static void
reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
{
	struct reap_kill_tracker *t;

	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
	t->parent = p2;
	TAILQ_INSERT_TAIL(tracker, t, link);
}

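/*
 * PROC_REAP_KILL: signal either the direct children of the reaper
 * (REAPER_KILL_CHILDREN) or every descendant on the reap list,
 * optionally limited to one subtree (REAPER_KILL_SUBTREE).  Nested
 * reapers are visited iteratively through the tracker queue.
 */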
static int
reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
{
	struct proc *reap, *p2;
	ksiginfo_t ksi;
	struct reap_kill_tracker_head tracker;
	struct reap_kill_tracker *t;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
		return (EINVAL);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
		for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
		    p2 = LIST_NEXT(p2, p_sibling)) {
			reap_kill_proc(td, p2, &ksi, rk, &error);
			/*
			 * Do not end the loop on error, signal
			 * everything we can.
			 */
		}
	} else {
		TAILQ_INIT(&tracker);
		reap_kill_sched(&tracker, reap);
		while ((t = TAILQ_FIRST(&tracker)) != NULL) {
			MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
			TAILQ_REMOVE(&tracker, t, link);
			for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
			    p2 = LIST_NEXT(p2, p_reapsibling)) {
				if (t->parent == reap &&
				    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
				    p2->p_reapsubtree != rk->rk_subtree)
					continue;
				if ((p2->p_treeflag & P_TREE_REAPER) != 0)
					reap_kill_sched(&tracker, p2);
				reap_kill_proc(td, p2, &ksi, rk, &error);
			}
			free(t, M_TEMP);
		}
	}
	PROC_LOCK(p);
	return (error);
}

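/*
 * PROC_TRACE_CTL: allow or forbid tracing and debugger attachment for
 * the process.  The setting cannot be changed while the process is
 * already traced or has ktrace enabled.
 */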
static int
trace_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Ktrace changes p_traceflag from or to zero under the
	 * process lock, so the test does not need to acquire ktrace
	 * mutex.
	 */
	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
		return (EBUSY);

	switch (state) {
	case PROC_TRACE_CTL_ENABLE:
		if (td->td_proc != p)
			return (EPERM);
		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
		break;
	case PROC_TRACE_CTL_DISABLE_EXEC:
		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
		break;
	case PROC_TRACE_CTL_DISABLE:
		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
			    ("dangling P2_NOTRACE_EXEC"));
			if (td->td_proc != p)
				return (EPERM);
			p->p_flag2 &= ~P2_NOTRACE_EXEC;
		} else {
			p->p_flag2 |= P2_NOTRACE;
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

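/*
 * PROC_TRACE_STATUS: report -1 if tracing is disabled, 0 if tracing is
 * allowed but no debugger is attached, or the pid of the tracing
 * process otherwise.
 */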
static int
trace_status(struct thread *td, struct proc *p, int *data)
{

	if ((p->p_flag2 & P2_NOTRACE) != 0) {
		KASSERT((p->p_flag & P_TRACED) == 0,
		    ("%d traced but tracing disabled", p->p_pid));
		*data = -1;
	} else if ((p->p_flag & P_TRACED) != 0) {
		*data = p->p_pptr->p_pid;
	} else {
		*data = 0;
	}
	return (0);
}

static int
trapcap_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_TRAPCAP_CTL_ENABLE:
		p->p_flag2 |= P2_TRAPCAP;
		break;
	case PROC_TRAPCAP_CTL_DISABLE:
		p->p_flag2 &= ~P2_TRAPCAP;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trapcap_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
	    PROC_TRAPCAP_CTL_DISABLE;
	return (0);
}

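/*
 * PROC_ASLR_CTL: force address space layout randomization on or off
 * for images activated by later execve(2) calls, or revert to the
 * global policy with PROC_ASLR_NOFORCE.
 */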
static int
aslr_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_ASLR_FORCE_ENABLE:
		p->p_flag2 &= ~P2_ASLR_DISABLE;
		p->p_flag2 |= P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_FORCE_DISABLE:
		p->p_flag2 |= P2_ASLR_DISABLE;
		p->p_flag2 &= ~P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_NOFORCE:
		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

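/*
 * PROC_ASLR_STATUS: report the per-process ASLR override and set
 * PROC_ASLR_ACTIVE if the current vmspace was created with ASLR
 * applied.  The process lock is dropped while the vmspace is examined.
 */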
static int
aslr_status(struct thread *td, struct proc *p, int *data)
{
	struct vmspace *vm;
	int d;

	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
	case 0:
		d = PROC_ASLR_NOFORCE;
		break;
	case P2_ASLR_ENABLE:
		d = PROC_ASLR_FORCE_ENABLE;
		break;
	case P2_ASLR_DISABLE:
		d = PROC_ASLR_FORCE_DISABLE;
		break;
	}
	if ((p->p_flag & P_WEXIT) == 0) {
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			if ((vm->vm_map.flags & MAP_ASLR) != 0)
				d |= PROC_ASLR_ACTIVE;
			/* Drop the reference even when ASLR is not active. */
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
	}
	*data = d;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct procctl_args {
	idtype_t idtype;
	id_t	id;
	int	com;
	void	*data;
};
#endif
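/*
 * procctl(2) entry point: copy in the command-specific argument, pass
 * the request to kern_procctl(), and copy the result back out for the
 * status-style commands.
 *
 * Illustrative userspace call (sketch, not part of this file):
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 */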
/* ARGSUSED */
int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	void *data;
	union {
		struct procctl_reaper_status rs;
		struct procctl_reaper_pids rp;
		struct procctl_reaper_kill rk;
	} x;
	int error, error1, flags, signum;

	if (uap->com >= PROC_PROCCTL_MD_MIN)
		return (cpu_procctl(td, uap->idtype, uap->id,
		    uap->com, uap->data));

	switch (uap->com) {
	case PROC_ASLR_CTL:
	case PROC_SPROTECT:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		error = copyin(uap->data, &flags, sizeof(flags));
		if (error != 0)
			return (error);
		data = &flags;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		if (uap->data != NULL)
			return (EINVAL);
		data = NULL;
		break;
	case PROC_REAP_STATUS:
		data = &x.rs;
		break;
	case PROC_REAP_GETPIDS:
		error = copyin(uap->data, &x.rp, sizeof(x.rp));
		if (error != 0)
			return (error);
		data = &x.rp;
		break;
	case PROC_REAP_KILL:
		error = copyin(uap->data, &x.rk, sizeof(x.rk));
		if (error != 0)
			return (error);
		data = &x.rk;
		break;
	case PROC_ASLR_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		data = &flags;
		break;
	case PROC_PDEATHSIG_CTL:
		error = copyin(uap->data, &signum, sizeof(signum));
		if (error != 0)
			return (error);
		data = &signum;
		break;
	case PROC_PDEATHSIG_STATUS:
		data = &signum;
		break;
	default:
		return (EINVAL);
	}
	error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
	switch (uap->com) {
	case PROC_REAP_STATUS:
		if (error == 0)
			error = copyout(&x.rs, uap->data, sizeof(x.rs));
		break;
	case PROC_REAP_KILL:
		error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
		if (error == 0)
			error = error1;
		break;
	case PROC_ASLR_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		if (error == 0)
			error = copyout(&flags, uap->data, sizeof(flags));
		break;
	case PROC_PDEATHSIG_STATUS:
		if (error == 0)
			error = copyout(&signum, uap->data, sizeof(signum));
		break;
	}
	return (error);
}

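/*
 * Dispatch a request against a single process to the handler for the
 * given command.  The process lock is held on entry and on return,
 * although some handlers drop it temporarily.
 */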
static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	switch (com) {
	case PROC_ASLR_CTL:
		return (aslr_ctl(td, p, *(int *)data));
	case PROC_ASLR_STATUS:
		return (aslr_status(td, p, data));
	case PROC_SPROTECT:
		return (protect_set(td, p, *(int *)data));
	case PROC_REAP_ACQUIRE:
		return (reap_acquire(td, p));
	case PROC_REAP_RELEASE:
		return (reap_release(td, p));
	case PROC_REAP_STATUS:
		return (reap_status(td, p, data));
	case PROC_REAP_GETPIDS:
		return (reap_getpids(td, p, data));
	case PROC_REAP_KILL:
		return (reap_kill(td, p, data));
	case PROC_TRACE_CTL:
		return (trace_ctl(td, p, *(int *)data));
	case PROC_TRACE_STATUS:
		return (trace_status(td, p, data));
	case PROC_TRAPCAP_CTL:
		return (trapcap_ctl(td, p, *(int *)data));
	case PROC_TRAPCAP_STATUS:
		return (trapcap_status(td, p, data));
	default:
		return (EINVAL);
	}
}

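/*
 * Common implementation of procctl(2): handle the commands that act on
 * the calling process directly, take the proctree lock in the mode the
 * command requires, and apply the request to the target process or to
 * every visible member of the target process group.
 */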
int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	int error, first_error, ok;
	int signum;
	bool tree_locked;

	switch (com) {
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
	case PROC_PDEATHSIG_CTL:
	case PROC_PDEATHSIG_STATUS:
		if (idtype != P_PID)
			return (EINVAL);
	}

	switch (com) {
	case PROC_PDEATHSIG_CTL:
		signum = *(int *)data;
		p = td->td_proc;
		if ((id != 0 && id != p->p_pid) ||
		    (signum != 0 && !_SIG_VALID(signum)))
			return (EINVAL);
		PROC_LOCK(p);
		p->p_pdeathsig = signum;
		PROC_UNLOCK(p);
		return (0);
	case PROC_PDEATHSIG_STATUS:
		p = td->td_proc;
		if (id != 0 && id != p->p_pid)
			return (EINVAL);
		PROC_LOCK(p);
		*(int *)data = p->p_pdeathsig;
		PROC_UNLOCK(p);
		return (0);
	}

	switch (com) {
	case PROC_SPROTECT:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		sx_slock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		sx_xlock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_ASLR_CTL:
	case PROC_ASLR_STATUS:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		tree_locked = false;
		break;
	default:
		return (EINVAL);
	}

	switch (idtype) {
	case P_PID:
		p = pfind(id);
		if (p == NULL) {
			error = ESRCH;
			break;
		}
		error = p_cansee(td, p);
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (tree_locked)
		sx_unlock(&proctree_lock);
	return (error);
}
739