/* xref: /freebsd/sys/kern/kern_procctl.c (revision cbd30a72ca196976c1c700400ecd424baa1b9c16) */
/*-
 * Copyright (c) 2014 John Baldwin
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/wait.h>

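/*
 * PROC_SPROTECT helper: set or clear P_PROTECTED (protection from being
 * killed when the system exhausts memory and swap) on a single process,
 * optionally together with P2_INHERIT_PROTECTED.  Returns 1 if the flags
 * were changed, 0 if the process was skipped (system process or the
 * caller lacks permission).
 */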
static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

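/*
 * Walk the process tree rooted at "top" depth-first and apply
 * protect_setchild() to every process in it.  The caller holds the
 * proctree lock and the lock of "top"; the walk drops and reacquires
 * the individual process locks as it goes and returns with "top"
 * locked again.  Returns non-zero if at least one process was updated.
 */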
static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

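/*
 * PROC_SPROTECT: validate the requested operation (PPROT_SET or
 * PPROT_CLEAR plus the PPROT_DESCEND and PPROT_INHERIT modifiers),
 * check for the PRIV_VM_MADV_PROTECT privilege and apply the change to
 * the process or, with PPROT_DESCEND, to its whole subtree.  Returns
 * EPERM if no process could be updated.
 */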
static int
protect_set(struct thread *td, struct proc *p, int flags)
{
	int error, ret;

	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}

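/*
 * PROC_REAP_ACQUIRE: make the calling process the reaper for its future
 * descendants.  Only the current process may acquire reaper status for
 * itself, and acquiring it twice fails with EBUSY.
 *
 * Illustrative userspace call (an example, not part of this file):
 *
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 */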
static int
reap_acquire(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if ((p->p_treeflag & P_TREE_REAPER) != 0)
		return (EBUSY);
	p->p_treeflag |= P_TREE_REAPER;
	/*
	 * We do not reattach existing children and the whole tree
	 * under them to us, since p->p_reaper has already seen them.
	 */
	return (0);
}

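/*
 * PROC_REAP_RELEASE: give up reaper status.  Only the current process
 * may release its own status, init can never stop being a reaper, and
 * releasing without holding reaper status fails with EINVAL.  The
 * descendants on the reap list are handed over to this process's own
 * reaper by reaper_abandon_children().
 */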
static int
reap_release(struct thread *td, struct proc *p)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != curproc)
		return (EPERM);
	if (p == initproc)
		return (EINVAL);
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return (EINVAL);
	reaper_abandon_children(p, false);
	return (0);
}

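/*
 * PROC_REAP_STATUS: report the pid of the reaper owning this process
 * (or of the process itself if it is a reaper, flagged
 * REAPER_STATUS_OWNED), whether that reaper is the real init
 * (REAPER_STATUS_REALINIT), the number of direct children and of all
 * descendants on the reap list, and the pid of the first child
 * (or -1 if the reap list is empty).
 */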
static int
reap_status(struct thread *td, struct proc *p,
    struct procctl_reaper_status *rs)
{
	struct proc *reap, *p2, *first_p;

	sx_assert(&proctree_lock, SX_LOCKED);
	bzero(rs, sizeof(*rs));
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		rs->rs_pid = -1;
	}
	return (0);
}

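/*
 * PROC_REAP_GETPIDS: copy out an array describing the processes on the
 * reap list (pid, reap subtree, and whether the entry is a direct
 * child).  At most rp_count entries are returned.  The proctree lock is
 * dropped around the allocation and the copyout, so the snapshot is not
 * atomic with respect to the list changing.
 */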
static int
reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	u_int i, n;
	int error;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	n = i = 0;
	error = 0;
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		i++;
	}
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}

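/*
 * PROC_REAP_KILL: deliver rk_sig either to the reaper's direct children
 * (REAPER_KILL_CHILDREN) or to every descendant on the reap list,
 * optionally restricted to one reap subtree (REAPER_KILL_SUBTREE).
 * rk_killed counts the processes that were signalled and rk_fpid
 * records the first pid that failed the permission check.  The call
 * succeeds if at least one signal was delivered, and is not permitted
 * in capability mode.
 *
 * Illustrative userspace call (an example, not part of this file):
 *
 *	struct procctl_reaper_kill rk = { .rk_sig = SIGTERM };
 *	procctl(P_PID, getpid(), PROC_REAP_KILL, &rk);
 */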
static int
reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
{
	struct proc *reap, *p2;
	ksiginfo_t ksi;
	int error, error1;

	sx_assert(&proctree_lock, SX_LOCKED);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG)
		return (EINVAL);
	if ((rk->rk_flags & ~(REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) != 0)
		return (EINVAL);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	for (p2 = (rk->rk_flags & REAPER_KILL_CHILDREN) != 0 ?
	    LIST_FIRST(&reap->p_children) : LIST_FIRST(&reap->p_reaplist);
	    p2 != NULL;
	    p2 = (rk->rk_flags & REAPER_KILL_CHILDREN) != 0 ?
	    LIST_NEXT(p2, p_sibling) : LIST_NEXT(p2, p_reapsibling)) {
		if ((rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
		    p2->p_reapsubtree != rk->rk_subtree)
			continue;
		PROC_LOCK(p2);
		error1 = p_cansignal(td, p2, rk->rk_sig);
		if (error1 == 0) {
			pksignal(p2, rk->rk_sig, &ksi);
			rk->rk_killed++;
			error = error1;
		} else if (error == ESRCH) {
			error = error1;
			rk->rk_fpid = p2->p_pid;
		}
		PROC_UNLOCK(p2);
		/* Do not end the loop on error, signal everything we can. */
	}
	PROC_LOCK(p);
	return (error);
}

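/*
 * PROC_TRACE_CTL: control whether the process may be traced (and thus
 * debugged or dumped).  ENABLE clears the P2_NOTRACE flags and is only
 * permitted on the calling process itself; DISABLE sets P2_NOTRACE;
 * DISABLE_EXEC disables tracing only until the next exec.  No change is
 * allowed while the process is already traced or has ktrace enabled
 * (EBUSY).
 */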
static int
trace_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Ktrace changes p_traceflag from or to zero under the
	 * process lock, so the test does not need to acquire the
	 * ktrace mutex.
	 */
	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
		return (EBUSY);

	switch (state) {
	case PROC_TRACE_CTL_ENABLE:
		if (td->td_proc != p)
			return (EPERM);
		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
		break;
	case PROC_TRACE_CTL_DISABLE_EXEC:
		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
		break;
	case PROC_TRACE_CTL_DISABLE:
		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
			    ("dangling P2_NOTRACE_EXEC"));
			if (td->td_proc != p)
				return (EPERM);
			p->p_flag2 &= ~P2_NOTRACE_EXEC;
		} else {
			p->p_flag2 |= P2_NOTRACE;
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

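/*
 * PROC_TRACE_STATUS: report -1 if tracing of the process is disabled,
 * the pid of the tracing process (its current parent) if it is being
 * traced, or 0 otherwise.
 */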
static int
trace_status(struct thread *td, struct proc *p, int *data)
{

	if ((p->p_flag2 & P2_NOTRACE) != 0) {
		KASSERT((p->p_flag & P_TRACED) == 0,
		    ("%d traced but tracing disabled", p->p_pid));
		*data = -1;
	} else if ((p->p_flag & P_TRACED) != 0) {
		*data = p->p_pptr->p_pid;
	} else {
		*data = 0;
	}
	return (0);
}

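/*
 * PROC_TRAPCAP_CTL: set or clear P2_TRAPCAP, which causes capability-mode
 * access violations (ENOTCAPABLE or ECAPMODE errors) to also deliver
 * SIGTRAP to the process.
 */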
static int
trapcap_ctl(struct thread *td, struct proc *p, int state)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);

	switch (state) {
	case PROC_TRAPCAP_CTL_ENABLE:
		p->p_flag2 |= P2_TRAPCAP;
		break;
	case PROC_TRAPCAP_CTL_DISABLE:
		p->p_flag2 &= ~P2_TRAPCAP;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

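/*
 * PROC_TRAPCAP_STATUS: report whether P2_TRAPCAP is currently set for
 * the process.
 */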
static int
trapcap_status(struct thread *td, struct proc *p, int *data)
{

	*data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
	    PROC_TRAPCAP_CTL_DISABLE;
	return (0);
}

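/*
 * procctl(2) entry point.  The command-specific argument is copied in
 * from userspace before kern_procctl() runs and, for the status and
 * kill commands, the result is copied back out afterwards.
 *
 * Illustrative userspace call (an example, not part of this file):
 *
 *	struct procctl_reaper_status rs;
 *	if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &rs) == 0)
 *		printf("my reaper is pid %d\n", (int)rs.rs_reaper);
 */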
#ifndef _SYS_SYSPROTO_H_
struct procctl_args {
	idtype_t idtype;
	id_t	id;
	int	com;
	void	*data;
};
#endif
/* ARGSUSED */
int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	void *data;
	union {
		struct procctl_reaper_status rs;
		struct procctl_reaper_pids rp;
		struct procctl_reaper_kill rk;
	} x;
	int error, error1, flags;

	switch (uap->com) {
	case PROC_SPROTECT:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		error = copyin(uap->data, &flags, sizeof(flags));
		if (error != 0)
			return (error);
		data = &flags;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		if (uap->data != NULL)
			return (EINVAL);
		data = NULL;
		break;
	case PROC_REAP_STATUS:
		data = &x.rs;
		break;
	case PROC_REAP_GETPIDS:
		error = copyin(uap->data, &x.rp, sizeof(x.rp));
		if (error != 0)
			return (error);
		data = &x.rp;
		break;
	case PROC_REAP_KILL:
		error = copyin(uap->data, &x.rk, sizeof(x.rk));
		if (error != 0)
			return (error);
		data = &x.rk;
		break;
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		data = &flags;
		break;
	default:
		return (EINVAL);
	}
	error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
	switch (uap->com) {
	case PROC_REAP_STATUS:
		if (error == 0)
			error = copyout(&x.rs, uap->data, sizeof(x.rs));
		break;
	case PROC_REAP_KILL:
		error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
		if (error == 0)
			error = error1;
		break;
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		if (error == 0)
			error = copyout(&flags, uap->data, sizeof(flags));
		break;
	}
	return (error);
}

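/*
 * Dispatch a single procctl command to its handler.  Called with the
 * process locked; the command-specific argument has already been copied
 * into kernel space.
 */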
static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	switch (com) {
	case PROC_SPROTECT:
		return (protect_set(td, p, *(int *)data));
	case PROC_REAP_ACQUIRE:
		return (reap_acquire(td, p));
	case PROC_REAP_RELEASE:
		return (reap_release(td, p));
	case PROC_REAP_STATUS:
		return (reap_status(td, p, data));
	case PROC_REAP_GETPIDS:
		return (reap_getpids(td, p, data));
	case PROC_REAP_KILL:
		return (reap_kill(td, p, data));
	case PROC_TRACE_CTL:
		return (trace_ctl(td, p, *(int *)data));
	case PROC_TRACE_STATUS:
		return (trace_status(td, p, data));
	case PROC_TRAPCAP_CTL:
		return (trapcap_ctl(td, p, *(int *)data));
	case PROC_TRAPCAP_STATUS:
		return (trapcap_status(td, p, data));
	default:
		return (EINVAL);
	}
}

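/*
 * Common procctl implementation.  The reaper commands and the status
 * queries accept only a single process (P_PID); the other commands may
 * also target a process group (P_PGID).  The proctree lock is taken
 * shared or exclusive as the command requires before the target process
 * or group members are looked up and handed to kern_procctl_single().
 */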
int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	int error, first_error, ok;
	bool tree_locked;

	switch (com) {
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		if (idtype != P_PID)
			return (EINVAL);
	}

	switch (com) {
	case PROC_SPROTECT:
	case PROC_REAP_STATUS:
	case PROC_REAP_GETPIDS:
	case PROC_REAP_KILL:
	case PROC_TRACE_CTL:
	case PROC_TRAPCAP_CTL:
		sx_slock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_REAP_ACQUIRE:
	case PROC_REAP_RELEASE:
		sx_xlock(&proctree_lock);
		tree_locked = true;
		break;
	case PROC_TRACE_STATUS:
	case PROC_TRAPCAP_STATUS:
		tree_locked = false;
		break;
	default:
		return (EINVAL);
	}

	switch (idtype) {
	case P_PID:
		p = pfind(id);
		if (p == NULL) {
			error = ESRCH;
			break;
		}
		error = p_cansee(td, p);
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (tree_locked)
		sx_unlock(&proctree_lock);
	return (error);
}