/*-
 * Copyright (c) 2014 John Baldwin
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/_unrhdr.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

static int
protect_set(struct thread *td, struct proc *p, void *data)
{
	int error, flags, ret;

	flags = *(int *)data;
	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}
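
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * long-running daemon would typically protect itself, and optionally
 * its future children, from the OOM killer like this:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int flags = PPROT_SET | PPROT_INHERIT;
 *	if (procctl(P_PID, getpid(), PROC_SPROTECT, &flags) == -1)
 *		err(1, "PROC_SPROTECT");
 */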

static int
reap_acquire(struct thread *td, struct proc *p, void *data __unused)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != td->td_proc)
		return (EPERM);
	if ((p->p_treeflag & P_TREE_REAPER) != 0)
		return (EBUSY);
	p->p_treeflag |= P_TREE_REAPER;
	/*
	 * We do not reattach existing children and the whole tree
	 * under them to us, since p->p_reaper has already seen them.
	 */
	return (0);
}

static int
reap_release(struct thread *td, struct proc *p, void *data __unused)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	if (p != td->td_proc)
		return (EPERM);
	if (p == initproc)
		return (EINVAL);
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return (EINVAL);
	reaper_abandon_children(p, false);
	return (0);
}
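
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * process manager marks itself as the reaper for its descendants, so
 * that orphaned grandchildren are reparented to it instead of init;
 * the release call undoes this:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1)
 *		err(1, "PROC_REAP_ACQUIRE");
 *	(void)procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL);
 */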

static int
reap_status(struct thread *td, struct proc *p, void *data)
{
	struct proc *reap, *p2, *first_p;
	struct procctl_reaper_status *rs;

	rs = data;
	sx_assert(&proctree_lock, SX_LOCKED);
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		rs->rs_pid = -1;
	}
	return (0);
}
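
/*
 * Illustrative userland usage (a sketch, not part of this file):
 * querying reaper status fills in struct procctl_reaper_status:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	struct procctl_reaper_status rs;
 *	if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &rs) == -1)
 *		err(1, "PROC_REAP_STATUS");
 *	printf("reaper pid %d, %u children, %u descendants\n",
 *	    rs.rs_reaper, rs.rs_children, rs.rs_descendants);
 */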

static int
reap_getpids(struct thread *td, struct proc *p, void *data)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	struct procctl_reaper_pids *rp;
	u_int i, n;
	int error;

	rp = data;
	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	n = i = 0;
	error = 0;
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
			pip->pi_flags |= REAPER_PIDINFO_REAPER;
		if ((p2->p_flag & P_STOPPED) != 0)
			pip->pi_flags |= REAPER_PIDINFO_STOPPED;
		if (p2->p_state == PRS_ZOMBIE)
			pip->pi_flags |= REAPER_PIDINFO_ZOMBIE;
		else if ((p2->p_flag & P_WEXIT) != 0)
			pip->pi_flags |= REAPER_PIDINFO_EXITING;
		i++;
	}
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}
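
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * reaper enumerates its current descendants into a caller-supplied,
 * zero-initialized array; only entries with REAPER_PIDINFO_VALID set
 * were filled in by the kernel:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	struct procctl_reaper_pidinfo pids[128] = {0};
 *	struct procctl_reaper_pids rp = {
 *		.rp_count = 128,
 *		.rp_pids = pids,
 *	};
 *	if (procctl(P_PID, getpid(), PROC_REAP_GETPIDS, &rp) == -1)
 *		err(1, "PROC_REAP_GETPIDS");
 *	for (unsigned i = 0; i < 128; i++)
 *		if ((pids[i].pi_flags & REAPER_PIDINFO_VALID) != 0)
 *			printf("%d\n", pids[i].pi_pid);
 */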

struct reap_kill_proc_work {
	struct ucred *cr;
	struct proc *target;
	ksiginfo_t *ksi;
	struct procctl_reaper_kill *rk;
	int *error;
	struct task t;
};

static void
reap_kill_proc_locked(struct reap_kill_proc_work *w)
{
	int error1;
	bool need_stop;

	PROC_LOCK_ASSERT(w->target, MA_OWNED);
	PROC_ASSERT_HELD(w->target);

	error1 = cr_cansignal(w->cr, w->target, w->rk->rk_sig);
	if (error1 != 0) {
		if (*w->error == ESRCH) {
			w->rk->rk_fpid = w->target->p_pid;
			*w->error = error1;
		}
		return;
	}

	/*
	 * need_stop indicates whether the target process needs to be
	 * suspended before being signalled.  Suspension is required to
	 * guarantee that all processes in the subtree are signalled,
	 * avoiding the race where a process not yet fully linked into
	 * all structures during fork is skipped by the iterator and
	 * escapes signalling.
	 *
	 * The thread cannot usefully stop itself anyway, and if another
	 * thread of the current process forks while the current thread
	 * signals the whole subtree, it is an application race.
	 */
	if ((w->target->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0)
		need_stop = thread_single(w->target, SINGLE_ALLPROC) == 0;
	else
		need_stop = false;

	(void)pksignal(w->target, w->rk->rk_sig, w->ksi);
	w->rk->rk_killed++;
	*w->error = error1;

	if (need_stop)
		thread_single_end(w->target, SINGLE_ALLPROC);
}

static void
reap_kill_proc_work(void *arg, int pending __unused)
{
	struct reap_kill_proc_work *w;

	w = arg;
	PROC_LOCK(w->target);
	if ((w->target->p_flag2 & P2_WEXIT) == 0)
		reap_kill_proc_locked(w);
	PROC_UNLOCK(w->target);

	sx_xlock(&proctree_lock);
	w->target = NULL;
	wakeup(&w->target);
	sx_xunlock(&proctree_lock);
}

struct reap_kill_tracker {
	struct proc *parent;
	TAILQ_ENTRY(reap_kill_tracker) link;
};

TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);

static void
reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
{
	struct reap_kill_tracker *t;

	PROC_LOCK(p2);
	if ((p2->p_flag2 & P2_WEXIT) != 0) {
		PROC_UNLOCK(p2);
		return;
	}
	_PHOLD(p2);
	PROC_UNLOCK(p2);
	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
	t->parent = p2;
	TAILQ_INSERT_TAIL(tracker, t, link);
}

static void
reap_kill_sched_free(struct reap_kill_tracker *t)
{
	PRELE(t->parent);
	free(t, M_TEMP);
}

static void
reap_kill_children(struct thread *td, struct proc *reaper,
    struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
{
	struct proc *p2;
	int error1;

	LIST_FOREACH(p2, &reaper->p_children, p_sibling) {
		PROC_LOCK(p2);
		if ((p2->p_flag2 & P2_WEXIT) == 0) {
			error1 = p_cansignal(td, p2, rk->rk_sig);
			if (error1 != 0) {
				if (*error == ESRCH) {
					rk->rk_fpid = p2->p_pid;
					*error = error1;
				}

				/*
				 * Do not end the loop on error,
				 * signal everything we can.
				 */
			} else {
				(void)pksignal(p2, rk->rk_sig, ksi);
				rk->rk_killed++;
			}
		}
		PROC_UNLOCK(p2);
	}
}

static bool
reap_kill_subtree_once(struct thread *td, struct proc *p, struct proc *reaper,
    struct unrhdr *pids, struct reap_kill_proc_work *w)
{
	struct reap_kill_tracker_head tracker;
	struct reap_kill_tracker *t;
	struct proc *p2;
	int r, xlocked;
	bool res, st;

	res = false;
	TAILQ_INIT(&tracker);
	reap_kill_sched(&tracker, reaper);
	while ((t = TAILQ_FIRST(&tracker)) != NULL) {
		TAILQ_REMOVE(&tracker, t, link);

		/*
		 * Since reap_kill_proc() drops proctree_lock sx, it
		 * is possible that the tracked reaper is no longer a
		 * reaper.  In this case the subtree is reparented to
		 * the new reaper, which should handle it.
		 */
		if ((t->parent->p_treeflag & P_TREE_REAPER) == 0) {
			reap_kill_sched_free(t);
			res = true;
			continue;
		}

		LIST_FOREACH(p2, &t->parent->p_reaplist, p_reapsibling) {
			if (t->parent == reaper &&
			    (w->rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
			    p2->p_reapsubtree != w->rk->rk_subtree)
				continue;
			if ((p2->p_treeflag & P_TREE_REAPER) != 0)
				reap_kill_sched(&tracker, p2);

			/*
			 * Handle possible pid reuse.  If we recorded
			 * p2 as killed but its p_flag2 does not
			 * confirm it, that means that the process
			 * terminated and its id was reused by another
			 * process in the reaper subtree.
			 *
			 * Unlocked read of p2->p_flag2 is fine, it is
			 * our thread that set the tested flag.
			 */
			if (alloc_unr_specific(pids, p2->p_pid) != p2->p_pid &&
			    (atomic_load_int(&p2->p_flag2) &
			    (P2_REAPKILLED | P2_WEXIT)) != 0)
				continue;

			if (p2 == td->td_proc) {
				if ((p2->p_flag & P_HADTHREADS) != 0 &&
				    (p2->p_flag2 & P2_WEXIT) == 0) {
					xlocked = sx_xlocked(&proctree_lock);
					sx_unlock(&proctree_lock);
					st = true;
				} else {
					st = false;
				}
				PROC_LOCK(p2);
				/*
				 * sapblk ensures that only one thread
				 * in the system sets this flag.
				 */
				p2->p_flag2 |= P2_REAPKILLED;
				if (st)
					r = thread_single(p2, SINGLE_NO_EXIT);
				(void)pksignal(p2, w->rk->rk_sig, w->ksi);
				w->rk->rk_killed++;
				if (st && r == 0)
					thread_single_end(p2, SINGLE_NO_EXIT);
				PROC_UNLOCK(p2);
				if (st) {
					if (xlocked)
						sx_xlock(&proctree_lock);
					else
						sx_slock(&proctree_lock);
				}
			} else {
				PROC_LOCK(p2);
				if ((p2->p_flag2 & P2_WEXIT) == 0) {
					_PHOLD(p2);
					p2->p_flag2 |= P2_REAPKILLED;
					PROC_UNLOCK(p2);
					w->target = p2;
					taskqueue_enqueue(taskqueue_thread,
					    &w->t);
					while (w->target != NULL) {
						sx_sleep(&w->target,
						    &proctree_lock, PWAIT,
						    "reapst", 0);
					}
					PROC_LOCK(p2);
					_PRELE(p2);
				}
				PROC_UNLOCK(p2);
			}
			res = true;
		}
		reap_kill_sched_free(t);
	}
	return (res);
}

static void
reap_kill_subtree(struct thread *td, struct proc *p, struct proc *reaper,
    struct reap_kill_proc_work *w)
{
	struct unrhdr pids;
	void *ihandle;
	struct proc *p2;
	int pid;

	/*
	 * pids records the processes which have already been
	 * signalled, to avoid signalling them again if the iteration
	 * needs to be repeated.
	 */
	init_unrhdr(&pids, 1, PID_MAX, UNR_NO_MTX);
	PROC_LOCK(td->td_proc);
	if ((td->td_proc->p_flag2 & P2_WEXIT) != 0) {
		PROC_UNLOCK(td->td_proc);
		goto out;
	}
	PROC_UNLOCK(td->td_proc);
	while (reap_kill_subtree_once(td, p, reaper, &pids, w))
		;

	ihandle = create_iter_unr(&pids);
	while ((pid = next_iter_unr(ihandle)) != -1) {
		p2 = pfind(pid);
		if (p2 != NULL) {
			p2->p_flag2 &= ~P2_REAPKILLED;
			PROC_UNLOCK(p2);
		}
	}
	free_iter_unr(ihandle);

out:
	clean_unrhdr(&pids);
	clear_unrhdr(&pids);
}

static bool
reap_kill_sapblk(struct thread *td __unused, void *data)
{
	struct procctl_reaper_kill *rk;

	rk = data;
	return ((rk->rk_flags & REAPER_KILL_CHILDREN) == 0);
}

static int
reap_kill(struct thread *td, struct proc *p, void *data)
{
	struct reap_kill_proc_work w;
	struct proc *reaper;
	ksiginfo_t ksi;
	struct procctl_reaper_kill *rk;
	int error;

	rk = data;
	sx_assert(&proctree_lock, SX_LOCKED);
	if (CAP_TRACING(td))
		ktrcapfail(CAPFAIL_SIGNAL, &rk->rk_sig);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
		return (EINVAL);
	PROC_UNLOCK(p);
	reaper = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
		reap_kill_children(td, reaper, rk, &ksi, &error);
	} else {
		w.cr = crhold(td->td_ucred);
		w.ksi = &ksi;
		w.rk = rk;
		w.error = &error;
		TASK_INIT(&w.t, 0, reap_kill_proc_work, &w);
		reap_kill_subtree(td, p, reaper, &w);
		crfree(w.cr);
	}
	PROC_LOCK(p);
	return (error);
}
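
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * reaper delivers SIGTERM to every process in its subtree; on return
 * rk_killed reports how many processes were signalled and rk_fpid
 * identifies the first process that could not be signalled:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <signal.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct procctl_reaper_kill rk;
 *	memset(&rk, 0, sizeof(rk));
 *	rk.rk_sig = SIGTERM;
 *	if (procctl(P_PID, getpid(), PROC_REAP_KILL, &rk) == -1)
 *		err(1, "PROC_REAP_KILL");
 */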

static int
trace_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	/*
	 * Ktrace changes p_traceflag from or to zero under the
	 * process lock, so the test does not need to acquire ktrace
	 * mutex.
	 */
	if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
		return (EBUSY);

	switch (state) {
	case PROC_TRACE_CTL_ENABLE:
		if (td->td_proc != p)
			return (EPERM);
		p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
		break;
	case PROC_TRACE_CTL_DISABLE_EXEC:
		p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
		break;
	case PROC_TRACE_CTL_DISABLE:
		if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
			KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
			    ("dangling P2_NOTRACE_EXEC"));
			if (td->td_proc != p)
				return (EPERM);
			p->p_flag2 &= ~P2_NOTRACE_EXEC;
		} else {
			p->p_flag2 |= P2_NOTRACE;
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trace_status(struct thread *td, struct proc *p, void *data)
{
	int *status;

	status = data;
	if ((p->p_flag2 & P2_NOTRACE) != 0) {
		KASSERT((p->p_flag & P_TRACED) == 0,
		    ("%d traced but tracing disabled", p->p_pid));
		*status = -1;
	} else if ((p->p_flag & P_TRACED) != 0) {
		*status = p->p_pptr->p_pid;
	} else {
		*status = 0;
	}
	return (0);
}
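
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * process can make itself undebuggable, then query the result (-1:
 * tracing disabled, 0: not traced, otherwise the pid of the tracer):
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int state = PROC_TRACE_CTL_DISABLE;
 *	if (procctl(P_PID, getpid(), PROC_TRACE_CTL, &state) == -1)
 *		err(1, "PROC_TRACE_CTL");
 *	if (procctl(P_PID, getpid(), PROC_TRACE_STATUS, &state) == -1)
 *		err(1, "PROC_TRACE_STATUS");
 */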

static int
trapcap_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	switch (state) {
	case PROC_TRAPCAP_CTL_ENABLE:
		p->p_flag2 |= P2_TRAPCAP;
		break;
	case PROC_TRAPCAP_CTL_DISABLE:
		p->p_flag2 &= ~P2_TRAPCAP;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
trapcap_status(struct thread *td, struct proc *p, void *data)
{
	int *status;

	status = data;
	*status = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
	    PROC_TRAPCAP_CTL_DISABLE;
	return (0);
}
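
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * capsicum-sandboxed program may prefer a synchronous SIGTRAP over a
 * capability-mode error return when it performs a forbidden syscall,
 * which eases debugging of the sandbox:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int state = PROC_TRAPCAP_CTL_ENABLE;
 *	if (procctl(P_PID, getpid(), PROC_TRAPCAP_CTL, &state) == -1)
 *		err(1, "PROC_TRAPCAP_CTL");
 */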

static int
no_new_privs_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	if (state != PROC_NO_NEW_PRIVS_ENABLE)
		return (EINVAL);
	p->p_flag2 |= P2_NO_NEW_PRIVS;
	return (0);
}

static int
no_new_privs_status(struct thread *td, struct proc *p, void *data)
{

	*(int *)data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
	    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
	return (0);
}

static int
protmax_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	switch (state) {
	case PROC_PROTMAX_FORCE_ENABLE:
		p->p_flag2 &= ~P2_PROTMAX_DISABLE;
		p->p_flag2 |= P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_FORCE_DISABLE:
		p->p_flag2 |= P2_PROTMAX_DISABLE;
		p->p_flag2 &= ~P2_PROTMAX_ENABLE;
		break;
	case PROC_PROTMAX_NOFORCE:
		p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
protmax_status(struct thread *td, struct proc *p, void *data)
{
	int d;

	switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
	case 0:
		d = PROC_PROTMAX_NOFORCE;
		break;
	case P2_PROTMAX_ENABLE:
		d = PROC_PROTMAX_FORCE_ENABLE;
		break;
	case P2_PROTMAX_DISABLE:
		d = PROC_PROTMAX_FORCE_DISABLE;
		break;
	}
	if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
		d |= PROC_PROTMAX_ACTIVE;
	*(int *)data = d;
	return (0);
}

static int
aslr_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	switch (state) {
	case PROC_ASLR_FORCE_ENABLE:
		p->p_flag2 &= ~P2_ASLR_DISABLE;
		p->p_flag2 |= P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_FORCE_DISABLE:
		p->p_flag2 |= P2_ASLR_DISABLE;
		p->p_flag2 &= ~P2_ASLR_ENABLE;
		break;
	case PROC_ASLR_NOFORCE:
		p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
aslr_status(struct thread *td, struct proc *p, void *data)
{
	struct vmspace *vm;
	int d;

	switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
	case 0:
		d = PROC_ASLR_NOFORCE;
		break;
	case P2_ASLR_ENABLE:
		d = PROC_ASLR_FORCE_ENABLE;
		break;
	case P2_ASLR_DISABLE:
		d = PROC_ASLR_FORCE_DISABLE;
		break;
	}
	if ((p->p_flag & P_WEXIT) == 0) {
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			if ((vm->vm_map.flags & MAP_ASLR) != 0)
				d |= PROC_ASLR_ACTIVE;
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
	}
	*(int *)data = d;
	return (0);
}
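
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * debugging harness disables ASLR for images it subsequently execs;
 * the ctl takes effect on the next exec, and the status reply or's in
 * PROC_ASLR_ACTIVE when the current address space is randomized:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int state = PROC_ASLR_FORCE_DISABLE;
 *	if (procctl(P_PID, getpid(), PROC_ASLR_CTL, &state) == -1)
 *		err(1, "PROC_ASLR_CTL");
 */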

static int
stackgap_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
	    PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
		return (EINVAL);
	switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
	case PROC_STACKGAP_ENABLE:
		if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
			return (EINVAL);
		break;
	case PROC_STACKGAP_DISABLE:
		p->p_flag2 |= P2_STKGAP_DISABLE;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	switch (state & (PROC_STACKGAP_ENABLE_EXEC |
	    PROC_STACKGAP_DISABLE_EXEC)) {
	case PROC_STACKGAP_ENABLE_EXEC:
		p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
		break;
	case PROC_STACKGAP_DISABLE_EXEC:
		p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
		break;
	case 0:
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
stackgap_status(struct thread *td, struct proc *p, void *data)
{
	int d;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	d = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
	    PROC_STACKGAP_ENABLE;
	d |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
	    PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
	*(int *)data = d;
	return (0);
}
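
/*
 * Illustrative userland usage (a sketch, not part of this file): note
 * that the stack gap cannot be re-enabled for the current image once
 * disabled (the ENABLE case above only validates), so a program that
 * needs a predictable stack layout typically requests the change for
 * images spawned by future execs:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int state = PROC_STACKGAP_DISABLE_EXEC;
 *	if (procctl(P_PID, getpid(), PROC_STACKGAP_CTL, &state) == -1)
 *		err(1, "PROC_STACKGAP_CTL");
 */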

static int
wxmap_ctl(struct thread *td, struct proc *p, void *data)
{
	struct vmspace *vm;
	vm_map_t map;
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_WEXIT) != 0)
		return (ESRCH);
	state = *(int *)data;

	switch (state) {
	case PROC_WX_MAPPINGS_PERMIT:
		p->p_flag2 |= P2_WXORX_DISABLE;
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			map = &vm->vm_map;
			vm_map_lock(map);
			map->flags &= ~MAP_WXORX;
			vm_map_unlock(map);
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
		break;
	case PROC_WX_MAPPINGS_DISALLOW_EXEC:
		p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

static int
wxmap_status(struct thread *td, struct proc *p, void *data)
{
	struct vmspace *vm;
	int d;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_WEXIT) != 0)
		return (ESRCH);

	d = 0;
	if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
		d |= PROC_WX_MAPPINGS_PERMIT;
	if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
		d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm != NULL) {
		if ((vm->vm_map.flags & MAP_WXORX) != 0)
			d |= PROC_WXORX_ENFORCE;
		vmspace_free(vm);
	}
	PROC_LOCK(p);
	_PRELE(p);
	*(int *)data = d;
	return (0);
}
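
/*
 * Illustrative userland usage (a sketch, not part of this file): a JIT
 * compiler that must create writable-and-executable mappings asks for
 * an exemption from W^X enforcement in its own address space:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int state = PROC_WX_MAPPINGS_PERMIT;
 *	if (procctl(P_PID, getpid(), PROC_WXMAP_CTL, &state) == -1)
 *		err(1, "PROC_WXMAP_CTL");
 */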

static int
pdeathsig_ctl(struct thread *td, struct proc *p, void *data)
{
	int signum;

	signum = *(int *)data;
	if (p != td->td_proc || (signum != 0 && !_SIG_VALID(signum)))
		return (EINVAL);
	p->p_pdeathsig = signum;
	return (0);
}

static int
pdeathsig_status(struct thread *td, struct proc *p, void *data)
{
	if (p != td->td_proc)
		return (EINVAL);
	*(int *)data = p->p_pdeathsig;
	return (0);
}
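
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * worker process asks to be sent SIGTERM when its parent exits; the id
 * must denote the calling process (0 or getpid()):
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <signal.h>
 *
 *	int sig = SIGTERM;
 *	if (procctl(P_PID, 0, PROC_PDEATHSIG_CTL, &sig) == -1)
 *		err(1, "PROC_PDEATHSIG_CTL");
 */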

static int
logsigexit_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	switch (state) {
	case PROC_LOGSIGEXIT_CTL_NOFORCE:
		p->p_flag2 &= ~(P2_LOGSIGEXIT_CTL | P2_LOGSIGEXIT_ENABLE);
		break;
	case PROC_LOGSIGEXIT_CTL_FORCE_ENABLE:
		p->p_flag2 |= P2_LOGSIGEXIT_CTL | P2_LOGSIGEXIT_ENABLE;
		break;
	case PROC_LOGSIGEXIT_CTL_FORCE_DISABLE:
		p->p_flag2 |= P2_LOGSIGEXIT_CTL;
		p->p_flag2 &= ~P2_LOGSIGEXIT_ENABLE;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
logsigexit_status(struct thread *td, struct proc *p, void *data)
{
	int state;

	if ((p->p_flag2 & P2_LOGSIGEXIT_CTL) == 0)
		state = PROC_LOGSIGEXIT_CTL_NOFORCE;
	else if ((p->p_flag2 & P2_LOGSIGEXIT_ENABLE) != 0)
		state = PROC_LOGSIGEXIT_CTL_FORCE_ENABLE;
	else
		state = PROC_LOGSIGEXIT_CTL_FORCE_DISABLE;
	*(int *)data = state;
	return (0);
}

enum {
	PCTL_SLOCKED,
	PCTL_XLOCKED,
	PCTL_UNLOCKED,
};

struct procctl_cmd_info {
	int lock_tree;
	bool one_proc : 1;
	bool esrch_is_einval : 1;
	bool copyout_on_error : 1;
	bool no_nonnull_data : 1;
	bool need_candebug : 1;
	int copyin_sz;
	int copyout_sz;
	int (*exec)(struct thread *, struct proc *, void *);
	bool (*sapblk)(struct thread *, void *);
};
static const struct procctl_cmd_info procctl_cmds_info[] = {
	[PROC_SPROTECT] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = protect_set, .copyout_on_error = false, },
	[PROC_REAP_ACQUIRE] =
	    { .lock_tree = PCTL_XLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = true,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = 0,
	      .exec = reap_acquire, .copyout_on_error = false, },
	[PROC_REAP_RELEASE] =
	    { .lock_tree = PCTL_XLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = true,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = 0,
	      .exec = reap_release, .copyout_on_error = false, },
	[PROC_REAP_STATUS] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0,
	      .copyout_sz = sizeof(struct procctl_reaper_status),
	      .exec = reap_status, .copyout_on_error = false, },
	[PROC_REAP_GETPIDS] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(struct procctl_reaper_pids),
	      .copyout_sz = 0,
	      .exec = reap_getpids, .copyout_on_error = false, },
	[PROC_REAP_KILL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(struct procctl_reaper_kill),
	      .copyout_sz = sizeof(struct procctl_reaper_kill),
	      .exec = reap_kill, .copyout_on_error = true,
	      .sapblk = reap_kill_sapblk, },
	[PROC_TRACE_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = trace_ctl, .copyout_on_error = false, },
	[PROC_TRACE_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = trace_status, .copyout_on_error = false, },
	[PROC_TRAPCAP_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = trapcap_ctl, .copyout_on_error = false, },
	[PROC_TRAPCAP_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = trapcap_status, .copyout_on_error = false, },
	[PROC_PDEATHSIG_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = true, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = pdeathsig_ctl, .copyout_on_error = false, },
	[PROC_PDEATHSIG_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = true, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = pdeathsig_status, .copyout_on_error = false, },
	[PROC_ASLR_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = aslr_ctl, .copyout_on_error = false, },
	[PROC_ASLR_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = aslr_status, .copyout_on_error = false, },
	[PROC_PROTMAX_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = protmax_ctl, .copyout_on_error = false, },
	[PROC_PROTMAX_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = protmax_status, .copyout_on_error = false, },
	[PROC_STACKGAP_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = stackgap_ctl, .copyout_on_error = false, },
	[PROC_STACKGAP_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = stackgap_status, .copyout_on_error = false, },
	[PROC_NO_NEW_PRIVS_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = no_new_privs_ctl, .copyout_on_error = false, },
	[PROC_NO_NEW_PRIVS_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = no_new_privs_status, .copyout_on_error = false, },
	[PROC_WXMAP_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = wxmap_ctl, .copyout_on_error = false, },
	[PROC_WXMAP_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = wxmap_status, .copyout_on_error = false, },
	[PROC_LOGSIGEXIT_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = logsigexit_ctl, .copyout_on_error = false, },
	[PROC_LOGSIGEXIT_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = logsigexit_status, .copyout_on_error = false, },
};

int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	union {
		struct procctl_reaper_status rs;
		struct procctl_reaper_pids rp;
		struct procctl_reaper_kill rk;
		int flags;
	} x;
	const struct procctl_cmd_info *cmd_info;
	int error, error1;

	if (uap->com >= PROC_PROCCTL_MD_MIN)
		return (cpu_procctl(td, uap->idtype, uap->id,
		    uap->com, uap->data));
	if (uap->com <= 0 || uap->com >= nitems(procctl_cmds_info))
		return (EINVAL);
	cmd_info = &procctl_cmds_info[uap->com];
	bzero(&x, sizeof(x));

	if (cmd_info->copyin_sz > 0) {
		error = copyin(uap->data, &x, cmd_info->copyin_sz);
		if (error != 0)
			return (error);
	} else if (cmd_info->no_nonnull_data && uap->data != NULL) {
		return (EINVAL);
	}

	error = kern_procctl(td, uap->idtype, uap->id, uap->com, &x);

	if (cmd_info->copyout_sz > 0 && (error == 0 ||
	    cmd_info->copyout_on_error)) {
		error1 = copyout(&x, uap->data, cmd_info->copyout_sz);
		if (error == 0)
			error = error1;
	}
	return (error);
}
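
/*
 * Illustrative userland usage (a sketch, not part of this file):
 * commands whose table entry has .one_proc = false accept P_PGID ids
 * and are applied to every visible member of the process group, e.g.:
 *
 *	#include <sys/procctl.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int state = PROC_TRAPCAP_CTL_ENABLE;
 *	if (procctl(P_PGID, getpgrp(), PROC_TRAPCAP_CTL, &state) == -1)
 *		err(1, "PROC_TRAPCAP_CTL");
 */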

static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (procctl_cmds_info[com].exec(td, p, data));
}

int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	const struct procctl_cmd_info *cmd_info;
	int error, first_error, ok;
	bool sapblk;

	MPASS(com > 0 && com < nitems(procctl_cmds_info));
	cmd_info = &procctl_cmds_info[com];
	if (idtype != P_PID && cmd_info->one_proc)
		return (EINVAL);

	sapblk = false;
	if (cmd_info->sapblk != NULL) {
		sapblk = cmd_info->sapblk(td, data);
		if (sapblk && !stop_all_proc_block())
			return (ERESTART);
	}

	switch (cmd_info->lock_tree) {
	case PCTL_XLOCKED:
		sx_xlock(&proctree_lock);
		break;
	case PCTL_SLOCKED:
		sx_slock(&proctree_lock);
		break;
	default:
		break;
	}

	switch (idtype) {
	case P_PID:
		if (id == 0) {
			p = td->td_proc;
			error = 0;
			PROC_LOCK(p);
		} else {
			p = pfind(id);
			if (p == NULL) {
				error = cmd_info->esrch_is_einval ?
				    EINVAL : ESRCH;
				break;
			}
			error = cmd_info->need_candebug ? p_candebug(td, p) :
			    p_cansee(td, p);
		}
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process
		 * is able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW ||
			    p->p_state == PRS_ZOMBIE ||
			    (cmd_info->need_candebug ? p_candebug(td, p) :
			    p_cansee(td, p)) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}

	switch (cmd_info->lock_tree) {
	case PCTL_XLOCKED:
		sx_xunlock(&proctree_lock);
		break;
	case PCTL_SLOCKED:
		sx_sunlock(&proctree_lock);
		break;
	default:
		break;
	}
	if (sapblk)
		stop_all_proc_unblock();
	return (error);
}