xref: /freebsd/sys/kern/kern_proc.c (revision f0a75d274af375d15b97b830966b99a02b7db911)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_ktrace.h"
36 #include "opt_kstack_pages.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/refcount.h>
46 #include <sys/sysent.h>
47 #include <sys/sched.h>
48 #include <sys/smp.h>
49 #include <sys/sysctl.h>
50 #include <sys/filedesc.h>
51 #include <sys/tty.h>
52 #include <sys/signalvar.h>
53 #include <sys/sx.h>
54 #include <sys/user.h>
55 #include <sys/jail.h>
56 #include <sys/vnode.h>
57 #ifdef KTRACE
58 #include <sys/uio.h>
59 #include <sys/ktrace.h>
60 #endif
61 
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_map.h>
66 #include <vm/uma.h>
67 
68 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
69 MALLOC_DEFINE(M_SESSION, "session", "session header");
70 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
71 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
72 
73 static void doenterpgrp(struct proc *, struct pgrp *);
74 static void orphanpg(struct pgrp *pg);
75 static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
76 static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
77 static void pgadjustjobc(struct pgrp *pgrp, int entering);
78 static void pgdelete(struct pgrp *);
79 static int proc_ctor(void *mem, int size, void *arg, int flags);
80 static void proc_dtor(void *mem, int size, void *arg);
81 static int proc_init(void *mem, int size, int flags);
82 static void proc_fini(void *mem, int size);
83 
84 /*
85  * Other process lists
86  */
87 struct pidhashhead *pidhashtbl;
88 u_long pidhash;
89 struct pgrphashhead *pgrphashtbl;
90 u_long pgrphash;
91 struct proclist allproc;
92 struct proclist zombproc;
93 struct sx allproc_lock;
94 struct sx proctree_lock;
95 struct mtx ppeers_lock;
96 uma_zone_t proc_zone;
97 uma_zone_t ithread_zone;
98 
99 int kstack_pages = KSTACK_PAGES;
100 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
    "Kernel stack size in pages");
101 
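/*
 * The exported kinfo_proc layout is part of the ABI; catch accidental
 * size changes at compile time.
 */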
102 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
103 
104 /*
105  * Initialize global process hashing structures.
106  */
107 void
108 procinit()
109 {
110 
111 	sx_init(&allproc_lock, "allproc");
112 	sx_init(&proctree_lock, "proctree");
113 	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
114 	LIST_INIT(&allproc);
115 	LIST_INIT(&zombproc);
116 	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
117 	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
118 	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
119 	    proc_ctor, proc_dtor, proc_init, proc_fini,
120 	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
121 	uihashinit();
122 }
123 
124 /*
125  * Prepare a proc for use.
126  */
127 static int
128 proc_ctor(void *mem, int size, void *arg, int flags)
129 {
130 	struct proc *p;
131 
132 	p = (struct proc *)mem;
133 	return (0);
134 }
135 
136 /*
137  * Reclaim a proc after use.
138  */
139 static void
140 proc_dtor(void *mem, int size, void *arg)
141 {
142 	struct proc *p;
143 	struct thread *td;
144 
145 	/* INVARIANTS checks go here */
146 	p = (struct proc *)mem;
147 	td = FIRST_THREAD_IN_PROC(p);
148 #ifdef INVARIANTS
149 	KASSERT((p->p_numthreads == 1),
150 	    ("bad number of threads in exiting process"));
151 	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
152 	KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
153 #endif
154 
155 	/* Dispose of an alternate kstack, if it exists.
156 	 * XXX What if there is more than one thread in the proc?
157 	 *     The first thread in the proc is special and not
158 	 *     freed, so it must be handled here.
159 	 */
160 	if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
161 		vm_thread_dispose_altkstack(td);
162 	if (p->p_ksi != NULL)
163 		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
164 }
165 
166 /*
167  * Initialize type-stable parts of a proc (when newly created).
168  */
169 static int
170 proc_init(void *mem, int size, int flags)
171 {
172 	struct proc *p;
173 	struct thread *td;
174 
175 	p = (struct proc *)mem;
176 	p->p_sched = (struct p_sched *)&p[1];
177 	td = thread_alloc();
178 	bzero(&p->p_mtx, sizeof(struct mtx));
179 	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
180 	p->p_stats = pstats_alloc();
181 	proc_linkup(p, td);
182 	sched_newproc(p, td);
183 	return (0);
184 }
185 
186 /*
187  * UMA should ensure that this function is never called.
188  * Freeing a proc structure would violate type stability.
189  */
190 static void
191 proc_fini(void *mem, int size)
192 {
193 #ifdef notnow
194 	struct proc *p;
195 
196 	p = (struct proc *)mem;
197 	pstats_free(p->p_stats);
198 	thread_free(FIRST_THREAD_IN_PROC(p));
199 	mtx_destroy(&p->p_mtx);
200 	if (p->p_ksi != NULL)
201 		ksiginfo_free(p->p_ksi);
202 #else
203 	panic("proc reclaimed");
204 #endif
205 }
206 
207 /*
208  * Is p an inferior of the current process?
209  */
210 int
211 inferior(p)
212 	register struct proc *p;
213 {
214 
215 	sx_assert(&proctree_lock, SX_LOCKED);
216 	for (; p != curproc; p = p->p_pptr)
217 		if (p->p_pid == 0)
218 			return (0);
219 	return (1);
220 }
221 
222 /*
223  * Locate a process by number; return only "live" processes -- i.e., neither
224  * zombies nor newly born but incompletely initialized processes.  By not
225  * returning processes in the PRS_NEW state, we allow callers to avoid
226  * testing for that condition to avoid dereferencing p_ucred, et al.
227  */
228 struct proc *
229 pfind(pid)
230 	register pid_t pid;
231 {
232 	register struct proc *p;
233 
234 	sx_slock(&allproc_lock);
235 	LIST_FOREACH(p, PIDHASH(pid), p_hash)
236 		if (p->p_pid == pid) {
237 			if (p->p_state == PRS_NEW) {
238 				p = NULL;
239 				break;
240 			}
241 			PROC_LOCK(p);
242 			break;
243 		}
244 	sx_sunlock(&allproc_lock);
245 	return (p);
246 }
247 
248 /*
249  * Locate a process group by number.
250  * The caller must hold proctree_lock.
251  */
252 struct pgrp *
253 pgfind(pgid)
254 	register pid_t pgid;
255 {
256 	register struct pgrp *pgrp;
257 
258 	sx_assert(&proctree_lock, SX_LOCKED);
259 
260 	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
261 		if (pgrp->pg_id == pgid) {
262 			PGRP_LOCK(pgrp);
263 			return (pgrp);
264 		}
265 	}
266 	return (NULL);
267 }
268 
269 /*
270  * Create a new process group.
271  * pgid must be equal to the pid of p.
272  * Begin a new session if required.
273  */
274 int
275 enterpgrp(p, pgid, pgrp, sess)
276 	register struct proc *p;
277 	pid_t pgid;
278 	struct pgrp *pgrp;
279 	struct session *sess;
280 {
281 	struct pgrp *pgrp2;
282 
283 	sx_assert(&proctree_lock, SX_XLOCKED);
284 
285 	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
286 	KASSERT(p->p_pid == pgid,
287 	    ("enterpgrp: new pgrp and pid != pgid"));
288 
289 	pgrp2 = pgfind(pgid);
290 
291 	KASSERT(pgrp2 == NULL,
292 	    ("enterpgrp: pgrp with pgid exists"));
293 	KASSERT(!SESS_LEADER(p),
294 	    ("enterpgrp: session leader attempted setpgrp"));
295 
296 	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
297 
298 	if (sess != NULL) {
299 		/*
300 		 * new session
301 		 */
302 		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
303 		mtx_lock(&Giant);       /* XXX TTY */
304 		PROC_LOCK(p);
305 		p->p_flag &= ~P_CONTROLT;
306 		PROC_UNLOCK(p);
307 		PGRP_LOCK(pgrp);
308 		sess->s_leader = p;
309 		sess->s_sid = p->p_pid;
310 		sess->s_count = 1;
311 		sess->s_ttyvp = NULL;
312 		sess->s_ttyp = NULL;
313 		bcopy(p->p_session->s_login, sess->s_login,
314 			    sizeof(sess->s_login));
315 		pgrp->pg_session = sess;
316 		KASSERT(p == curproc,
317 		    ("enterpgrp: mksession and p != curproc"));
318 	} else {
319 		mtx_lock(&Giant);       /* XXX TTY */
320 		pgrp->pg_session = p->p_session;
321 		SESS_LOCK(pgrp->pg_session);
322 		pgrp->pg_session->s_count++;
323 		SESS_UNLOCK(pgrp->pg_session);
324 		PGRP_LOCK(pgrp);
325 	}
326 	pgrp->pg_id = pgid;
327 	LIST_INIT(&pgrp->pg_members);
328 
329 	/*
330 	 * As we have an exclusive lock of proctree_lock,
331 	 * this should not deadlock.
332 	 */
333 	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
334 	pgrp->pg_jobc = 0;
335 	SLIST_INIT(&pgrp->pg_sigiolst);
336 	PGRP_UNLOCK(pgrp);
337 	mtx_unlock(&Giant);       /* XXX TTY */
338 
339 	doenterpgrp(p, pgrp);
340 
341 	return (0);
342 }
343 
344 /*
345  * Move p to an existing process group
346  */
347 int
348 enterthispgrp(p, pgrp)
349 	register struct proc *p;
350 	struct pgrp *pgrp;
351 {
352 
353 	sx_assert(&proctree_lock, SX_XLOCKED);
354 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
355 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
356 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
357 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
358 	KASSERT(pgrp->pg_session == p->p_session,
359 		("%s: pgrp's session %p, p->p_session %p.\n",
360 		__func__,
361 		pgrp->pg_session,
362 		p->p_session));
363 	KASSERT(pgrp != p->p_pgrp,
364 		("%s: p belongs to pgrp.", __func__));
365 
366 	doenterpgrp(p, pgrp);
367 
368 	return (0);
369 }
370 
371 /*
372  * Move p to a process group
373  */
374 static void
375 doenterpgrp(p, pgrp)
376 	struct proc *p;
377 	struct pgrp *pgrp;
378 {
379 	struct pgrp *savepgrp;
380 
381 	sx_assert(&proctree_lock, SX_XLOCKED);
382 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
383 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
384 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
385 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
386 
387 	savepgrp = p->p_pgrp;
388 
389 	/*
390 	 * Adjust eligibility of affected pgrps to participate in job control.
391 	 * Increment eligibility counts before decrementing, otherwise we
392 	 * could reach 0 spuriously during the first call.
393 	 */
394 	fixjobc(p, pgrp, 1);
395 	fixjobc(p, p->p_pgrp, 0);
396 
397 	mtx_lock(&Giant);       /* XXX TTY */
398 	PGRP_LOCK(pgrp);
399 	PGRP_LOCK(savepgrp);
400 	PROC_LOCK(p);
401 	LIST_REMOVE(p, p_pglist);
402 	p->p_pgrp = pgrp;
403 	PROC_UNLOCK(p);
404 	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
405 	PGRP_UNLOCK(savepgrp);
406 	PGRP_UNLOCK(pgrp);
407 	mtx_unlock(&Giant);     /* XXX TTY */
408 	if (LIST_EMPTY(&savepgrp->pg_members))
409 		pgdelete(savepgrp);
410 }
411 
412 /*
413  * remove process from process group
414  */
415 int
416 leavepgrp(p)
417 	register struct proc *p;
418 {
419 	struct pgrp *savepgrp;
420 
421 	sx_assert(&proctree_lock, SX_XLOCKED);
422 	savepgrp = p->p_pgrp;
423 	mtx_lock(&Giant);	/* XXX TTY */
424 	PGRP_LOCK(savepgrp);
425 	PROC_LOCK(p);
426 	LIST_REMOVE(p, p_pglist);
427 	p->p_pgrp = NULL;
428 	PROC_UNLOCK(p);
429 	PGRP_UNLOCK(savepgrp);
430 	mtx_unlock(&Giant);	/* XXX TTY */
431 	if (LIST_EMPTY(&savepgrp->pg_members))
432 		pgdelete(savepgrp);
433 	return (0);
434 }
435 
436 /*
437  * delete a process group
438  */
439 static void
440 pgdelete(pgrp)
441 	register struct pgrp *pgrp;
442 {
443 	struct session *savesess;
444 
445 	sx_assert(&proctree_lock, SX_XLOCKED);
446 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
447 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
448 
449 	/*
450 	 * Reset any sigio structures pointing to us as a result of
451 	 * F_SETOWN with our pgid.
452 	 */
453 	funsetownlst(&pgrp->pg_sigiolst);
454 
455 	mtx_lock(&Giant);       /* XXX TTY */
456 	PGRP_LOCK(pgrp);
457 	if (pgrp->pg_session->s_ttyp != NULL &&
458 	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
459 		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
460 	LIST_REMOVE(pgrp, pg_hash);
461 	savesess = pgrp->pg_session;
462 	SESSRELE(savesess);
463 	PGRP_UNLOCK(pgrp);
464 	mtx_destroy(&pgrp->pg_mtx);
465 	FREE(pgrp, M_PGRP);
466 	mtx_unlock(&Giant);     /* XXX TTY */
467 }
468 
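/*
 * Adjust the job-control count of a single process group: bump it when a
 * qualifying process enters the group, drop it when one leaves, and treat
 * a count of zero as the group becoming orphaned.
 */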
469 static void
470 pgadjustjobc(pgrp, entering)
471 	struct pgrp *pgrp;
472 	int entering;
473 {
474 
475 	PGRP_LOCK(pgrp);
476 	if (entering)
477 		pgrp->pg_jobc++;
478 	else {
479 		--pgrp->pg_jobc;
480 		if (pgrp->pg_jobc == 0)
481 			orphanpg(pgrp);
482 	}
483 	PGRP_UNLOCK(pgrp);
484 }
485 
486 /*
487  * Adjust pgrp jobc counters when specified process changes process group.
488  * We count the number of processes in each process group that "qualify"
489  * the group for terminal job control (those with a parent in a different
490  * process group of the same session).  If that count reaches zero, the
491  * process group becomes orphaned.  Check both the specified process'
492  * process group and that of its children.
493  * entering == 0 => p is leaving specified group.
494  * entering == 1 => p is entering specified group.
495  */
496 void
497 fixjobc(p, pgrp, entering)
498 	register struct proc *p;
499 	register struct pgrp *pgrp;
500 	int entering;
501 {
502 	register struct pgrp *hispgrp;
503 	register struct session *mysession;
504 
505 	sx_assert(&proctree_lock, SX_LOCKED);
506 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
507 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
508 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
509 
510 	/*
511 	 * Check p's parent to see whether p qualifies its own process
512 	 * group; if so, adjust count for p's process group.
513 	 */
514 	mysession = pgrp->pg_session;
515 	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
516 	    hispgrp->pg_session == mysession)
517 		pgadjustjobc(pgrp, entering);
518 
519 	/*
520 	 * Check this process' children to see whether they qualify
521 	 * their process groups; if so, adjust counts for children's
522 	 * process groups.
523 	 */
524 	LIST_FOREACH(p, &p->p_children, p_sibling) {
525 		hispgrp = p->p_pgrp;
526 		if (hispgrp == pgrp ||
527 		    hispgrp->pg_session != mysession)
528 			continue;
529 		PROC_LOCK(p);
530 		if (p->p_state == PRS_ZOMBIE) {
531 			PROC_UNLOCK(p);
532 			continue;
533 		}
534 		PROC_UNLOCK(p);
535 		pgadjustjobc(hispgrp, entering);
536 	}
537 }
538 
539 /*
540  * A process group has become orphaned;
541  * if there are any stopped processes in the group,
542  * hang up all processes in that group.
543  */
544 static void
545 orphanpg(pg)
546 	struct pgrp *pg;
547 {
548 	register struct proc *p;
549 
550 	PGRP_LOCK_ASSERT(pg, MA_OWNED);
551 
552 	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
553 		PROC_LOCK(p);
554 		if (P_SHOULDSTOP(p)) {
555 			PROC_UNLOCK(p);
556 			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
557 				PROC_LOCK(p);
558 				psignal(p, SIGHUP);
559 				psignal(p, SIGCONT);
560 				PROC_UNLOCK(p);
561 			}
562 			return;
563 		}
564 		PROC_UNLOCK(p);
565 	}
566 }
567 
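/*
 * Drop a reference to a session.  When the last reference goes away,
 * release the controlling tty (if any) and free the session itself.
 */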
568 void
569 sessrele(struct session *s)
570 {
571 	int i;
572 
573 	SESS_LOCK(s);
574 	i = --s->s_count;
575 	SESS_UNLOCK(s);
576 	if (i == 0) {
577 		if (s->s_ttyp != NULL)
578 			ttyrel(s->s_ttyp);
579 		mtx_destroy(&s->s_mtx);
580 		FREE(s, M_SESSION);
581 	}
582 }
583 
584 #include "opt_ddb.h"
585 #ifdef DDB
586 #include <ddb/ddb.h>
587 
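/*
 * DDB command "show pgrpdump": walk the pgrp hash table and print every
 * process group along with its member processes.
 */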
588 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
589 {
590 	register struct pgrp *pgrp;
591 	register struct proc *p;
592 	register int i;
593 
594 	for (i = 0; i <= pgrphash; i++) {
595 		if (!LIST_EMPTY(&pgrphashtbl[i])) {
596 			printf("\tindx %d\n", i);
597 			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
598 				printf(
599 			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
600 				    (void *)pgrp, (long)pgrp->pg_id,
601 				    (void *)pgrp->pg_session,
602 				    pgrp->pg_session->s_count,
603 				    (void *)LIST_FIRST(&pgrp->pg_members));
604 				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
605 					printf("\t\tpid %ld addr %p pgrp %p\n",
606 					    (long)p->p_pid, (void *)p,
607 					    (void *)p->p_pgrp);
608 				}
609 			}
610 		}
611 	}
612 }
613 #endif /* DDB */
614 
615 /*
616  * Clear kinfo_proc and fill in any information that is common
617  * to all threads in the process.
618  * Must be called with the target process locked.
619  */
620 static void
621 fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
622 {
623 	struct thread *td0;
624 	struct tty *tp;
625 	struct session *sp;
626 	struct ucred *cred;
627 	struct sigacts *ps;
628 
629 	bzero(kp, sizeof(*kp));
630 
631 	kp->ki_structsize = sizeof(*kp);
632 	kp->ki_paddr = p;
633 	PROC_LOCK_ASSERT(p, MA_OWNED);
634 	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
635 	kp->ki_args = p->p_args;
636 	kp->ki_textvp = p->p_textvp;
637 #ifdef KTRACE
638 	kp->ki_tracep = p->p_tracevp;
639 	mtx_lock(&ktrace_mtx);
640 	kp->ki_traceflag = p->p_traceflag;
641 	mtx_unlock(&ktrace_mtx);
642 #endif
643 	kp->ki_fd = p->p_fd;
644 	kp->ki_vmspace = p->p_vmspace;
645 	kp->ki_flag = p->p_flag;
646 	cred = p->p_ucred;
647 	if (cred) {
648 		kp->ki_uid = cred->cr_uid;
649 		kp->ki_ruid = cred->cr_ruid;
650 		kp->ki_svuid = cred->cr_svuid;
651 		/* XXX bde doesn't like KI_NGROUPS */
652 		kp->ki_ngroups = min(cred->cr_ngroups, KI_NGROUPS);
653 		bcopy(cred->cr_groups, kp->ki_groups,
654 		    kp->ki_ngroups * sizeof(gid_t));
655 		kp->ki_rgid = cred->cr_rgid;
656 		kp->ki_svgid = cred->cr_svgid;
657 		/* If jailed(cred), emulate the old P_JAILED flag. */
658 		if (jailed(cred)) {
659 			kp->ki_flag |= P_JAILED;
660 			/* If inside a jail, use 0 as a jail ID. */
661 			if (!jailed(curthread->td_ucred))
662 				kp->ki_jid = cred->cr_prison->pr_id;
663 		}
664 	}
665 	ps = p->p_sigacts;
666 	if (ps) {
667 		mtx_lock(&ps->ps_mtx);
668 		kp->ki_sigignore = ps->ps_sigignore;
669 		kp->ki_sigcatch = ps->ps_sigcatch;
670 		mtx_unlock(&ps->ps_mtx);
671 	}
672 	mtx_lock_spin(&sched_lock);
673 	if (p->p_state != PRS_NEW &&
674 	    p->p_state != PRS_ZOMBIE &&
675 	    p->p_vmspace != NULL) {
676 		struct vmspace *vm = p->p_vmspace;
677 
678 		kp->ki_size = vm->vm_map.size;
679 		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
680 		FOREACH_THREAD_IN_PROC(p, td0) {
681 			if (!TD_IS_SWAPPED(td0))
682 				kp->ki_rssize += td0->td_kstack_pages;
683 			if (td0->td_altkstack_obj != NULL)
684 				kp->ki_rssize += td0->td_altkstack_pages;
685 		}
686 		kp->ki_swrss = vm->vm_swrss;
687 		kp->ki_tsize = vm->vm_tsize;
688 		kp->ki_dsize = vm->vm_dsize;
689 		kp->ki_ssize = vm->vm_ssize;
690 	} else if (p->p_state == PRS_ZOMBIE)
691 		kp->ki_stat = SZOMB;
692 	kp->ki_sflag = p->p_sflag;
693 	kp->ki_swtime = p->p_swtime;
694 	kp->ki_pid = p->p_pid;
695 	kp->ki_nice = p->p_nice;
696 	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
697 	mtx_unlock_spin(&sched_lock);
698 	if ((p->p_sflag & PS_INMEM) && p->p_stats != NULL) {
699 		kp->ki_start = p->p_stats->p_start;
700 		timevaladd(&kp->ki_start, &boottime);
701 		kp->ki_rusage = p->p_stats->p_ru;
702 		calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
703 		calccru(p, &kp->ki_childutime, &kp->ki_childstime);
704 
705 		/* Some callers want child-times in a single value */
706 		kp->ki_childtime = kp->ki_childstime;
707 		timevaladd(&kp->ki_childtime, &kp->ki_childutime);
708 	}
709 	tp = NULL;
710 	if (p->p_pgrp) {
711 		kp->ki_pgid = p->p_pgrp->pg_id;
712 		kp->ki_jobc = p->p_pgrp->pg_jobc;
713 		sp = p->p_pgrp->pg_session;
714 
715 		if (sp != NULL) {
716 			kp->ki_sid = sp->s_sid;
717 			SESS_LOCK(sp);
718 			strlcpy(kp->ki_login, sp->s_login,
719 			    sizeof(kp->ki_login));
720 			if (sp->s_ttyvp)
721 				kp->ki_kiflag |= KI_CTTY;
722 			if (SESS_LEADER(p))
723 				kp->ki_kiflag |= KI_SLEADER;
724 			tp = sp->s_ttyp;
725 			SESS_UNLOCK(sp);
726 		}
727 	}
728 	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
729 		kp->ki_tdev = dev2udev(tp->t_dev);
730 		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
731 		if (tp->t_session)
732 			kp->ki_tsid = tp->t_session->s_sid;
733 	} else
734 		kp->ki_tdev = NODEV;
735 	if (p->p_comm[0] != '\0')
736 		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
737 	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
738 	    p->p_sysent->sv_name[0] != '\0')
739 		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
740 	kp->ki_siglist = p->p_siglist;
741 	kp->ki_xstat = p->p_xstat;
742 	kp->ki_acflag = p->p_acflag;
743 	kp->ki_lock = p->p_lock;
744 	if (p->p_pptr)
745 		kp->ki_ppid = p->p_pptr->p_pid;
746 }
747 
748 /*
749  * Fill in information that is thread specific.
750  * Must be called with sched_lock locked.
751  */
752 static void
753 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
754 {
755 	struct proc *p;
756 
757 	p = td->td_proc;
758 
759 	if (td->td_wmesg != NULL)
760 		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
761 	else
762 		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
763 	if (td->td_name[0] != '\0')
764 		strlcpy(kp->ki_ocomm, td->td_name, sizeof(kp->ki_ocomm));
765 	if (TD_ON_LOCK(td)) {
766 		kp->ki_kiflag |= KI_LOCKBLOCK;
767 		strlcpy(kp->ki_lockname, td->td_lockname,
768 		    sizeof(kp->ki_lockname));
769 	} else {
770 		kp->ki_kiflag &= ~KI_LOCKBLOCK;
771 		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
772 	}
773 
774 	if (p->p_state == PRS_NORMAL) { /*  XXXKSE very approximate */
775 		if (TD_ON_RUNQ(td) ||
776 		    TD_CAN_RUN(td) ||
777 		    TD_IS_RUNNING(td)) {
778 			kp->ki_stat = SRUN;
779 		} else if (P_SHOULDSTOP(p)) {
780 			kp->ki_stat = SSTOP;
781 		} else if (TD_IS_SLEEPING(td)) {
782 			kp->ki_stat = SSLEEP;
783 		} else if (TD_ON_LOCK(td)) {
784 			kp->ki_stat = SLOCK;
785 		} else {
786 			kp->ki_stat = SWAIT;
787 		}
788 	} else if (p->p_state == PRS_ZOMBIE) {
789 		kp->ki_stat = SZOMB;
790 	} else {
791 		kp->ki_stat = SIDL;
792 	}
793 
794 	/* Things in the thread */
795 	kp->ki_wchan = td->td_wchan;
796 	kp->ki_pri.pri_level = td->td_priority;
797 	kp->ki_pri.pri_native = td->td_base_pri;
798 	kp->ki_lastcpu = td->td_lastcpu;
799 	kp->ki_oncpu = td->td_oncpu;
800 	kp->ki_tdflags = td->td_flags;
801 	kp->ki_tid = td->td_tid;
802 	kp->ki_numthreads = p->p_numthreads;
803 	kp->ki_pcb = td->td_pcb;
804 	kp->ki_kstack = (void *)td->td_kstack;
805 	kp->ki_pctcpu = sched_pctcpu(td);
806 	kp->ki_estcpu = td->td_estcpu;
807 	kp->ki_slptime = td->td_slptime;
808 	kp->ki_pri.pri_class = td->td_pri_class;
809 	kp->ki_pri.pri_user = td->td_user_pri;
810 
811 	/* We can't get this anymore, but ps(1) etc. never used it anyway. */
812 	kp->ki_rqindex = 0;
813 
814 	SIGSETOR(kp->ki_siglist, td->td_siglist);
815 	kp->ki_sigmask = td->td_sigmask;
816 }
817 
818 /*
819  * Fill in a kinfo_proc structure for the specified process.
820  * Must be called with the target process locked.
821  */
822 void
823 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
824 {
825 
826 	fill_kinfo_proc_only(p, kp);
827 	mtx_lock_spin(&sched_lock);
828 	if (FIRST_THREAD_IN_PROC(p) != NULL)
829 		fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
830 	mtx_unlock_spin(&sched_lock);
831 }
832 
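/*
 * Allocate a zeroed pstats structure.
 */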
833 struct pstats *
834 pstats_alloc(void)
835 {
836 
837 	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
838 }
839 
840 /*
841  * Copy parts of p_stats; zero the rest of p_stats (statistics).
842  */
843 void
844 pstats_fork(struct pstats *src, struct pstats *dst)
845 {
846 
847 	bzero(&dst->pstat_startzero,
848 	    __rangeof(struct pstats, pstat_startzero, pstat_endzero));
849 	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
850 	    __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
851 }
852 
853 void
854 pstats_free(struct pstats *ps)
855 {
856 
857 	free(ps, M_SUBPROC);
858 }
859 
860 /*
861  * Locate a zombie process by number
862  */
863 struct proc *
864 zpfind(pid_t pid)
865 {
866 	struct proc *p;
867 
868 	sx_slock(&allproc_lock);
869 	LIST_FOREACH(p, &zombproc, p_list)
870 		if (p->p_pid == pid) {
871 			PROC_LOCK(p);
872 			break;
873 		}
874 	sx_sunlock(&allproc_lock);
875 	return (p);
876 }
877 
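/*
 * Private flags passed to sysctl_out_proc(): the low bits carry the
 * caller's zombie-list indicator, and KERN_PROC_NOTHREADS requests a
 * single record per process instead of one per thread.
 */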
878 #define KERN_PROC_ZOMBMASK	0x3
879 #define KERN_PROC_NOTHREADS	0x4
880 
881 /*
882  * Must be called with the process locked and will return with it unlocked.
883  */
884 static int
885 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
886 {
887 	struct thread *td;
888 	struct kinfo_proc kinfo_proc;
889 	int error = 0;
890 	struct proc *np;
891 	pid_t pid = p->p_pid;
892 
893 	PROC_LOCK_ASSERT(p, MA_OWNED);
894 
895 	fill_kinfo_proc_only(p, &kinfo_proc);
896 	if (flags & KERN_PROC_NOTHREADS) {
897 		mtx_lock_spin(&sched_lock);
898 		if (FIRST_THREAD_IN_PROC(p) != NULL)
899 			fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), &kinfo_proc);
900 		mtx_unlock_spin(&sched_lock);
901 		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
902 				   sizeof(kinfo_proc));
903 	} else {
904 		mtx_lock_spin(&sched_lock);
905 		if (FIRST_THREAD_IN_PROC(p) != NULL)
906 			FOREACH_THREAD_IN_PROC(p, td) {
907 				fill_kinfo_thread(td, &kinfo_proc);
908 				error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
909 						   sizeof(kinfo_proc));
910 				if (error)
911 					break;
912 			}
913 		else
914 			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
915 					   sizeof(kinfo_proc));
916 		mtx_unlock_spin(&sched_lock);
917 	}
918 	PROC_UNLOCK(p);
919 	if (error)
920 		return (error);
921 	if (flags & KERN_PROC_ZOMBMASK)
922 		np = zpfind(pid);
923 	else {
924 		if (pid == 0)
925 			return (0);
926 		np = pfind(pid);
927 	}
928 	if (np == NULL)
929 		return (EAGAIN);
930 	if (np != p) {
931 		PROC_UNLOCK(np);
932 		return (EAGAIN);
933 	}
934 	PROC_UNLOCK(np);
935 	return (0);
936 }
937 
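/*
 * Common handler for the kern.proc.* sysctls: return kinfo_proc records
 * for the processes selected by the oid (all, pid, pgrp, session, tty,
 * uid, ruid, gid, rgid, proc), with one record per thread when
 * KERN_PROC_INC_THREAD is set.
 */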
938 static int
939 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
940 {
941 	int *name = (int*) arg1;
942 	u_int namelen = arg2;
943 	struct proc *p;
944 	int flags, doingzomb, oid_number;
945 	int error = 0;
946 
947 	oid_number = oidp->oid_number;
948 	if (oid_number != KERN_PROC_ALL &&
949 	    (oid_number & KERN_PROC_INC_THREAD) == 0)
950 		flags = KERN_PROC_NOTHREADS;
951 	else {
952 		flags = 0;
953 		oid_number &= ~KERN_PROC_INC_THREAD;
954 	}
955 	if (oid_number == KERN_PROC_PID) {
956 		if (namelen != 1)
957 			return (EINVAL);
958 		error = sysctl_wire_old_buffer(req, 0);
959 		if (error)
960 			return (error);
961 		p = pfind((pid_t)name[0]);
962 		if (!p)
963 			return (ESRCH);
964 		if ((error = p_cansee(curthread, p))) {
965 			PROC_UNLOCK(p);
966 			return (error);
967 		}
968 		error = sysctl_out_proc(p, req, flags);
969 		return (error);
970 	}
971 
972 	switch (oid_number) {
973 	case KERN_PROC_ALL:
974 		if (namelen != 0)
975 			return (EINVAL);
976 		break;
977 	case KERN_PROC_PROC:
978 		if (namelen != 0 && namelen != 1)
979 			return (EINVAL);
980 		break;
981 	default:
982 		if (namelen != 1)
983 			return (EINVAL);
984 		break;
985 	}
986 
987 	if (!req->oldptr) {
988 		/* overestimate by 5 procs */
989 		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
990 		if (error)
991 			return (error);
992 	}
993 	error = sysctl_wire_old_buffer(req, 0);
994 	if (error != 0)
995 		return (error);
996 	sx_slock(&allproc_lock);
997 	for (doingzomb = 0; doingzomb < 2; doingzomb++) {
998 		if (!doingzomb)
999 			p = LIST_FIRST(&allproc);
1000 		else
1001 			p = LIST_FIRST(&zombproc);
1002 		for (; p != 0; p = LIST_NEXT(p, p_list)) {
1003 			/*
1004 			 * Skip embryonic processes.
1005 			 */
1006 			mtx_lock_spin(&sched_lock);
1007 			if (p->p_state == PRS_NEW) {
1008 				mtx_unlock_spin(&sched_lock);
1009 				continue;
1010 			}
1011 			mtx_unlock_spin(&sched_lock);
1012 			PROC_LOCK(p);
1013 			KASSERT(p->p_ucred != NULL,
1014 			    ("process credential is NULL for non-NEW proc"));
1015 			/*
1016 			 * Show a user only appropriate processes.
1017 			 */
1018 			if (p_cansee(curthread, p)) {
1019 				PROC_UNLOCK(p);
1020 				continue;
1021 			}
1022 			/*
1023 			 * TODO - make more efficient (see notes below);
1024 			 * e.g., filter by session or pgrp membership.
1025 			 */
1026 			switch (oid_number) {
1027 
1028 			case KERN_PROC_GID:
1029 				if (p->p_ucred->cr_gid != (gid_t)name[0]) {
1030 					PROC_UNLOCK(p);
1031 					continue;
1032 				}
1033 				break;
1034 
1035 			case KERN_PROC_PGRP:
1036 				/* could do this by traversing pgrp */
1037 				if (p->p_pgrp == NULL ||
1038 				    p->p_pgrp->pg_id != (pid_t)name[0]) {
1039 					PROC_UNLOCK(p);
1040 					continue;
1041 				}
1042 				break;
1043 
1044 			case KERN_PROC_RGID:
1045 				if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
1046 					PROC_UNLOCK(p);
1047 					continue;
1048 				}
1049 				break;
1050 
1051 			case KERN_PROC_SESSION:
1052 				if (p->p_session == NULL ||
1053 				    p->p_session->s_sid != (pid_t)name[0]) {
1054 					PROC_UNLOCK(p);
1055 					continue;
1056 				}
1057 				break;
1058 
1059 			case KERN_PROC_TTY:
1060 				if ((p->p_flag & P_CONTROLT) == 0 ||
1061 				    p->p_session == NULL) {
1062 					PROC_UNLOCK(p);
1063 					continue;
1064 				}
1065 				SESS_LOCK(p->p_session);
1066 				if (p->p_session->s_ttyp == NULL ||
1067 				    dev2udev(p->p_session->s_ttyp->t_dev) !=
1068 				    (dev_t)name[0]) {
1069 					SESS_UNLOCK(p->p_session);
1070 					PROC_UNLOCK(p);
1071 					continue;
1072 				}
1073 				SESS_UNLOCK(p->p_session);
1074 				break;
1075 
1076 			case KERN_PROC_UID:
1077 				if (p->p_ucred->cr_uid != (uid_t)name[0]) {
1078 					PROC_UNLOCK(p);
1079 					continue;
1080 				}
1081 				break;
1082 
1083 			case KERN_PROC_RUID:
1084 				if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
1085 					PROC_UNLOCK(p);
1086 					continue;
1087 				}
1088 				break;
1089 
1090 			case KERN_PROC_PROC:
1091 				break;
1092 
1093 			default:
1094 				break;
1095 
1096 			}
1097 
1098 			error = sysctl_out_proc(p, req, flags | doingzomb);
1099 			if (error) {
1100 				sx_sunlock(&allproc_lock);
1101 				return (error);
1102 			}
1103 		}
1104 	}
1105 	sx_sunlock(&allproc_lock);
1106 	return (0);
1107 }
1108 
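/*
 * Allocate a reference-counted process-argument block with room for
 * "len" bytes of argument data.
 */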
1109 struct pargs *
1110 pargs_alloc(int len)
1111 {
1112 	struct pargs *pa;
1113 
1114 	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
1115 		M_WAITOK);
1116 	refcount_init(&pa->ar_ref, 1);
1117 	pa->ar_length = len;
1118 	return (pa);
1119 }
1120 
1121 void
1122 pargs_free(struct pargs *pa)
1123 {
1124 
1125 	FREE(pa, M_PARGS);
1126 }
1127 
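/*
 * pargs_hold() and pargs_drop() manage references on an argument block;
 * both tolerate a NULL pointer, and the block is freed when the last
 * reference is dropped.
 */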
1128 void
1129 pargs_hold(struct pargs *pa)
1130 {
1131 
1132 	if (pa == NULL)
1133 		return;
1134 	refcount_acquire(&pa->ar_ref);
1135 }
1136 
1137 void
1138 pargs_drop(struct pargs *pa)
1139 {
1140 
1141 	if (pa == NULL)
1142 		return;
1143 	if (refcount_release(&pa->ar_ref))
1144 		pargs_free(pa);
1145 }
1146 
1147 /*
1148  * This sysctl allows a process to retrieve the argument list or process
1149  * title for another process without groping around in the address space
1150  * of the other process.  It also allows a process to set its own "process
1151  * title" to a string of its own choice.
1152  */
1153 static int
1154 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1155 {
1156 	int *name = (int*) arg1;
1157 	u_int namelen = arg2;
1158 	struct pargs *newpa, *pa;
1159 	struct proc *p;
1160 	int error = 0;
1161 
1162 	if (namelen != 1)
1163 		return (EINVAL);
1164 
1165 	p = pfind((pid_t)name[0]);
1166 	if (!p)
1167 		return (ESRCH);
1168 
1169 	if ((error = p_cansee(curthread, p)) != 0) {
1170 		PROC_UNLOCK(p);
1171 		return (error);
1172 	}
1173 
1174 	if (req->newptr && curproc != p) {
1175 		PROC_UNLOCK(p);
1176 		return (EPERM);
1177 	}
1178 
1179 	pa = p->p_args;
1180 	pargs_hold(pa);
1181 	PROC_UNLOCK(p);
1182 	if (req->oldptr != NULL && pa != NULL)
1183 		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1184 	pargs_drop(pa);
1185 	if (error != 0 || req->newptr == NULL)
1186 		return (error);
1187 
1188 	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
1189 		return (ENOMEM);
1190 	newpa = pargs_alloc(req->newlen);
1191 	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
1192 	if (error != 0) {
1193 		pargs_free(newpa);
1194 		return (error);
1195 	}
1196 	PROC_LOCK(p);
1197 	pa = p->p_args;
1198 	p->p_args = newpa;
1199 	PROC_UNLOCK(p);
1200 	pargs_drop(pa);
1201 	return (0);
1202 }
1203 
1204 /*
1205  * This sysctl allows a process to retrieve the path of the executable for
1206  * itself or another process.
1207  */
1208 static int
1209 sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
1210 {
1211 	pid_t *pidp = (pid_t *)arg1;
1212 	unsigned int arglen = arg2;
1213 	struct proc *p;
1214 	struct vnode *vp;
1215 	char *retbuf, *freebuf;
1216 	int error;
1217 
1218 	if (arglen != 1)
1219 		return (EINVAL);
1220 	if (*pidp == -1) {	/* -1 means this process */
1221 		p = req->td->td_proc;
1222 	} else {
1223 		p = pfind(*pidp);
1224 		if (p == NULL)
1225 			return (ESRCH);
1226 		if ((error = p_cansee(curthread, p)) != 0) {
1227 			PROC_UNLOCK(p);
1228 			return (error);
1229 		}
1230 	}
1231 
1232 	vp = p->p_textvp;
	if (vp == NULL) {
		/*
		 * No text vnode (e.g., a kernel process); return an error
		 * rather than dereference a NULL pointer below.
		 */
		if (*pidp != -1)
			PROC_UNLOCK(p);
		return (EINVAL);
	}
1233 	vref(vp);
1234 	if (*pidp != -1)
1235 		PROC_UNLOCK(p);
1236 	error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
1237 	vrele(vp);
1238 	if (error)
1239 		return (error);
1240 	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
1241 	free(freebuf, M_TEMP);
1242 	return (error);
1243 }
1244 
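/*
 * This sysctl returns the name of the syscall vector (ABI) the target
 * process is executing under.
 */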
1245 static int
1246 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
1247 {
1248 	struct proc *p;
1249 	char *sv_name;
1250 	int *name;
1251 	int namelen;
1252 	int error;
1253 
1254 	namelen = arg2;
1255 	if (namelen != 1)
1256 		return (EINVAL);
1257 
1258 	name = (int *)arg1;
1259 	if ((p = pfind((pid_t)name[0])) == NULL)
1260 		return (ESRCH);
1261 	if ((error = p_cansee(curthread, p))) {
1262 		PROC_UNLOCK(p);
1263 		return (error);
1264 	}
1265 	sv_name = p->p_sysent->sv_name;
1266 	PROC_UNLOCK(p);
1267 	return (sysctl_handle_string(oidp, sv_name, 0, req));
1268 }
1269 
1270 
1271 static SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");
1272 
1273 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1274 	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1275 
1276 static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD,
1277 	sysctl_kern_proc, "Process table");
1278 
1279 static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1280 	sysctl_kern_proc, "Process table");
1281 
1282 static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD,
1283 	sysctl_kern_proc, "Process table");
1284 
1285 static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD,
1286 	sysctl_kern_proc, "Process table");
1287 
1288 static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1289 	sysctl_kern_proc, "Process table");
1290 
1291 static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1292 	sysctl_kern_proc, "Process table");
1293 
1294 static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1295 	sysctl_kern_proc, "Process table");
1296 
1297 static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1298 	sysctl_kern_proc, "Process table");
1299 
1300 static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD,
1301 	sysctl_kern_proc, "Return process table, no threads");
1302 
1303 static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
1304 	CTLFLAG_RW | CTLFLAG_ANYBODY,
1305 	sysctl_kern_proc_args, "Process argument list");
1306 
1307 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD,
1308 	sysctl_kern_proc_pathname, "Process executable path");
1309 
1310 static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD,
1311 	sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)");
1312 
1313 static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
1314 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1315 
1316 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
1317 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1318 
1319 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
1320 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1321 
1322 static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
1323 	sid_td, CTLFLAG_RD, sysctl_kern_proc, "Process table");
1324 
1325 static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
1326 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1327 
1328 static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
1329 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1330 
1331 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
1332 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1333 
1334 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
1335 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1336 
1337 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
1338 	CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");
1339