xref: /freebsd/sys/kern/kern_proc.c (revision 0efd6615cd5f39b67cec82a7034e655f3b5801e3)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_ktrace.h"
36 #include "opt_kstack_pages.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/refcount.h>
46 #include <sys/sysent.h>
47 #include <sys/sched.h>
48 #include <sys/smp.h>
49 #include <sys/sysctl.h>
50 #include <sys/filedesc.h>
51 #include <sys/tty.h>
52 #include <sys/signalvar.h>
53 #include <sys/sx.h>
54 #include <sys/user.h>
55 #include <sys/jail.h>
56 #include <sys/vnode.h>
57 #ifdef KTRACE
58 #include <sys/uio.h>
59 #include <sys/ktrace.h>
60 #endif
61 
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_map.h>
66 #include <vm/uma.h>
67 
68 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
69 MALLOC_DEFINE(M_SESSION, "session", "session header");
70 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
71 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
72 
73 static void doenterpgrp(struct proc *, struct pgrp *);
74 static void orphanpg(struct pgrp *pg);
75 static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
76 static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
77 static void pgadjustjobc(struct pgrp *pgrp, int entering);
78 static void pgdelete(struct pgrp *);
79 static int proc_ctor(void *mem, int size, void *arg, int flags);
80 static void proc_dtor(void *mem, int size, void *arg);
81 static int proc_init(void *mem, int size, int flags);
82 static void proc_fini(void *mem, int size);
83 
84 /*
85  * Other process lists
86  */
87 struct pidhashhead *pidhashtbl;
88 u_long pidhash;
89 struct pgrphashhead *pgrphashtbl;
90 u_long pgrphash;
91 struct proclist allproc;
92 struct proclist zombproc;
93 struct sx allproc_lock;
94 struct sx proctree_lock;
95 struct mtx ppeers_lock;
96 uma_zone_t proc_zone;
97 uma_zone_t ithread_zone;
98 
99 int kstack_pages = KSTACK_PAGES;
100 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
101 
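/*
 * struct kinfo_proc is part of the userland ABI; catch any accidental
 * size change at compile time.
 */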
102 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
103 
104 /*
105  * Initialize global process hashing structures.
106  */
107 void
108 procinit()
109 {
110 
111 	sx_init(&allproc_lock, "allproc");
112 	sx_init(&proctree_lock, "proctree");
113 	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
114 	LIST_INIT(&allproc);
115 	LIST_INIT(&zombproc);
116 	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
117 	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
118 	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
119 	    proc_ctor, proc_dtor, proc_init, proc_fini,
120 	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
121 	uihashinit();
122 }
123 
124 /*
125  * Prepare a proc for use.
126  */
127 static int
128 proc_ctor(void *mem, int size, void *arg, int flags)
129 {
130 	struct proc *p;
131 
132 	p = (struct proc *)mem;
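	/*
	 * Nothing to construct here yet: the proc zone is type-stable
	 * (see proc_init()/proc_fini()), so no per-allocation setup is
	 * currently required.
	 */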
133 	return (0);
134 }
135 
136 /*
137  * Reclaim a proc after use.
138  */
139 static void
140 proc_dtor(void *mem, int size, void *arg)
141 {
142 	struct proc *p;
143 	struct thread *td;
144 
145 	/* INVARIANTS checks go here */
146 	p = (struct proc *)mem;
147 	td = FIRST_THREAD_IN_PROC(p);
148 #ifdef INVARIANTS
149 	KASSERT((p->p_numthreads == 1),
150 	    ("bad number of threads in exiting process"));
151 	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
152 	KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
153 #endif
154 
155 	/* Dispose of an alternate kstack, if one exists.
156 	 * XXX What if there is more than one thread in the proc?
157 	 *     The first thread in the proc is special and is not
158 	 *     freed, so it must be disposed of here.
159 	 */
160 	if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
161 		vm_thread_dispose_altkstack(td);
162 	if (p->p_ksi != NULL)
163 		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
164 }
165 
166 /*
167  * Initialize type-stable parts of a proc (when newly created).
168  */
169 static int
170 proc_init(void *mem, int size, int flags)
171 {
172 	struct proc *p;
173 	struct thread *td;
174 
175 	p = (struct proc *)mem;
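	/*
	 * The scheduler's per-process data is placed immediately after
	 * the proc; the zone was created with sched_sizeof_proc() to
	 * reserve space for it.
	 */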
176 	p->p_sched = (struct p_sched *)&p[1];
177 	td = thread_alloc();
178 	bzero(&p->p_mtx, sizeof(struct mtx));
179 	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
180 	p->p_stats = pstats_alloc();
181 	proc_linkup(p, td);
182 	sched_newproc(p, td);
183 	return (0);
184 }
185 
186 /*
187  * UMA should ensure that this function is never called.
188  * Freeing a proc structure would violate type stability.
189  */
190 static void
191 proc_fini(void *mem, int size)
192 {
193 #ifdef notnow
194 	struct proc *p;
195 
196 	p = (struct proc *)mem;
197 	pstats_free(p->p_stats);
198 	thread_free(FIRST_THREAD_IN_PROC(p));
199 	mtx_destroy(&p->p_mtx);
200 	if (p->p_ksi != NULL)
201 		ksiginfo_free(p->p_ksi);
202 #else
203 	panic("proc reclaimed");
204 #endif
205 }
206 
207 /*
208  * Is p an inferior of the current process?
209  */
210 int
211 inferior(p)
212 	register struct proc *p;
213 {
214 
215 	sx_assert(&proctree_lock, SX_LOCKED);
216 	for (; p != curproc; p = p->p_pptr)
217 		if (p->p_pid == 0)
218 			return (0);
219 	return (1);
220 }
221 
222 /*
223  * Locate a process by number; return only "live" processes -- i.e., neither
224  * zombies nor newly born but incompletely initialized processes.  By not
225  * returning processes in the PRS_NEW state, we spare callers from having
226  * to test for that condition before dereferencing p_ucred, et al.
227  */
228 struct proc *
229 pfind(pid)
230 	register pid_t pid;
231 {
232 	register struct proc *p;
233 
234 	sx_slock(&allproc_lock);
235 	LIST_FOREACH(p, PIDHASH(pid), p_hash)
236 		if (p->p_pid == pid) {
237 			if (p->p_state == PRS_NEW) {
238 				p = NULL;
239 				break;
240 			}
241 			PROC_LOCK(p);
242 			break;
243 		}
244 	sx_sunlock(&allproc_lock);
245 	return (p);
246 }
247 
248 /*
249  * Locate a process group by number.
250  * The caller must hold proctree_lock.
251  */
252 struct pgrp *
253 pgfind(pgid)
254 	register pid_t pgid;
255 {
256 	register struct pgrp *pgrp;
257 
258 	sx_assert(&proctree_lock, SX_LOCKED);
259 
260 	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
261 		if (pgrp->pg_id == pgid) {
262 			PGRP_LOCK(pgrp);
263 			return (pgrp);
264 		}
265 	}
266 	return (NULL);
267 }
268 
269 /*
270  * Create a new process group.
271  * pgid must be equal to the pid of p.
272  * Begin a new session if required.
273  */
274 int
275 enterpgrp(p, pgid, pgrp, sess)
276 	register struct proc *p;
277 	pid_t pgid;
278 	struct pgrp *pgrp;
279 	struct session *sess;
280 {
281 	struct pgrp *pgrp2;
282 
283 	sx_assert(&proctree_lock, SX_XLOCKED);
284 
285 	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
286 	KASSERT(p->p_pid == pgid,
287 	    ("enterpgrp: new pgrp and pid != pgid"));
288 
289 	pgrp2 = pgfind(pgid);
290 
291 	KASSERT(pgrp2 == NULL,
292 	    ("enterpgrp: pgrp with pgid exists"));
293 	KASSERT(!SESS_LEADER(p),
294 	    ("enterpgrp: session leader attempted setpgrp"));
295 
296 	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
297 
298 	if (sess != NULL) {
299 		/*
300 		 * new session
301 		 */
302 		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
303 		mtx_lock(&Giant);       /* XXX TTY */
304 		PROC_LOCK(p);
305 		p->p_flag &= ~P_CONTROLT;
306 		PROC_UNLOCK(p);
307 		PGRP_LOCK(pgrp);
308 		sess->s_leader = p;
309 		sess->s_sid = p->p_pid;
310 		sess->s_count = 1;
311 		sess->s_ttyvp = NULL;
312 		sess->s_ttyp = NULL;
313 		bcopy(p->p_session->s_login, sess->s_login,
314 			    sizeof(sess->s_login));
315 		pgrp->pg_session = sess;
316 		KASSERT(p == curproc,
317 		    ("enterpgrp: mksession and p != curproc"));
318 	} else {
319 		mtx_lock(&Giant);       /* XXX TTY */
320 		pgrp->pg_session = p->p_session;
321 		SESS_LOCK(pgrp->pg_session);
322 		pgrp->pg_session->s_count++;
323 		SESS_UNLOCK(pgrp->pg_session);
324 		PGRP_LOCK(pgrp);
325 	}
326 	pgrp->pg_id = pgid;
327 	LIST_INIT(&pgrp->pg_members);
328 
329 	/*
330 	 * As we hold an exclusive lock on proctree_lock,
331 	 * this should not deadlock.
332 	 */
333 	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
334 	pgrp->pg_jobc = 0;
335 	SLIST_INIT(&pgrp->pg_sigiolst);
336 	PGRP_UNLOCK(pgrp);
337 	mtx_unlock(&Giant);       /* XXX TTY */
338 
339 	doenterpgrp(p, pgrp);
340 
341 	return (0);
342 }
343 
344 /*
345  * Move p to an existing process group
346  */
347 int
348 enterthispgrp(p, pgrp)
349 	register struct proc *p;
350 	struct pgrp *pgrp;
351 {
352 
353 	sx_assert(&proctree_lock, SX_XLOCKED);
354 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
355 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
356 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
357 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
358 	KASSERT(pgrp->pg_session == p->p_session,
359 		("%s: pgrp's session %p, p->p_session %p.\n",
360 		__func__,
361 		pgrp->pg_session,
362 		p->p_session));
363 	KASSERT(pgrp != p->p_pgrp,
364 		("%s: p belongs to pgrp.", __func__));
365 
366 	doenterpgrp(p, pgrp);
367 
368 	return (0);
369 }
370 
371 /*
372  * Move p to a process group
373  */
374 static void
375 doenterpgrp(p, pgrp)
376 	struct proc *p;
377 	struct pgrp *pgrp;
378 {
379 	struct pgrp *savepgrp;
380 
381 	sx_assert(&proctree_lock, SX_XLOCKED);
382 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
383 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
384 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
385 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
386 
387 	savepgrp = p->p_pgrp;
388 
389 	/*
390 	 * Adjust eligibility of affected pgrps to participate in job control.
391 	 * Increment eligibility counts before decrementing, otherwise we
392 	 * could reach 0 spuriously during the first call.
393 	 */
394 	fixjobc(p, pgrp, 1);
395 	fixjobc(p, p->p_pgrp, 0);
396 
397 	mtx_lock(&Giant);       /* XXX TTY */
398 	PGRP_LOCK(pgrp);
399 	PGRP_LOCK(savepgrp);
400 	PROC_LOCK(p);
401 	LIST_REMOVE(p, p_pglist);
402 	p->p_pgrp = pgrp;
403 	PROC_UNLOCK(p);
404 	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
405 	PGRP_UNLOCK(savepgrp);
406 	PGRP_UNLOCK(pgrp);
407 	mtx_unlock(&Giant);     /* XXX TTY */
408 	if (LIST_EMPTY(&savepgrp->pg_members))
409 		pgdelete(savepgrp);
410 }
411 
412 /*
413  * remove process from process group
414  */
415 int
416 leavepgrp(p)
417 	register struct proc *p;
418 {
419 	struct pgrp *savepgrp;
420 
421 	sx_assert(&proctree_lock, SX_XLOCKED);
422 	savepgrp = p->p_pgrp;
423 	mtx_lock(&Giant);	/* XXX TTY */
424 	PGRP_LOCK(savepgrp);
425 	PROC_LOCK(p);
426 	LIST_REMOVE(p, p_pglist);
427 	p->p_pgrp = NULL;
428 	PROC_UNLOCK(p);
429 	PGRP_UNLOCK(savepgrp);
430 	mtx_unlock(&Giant);	/* XXX TTY */
431 	if (LIST_EMPTY(&savepgrp->pg_members))
432 		pgdelete(savepgrp);
433 	return (0);
434 }
435 
436 /*
437  * delete a process group
438  */
439 static void
440 pgdelete(pgrp)
441 	register struct pgrp *pgrp;
442 {
443 	struct session *savesess;
444 
445 	sx_assert(&proctree_lock, SX_XLOCKED);
446 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
447 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
448 
449 	/*
450 	 * Reset any sigio structures pointing to us as a result of
451 	 * F_SETOWN with our pgid.
452 	 */
453 	funsetownlst(&pgrp->pg_sigiolst);
454 
455 	mtx_lock(&Giant);       /* XXX TTY */
456 	PGRP_LOCK(pgrp);
457 	if (pgrp->pg_session->s_ttyp != NULL &&
458 	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
459 		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
460 	LIST_REMOVE(pgrp, pg_hash);
461 	savesess = pgrp->pg_session;
462 	SESSRELE(savesess);
463 	PGRP_UNLOCK(pgrp);
464 	mtx_destroy(&pgrp->pg_mtx);
465 	FREE(pgrp, M_PGRP);
466 	mtx_unlock(&Giant);     /* XXX TTY */
467 }
468 
469 static void
470 pgadjustjobc(pgrp, entering)
471 	struct pgrp *pgrp;
472 	int entering;
473 {
474 
475 	PGRP_LOCK(pgrp);
476 	if (entering)
477 		pgrp->pg_jobc++;
478 	else {
479 		--pgrp->pg_jobc;
480 		if (pgrp->pg_jobc == 0)
481 			orphanpg(pgrp);
482 	}
483 	PGRP_UNLOCK(pgrp);
484 }
485 
486 /*
487  * Adjust pgrp jobc counters when specified process changes process group.
488  * We count the number of processes in each process group that "qualify"
489  * the group for terminal job control (those with a parent in a different
490  * process group of the same session).  If that count reaches zero, the
491  * process group becomes orphaned.  Check both the specified process'
492  * process group and that of its children.
493  * entering == 0 => p is leaving specified group.
494  * entering == 1 => p is entering specified group.
495  */
496 void
497 fixjobc(p, pgrp, entering)
498 	register struct proc *p;
499 	register struct pgrp *pgrp;
500 	int entering;
501 {
502 	register struct pgrp *hispgrp;
503 	register struct session *mysession;
504 
505 	sx_assert(&proctree_lock, SX_LOCKED);
506 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
507 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
508 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
509 
510 	/*
511 	 * Check p's parent to see whether p qualifies its own process
512 	 * group; if so, adjust count for p's process group.
513 	 */
514 	mysession = pgrp->pg_session;
515 	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
516 	    hispgrp->pg_session == mysession)
517 		pgadjustjobc(pgrp, entering);
518 
519 	/*
520 	 * Check this process' children to see whether they qualify
521 	 * their process groups; if so, adjust counts for children's
522 	 * process groups.
523 	 */
524 	LIST_FOREACH(p, &p->p_children, p_sibling) {
525 		hispgrp = p->p_pgrp;
526 		if (hispgrp == pgrp ||
527 		    hispgrp->pg_session != mysession)
528 			continue;
529 		PROC_LOCK(p);
530 		if (p->p_state == PRS_ZOMBIE) {
531 			PROC_UNLOCK(p);
532 			continue;
533 		}
534 		PROC_UNLOCK(p);
535 		pgadjustjobc(hispgrp, entering);
536 	}
537 }
538 
539 /*
540  * A process group has become orphaned;
541  * if there are any stopped processes in the group,
542  * hang up all processes in that group.
543  */
544 static void
545 orphanpg(pg)
546 	struct pgrp *pg;
547 {
548 	register struct proc *p;
549 
550 	PGRP_LOCK_ASSERT(pg, MA_OWNED);
551 
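	/*
	 * If any member of the group is stopped, send every member
	 * SIGHUP followed by SIGCONT so the orphaned group does not
	 * remain stopped with nothing left to continue it.
	 */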
552 	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
553 		PROC_LOCK(p);
554 		if (P_SHOULDSTOP(p)) {
555 			PROC_UNLOCK(p);
556 			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
557 				PROC_LOCK(p);
558 				psignal(p, SIGHUP);
559 				psignal(p, SIGCONT);
560 				PROC_UNLOCK(p);
561 			}
562 			return;
563 		}
564 		PROC_UNLOCK(p);
565 	}
566 }
567 
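/*
 * Release a reference to a session.  On the last release, drop the
 * reference on the controlling tty and free the session structure.
 */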
568 void
569 sessrele(struct session *s)
570 {
571 	int i;
572 
573 	SESS_LOCK(s);
574 	i = --s->s_count;
575 	SESS_UNLOCK(s);
576 	if (i == 0) {
577 		if (s->s_ttyp != NULL)
578 			ttyrel(s->s_ttyp);
579 		mtx_destroy(&s->s_mtx);
580 		FREE(s, M_SESSION);
581 	}
582 }
583 
584 #include "opt_ddb.h"
585 #ifdef DDB
586 #include <ddb/ddb.h>
587 
588 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
589 {
590 	register struct pgrp *pgrp;
591 	register struct proc *p;
592 	register int i;
593 
594 	for (i = 0; i <= pgrphash; i++) {
595 		if (!LIST_EMPTY(&pgrphashtbl[i])) {
596 			printf("\tindx %d\n", i);
597 			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
598 				printf(
599 			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
600 				    (void *)pgrp, (long)pgrp->pg_id,
601 				    (void *)pgrp->pg_session,
602 				    pgrp->pg_session->s_count,
603 				    (void *)LIST_FIRST(&pgrp->pg_members));
604 				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
605 					printf("\t\tpid %ld addr %p pgrp %p\n",
606 					    (long)p->p_pid, (void *)p,
607 					    (void *)p->p_pgrp);
608 				}
609 			}
610 		}
611 	}
612 }
613 #endif /* DDB */
614 
615 /*
616  * Clear kinfo_proc and fill in any information that is common
617  * to all threads in the process.
618  * Must be called with the target process locked.
619  */
620 static void
621 fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
622 {
623 	struct thread *td0;
624 	struct tty *tp;
625 	struct session *sp;
626 	struct ucred *cred;
627 	struct sigacts *ps;
628 
629 	bzero(kp, sizeof(*kp));
630 
631 	kp->ki_structsize = sizeof(*kp);
632 	kp->ki_paddr = p;
633 	PROC_LOCK_ASSERT(p, MA_OWNED);
634 	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
635 	kp->ki_args = p->p_args;
636 	kp->ki_textvp = p->p_textvp;
637 #ifdef KTRACE
638 	kp->ki_tracep = p->p_tracevp;
639 	mtx_lock(&ktrace_mtx);
640 	kp->ki_traceflag = p->p_traceflag;
641 	mtx_unlock(&ktrace_mtx);
642 #endif
643 	kp->ki_fd = p->p_fd;
644 	kp->ki_vmspace = p->p_vmspace;
645 	kp->ki_flag = p->p_flag;
646 	cred = p->p_ucred;
647 	if (cred) {
648 		kp->ki_uid = cred->cr_uid;
649 		kp->ki_ruid = cred->cr_ruid;
650 		kp->ki_svuid = cred->cr_svuid;
651 		/* XXX bde doesn't like KI_NGROUPS */
652 		kp->ki_ngroups = min(cred->cr_ngroups, KI_NGROUPS);
653 		bcopy(cred->cr_groups, kp->ki_groups,
654 		    kp->ki_ngroups * sizeof(gid_t));
655 		kp->ki_rgid = cred->cr_rgid;
656 		kp->ki_svgid = cred->cr_svgid;
657 		/* If jailed(cred), emulate the old P_JAILED flag. */
658 		if (jailed(cred)) {
659 			kp->ki_flag |= P_JAILED;
660 			/* If the observer is jailed, report a jail ID of 0. */
661 			if (!jailed(curthread->td_ucred))
662 				kp->ki_jid = cred->cr_prison->pr_id;
663 		}
664 	}
665 	ps = p->p_sigacts;
666 	if (ps) {
667 		mtx_lock(&ps->ps_mtx);
668 		kp->ki_sigignore = ps->ps_sigignore;
669 		kp->ki_sigcatch = ps->ps_sigcatch;
670 		mtx_unlock(&ps->ps_mtx);
671 	}
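	/*
	 * Sample scheduler-protected state (process state, VM usage and
	 * accumulated run time) under sched_lock.
	 */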
672 	mtx_lock_spin(&sched_lock);
673 	if (p->p_state != PRS_NEW &&
674 	    p->p_state != PRS_ZOMBIE &&
675 	    p->p_vmspace != NULL) {
676 		struct vmspace *vm = p->p_vmspace;
677 
678 		kp->ki_size = vm->vm_map.size;
679 		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
680 		FOREACH_THREAD_IN_PROC(p, td0) {
681 			if (!TD_IS_SWAPPED(td0))
682 				kp->ki_rssize += td0->td_kstack_pages;
683 			if (td0->td_altkstack_obj != NULL)
684 				kp->ki_rssize += td0->td_altkstack_pages;
685 		}
686 		kp->ki_swrss = vm->vm_swrss;
687 		kp->ki_tsize = vm->vm_tsize;
688 		kp->ki_dsize = vm->vm_dsize;
689 		kp->ki_ssize = vm->vm_ssize;
690 	} else if (p->p_state == PRS_ZOMBIE)
691 		kp->ki_stat = SZOMB;
692 	kp->ki_sflag = p->p_sflag;
693 	kp->ki_swtime = p->p_swtime;
694 	kp->ki_pid = p->p_pid;
695 	kp->ki_nice = p->p_nice;
696 	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
697 	mtx_unlock_spin(&sched_lock);
698 	if ((p->p_sflag & PS_INMEM) && p->p_stats != NULL) {
699 		kp->ki_start = p->p_stats->p_start;
700 		timevaladd(&kp->ki_start, &boottime);
701 		kp->ki_rusage = p->p_stats->p_ru;
702 		calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
703 		calccru(p, &kp->ki_childutime, &kp->ki_childstime);
704 
705 		/* Some callers want child-times in a single value */
706 		kp->ki_childtime = kp->ki_childstime;
707 		timevaladd(&kp->ki_childtime, &kp->ki_childutime);
708 	}
709 	tp = NULL;
710 	if (p->p_pgrp) {
711 		kp->ki_pgid = p->p_pgrp->pg_id;
712 		kp->ki_jobc = p->p_pgrp->pg_jobc;
713 		sp = p->p_pgrp->pg_session;
714 
715 		if (sp != NULL) {
716 			kp->ki_sid = sp->s_sid;
717 			SESS_LOCK(sp);
718 			strlcpy(kp->ki_login, sp->s_login,
719 			    sizeof(kp->ki_login));
720 			if (sp->s_ttyvp)
721 				kp->ki_kiflag |= KI_CTTY;
722 			if (SESS_LEADER(p))
723 				kp->ki_kiflag |= KI_SLEADER;
724 			tp = sp->s_ttyp;
725 			SESS_UNLOCK(sp);
726 		}
727 	}
728 	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
729 		kp->ki_tdev = dev2udev(tp->t_dev);
730 		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
731 		if (tp->t_session)
732 			kp->ki_tsid = tp->t_session->s_sid;
733 	} else
734 		kp->ki_tdev = NODEV;
735 	if (p->p_comm[0] != '\0') {
736 		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
737 		/*
738 		 * Temporarily give the thread the process's name as a default,
739 		 * since the SNMP code erroneously uses it.
740 		 * Remove this when that is fixed (soon, I'm told).
741 		 */
742 		strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
743 	}
744 	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
745 	    p->p_sysent->sv_name[0] != '\0')
746 		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
747 	kp->ki_siglist = p->p_siglist;
748 	kp->ki_xstat = p->p_xstat;
749 	kp->ki_acflag = p->p_acflag;
750 	kp->ki_lock = p->p_lock;
751 	if (p->p_pptr)
752 		kp->ki_ppid = p->p_pptr->p_pid;
753 }
754 
755 /*
756  * Fill in information that is thread specific.
757  * Must be called with sched_lock locked.
758  */
759 static void
760 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
761 {
762 	struct proc *p;
763 
764 	p = td->td_proc;
765 
766 	if (td->td_wmesg != NULL)
767 		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
768 	else
769 		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
770 	if (td->td_name[0] != '\0')
771 		strlcpy(kp->ki_ocomm, td->td_name, sizeof(kp->ki_ocomm));
772 	if (TD_ON_LOCK(td)) {
773 		kp->ki_kiflag |= KI_LOCKBLOCK;
774 		strlcpy(kp->ki_lockname, td->td_lockname,
775 		    sizeof(kp->ki_lockname));
776 	} else {
777 		kp->ki_kiflag &= ~KI_LOCKBLOCK;
778 		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
779 	}
780 
781 	if (p->p_state == PRS_NORMAL) { /*  XXXKSE very approximate */
782 		if (TD_ON_RUNQ(td) ||
783 		    TD_CAN_RUN(td) ||
784 		    TD_IS_RUNNING(td)) {
785 			kp->ki_stat = SRUN;
786 		} else if (P_SHOULDSTOP(p)) {
787 			kp->ki_stat = SSTOP;
788 		} else if (TD_IS_SLEEPING(td)) {
789 			kp->ki_stat = SSLEEP;
790 		} else if (TD_ON_LOCK(td)) {
791 			kp->ki_stat = SLOCK;
792 		} else {
793 			kp->ki_stat = SWAIT;
794 		}
795 	} else if (p->p_state == PRS_ZOMBIE) {
796 		kp->ki_stat = SZOMB;
797 	} else {
798 		kp->ki_stat = SIDL;
799 	}
800 
801 	/* Things in the thread */
802 	kp->ki_wchan = td->td_wchan;
803 	kp->ki_pri.pri_level = td->td_priority;
804 	kp->ki_pri.pri_native = td->td_base_pri;
805 	kp->ki_lastcpu = td->td_lastcpu;
806 	kp->ki_oncpu = td->td_oncpu;
807 	kp->ki_tdflags = td->td_flags;
808 	kp->ki_tid = td->td_tid;
809 	kp->ki_numthreads = p->p_numthreads;
810 	kp->ki_pcb = td->td_pcb;
811 	kp->ki_kstack = (void *)td->td_kstack;
812 	kp->ki_pctcpu = sched_pctcpu(td);
813 	kp->ki_estcpu = td->td_estcpu;
814 	kp->ki_slptime = td->td_slptime;
815 	kp->ki_pri.pri_class = td->td_pri_class;
816 	kp->ki_pri.pri_user = td->td_user_pri;
817 
818 	/* We can't get this anymore, but ps etc. never used it anyway. */
819 	kp->ki_rqindex = 0;
820 
821 	SIGSETOR(kp->ki_siglist, td->td_siglist);
822 	kp->ki_sigmask = td->td_sigmask;
823 }
824 
825 /*
826  * Fill in a kinfo_proc structure for the specified process.
827  * Must be called with the target process locked.
828  */
829 void
830 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
831 {
832 
833 	fill_kinfo_proc_only(p, kp);
834 	mtx_lock_spin(&sched_lock);
835 	if (FIRST_THREAD_IN_PROC(p) != NULL)
836 		fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
837 	mtx_unlock_spin(&sched_lock);
838 }
839 
840 struct pstats *
841 pstats_alloc(void)
842 {
843 
844 	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
845 }
846 
847 /*
848  * Copy parts of p_stats; zero the rest of p_stats (statistics).
849  */
850 void
851 pstats_fork(struct pstats *src, struct pstats *dst)
852 {
853 
854 	bzero(&dst->pstat_startzero,
855 	    __rangeof(struct pstats, pstat_startzero, pstat_endzero));
856 	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
857 	    __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
858 }
859 
860 void
861 pstats_free(struct pstats *ps)
862 {
863 
864 	free(ps, M_SUBPROC);
865 }
866 
867 /*
868  * Locate a zombie process by number
869  */
870 struct proc *
871 zpfind(pid_t pid)
872 {
873 	struct proc *p;
874 
875 	sx_slock(&allproc_lock);
876 	LIST_FOREACH(p, &zombproc, p_list)
877 		if (p->p_pid == pid) {
878 			PROC_LOCK(p);
879 			break;
880 		}
881 	sx_sunlock(&allproc_lock);
882 	return (p);
883 }
884 
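/*
 * Flag bits for sysctl_out_proc(): the low bits carry the caller's
 * zombie-pass indicator (KERN_PROC_ZOMBMASK) and KERN_PROC_NOTHREADS
 * suppresses the per-thread records.
 */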
885 #define KERN_PROC_ZOMBMASK	0x3
886 #define KERN_PROC_NOTHREADS	0x4
887 
888 /*
889  * Must be called with the process locked and will return with it unlocked.
890  */
891 static int
892 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
893 {
894 	struct thread *td;
895 	struct kinfo_proc kinfo_proc;
896 	int error = 0;
897 	struct proc *np;
898 	pid_t pid = p->p_pid;
899 
900 	PROC_LOCK_ASSERT(p, MA_OWNED);
901 
902 	fill_kinfo_proc_only(p, &kinfo_proc);
903 	if (flags & KERN_PROC_NOTHREADS) {
904 		mtx_lock_spin(&sched_lock);
905 		if (FIRST_THREAD_IN_PROC(p) != NULL)
906 			fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), &kinfo_proc);
907 		mtx_unlock_spin(&sched_lock);
908 		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
909 				   sizeof(kinfo_proc));
910 	} else {
911 		mtx_lock_spin(&sched_lock);
912 		if (FIRST_THREAD_IN_PROC(p) != NULL)
913 			FOREACH_THREAD_IN_PROC(p, td) {
914 				fill_kinfo_thread(td, &kinfo_proc);
915 				error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
916 						   sizeof(kinfo_proc));
917 				if (error)
918 					break;
919 			}
920 		else
921 			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
922 					   sizeof(kinfo_proc));
923 		mtx_unlock_spin(&sched_lock);
924 	}
925 	PROC_UNLOCK(p);
926 	if (error)
927 		return (error);
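	/*
	 * Look the process up again (in the zombie list if this was a
	 * zombie pass) and return EAGAIN if it has disappeared or is no
	 * longer the same process.
	 */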
928 	if (flags & KERN_PROC_ZOMBMASK)
929 		np = zpfind(pid);
930 	else {
931 		if (pid == 0)
932 			return (0);
933 		np = pfind(pid);
934 	}
935 	if (np == NULL)
936 		return (EAGAIN);
937 	if (np != p) {
938 		PROC_UNLOCK(np);
939 		return (EAGAIN);
940 	}
941 	PROC_UNLOCK(np);
942 	return (0);
943 }
944 
945 static int
946 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
947 {
948 	int *name = (int*) arg1;
949 	u_int namelen = arg2;
950 	struct proc *p;
951 	int flags, doingzomb, oid_number;
952 	int error = 0;
953 
954 	oid_number = oidp->oid_number;
955 	if (oid_number != KERN_PROC_ALL &&
956 	    (oid_number & KERN_PROC_INC_THREAD) == 0)
957 		flags = KERN_PROC_NOTHREADS;
958 	else {
959 		flags = 0;
960 		oid_number &= ~KERN_PROC_INC_THREAD;
961 	}
962 	if (oid_number == KERN_PROC_PID) {
963 		if (namelen != 1)
964 			return (EINVAL);
965 		error = sysctl_wire_old_buffer(req, 0);
966 		if (error)
967 			return (error);
968 		p = pfind((pid_t)name[0]);
969 		if (!p)
970 			return (ESRCH);
971 		if ((error = p_cansee(curthread, p))) {
972 			PROC_UNLOCK(p);
973 			return (error);
974 		}
975 		error = sysctl_out_proc(p, req, flags);
976 		return (error);
977 	}
978 
979 	switch (oid_number) {
980 	case KERN_PROC_ALL:
981 		if (namelen != 0)
982 			return (EINVAL);
983 		break;
984 	case KERN_PROC_PROC:
985 		if (namelen != 0 && namelen != 1)
986 			return (EINVAL);
987 		break;
988 	default:
989 		if (namelen != 1)
990 			return (EINVAL);
991 		break;
992 	}
993 
994 	if (!req->oldptr) {
995 		/* overestimate by 5 procs */
996 		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
997 		if (error)
998 			return (error);
999 	}
1000 	error = sysctl_wire_old_buffer(req, 0);
1001 	if (error != 0)
1002 		return (error);
1003 	sx_slock(&allproc_lock);
1004 	for (doingzomb = 0; doingzomb < 2; doingzomb++) {
1005 		if (!doingzomb)
1006 			p = LIST_FIRST(&allproc);
1007 		else
1008 			p = LIST_FIRST(&zombproc);
1009 		for (; p != 0; p = LIST_NEXT(p, p_list)) {
1010 			/*
1011 			 * Skip embryonic processes.
1012 			 */
1013 			mtx_lock_spin(&sched_lock);
1014 			if (p->p_state == PRS_NEW) {
1015 				mtx_unlock_spin(&sched_lock);
1016 				continue;
1017 			}
1018 			mtx_unlock_spin(&sched_lock);
1019 			PROC_LOCK(p);
1020 			KASSERT(p->p_ucred != NULL,
1021 			    ("process credential is NULL for non-NEW proc"));
1022 			/*
1023 			 * Show a user only the processes it is permitted to see.
1024 			 */
1025 			if (p_cansee(curthread, p)) {
1026 				PROC_UNLOCK(p);
1027 				continue;
1028 			}
1029 			/*
1030 			 * TODO - make more efficient (see notes below);
1031 			 * e.g. do the lookup by session.
1032 			 */
1033 			switch (oid_number) {
1034 
1035 			case KERN_PROC_GID:
1036 				if (p->p_ucred->cr_gid != (gid_t)name[0]) {
1037 					PROC_UNLOCK(p);
1038 					continue;
1039 				}
1040 				break;
1041 
1042 			case KERN_PROC_PGRP:
1043 				/* could do this by traversing pgrp */
1044 				if (p->p_pgrp == NULL ||
1045 				    p->p_pgrp->pg_id != (pid_t)name[0]) {
1046 					PROC_UNLOCK(p);
1047 					continue;
1048 				}
1049 				break;
1050 
1051 			case KERN_PROC_RGID:
1052 				if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
1053 					PROC_UNLOCK(p);
1054 					continue;
1055 				}
1056 				break;
1057 
1058 			case KERN_PROC_SESSION:
1059 				if (p->p_session == NULL ||
1060 				    p->p_session->s_sid != (pid_t)name[0]) {
1061 					PROC_UNLOCK(p);
1062 					continue;
1063 				}
1064 				break;
1065 
1066 			case KERN_PROC_TTY:
1067 				if ((p->p_flag & P_CONTROLT) == 0 ||
1068 				    p->p_session == NULL) {
1069 					PROC_UNLOCK(p);
1070 					continue;
1071 				}
1072 				SESS_LOCK(p->p_session);
1073 				if (p->p_session->s_ttyp == NULL ||
1074 				    dev2udev(p->p_session->s_ttyp->t_dev) !=
1075 				    (dev_t)name[0]) {
1076 					SESS_UNLOCK(p->p_session);
1077 					PROC_UNLOCK(p);
1078 					continue;
1079 				}
1080 				SESS_UNLOCK(p->p_session);
1081 				break;
1082 
1083 			case KERN_PROC_UID:
1084 				if (p->p_ucred->cr_uid != (uid_t)name[0]) {
1085 					PROC_UNLOCK(p);
1086 					continue;
1087 				}
1088 				break;
1089 
1090 			case KERN_PROC_RUID:
1091 				if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
1092 					PROC_UNLOCK(p);
1093 					continue;
1094 				}
1095 				break;
1096 
1097 			case KERN_PROC_PROC:
1098 				break;
1099 
1100 			default:
1101 				break;
1102 
1103 			}
1104 
1105 			error = sysctl_out_proc(p, req, flags | doingzomb);
1106 			if (error) {
1107 				sx_sunlock(&allproc_lock);
1108 				return (error);
1109 			}
1110 		}
1111 	}
1112 	sx_sunlock(&allproc_lock);
1113 	return (0);
1114 }
1115 
1116 struct pargs *
1117 pargs_alloc(int len)
1118 {
1119 	struct pargs *pa;
1120 
1121 	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
1122 		M_WAITOK);
1123 	refcount_init(&pa->ar_ref, 1);
1124 	pa->ar_length = len;
1125 	return (pa);
1126 }
1127 
1128 void
1129 pargs_free(struct pargs *pa)
1130 {
1131 
1132 	FREE(pa, M_PARGS);
1133 }
1134 
1135 void
1136 pargs_hold(struct pargs *pa)
1137 {
1138 
1139 	if (pa == NULL)
1140 		return;
1141 	refcount_acquire(&pa->ar_ref);
1142 }
1143 
1144 void
1145 pargs_drop(struct pargs *pa)
1146 {
1147 
1148 	if (pa == NULL)
1149 		return;
1150 	if (refcount_release(&pa->ar_ref))
1151 		pargs_free(pa);
1152 }
1153 
1154 /*
1155  * This sysctl allows a process to retrieve the argument list or process
1156  * title for another process without groping around in the address space
1157  * of the other process.  It also allows a process to set its own process
1158  * title to a string of its own choice.
1159  */
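/*
 * Userland usage sketch (illustrative only, not part of this file; the
 * pid variable and buffer size are assumed): the argument vector is
 * returned as a sequence of NUL-terminated strings.
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid };
 *	char buf[4096];
 *	size_t len = sizeof(buf);
 *	if (sysctl(mib, 4, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */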
1160 static int
1161 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1162 {
1163 	int *name = (int*) arg1;
1164 	u_int namelen = arg2;
1165 	struct pargs *newpa, *pa;
1166 	struct proc *p;
1167 	int error = 0;
1168 
1169 	if (namelen != 1)
1170 		return (EINVAL);
1171 
1172 	p = pfind((pid_t)name[0]);
1173 	if (!p)
1174 		return (ESRCH);
1175 
1176 	if ((error = p_cansee(curthread, p)) != 0) {
1177 		PROC_UNLOCK(p);
1178 		return (error);
1179 	}
1180 
1181 	if (req->newptr && curproc != p) {
1182 		PROC_UNLOCK(p);
1183 		return (EPERM);
1184 	}
1185 
1186 	pa = p->p_args;
1187 	pargs_hold(pa);
1188 	PROC_UNLOCK(p);
1189 	if (req->oldptr != NULL && pa != NULL)
1190 		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1191 	pargs_drop(pa);
1192 	if (error != 0 || req->newptr == NULL)
1193 		return (error);
1194 
1195 	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
1196 		return (ENOMEM);
1197 	newpa = pargs_alloc(req->newlen);
1198 	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
1199 	if (error != 0) {
1200 		pargs_free(newpa);
1201 		return (error);
1202 	}
1203 	PROC_LOCK(p);
1204 	pa = p->p_args;
1205 	p->p_args = newpa;
1206 	PROC_UNLOCK(p);
1207 	pargs_drop(pa);
1208 	return (0);
1209 }
1210 
1211 /*
1212  * This sysctl allows a process to retrieve the path of the executable for
1213  * itself or another process.
1214  */
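/*
 * Userland usage sketch (illustrative only, not part of this file):
 * a pid of -1 asks for the calling process's own executable path.
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
 *	char path[PATH_MAX];
 *	size_t len = sizeof(path);
 *	if (sysctl(mib, 4, path, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */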
1215 static int
1216 sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
1217 {
1218 	pid_t *pidp = (pid_t *)arg1;
1219 	unsigned int arglen = arg2;
1220 	struct proc *p;
1221 	struct vnode *vp;
1222 	char *retbuf, *freebuf;
1223 	int error;
1224 
1225 	if (arglen != 1)
1226 		return (EINVAL);
1227 	if (*pidp == -1) {	/* -1 means this process */
1228 		p = req->td->td_proc;
1229 	} else {
1230 		p = pfind(*pidp);
1231 		if (p == NULL)
1232 			return (ESRCH);
1233 		if ((error = p_cansee(curthread, p)) != 0) {
1234 			PROC_UNLOCK(p);
1235 			return (error);
1236 		}
1237 	}
1238 
1239 	vp = p->p_textvp;
1240 	vref(vp);
1241 	if (*pidp != -1)
1242 		PROC_UNLOCK(p);
1243 	error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
1244 	vrele(vp);
1245 	if (error)
1246 		return (error);
1247 	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
1248 	free(freebuf, M_TEMP);
1249 	return (error);
1250 }
1251 
1252 static int
1253 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
1254 {
1255 	struct proc *p;
1256 	char *sv_name;
1257 	int *name;
1258 	int namelen;
1259 	int error;
1260 
1261 	namelen = arg2;
1262 	if (namelen != 1)
1263 		return (EINVAL);
1264 
1265 	name = (int *)arg1;
1266 	if ((p = pfind((pid_t)name[0])) == NULL)
1267 		return (ESRCH);
1268 	if ((error = p_cansee(curthread, p))) {
1269 		PROC_UNLOCK(p);
1270 		return (error);
1271 	}
1272 	sv_name = p->p_sysent->sv_name;
1273 	PROC_UNLOCK(p);
1274 	return (sysctl_handle_string(oidp, sv_name, 0, req));
1275 }
1276 
1277 
1278 static SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");
1279 
1280 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1281 	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1282 
1283 static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD,
1284 	sysctl_kern_proc, "Process table");
1285 
1286 static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1287 	sysctl_kern_proc, "Process table");
1288 
1289 static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD,
1290 	sysctl_kern_proc, "Process table");
1291 
1292 static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD,
1293 	sysctl_kern_proc, "Process table");
1294 
1295 static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1296 	sysctl_kern_proc, "Process table");
1297 
1298 static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1299 	sysctl_kern_proc, "Process table");
1300 
1301 static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1302 	sysctl_kern_proc, "Process table");
1303 
1304 static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1305 	sysctl_kern_proc, "Process table");
1306 
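/*
 * Userland usage sketch (illustrative only, not part of this file; the
 * pid variable is assumed): fetching a single kinfo_proc entry by pid.
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, (int)pid };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */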
1307 static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD,
1308 	sysctl_kern_proc, "Return process table, no threads");
1309 
1310 static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
1311 	CTLFLAG_RW | CTLFLAG_ANYBODY,
1312 	sysctl_kern_proc_args, "Process argument list");
1313 
1314 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD,
1315 	sysctl_kern_proc_pathname, "Process executable path");
1316 
1317 static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD,
1318 	sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)");
1319 
1320 static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
1321 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1322 
1323 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
1324 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1325 
1326 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
1327 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1328 
1329 static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
1330 	sid_td, CTLFLAG_RD, sysctl_kern_proc, "Process table");
1331 
1332 static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
1333 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1334 
1335 static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
1336 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1337 
1338 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
1339 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1340 
1341 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
1342 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1343 
1344 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
1345 	CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");
1346