xref: /freebsd/sys/kern/kern_proc.c (revision 2546665afcaf0d53dc2c7058fee96354b3680f5a)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
30  * $FreeBSD$
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_ktrace.h"
37 #include "opt_kstack_pages.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sysent.h>
47 #include <sys/sched.h>
48 #include <sys/smp.h>
49 #include <sys/sysctl.h>
50 #include <sys/filedesc.h>
51 #include <sys/tty.h>
52 #include <sys/signalvar.h>
53 #include <sys/sx.h>
54 #include <sys/user.h>
55 #include <sys/jail.h>
56 #ifdef KTRACE
57 #include <sys/uio.h>
58 #include <sys/ktrace.h>
59 #endif
60 
61 #include <vm/vm.h>
62 #include <vm/vm_extern.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_map.h>
65 #include <vm/uma.h>
66 #include <machine/critical.h>
67 
68 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
69 MALLOC_DEFINE(M_SESSION, "session", "session header");
70 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
71 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
72 
73 static void doenterpgrp(struct proc *, struct pgrp *);
74 static void orphanpg(struct pgrp *pg);
75 static void pgadjustjobc(struct pgrp *pgrp, int entering);
76 static void pgdelete(struct pgrp *);
77 static int proc_ctor(void *mem, int size, void *arg, int flags);
78 static void proc_dtor(void *mem, int size, void *arg);
79 static int proc_init(void *mem, int size, int flags);
80 static void proc_fini(void *mem, int size);
81 
82 /*
83  * Other process lists
84  */
85 struct pidhashhead *pidhashtbl;
86 u_long pidhash;
87 struct pgrphashhead *pgrphashtbl;
88 u_long pgrphash;
89 struct proclist allproc;
90 struct proclist zombproc;
91 struct sx allproc_lock;
92 struct sx proctree_lock;
93 struct mtx pargs_ref_lock;
94 struct mtx ppeers_lock;
95 uma_zone_t proc_zone;
96 uma_zone_t ithread_zone;
97 
98 int kstack_pages = KSTACK_PAGES;
99 int uarea_pages = UAREA_PAGES;
100 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
101 SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");
102 
103 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
104 
105 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
106 
107 /*
108  * Initialize global process hashing structures.
109  */
110 void
111 procinit()
112 {
113 
114 	sx_init(&allproc_lock, "allproc");
115 	sx_init(&proctree_lock, "proctree");
116 	mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
117 	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
118 	LIST_INIT(&allproc);
119 	LIST_INIT(&zombproc);
120 	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
121 	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
122 	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
123 	    proc_ctor, proc_dtor, proc_init, proc_fini,
124 	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
125 	uihashinit();
126 }
127 
128 /*
129  * Prepare a proc for use.
130  */
131 static int
132 proc_ctor(void *mem, int size, void *arg, int flags)
133 {
134 	struct proc *p;
135 
136 	p = (struct proc *)mem;
137 	return (0);
138 }
139 
140 /*
141  * Reclaim a proc after use.
142  */
143 static void
144 proc_dtor(void *mem, int size, void *arg)
145 {
146 	struct proc *p;
147 	struct thread *td;
148 	struct ksegrp *kg;
149 	struct kse *ke;
150 
151 	/* INVARIANTS checks go here */
152 	p = (struct proc *)mem;
153 	KASSERT((p->p_numthreads == 1),
154 	    ("bad number of threads in exiting process"));
155 	td = FIRST_THREAD_IN_PROC(p);
156 	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
157 	kg = FIRST_KSEGRP_IN_PROC(p);
158 	KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
159 	ke = FIRST_KSE_IN_KSEGRP(kg);
160 	KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
161 
162 	/* Dispose of an alternate kstack, if it exists.
163 	 * XXX What if there is more than one thread in the proc?
164 	 *     The first thread in the proc is special and not
165 	 *     freed, so it must be done here.
166 	 */
167 	if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
168 		vm_thread_dispose_altkstack(td);
169 
170 	/*
171 	 * We want to make sure we know the initial linkages,
172 	 * so for now tear them down and remake them.
173 	 * This is probably unneeded as we can probably rely
174 	 * on the state coming in here from wait4().
175 	 */
176 	proc_linkup(p, kg, ke, td);
177 }
178 
179 /*
180  * Initialize type-stable parts of a proc (when newly created).
181  */
182 static int
183 proc_init(void *mem, int size, int flags)
184 {
185 	struct proc *p;
186 	struct thread *td;
187 	struct ksegrp *kg;
188 	struct kse *ke;
189 
190 	p = (struct proc *)mem;
191 	p->p_sched = (struct p_sched *)&p[1];
192 	vm_proc_new(p);
193 	td = thread_alloc();
194 	ke = kse_alloc();
195 	kg = ksegrp_alloc();
196 	proc_linkup(p, kg, ke, td);
197 	bzero(&p->p_mtx, sizeof(struct mtx));
198 	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
199 	return (0);
200 }
201 
202 /*
203  * Tear down type-stable parts of a proc (just before being discarded).
204  */
205 static void
206 proc_fini(void *mem, int size)
207 {
208 	struct proc *p;
209 	struct thread *td;
210 	struct ksegrp *kg;
211 	struct kse *ke;
212 
213 	p = (struct proc *)mem;
214 	KASSERT((p->p_numthreads == 1),
215 	    ("bad number of threads in freeing process"));
216 	td = FIRST_THREAD_IN_PROC(p);
217 	KASSERT((td != NULL), ("proc_fini: bad thread pointer"));
218 	kg = FIRST_KSEGRP_IN_PROC(p);
219 	KASSERT((kg != NULL), ("proc_fini: bad kg pointer"));
220 	ke = FIRST_KSE_IN_KSEGRP(kg);
221 	KASSERT((ke != NULL), ("proc_fini: bad ke pointer"));
222 	vm_proc_dispose(p);
223 	thread_free(td);
224 	ksegrp_free(kg);
225 	kse_free(ke);
226 	mtx_destroy(&p->p_mtx);
227 }
228 
229 /*
230  * Is p an inferior of the current process?
231  */
232 int
233 inferior(p)
234 	register struct proc *p;
235 {
236 
237 	sx_assert(&proctree_lock, SX_LOCKED);
238 	for (; p != curproc; p = p->p_pptr)
239 		if (p->p_pid == 0)
240 			return (0);
241 	return (1);
242 }
243 
244 /*
245  * Locate a process by number
246  */
247 struct proc *
248 pfind(pid)
249 	register pid_t pid;
250 {
251 	register struct proc *p;
252 
253 	sx_slock(&allproc_lock);
254 	LIST_FOREACH(p, PIDHASH(pid), p_hash)
255 		if (p->p_pid == pid) {
256 			PROC_LOCK(p);
257 			break;
258 		}
259 	sx_sunlock(&allproc_lock);
260 	return (p);
261 }
262 
263 /*
264  * Locate a process group by number.
265  * The caller must hold proctree_lock.
266  */
267 struct pgrp *
268 pgfind(pgid)
269 	register pid_t pgid;
270 {
271 	register struct pgrp *pgrp;
272 
273 	sx_assert(&proctree_lock, SX_LOCKED);
274 
275 	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
276 		if (pgrp->pg_id == pgid) {
277 			PGRP_LOCK(pgrp);
278 			return (pgrp);
279 		}
280 	}
281 	return (NULL);
282 }
283 
284 /*
285  * Create a new process group.
286  * pgid must be equal to the pid of p.
287  * Begin a new session if required.
288  */
289 int
290 enterpgrp(p, pgid, pgrp, sess)
291 	register struct proc *p;
292 	pid_t pgid;
293 	struct pgrp *pgrp;
294 	struct session *sess;
295 {
296 	struct pgrp *pgrp2;
297 
298 	sx_assert(&proctree_lock, SX_XLOCKED);
299 
300 	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
301 	KASSERT(p->p_pid == pgid,
302 	    ("enterpgrp: new pgrp and pid != pgid"));
303 
304 	pgrp2 = pgfind(pgid);
305 
306 	KASSERT(pgrp2 == NULL,
307 	    ("enterpgrp: pgrp with pgid exists"));
308 	KASSERT(!SESS_LEADER(p),
309 	    ("enterpgrp: session leader attempted setpgrp"));
310 
311 	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
312 
313 	if (sess != NULL) {
314 		/*
315 		 * new session
316 		 */
317 		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
318 		PROC_LOCK(p);
319 		p->p_flag &= ~P_CONTROLT;
320 		PROC_UNLOCK(p);
321 		PGRP_LOCK(pgrp);
322 		sess->s_leader = p;
323 		sess->s_sid = p->p_pid;
324 		sess->s_count = 1;
325 		sess->s_ttyvp = NULL;
326 		sess->s_ttyp = NULL;
327 		bcopy(p->p_session->s_login, sess->s_login,
328 			    sizeof(sess->s_login));
329 		pgrp->pg_session = sess;
330 		KASSERT(p == curproc,
331 		    ("enterpgrp: mksession and p != curproc"));
332 	} else {
333 		pgrp->pg_session = p->p_session;
334 		SESS_LOCK(pgrp->pg_session);
335 		pgrp->pg_session->s_count++;
336 		SESS_UNLOCK(pgrp->pg_session);
337 		PGRP_LOCK(pgrp);
338 	}
339 	pgrp->pg_id = pgid;
340 	LIST_INIT(&pgrp->pg_members);
341 
342 	/*
343 	 * As we have an exclusive lock of proctree_lock,
344 	 * this should not deadlock.
345 	 */
346 	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
347 	pgrp->pg_jobc = 0;
348 	SLIST_INIT(&pgrp->pg_sigiolst);
349 	PGRP_UNLOCK(pgrp);
350 
351 	doenterpgrp(p, pgrp);
352 
353 	return (0);
354 }
355 
356 /*
357  * Move p to an existing process group
358  */
359 int
360 enterthispgrp(p, pgrp)
361 	register struct proc *p;
362 	struct pgrp *pgrp;
363 {
364 
365 	sx_assert(&proctree_lock, SX_XLOCKED);
366 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
367 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
368 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
369 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
370 	KASSERT(pgrp->pg_session == p->p_session,
371 		("%s: pgrp's session %p, p->p_session %p.\n",
372 		__func__,
373 		pgrp->pg_session,
374 		p->p_session));
375 	KASSERT(pgrp != p->p_pgrp,
376 		("%s: p belongs to pgrp.", __func__));
377 
378 	doenterpgrp(p, pgrp);
379 
380 	return (0);
381 }
382 
383 /*
384  * Move p to a process group
385  */
386 static void
387 doenterpgrp(p, pgrp)
388 	struct proc *p;
389 	struct pgrp *pgrp;
390 {
391 	struct pgrp *savepgrp;
392 
393 	sx_assert(&proctree_lock, SX_XLOCKED);
394 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
395 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
396 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
397 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
398 
399 	savepgrp = p->p_pgrp;
400 
401 	/*
402 	 * Adjust eligibility of affected pgrps to participate in job control.
403 	 * Increment eligibility counts before decrementing, otherwise we
404 	 * could reach 0 spuriously during the first call.
405 	 */
406 	fixjobc(p, pgrp, 1);
407 	fixjobc(p, p->p_pgrp, 0);
408 
409 	PGRP_LOCK(pgrp);
410 	PGRP_LOCK(savepgrp);
411 	PROC_LOCK(p);
412 	LIST_REMOVE(p, p_pglist);
413 	p->p_pgrp = pgrp;
414 	PROC_UNLOCK(p);
415 	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
416 	PGRP_UNLOCK(savepgrp);
417 	PGRP_UNLOCK(pgrp);
418 	if (LIST_EMPTY(&savepgrp->pg_members))
419 		pgdelete(savepgrp);
420 }
421 
422 /*
423  * remove process from process group
424  */
425 int
426 leavepgrp(p)
427 	register struct proc *p;
428 {
429 	struct pgrp *savepgrp;
430 
431 	sx_assert(&proctree_lock, SX_XLOCKED);
432 	savepgrp = p->p_pgrp;
433 	PGRP_LOCK(savepgrp);
434 	PROC_LOCK(p);
435 	LIST_REMOVE(p, p_pglist);
436 	p->p_pgrp = NULL;
437 	PROC_UNLOCK(p);
438 	PGRP_UNLOCK(savepgrp);
439 	if (LIST_EMPTY(&savepgrp->pg_members))
440 		pgdelete(savepgrp);
441 	return (0);
442 }
443 
444 /*
445  * delete a process group
446  */
447 static void
448 pgdelete(pgrp)
449 	register struct pgrp *pgrp;
450 {
451 	struct session *savesess;
452 	int i;
453 
454 	sx_assert(&proctree_lock, SX_XLOCKED);
455 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
456 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
457 
458 	/*
459 	 * Reset any sigio structures pointing to us as a result of
460 	 * F_SETOWN with our pgid.
461 	 */
462 	funsetownlst(&pgrp->pg_sigiolst);
463 
464 	PGRP_LOCK(pgrp);
465 	if (pgrp->pg_session->s_ttyp != NULL &&
466 	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
467 		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
468 	LIST_REMOVE(pgrp, pg_hash);
469 	savesess = pgrp->pg_session;
470 	SESS_LOCK(savesess);
471 	i = --savesess->s_count;
472 	SESS_UNLOCK(savesess);
473 	PGRP_UNLOCK(pgrp);
474 	if (i == 0) {
475 		if (savesess->s_ttyp != NULL)
476 			ttyrel(savesess->s_ttyp);
477 		mtx_destroy(&savesess->s_mtx);
478 		FREE(savesess, M_SESSION);
479 	}
480 	mtx_destroy(&pgrp->pg_mtx);
481 	FREE(pgrp, M_PGRP);
482 }
483 
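/*
 * Adjust the job-control count of a process group by one in the given
 * direction.  If the count drops to zero the group has just become
 * orphaned, so orphanpg() is invoked on it.
 */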
484 static void
485 pgadjustjobc(pgrp, entering)
486 	struct pgrp *pgrp;
487 	int entering;
488 {
489 
490 	PGRP_LOCK(pgrp);
491 	if (entering)
492 		pgrp->pg_jobc++;
493 	else {
494 		--pgrp->pg_jobc;
495 		if (pgrp->pg_jobc == 0)
496 			orphanpg(pgrp);
497 	}
498 	PGRP_UNLOCK(pgrp);
499 }
500 
501 /*
502  * Adjust pgrp jobc counters when specified process changes process group.
503  * We count the number of processes in each process group that "qualify"
504  * the group for terminal job control (those with a parent in a different
505  * process group of the same session).  If that count reaches zero, the
506  * process group becomes orphaned.  Check both the specified process'
507  * process group and that of its children.
508  * entering == 0 => p is leaving specified group.
509  * entering == 1 => p is entering specified group.
510  */
511 void
512 fixjobc(p, pgrp, entering)
513 	register struct proc *p;
514 	register struct pgrp *pgrp;
515 	int entering;
516 {
517 	register struct pgrp *hispgrp;
518 	register struct session *mysession;
519 
520 	sx_assert(&proctree_lock, SX_LOCKED);
521 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
522 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
523 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
524 
525 	/*
526 	 * Check p's parent to see whether p qualifies its own process
527 	 * group; if so, adjust count for p's process group.
528 	 */
529 	mysession = pgrp->pg_session;
530 	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
531 	    hispgrp->pg_session == mysession)
532 		pgadjustjobc(pgrp, entering);
533 
534 	/*
535 	 * Check this process' children to see whether they qualify
536 	 * their process groups; if so, adjust counts for children's
537 	 * process groups.
538 	 */
539 	LIST_FOREACH(p, &p->p_children, p_sibling) {
540 		hispgrp = p->p_pgrp;
541 		if (hispgrp == pgrp ||
542 		    hispgrp->pg_session != mysession)
543 			continue;
544 		PROC_LOCK(p);
545 		if (p->p_state == PRS_ZOMBIE) {
546 			PROC_UNLOCK(p);
547 			continue;
548 		}
549 		PROC_UNLOCK(p);
550 		pgadjustjobc(hispgrp, entering);
551 	}
552 }
553 
554 /*
555  * A process group has become orphaned;
556  * if there are any stopped processes in the group,
557  * hang up all processes in that group.
558  */
559 static void
560 orphanpg(pg)
561 	struct pgrp *pg;
562 {
563 	register struct proc *p;
564 
565 	PGRP_LOCK_ASSERT(pg, MA_OWNED);
566 
567 	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
568 		PROC_LOCK(p);
569 		if (P_SHOULDSTOP(p)) {
570 			PROC_UNLOCK(p);
571 			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
572 				PROC_LOCK(p);
573 				psignal(p, SIGHUP);
574 				psignal(p, SIGCONT);
575 				PROC_UNLOCK(p);
576 			}
577 			return;
578 		}
579 		PROC_UNLOCK(p);
580 	}
581 }
582 
583 #include "opt_ddb.h"
584 #ifdef DDB
585 #include <ddb/ddb.h>
586 
587 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
588 {
589 	register struct pgrp *pgrp;
590 	register struct proc *p;
591 	register int i;
592 
593 	for (i = 0; i <= pgrphash; i++) {
594 		if (!LIST_EMPTY(&pgrphashtbl[i])) {
595 			printf("\tindx %d\n", i);
596 			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
597 				printf(
598 			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
599 				    (void *)pgrp, (long)pgrp->pg_id,
600 				    (void *)pgrp->pg_session,
601 				    pgrp->pg_session->s_count,
602 				    (void *)LIST_FIRST(&pgrp->pg_members));
603 				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
604 					printf("\t\tpid %ld addr %p pgrp %p\n",
605 					    (long)p->p_pid, (void *)p,
606 					    (void *)p->p_pgrp);
607 				}
608 			}
609 		}
610 	}
611 }
612 #endif /* DDB */
613 void
614 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
615 
616 /*
617  * Fill in a kinfo_proc structure for the specified process.
618  * Must be called with the target process locked.
619  */
620 void
621 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
622 {
623 	fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
624 }
625 
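/*
 * Fill in a kinfo_proc structure from the specified thread and its process.
 * Must be called with the containing process locked.
 */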
626 void
627 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
628 {
629 	struct proc *p;
630 	struct thread *td0;
631 	struct kse *ke;
632 	struct ksegrp *kg;
633 	struct tty *tp;
634 	struct session *sp;
635 	struct timeval tv;
636 	struct sigacts *ps;
637 
638 	p = td->td_proc;
639 
640 	bzero(kp, sizeof(*kp));
641 
642 	kp->ki_structsize = sizeof(*kp);
643 	kp->ki_paddr = p;
644 	PROC_LOCK_ASSERT(p, MA_OWNED);
645 	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
646 	kp->ki_args = p->p_args;
647 	kp->ki_textvp = p->p_textvp;
648 #ifdef KTRACE
649 	kp->ki_tracep = p->p_tracevp;
650 	mtx_lock(&ktrace_mtx);
651 	kp->ki_traceflag = p->p_traceflag;
652 	mtx_unlock(&ktrace_mtx);
653 #endif
654 	kp->ki_fd = p->p_fd;
655 	kp->ki_vmspace = p->p_vmspace;
656 	if (p->p_ucred) {
657 		kp->ki_uid = p->p_ucred->cr_uid;
658 		kp->ki_ruid = p->p_ucred->cr_ruid;
659 		kp->ki_svuid = p->p_ucred->cr_svuid;
660 		/* XXX bde doesn't like KI_NGROUPS */
661 		kp->ki_ngroups = min(p->p_ucred->cr_ngroups, KI_NGROUPS);
662 		bcopy(p->p_ucred->cr_groups, kp->ki_groups,
663 		    kp->ki_ngroups * sizeof(gid_t));
664 		kp->ki_rgid = p->p_ucred->cr_rgid;
665 		kp->ki_svgid = p->p_ucred->cr_svgid;
666 	}
667 	if (p->p_sigacts) {
668 		ps = p->p_sigacts;
669 		mtx_lock(&ps->ps_mtx);
670 		kp->ki_sigignore = ps->ps_sigignore;
671 		kp->ki_sigcatch = ps->ps_sigcatch;
672 		mtx_unlock(&ps->ps_mtx);
673 	}
674 	mtx_lock_spin(&sched_lock);
675 	if (p->p_state != PRS_NEW &&
676 	    p->p_state != PRS_ZOMBIE &&
677 	    p->p_vmspace != NULL) {
678 		struct vmspace *vm = p->p_vmspace;
679 
680 		kp->ki_size = vm->vm_map.size;
681 		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
682 		if (p->p_sflag & PS_INMEM)
683 			kp->ki_rssize += UAREA_PAGES;
684 		FOREACH_THREAD_IN_PROC(p, td0) {
685 			if (!TD_IS_SWAPPED(td0))
686 				kp->ki_rssize += td0->td_kstack_pages;
687 			if (td0->td_altkstack_obj != NULL)
688 				kp->ki_rssize += td0->td_altkstack_pages;
689 		}
690 		kp->ki_swrss = vm->vm_swrss;
691 		kp->ki_tsize = vm->vm_tsize;
692 		kp->ki_dsize = vm->vm_dsize;
693 		kp->ki_ssize = vm->vm_ssize;
694 	}
695 	if ((p->p_sflag & PS_INMEM) && p->p_stats) {
696 		kp->ki_start = p->p_stats->p_start;
697 		timevaladd(&kp->ki_start, &boottime);
698 		kp->ki_rusage = p->p_stats->p_ru;
699 		calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime,
700 		    NULL);
701 		kp->ki_childstime = p->p_stats->p_cru.ru_stime;
702 		kp->ki_childutime = p->p_stats->p_cru.ru_utime;
703 		/* Some callers want child-times in a single value */
704 		kp->ki_childtime = kp->ki_childstime;
705 		timevaladd(&kp->ki_childtime, &kp->ki_childutime);
706 	}
707 	kp->ki_sflag = p->p_sflag;
708 	kp->ki_swtime = p->p_swtime;
709 	kp->ki_pid = p->p_pid;
710 	kp->ki_nice = p->p_nice;
711 	bintime2timeval(&p->p_runtime, &tv);
712 	kp->ki_runtime = tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
713 	if (p->p_state != PRS_ZOMBIE) {
714 #if 0
715 		if (td == NULL) {
716 			/* XXXKSE: This should never happen. */
717 			printf("fill_kinfo_proc(): pid %d has no threads!\n",
718 			    p->p_pid);
719 			mtx_unlock_spin(&sched_lock);
720 			return;
721 		}
722 #endif
723 		if (td->td_wmesg != NULL) {
724 			strlcpy(kp->ki_wmesg, td->td_wmesg,
725 			    sizeof(kp->ki_wmesg));
726 		}
727 		if (TD_ON_LOCK(td)) {
728 			kp->ki_kiflag |= KI_LOCKBLOCK;
729 			strlcpy(kp->ki_lockname, td->td_lockname,
730 			    sizeof(kp->ki_lockname));
731 		}
732 
733 		if (p->p_state == PRS_NORMAL) { /*  XXXKSE very approximate */
734 			if (TD_ON_RUNQ(td) ||
735 			    TD_CAN_RUN(td) ||
736 			    TD_IS_RUNNING(td)) {
737 				kp->ki_stat = SRUN;
738 			} else if (P_SHOULDSTOP(p)) {
739 				kp->ki_stat = SSTOP;
740 			} else if (TD_IS_SLEEPING(td)) {
741 				kp->ki_stat = SSLEEP;
742 			} else if (TD_ON_LOCK(td)) {
743 				kp->ki_stat = SLOCK;
744 			} else {
745 				kp->ki_stat = SWAIT;
746 			}
747 		} else {
748 			kp->ki_stat = SIDL;
749 		}
750 
751 		kg = td->td_ksegrp;
752 		ke = td->td_kse;
753 
754 		/* things in the KSE GROUP */
755 		kp->ki_estcpu = kg->kg_estcpu;
756 		kp->ki_slptime = kg->kg_slptime;
757 		kp->ki_pri.pri_user = kg->kg_user_pri;
758 		kp->ki_pri.pri_class = kg->kg_pri_class;
759 
760 		/* Things in the thread */
761 		kp->ki_wchan = td->td_wchan;
762 		kp->ki_pri.pri_level = td->td_priority;
763 		kp->ki_pri.pri_native = td->td_base_pri;
764 		kp->ki_lastcpu = td->td_lastcpu;
765 		kp->ki_oncpu = td->td_oncpu;
766 		kp->ki_tdflags = td->td_flags;
767 		kp->ki_tid = td->td_tid;
768 		kp->ki_numthreads = p->p_numthreads;
769 		kp->ki_pcb = td->td_pcb;
770 		kp->ki_kstack = (void *)td->td_kstack;
771 		kp->ki_pctcpu = sched_pctcpu(td);
772 
773 		/* Things in the kse */
774 		if (ke)
775 			kp->ki_rqindex = ke->ke_rqindex;
776 		else
777 			kp->ki_rqindex = 0;
778 
779 	} else {
780 		kp->ki_stat = SZOMB;
781 	}
782 	mtx_unlock_spin(&sched_lock);
783 	sp = NULL;
784 	tp = NULL;
785 	if (p->p_pgrp) {
786 		kp->ki_pgid = p->p_pgrp->pg_id;
787 		kp->ki_jobc = p->p_pgrp->pg_jobc;
788 		sp = p->p_pgrp->pg_session;
789 
790 		if (sp != NULL) {
791 			kp->ki_sid = sp->s_sid;
792 			SESS_LOCK(sp);
793 			strlcpy(kp->ki_login, sp->s_login,
794 			    sizeof(kp->ki_login));
795 			if (sp->s_ttyvp)
796 				kp->ki_kiflag |= KI_CTTY;
797 			if (SESS_LEADER(p))
798 				kp->ki_kiflag |= KI_SLEADER;
799 			tp = sp->s_ttyp;
800 			SESS_UNLOCK(sp);
801 		}
802 	}
803 	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
804 		kp->ki_tdev = dev2udev(tp->t_dev);
805 		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
806 		if (tp->t_session)
807 			kp->ki_tsid = tp->t_session->s_sid;
808 	} else
809 		kp->ki_tdev = NODEV;
810 	if (p->p_comm[0] != '\0') {
811 		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
812 		strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
813 	}
814 	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
815 	    p->p_sysent->sv_name[0] != '\0')
816 		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
817 	kp->ki_siglist = p->p_siglist;
818 	SIGSETOR(kp->ki_siglist, td->td_siglist);
819 	kp->ki_sigmask = td->td_sigmask;
820 	kp->ki_xstat = p->p_xstat;
821 	kp->ki_acflag = p->p_acflag;
822 	kp->ki_flag = p->p_flag;
823 	/* If jailed(p->p_ucred), emulate the old P_JAILED flag. */
824 	if (jailed(p->p_ucred))
825 		kp->ki_flag |= P_JAILED;
826 	kp->ki_lock = p->p_lock;
827 	if (p->p_pptr)
828 		kp->ki_ppid = p->p_pptr->p_pid;
829 }
830 
831 /*
832  * Locate a zombie process by number
833  */
834 struct proc *
835 zpfind(pid_t pid)
836 {
837 	struct proc *p;
838 
839 	sx_slock(&allproc_lock);
840 	LIST_FOREACH(p, &zombproc, p_list)
841 		if (p->p_pid == pid) {
842 			PROC_LOCK(p);
843 			break;
844 		}
845 	sx_sunlock(&allproc_lock);
846 	return (p);
847 }
848 
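/*
 * Private flags for sysctl_out_proc(): the low bits carry the zombie-list
 * indicator from sysctl_kern_proc(), and KERN_PROC_NOTHREADS requests a
 * single record per process instead of one record per thread.
 */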
849 #define KERN_PROC_ZOMBMASK	0x3
850 #define KERN_PROC_NOTHREADS	0x4
851 
852 /*
853  * Must be called with the process locked and will return with it unlocked.
854  */
855 static int
856 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
857 {
858 	struct thread *td;
859 	struct kinfo_proc kinfo_proc;
860 	int error = 0;
861 	struct proc *np;
862 	pid_t pid = p->p_pid;
863 
864 	PROC_LOCK_ASSERT(p, MA_OWNED);
865 
866 	if (flags & KERN_PROC_NOTHREADS) {
867 		fill_kinfo_proc(p, &kinfo_proc);
868 		PROC_UNLOCK(p);
869 		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
870 				   sizeof(kinfo_proc));
871 		PROC_LOCK(p);
872 	} else {
873 		_PHOLD(p);
874 		FOREACH_THREAD_IN_PROC(p, td) {
875 			fill_kinfo_thread(td, &kinfo_proc);
876 			PROC_UNLOCK(p);
877 			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
878 					   sizeof(kinfo_proc));
879 			PROC_LOCK(p);
880 			if (error)
881 				break;
882 		}
883 		_PRELE(p);
884 	}
885 	PROC_UNLOCK(p);
886 	if (error)
887 		return (error);
888 	if (flags & KERN_PROC_ZOMBMASK)
889 		np = zpfind(pid);
890 	else {
891 		if (pid == 0)
892 			return (0);
893 		np = pfind(pid);
894 	}
895 	if (np == NULL)
896 		return (EAGAIN);
897 	if (np != p) {
898 		PROC_UNLOCK(np);
899 		return (EAGAIN);
900 	}
901 	PROC_UNLOCK(np);
902 	return (0);
903 }
904 
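/*
 * Common handler for the kern.proc.* sysctls: select processes by pid,
 * process group, session, tty, uid or gid (real or effective), or dump
 * the whole table, and copy out a kinfo_proc record for each match
 * (one per thread when KERN_PROC_INC_THREAD is requested).
 */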
905 static int
906 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
907 {
908 	int *name = (int*) arg1;
909 	u_int namelen = arg2;
910 	struct proc *p;
911 	int flags, doingzomb, oid_number;
912 	int error = 0;
913 
914 	oid_number = oidp->oid_number;
915 	if (oid_number != KERN_PROC_ALL &&
916 	    (oid_number & KERN_PROC_INC_THREAD) == 0)
917 		flags = KERN_PROC_NOTHREADS;
918 	else {
919 		flags = 0;
920 		oid_number &= ~KERN_PROC_INC_THREAD;
921 	}
922 	if (oid_number == KERN_PROC_PID) {
923 		if (namelen != 1)
924 			return (EINVAL);
925 		p = pfind((pid_t)name[0]);
926 		if (!p)
927 			return (ESRCH);
928 		if ((error = p_cansee(curthread, p))) {
929 			PROC_UNLOCK(p);
930 			return (error);
931 		}
932 		error = sysctl_out_proc(p, req, flags);
933 		return (error);
934 	}
935 
936 	switch (oid_number) {
937 	case KERN_PROC_ALL:
938 		if (namelen != 0)
939 			return (EINVAL);
940 		break;
941 	case KERN_PROC_PROC:
942 		if (namelen != 0 && namelen != 1)
943 			return (EINVAL);
944 		break;
945 	default:
946 		if (namelen != 1)
947 			return (EINVAL);
948 		break;
949 	}
950 
951 	if (!req->oldptr) {
952 		/* overestimate by 5 procs */
953 		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
954 		if (error)
955 			return (error);
956 	}
957 	error = sysctl_wire_old_buffer(req, 0);
958 	if (error != 0)
959 		return (error);
960 	sx_slock(&allproc_lock);
961 	for (doingzomb = 0; doingzomb < 2; doingzomb++) {
962 		if (!doingzomb)
963 			p = LIST_FIRST(&allproc);
964 		else
965 			p = LIST_FIRST(&zombproc);
966 		for (; p != NULL; p = LIST_NEXT(p, p_list)) {
967 			/*
968 			 * Skip embryonic processes.
969 			 */
970 			mtx_lock_spin(&sched_lock);
971 			if (p->p_state == PRS_NEW) {
972 				mtx_unlock_spin(&sched_lock);
973 				continue;
974 			}
975 			mtx_unlock_spin(&sched_lock);
976 			PROC_LOCK(p);
977 			/*
978 			 * Show a user only appropriate processes.
979 			 */
980 			if (p_cansee(curthread, p)) {
981 				PROC_UNLOCK(p);
982 				continue;
983 			}
984 			/*
985 			 * TODO - make more efficient (see notes below).
986 			 * Do by session.
987 			 */
988 			switch (oid_number) {
989 
990 			case KERN_PROC_GID:
991 				if (p->p_ucred == NULL ||
992 				    p->p_ucred->cr_gid != (gid_t)name[0]) {
993 					PROC_UNLOCK(p);
994 					continue;
995 				}
996 				break;
997 
998 			case KERN_PROC_PGRP:
999 				/* could do this by traversing pgrp */
1000 				if (p->p_pgrp == NULL ||
1001 				    p->p_pgrp->pg_id != (pid_t)name[0]) {
1002 					PROC_UNLOCK(p);
1003 					continue;
1004 				}
1005 				break;
1006 
1007 			case KERN_PROC_RGID:
1008 				if (p->p_ucred == NULL ||
1009 				    p->p_ucred->cr_rgid != (gid_t)name[0]) {
1010 					PROC_UNLOCK(p);
1011 					continue;
1012 				}
1013 				break;
1014 
1015 			case KERN_PROC_SESSION:
1016 				if (p->p_session == NULL ||
1017 				    p->p_session->s_sid != (pid_t)name[0]) {
1018 					PROC_UNLOCK(p);
1019 					continue;
1020 				}
1021 				break;
1022 
1023 			case KERN_PROC_TTY:
1024 				if ((p->p_flag & P_CONTROLT) == 0 ||
1025 				    p->p_session == NULL) {
1026 					PROC_UNLOCK(p);
1027 					continue;
1028 				}
1029 				SESS_LOCK(p->p_session);
1030 				if (p->p_session->s_ttyp == NULL ||
1031 				    dev2udev(p->p_session->s_ttyp->t_dev) !=
1032 				    (dev_t)name[0]) {
1033 					SESS_UNLOCK(p->p_session);
1034 					PROC_UNLOCK(p);
1035 					continue;
1036 				}
1037 				SESS_UNLOCK(p->p_session);
1038 				break;
1039 
1040 			case KERN_PROC_UID:
1041 				if (p->p_ucred == NULL ||
1042 				    p->p_ucred->cr_uid != (uid_t)name[0]) {
1043 					PROC_UNLOCK(p);
1044 					continue;
1045 				}
1046 				break;
1047 
1048 			case KERN_PROC_RUID:
1049 				if (p->p_ucred == NULL ||
1050 				    p->p_ucred->cr_ruid != (uid_t)name[0]) {
1051 					PROC_UNLOCK(p);
1052 					continue;
1053 				}
1054 				break;
1055 
1056 			case KERN_PROC_PROC:
1057 				break;
1058 
1059 			default:
1060 				break;
1061 
1062 			}
1063 
1064 			error = sysctl_out_proc(p, req, flags | doingzomb);
1065 			if (error) {
1066 				sx_sunlock(&allproc_lock);
1067 				return (error);
1068 			}
1069 		}
1070 	}
1071 	sx_sunlock(&allproc_lock);
1072 	return (0);
1073 }
1074 
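/*
 * Allocation and reference counting for the process argument structures
 * (p_args) consumed by the kern.proc.args sysctl below.
 */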
1075 struct pargs *
1076 pargs_alloc(int len)
1077 {
1078 	struct pargs *pa;
1079 
1080 	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
1081 		M_WAITOK);
1082 	pa->ar_ref = 1;
1083 	pa->ar_length = len;
1084 	return (pa);
1085 }
1086 
1087 void
1088 pargs_free(struct pargs *pa)
1089 {
1090 
1091 	FREE(pa, M_PARGS);
1092 }
1093 
1094 void
1095 pargs_hold(struct pargs *pa)
1096 {
1097 
1098 	if (pa == NULL)
1099 		return;
1100 	PARGS_LOCK(pa);
1101 	pa->ar_ref++;
1102 	PARGS_UNLOCK(pa);
1103 }
1104 
1105 void
1106 pargs_drop(struct pargs *pa)
1107 {
1108 
1109 	if (pa == NULL)
1110 		return;
1111 	PARGS_LOCK(pa);
1112 	if (--pa->ar_ref == 0) {
1113 		PARGS_UNLOCK(pa);
1114 		pargs_free(pa);
1115 	} else
1116 		PARGS_UNLOCK(pa);
1117 }
1118 
1119 /*
1120  * This sysctl allows a process to retrieve the argument list or process
1121  * title for another process without groping around in the address space
1122  * of the other process.  It also allows a process to set its own "process
1123  * title" to a string of its own choice.
1124  */
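/*
 * Sketch (not part of the kernel): a userland consumer would normally
 * reach this handler through sysctl(3), along the lines of
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid };
 *	size_t len = sizeof(buf);
 *	if (sysctl(mib, 4, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *
 * where buf and pid are supplied by the caller.
 */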
1125 static int
1126 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1127 {
1128 	int *name = (int*) arg1;
1129 	u_int namelen = arg2;
1130 	struct pargs *newpa, *pa;
1131 	struct proc *p;
1132 	int error = 0;
1133 
1134 	if (namelen != 1)
1135 		return (EINVAL);
1136 
1137 	p = pfind((pid_t)name[0]);
1138 	if (!p)
1139 		return (ESRCH);
1140 
1141 	if ((error = p_cansee(curthread, p)) != 0) {
1142 		PROC_UNLOCK(p);
1143 		return (error);
1144 	}
1145 
1146 	if (req->newptr && curproc != p) {
1147 		PROC_UNLOCK(p);
1148 		return (EPERM);
1149 	}
1150 
1151 	pa = p->p_args;
1152 	pargs_hold(pa);
1153 	PROC_UNLOCK(p);
1154 	if (req->oldptr != NULL && pa != NULL)
1155 		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1156 	pargs_drop(pa);
1157 	if (error != 0 || req->newptr == NULL)
1158 		return (error);
1159 
1160 	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
1161 		return (ENOMEM);
1162 	newpa = pargs_alloc(req->newlen);
1163 	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
1164 	if (error != 0) {
1165 		pargs_free(newpa);
1166 		return (error);
1167 	}
1168 	PROC_LOCK(p);
1169 	pa = p->p_args;
1170 	p->p_args = newpa;
1171 	PROC_UNLOCK(p);
1172 	pargs_drop(pa);
1173 	return (0);
1174 }
1175 
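/*
 * Return the name of the syscall vector (ABI) under which the specified
 * process is running.
 */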
1176 static int
1177 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
1178 {
1179 	struct proc *p;
1180 	char *sv_name;
1181 	int *name;
1182 	int namelen;
1183 	int error;
1184 
1185 	namelen = arg2;
1186 	if (namelen != 1)
1187 		return (EINVAL);
1188 
1189 	name = (int *)arg1;
1190 	if ((p = pfind((pid_t)name[0])) == NULL)
1191 		return (ESRCH);
1192 	if ((error = p_cansee(curthread, p))) {
1193 		PROC_UNLOCK(p);
1194 		return (error);
1195 	}
1196 	sv_name = p->p_sysent->sv_name;
1197 	PROC_UNLOCK(p);
1198 	return (sysctl_handle_string(oidp, sv_name, 0, req));
1199 }
1200 
1201 
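/*
 * Attach the handlers above to the kern.proc sysctl tree.  The *_td
 * variants additionally set KERN_PROC_INC_THREAD, yielding one record
 * per thread rather than one per process.
 *
 * Sketch (not part of the kernel): userland typically queries these via
 * sysctl(3), e.g. to fetch a single process entry:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */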
1202 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");
1203 
1204 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1205 	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1206 
1207 SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD,
1208 	sysctl_kern_proc, "Process table");
1209 
1210 SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1211 	sysctl_kern_proc, "Process table");
1212 
1213 SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD,
1214 	sysctl_kern_proc, "Process table");
1215 
1216 SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD,
1217 	sysctl_kern_proc, "Process table");
1218 
1219 SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1220 	sysctl_kern_proc, "Process table");
1221 
1222 SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1223 	sysctl_kern_proc, "Process table");
1224 
1225 SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1226 	sysctl_kern_proc, "Process table");
1227 
1228 SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1229 	sysctl_kern_proc, "Process table");
1230 
1231 SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD,
1232 	sysctl_kern_proc, "Return process table, no threads");
1233 
1234 SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
1235 	sysctl_kern_proc_args, "Process argument list");
1236 
1237 SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD,
1238 	sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)");
1239 
1240 SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
1241 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1242 
1243 SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
1244 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1245 
1246 SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
1247 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1248 
1249 SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD), sid_td,
1250 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1251 
1252 SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
1253 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1254 
1255 SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
1256 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1257 
1258 SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
1259 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1260 
1261 SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
1262 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1263 
1264 SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
1265 	CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");
1266