xref: /freebsd/sys/kern/kern_proc.c (revision dba6dd177bdee890cf445fbe21a5dccefd5de18e)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
30  * $FreeBSD$
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_ktrace.h"
37 #include "opt_kstack_pages.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sysent.h>
47 #include <sys/sched.h>
48 #include <sys/smp.h>
49 #include <sys/sysctl.h>
50 #include <sys/filedesc.h>
51 #include <sys/tty.h>
52 #include <sys/signalvar.h>
53 #include <sys/sx.h>
54 #include <sys/user.h>
55 #include <sys/jail.h>
56 #ifdef KTRACE
57 #include <sys/uio.h>
58 #include <sys/ktrace.h>
59 #endif
60 
61 #include <vm/vm.h>
62 #include <vm/vm_extern.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_map.h>
65 #include <vm/uma.h>
66 #include <machine/critical.h>
67 
68 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
69 MALLOC_DEFINE(M_SESSION, "session", "session header");
70 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
71 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
72 
73 static void doenterpgrp(struct proc *, struct pgrp *);
74 static void orphanpg(struct pgrp *pg);
75 static void pgadjustjobc(struct pgrp *pgrp, int entering);
76 static void pgdelete(struct pgrp *);
77 static void proc_ctor(void *mem, int size, void *arg);
78 static void proc_dtor(void *mem, int size, void *arg);
79 static void proc_init(void *mem, int size);
80 static void proc_fini(void *mem, int size);
81 
82 /*
83  * Other process lists
84  */
85 struct pidhashhead *pidhashtbl;
86 u_long pidhash;
87 struct pgrphashhead *pgrphashtbl;
88 u_long pgrphash;
89 struct proclist allproc;
90 struct proclist zombproc;
91 struct sx allproc_lock;
92 struct sx proctree_lock;
93 struct mtx pargs_ref_lock;
94 struct mtx ppeers_lock;
95 uma_zone_t proc_zone;
96 uma_zone_t ithread_zone;
97 
98 int kstack_pages = KSTACK_PAGES;
99 int uarea_pages = UAREA_PAGES;
100 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
101 SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");
102 
103 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
104 
105 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
106 
107 /*
108  * Initialize global process hashing structures.
109  */
110 void
111 procinit()
112 {
113 
114 	sx_init(&allproc_lock, "allproc");
115 	sx_init(&proctree_lock, "proctree");
116 	mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
117 	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
118 	LIST_INIT(&allproc);
119 	LIST_INIT(&zombproc);
120 	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
121 	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
122 	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
123 	    proc_ctor, proc_dtor, proc_init, proc_fini,
124 	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
125 	uihashinit();
126 }
127 
128 /*
129  * Prepare a proc for use.
130  */
131 static void
132 proc_ctor(void *mem, int size, void *arg)
133 {
134 	struct proc *p;
135 
136 	p = (struct proc *)mem;
137 }
138 
139 /*
140  * Reclaim a proc after use.
141  */
142 static void
143 proc_dtor(void *mem, int size, void *arg)
144 {
145 	struct proc *p;
146 	struct thread *td;
147 	struct ksegrp *kg;
148 	struct kse *ke;
149 
150 	/* INVARIANTS checks go here */
151 	p = (struct proc *)mem;
152 	KASSERT((p->p_numthreads == 1),
153 	    ("bad number of threads in exiting process"));
154 	td = FIRST_THREAD_IN_PROC(p);
155 	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
156 	kg = FIRST_KSEGRP_IN_PROC(p);
157 	KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
158 	ke = FIRST_KSE_IN_KSEGRP(kg);
159 	KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
160 
161 	/* Dispose of an alternate kstack, if it exists.
162 	 * XXX What if there is more than one thread in the proc?
163 	 *     The first thread in the proc is special and not
164 	 *     freed, so this must be done here.
165 	 */
166 	if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
167 		vm_thread_dispose_altkstack(td);
168 
169 	/*
170 	 * We want to make sure we know the initial linkages,
171 	 * so for now tear them down and remake them.
172 	 * This is probably unneeded, as we can likely rely
173 	 * on the state coming in here from wait4().
174 	 */
175 	proc_linkup(p, kg, ke, td);
176 }
177 
178 /*
179  * Initialize type-stable parts of a proc (when newly created).
180  */
181 static void
182 proc_init(void *mem, int size)
183 {
184 	struct proc *p;
185 	struct thread *td;
186 	struct ksegrp *kg;
187 	struct kse *ke;
188 
189 	p = (struct proc *)mem;
190 	p->p_sched = (struct p_sched *)&p[1];
191 	vm_proc_new(p);
192 	td = thread_alloc();
193 	ke = kse_alloc();
194 	kg = ksegrp_alloc();
195 	proc_linkup(p, kg, ke, td);
196 	bzero(&p->p_mtx, sizeof(struct mtx));
197 	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
198 }
199 
200 /*
201  * Tear down type-stable parts of a proc (just before being discarded).
202  */
203 static void
204 proc_fini(void *mem, int size)
205 {
206 	struct proc *p;
207 	struct thread *td;
208 	struct ksegrp *kg;
209 	struct kse *ke;
210 
211 	p = (struct proc *)mem;
212 	KASSERT((p->p_numthreads == 1),
213 	    ("bad number of threads in freeing process"));
214 	td = FIRST_THREAD_IN_PROC(p);
215 	KASSERT((td != NULL), ("proc_fini: bad thread pointer"));
216 	kg = FIRST_KSEGRP_IN_PROC(p);
217 	KASSERT((kg != NULL), ("proc_fini: bad kg pointer"));
218 	ke = FIRST_KSE_IN_KSEGRP(kg);
219 	KASSERT((ke != NULL), ("proc_fini: bad ke pointer"));
220 	vm_proc_dispose(p);
221 	thread_free(td);
222 	ksegrp_free(kg);
223 	kse_free(ke);
224 	mtx_destroy(&p->p_mtx);
225 }
226 
227 /*
228  * Is p an inferior of the current process?
229  */
230 int
231 inferior(p)
232 	register struct proc *p;
233 {
234 
235 	sx_assert(&proctree_lock, SX_LOCKED);
236 	for (; p != curproc; p = p->p_pptr)
237 		if (p->p_pid == 0)
238 			return (0);
239 	return (1);
240 }
241 
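/*
 * Usage sketch (illustrative, not taken verbatim from this file): callers
 * of inferior() are expected to hold proctree_lock, shared or exclusive,
 * across the check:
 *
 *	sx_slock(&proctree_lock);
 *	descendant = inferior(p);
 *	sx_sunlock(&proctree_lock);
 */
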
242 /*
243  * Locate a process by number
244  */
245 struct proc *
246 pfind(pid)
247 	register pid_t pid;
248 {
249 	register struct proc *p;
250 
251 	sx_slock(&allproc_lock);
252 	LIST_FOREACH(p, PIDHASH(pid), p_hash)
253 		if (p->p_pid == pid) {
254 			PROC_LOCK(p);
255 			break;
256 		}
257 	sx_sunlock(&allproc_lock);
258 	return (p);
259 }
260 
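/*
 * Usage sketch: pfind() returns NULL if no such process exists; otherwise
 * the process is returned locked and the caller must drop the lock, e.g.:
 *
 *	if ((p = pfind(pid)) != NULL) {
 *		... inspect p ...
 *		PROC_UNLOCK(p);
 *	}
 */
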
261 /*
262  * Locate a process group by number.
263  * The caller must hold proctree_lock.
264  */
265 struct pgrp *
266 pgfind(pgid)
267 	register pid_t pgid;
268 {
269 	register struct pgrp *pgrp;
270 
271 	sx_assert(&proctree_lock, SX_LOCKED);
272 
273 	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
274 		if (pgrp->pg_id == pgid) {
275 			PGRP_LOCK(pgrp);
276 			return (pgrp);
277 		}
278 	}
279 	return (NULL);
280 }
281 
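/*
 * Usage sketch: the caller must already hold proctree_lock; the group,
 * if found, is returned locked:
 *
 *	sx_slock(&proctree_lock);
 *	if ((pgrp = pgfind(pgid)) != NULL) {
 *		... inspect pgrp ...
 *		PGRP_UNLOCK(pgrp);
 *	}
 *	sx_sunlock(&proctree_lock);
 */
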
282 /*
283  * Create a new process group.
284  * pgid must be equal to the pid of p.
285  * Begin a new session if required.
286  */
287 int
288 enterpgrp(p, pgid, pgrp, sess)
289 	register struct proc *p;
290 	pid_t pgid;
291 	struct pgrp *pgrp;
292 	struct session *sess;
293 {
294 	struct pgrp *pgrp2;
295 
296 	sx_assert(&proctree_lock, SX_XLOCKED);
297 
298 	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
299 	KASSERT(p->p_pid == pgid,
300 	    ("enterpgrp: new pgrp and pid != pgid"));
301 
302 	pgrp2 = pgfind(pgid);
303 
304 	KASSERT(pgrp2 == NULL,
305 	    ("enterpgrp: pgrp with pgid exists"));
306 	KASSERT(!SESS_LEADER(p),
307 	    ("enterpgrp: session leader attempted setpgrp"));
308 
309 	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
310 
311 	if (sess != NULL) {
312 		/*
313 		 * new session
314 		 */
315 		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
316 		PROC_LOCK(p);
317 		p->p_flag &= ~P_CONTROLT;
318 		PROC_UNLOCK(p);
319 		PGRP_LOCK(pgrp);
320 		sess->s_leader = p;
321 		sess->s_sid = p->p_pid;
322 		sess->s_count = 1;
323 		sess->s_ttyvp = NULL;
324 		sess->s_ttyp = NULL;
325 		bcopy(p->p_session->s_login, sess->s_login,
326 			    sizeof(sess->s_login));
327 		pgrp->pg_session = sess;
328 		KASSERT(p == curproc,
329 		    ("enterpgrp: mksession and p != curproc"));
330 	} else {
331 		pgrp->pg_session = p->p_session;
332 		SESS_LOCK(pgrp->pg_session);
333 		pgrp->pg_session->s_count++;
334 		SESS_UNLOCK(pgrp->pg_session);
335 		PGRP_LOCK(pgrp);
336 	}
337 	pgrp->pg_id = pgid;
338 	LIST_INIT(&pgrp->pg_members);
339 
340 	/*
341 	 * As we hold an exclusive lock on proctree_lock,
342 	 * this should not deadlock.
343 	 */
344 	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
345 	pgrp->pg_jobc = 0;
346 	SLIST_INIT(&pgrp->pg_sigiolst);
347 	PGRP_UNLOCK(pgrp);
348 
349 	doenterpgrp(p, pgrp);
350 
351 	return (0);
352 }
353 
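/*
 * Usage sketch of a setsid(2)-style caller (the allocation details are
 * illustrative, not lifted from this file): the new pgrp and session are
 * allocated up front, proctree_lock is held exclusively, the caller has
 * already verified (e.g. via pgfind()) that no group with this pgid
 * exists, and pgid == p->p_pid:
 *
 *	MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
 *	    M_WAITOK | M_ZERO);
 *	MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION,
 *	    M_WAITOK | M_ZERO);
 *	sx_xlock(&proctree_lock);
 *	error = enterpgrp(p, p->p_pid, newpgrp, newsess);
 *	sx_xunlock(&proctree_lock);
 */
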
354 /*
355  * Move p to an existing process group
356  */
357 int
358 enterthispgrp(p, pgrp)
359 	register struct proc *p;
360 	struct pgrp *pgrp;
361 {
362 
363 	sx_assert(&proctree_lock, SX_XLOCKED);
364 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
365 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
366 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
367 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
368 	KASSERT(pgrp->pg_session == p->p_session,
369 		("%s: pgrp's session %p, p->p_session %p.\n",
370 		__func__,
371 		pgrp->pg_session,
372 		p->p_session));
373 	KASSERT(pgrp != p->p_pgrp,
374 		("%s: p belongs to pgrp.", __func__));
375 
376 	doenterpgrp(p, pgrp);
377 
378 	return (0);
379 }
380 
381 /*
382  * Move p to a process group
383  */
384 static void
385 doenterpgrp(p, pgrp)
386 	struct proc *p;
387 	struct pgrp *pgrp;
388 {
389 	struct pgrp *savepgrp;
390 
391 	sx_assert(&proctree_lock, SX_XLOCKED);
392 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
393 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
394 	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
395 	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
396 
397 	savepgrp = p->p_pgrp;
398 
399 	/*
400 	 * Adjust eligibility of affected pgrps to participate in job control.
401 	 * Increment eligibility counts before decrementing, otherwise we
402 	 * could reach 0 spuriously during the first call.
403 	 */
404 	fixjobc(p, pgrp, 1);
405 	fixjobc(p, p->p_pgrp, 0);
406 
407 	PGRP_LOCK(pgrp);
408 	PGRP_LOCK(savepgrp);
409 	PROC_LOCK(p);
410 	LIST_REMOVE(p, p_pglist);
411 	p->p_pgrp = pgrp;
412 	PROC_UNLOCK(p);
413 	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
414 	PGRP_UNLOCK(savepgrp);
415 	PGRP_UNLOCK(pgrp);
416 	if (LIST_EMPTY(&savepgrp->pg_members))
417 		pgdelete(savepgrp);
418 }
419 
420 /*
421  * remove process from process group
422  */
423 int
424 leavepgrp(p)
425 	register struct proc *p;
426 {
427 	struct pgrp *savepgrp;
428 
429 	sx_assert(&proctree_lock, SX_XLOCKED);
430 	savepgrp = p->p_pgrp;
431 	PGRP_LOCK(savepgrp);
432 	PROC_LOCK(p);
433 	LIST_REMOVE(p, p_pglist);
434 	p->p_pgrp = NULL;
435 	PROC_UNLOCK(p);
436 	PGRP_UNLOCK(savepgrp);
437 	if (LIST_EMPTY(&savepgrp->pg_members))
438 		pgdelete(savepgrp);
439 	return (0);
440 }
441 
442 /*
443  * delete a process group
444  */
445 static void
446 pgdelete(pgrp)
447 	register struct pgrp *pgrp;
448 {
449 	struct session *savesess;
450 
451 	sx_assert(&proctree_lock, SX_XLOCKED);
452 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
453 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
454 
455 	/*
456 	 * Reset any sigio structures pointing to us as a result of
457 	 * F_SETOWN with our pgid.
458 	 */
459 	funsetownlst(&pgrp->pg_sigiolst);
460 
461 	PGRP_LOCK(pgrp);
462 	if (pgrp->pg_session->s_ttyp != NULL &&
463 	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
464 		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
465 	LIST_REMOVE(pgrp, pg_hash);
466 	savesess = pgrp->pg_session;
467 	SESS_LOCK(savesess);
468 	savesess->s_count--;
469 	SESS_UNLOCK(savesess);
470 	PGRP_UNLOCK(pgrp);
471 	if (savesess->s_count == 0) {
472 		mtx_destroy(&savesess->s_mtx);
473 		FREE(pgrp->pg_session, M_SESSION);
474 	}
475 	mtx_destroy(&pgrp->pg_mtx);
476 	FREE(pgrp, M_PGRP);
477 }
478 
479 static void
480 pgadjustjobc(pgrp, entering)
481 	struct pgrp *pgrp;
482 	int entering;
483 {
484 
485 	PGRP_LOCK(pgrp);
486 	if (entering)
487 		pgrp->pg_jobc++;
488 	else {
489 		--pgrp->pg_jobc;
490 		if (pgrp->pg_jobc == 0)
491 			orphanpg(pgrp);
492 	}
493 	PGRP_UNLOCK(pgrp);
494 }
495 
496 /*
497  * Adjust pgrp jobc counters when specified process changes process group.
498  * We count the number of processes in each process group that "qualify"
499  * the group for terminal job control (those with a parent in a different
500  * process group of the same session).  If that count reaches zero, the
501  * process group becomes orphaned.  Check both the specified process'
502  * process group and that of its children.
503  * entering == 0 => p is leaving specified group.
504  * entering == 1 => p is entering specified group.
505  */
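/*
 * Worked example (a sketch): suppose p sits in pgrp A while its parent
 * sits in pgrp B, with A and B in the same session.  Then p "qualifies"
 * A for job control, so fixjobc(p, A, 0) when p leaves A decrements A's
 * pg_jobc; if the count reaches zero, A is orphaned via orphanpg().
 */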
506 void
507 fixjobc(p, pgrp, entering)
508 	register struct proc *p;
509 	register struct pgrp *pgrp;
510 	int entering;
511 {
512 	register struct pgrp *hispgrp;
513 	register struct session *mysession;
514 
515 	sx_assert(&proctree_lock, SX_LOCKED);
516 	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
517 	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
518 	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
519 
520 	/*
521 	 * Check p's parent to see whether p qualifies its own process
522 	 * group; if so, adjust count for p's process group.
523 	 */
524 	mysession = pgrp->pg_session;
525 	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
526 	    hispgrp->pg_session == mysession)
527 		pgadjustjobc(pgrp, entering);
528 
529 	/*
530 	 * Check this process' children to see whether they qualify
531 	 * their process groups; if so, adjust counts for children's
532 	 * process groups.
533 	 */
534 	LIST_FOREACH(p, &p->p_children, p_sibling) {
535 		hispgrp = p->p_pgrp;
536 		if (hispgrp == pgrp ||
537 		    hispgrp->pg_session != mysession)
538 			continue;
539 		PROC_LOCK(p);
540 		if (p->p_state == PRS_ZOMBIE) {
541 			PROC_UNLOCK(p);
542 			continue;
543 		}
544 		PROC_UNLOCK(p);
545 		pgadjustjobc(hispgrp, entering);
546 	}
547 }
548 
549 /*
550  * A process group has become orphaned;
551  * if there are any stopped processes in the group,
552  * hang up all processes in that group.
553  */
554 static void
555 orphanpg(pg)
556 	struct pgrp *pg;
557 {
558 	register struct proc *p;
559 
560 	PGRP_LOCK_ASSERT(pg, MA_OWNED);
561 
562 	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
563 		PROC_LOCK(p);
564 		if (P_SHOULDSTOP(p)) {
565 			PROC_UNLOCK(p);
566 			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
567 				PROC_LOCK(p);
568 				psignal(p, SIGHUP);
569 				psignal(p, SIGCONT);
570 				PROC_UNLOCK(p);
571 			}
572 			return;
573 		}
574 		PROC_UNLOCK(p);
575 	}
576 }
577 
578 #include "opt_ddb.h"
579 #ifdef DDB
580 #include <ddb/ddb.h>
581 
582 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
583 {
584 	register struct pgrp *pgrp;
585 	register struct proc *p;
586 	register int i;
587 
588 	for (i = 0; i <= pgrphash; i++) {
589 		if (!LIST_EMPTY(&pgrphashtbl[i])) {
590 			printf("\tindx %d\n", i);
591 			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
592 				printf(
593 			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
594 				    (void *)pgrp, (long)pgrp->pg_id,
595 				    (void *)pgrp->pg_session,
596 				    pgrp->pg_session->s_count,
597 				    (void *)LIST_FIRST(&pgrp->pg_members));
598 				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
599 					printf("\t\tpid %ld addr %p pgrp %p\n",
600 					    (long)p->p_pid, (void *)p,
601 					    (void *)p->p_pgrp);
602 				}
603 			}
604 		}
605 	}
606 }
607 #endif /* DDB */
608 void
609 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
610 
611 /*
612  * Fill in a kinfo_proc structure for the specified process.
613  * Must be called with the target process locked.
614  */
615 void
616 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
617 {
618 	fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
619 }
620 
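/*
 * Usage sketch: the caller supplies the kinfo_proc storage and holds the
 * process lock across the call:
 *
 *	struct kinfo_proc kp;
 *
 *	PROC_LOCK(p);
 *	fill_kinfo_proc(p, &kp);
 *	PROC_UNLOCK(p);
 */
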
621 void
622 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
623 {
624 	struct proc *p;
625 	struct thread *td0;
626 	struct kse *ke;
627 	struct ksegrp *kg;
628 	struct tty *tp;
629 	struct session *sp;
630 	struct timeval tv;
631 	struct sigacts *ps;
632 
633 	p = td->td_proc;
634 
635 	bzero(kp, sizeof(*kp));
636 
637 	kp->ki_structsize = sizeof(*kp);
638 	kp->ki_paddr = p;
639 	PROC_LOCK_ASSERT(p, MA_OWNED);
640 	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
641 	kp->ki_args = p->p_args;
642 	kp->ki_textvp = p->p_textvp;
643 #ifdef KTRACE
644 	kp->ki_tracep = p->p_tracevp;
645 	mtx_lock(&ktrace_mtx);
646 	kp->ki_traceflag = p->p_traceflag;
647 	mtx_unlock(&ktrace_mtx);
648 #endif
649 	kp->ki_fd = p->p_fd;
650 	kp->ki_vmspace = p->p_vmspace;
651 	if (p->p_ucred) {
652 		kp->ki_uid = p->p_ucred->cr_uid;
653 		kp->ki_ruid = p->p_ucred->cr_ruid;
654 		kp->ki_svuid = p->p_ucred->cr_svuid;
655 		/* XXX bde doesn't like KI_NGROUPS */
656 		kp->ki_ngroups = min(p->p_ucred->cr_ngroups, KI_NGROUPS);
657 		bcopy(p->p_ucred->cr_groups, kp->ki_groups,
658 		    kp->ki_ngroups * sizeof(gid_t));
659 		kp->ki_rgid = p->p_ucred->cr_rgid;
660 		kp->ki_svgid = p->p_ucred->cr_svgid;
661 	}
662 	if (p->p_sigacts) {
663 		ps = p->p_sigacts;
664 		mtx_lock(&ps->ps_mtx);
665 		kp->ki_sigignore = ps->ps_sigignore;
666 		kp->ki_sigcatch = ps->ps_sigcatch;
667 		mtx_unlock(&ps->ps_mtx);
668 	}
669 	mtx_lock_spin(&sched_lock);
670 	if (p->p_state != PRS_NEW &&
671 	    p->p_state != PRS_ZOMBIE &&
672 	    p->p_vmspace != NULL) {
673 		struct vmspace *vm = p->p_vmspace;
674 
675 		kp->ki_size = vm->vm_map.size;
676 		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
677 		if (p->p_sflag & PS_INMEM)
678 			kp->ki_rssize += UAREA_PAGES;
679 		FOREACH_THREAD_IN_PROC(p, td0) {
680 			if (!TD_IS_SWAPPED(td0))
681 				kp->ki_rssize += td0->td_kstack_pages;
682 			if (td0->td_altkstack_obj != NULL)
683 				kp->ki_rssize += td0->td_altkstack_pages;
684 		}
685 		kp->ki_swrss = vm->vm_swrss;
686 		kp->ki_tsize = vm->vm_tsize;
687 		kp->ki_dsize = vm->vm_dsize;
688 		kp->ki_ssize = vm->vm_ssize;
689 	}
690 	if ((p->p_sflag & PS_INMEM) && p->p_stats) {
691 		kp->ki_start = p->p_stats->p_start;
692 		timevaladd(&kp->ki_start, &boottime);
693 		kp->ki_rusage = p->p_stats->p_ru;
694 		kp->ki_childtime.tv_sec = p->p_stats->p_cru.ru_utime.tv_sec +
695 		    p->p_stats->p_cru.ru_stime.tv_sec;
696 		kp->ki_childtime.tv_usec = p->p_stats->p_cru.ru_utime.tv_usec +
697 		    p->p_stats->p_cru.ru_stime.tv_usec;
698 	}
699 	if (p->p_state != PRS_ZOMBIE) {
700 #if 0
701 		if (td == NULL) {
702 			/* XXXKSE: This should never happen. */
703 			printf("fill_kinfo_proc(): pid %d has no threads!\n",
704 			    p->p_pid);
705 			mtx_unlock_spin(&sched_lock);
706 			return;
707 		}
708 #endif
709 		if (td->td_wmesg != NULL) {
710 			strlcpy(kp->ki_wmesg, td->td_wmesg,
711 			    sizeof(kp->ki_wmesg));
712 		}
713 		if (TD_ON_LOCK(td)) {
714 			kp->ki_kiflag |= KI_LOCKBLOCK;
715 			strlcpy(kp->ki_lockname, td->td_lockname,
716 			    sizeof(kp->ki_lockname));
717 		}
718 
719 		if (p->p_state == PRS_NORMAL) { /*  XXXKSE very approximate */
720 			if (TD_ON_RUNQ(td) ||
721 			    TD_CAN_RUN(td) ||
722 			    TD_IS_RUNNING(td)) {
723 				kp->ki_stat = SRUN;
724 			} else if (P_SHOULDSTOP(p)) {
725 				kp->ki_stat = SSTOP;
726 			} else if (TD_IS_SLEEPING(td)) {
727 				kp->ki_stat = SSLEEP;
728 			} else if (TD_ON_LOCK(td)) {
729 				kp->ki_stat = SLOCK;
730 			} else {
731 				kp->ki_stat = SWAIT;
732 			}
733 		} else {
734 			kp->ki_stat = SIDL;
735 		}
736 
737 		kp->ki_sflag = p->p_sflag;
738 		kp->ki_swtime = p->p_swtime;
739 		kp->ki_pid = p->p_pid;
740 		kg = td->td_ksegrp;
741 		ke = td->td_kse;
742 		bintime2timeval(&p->p_runtime, &tv);
743 		kp->ki_runtime =
744 		    tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
745 
746 		/* things in the KSE GROUP */
747 		kp->ki_estcpu = kg->kg_estcpu;
748 		kp->ki_slptime = kg->kg_slptime;
749 		kp->ki_pri.pri_user = kg->kg_user_pri;
750 		kp->ki_pri.pri_class = kg->kg_pri_class;
751 		kp->ki_nice = kg->kg_nice;
752 
753 		/* Things in the thread */
754 		kp->ki_wchan = td->td_wchan;
755 		kp->ki_pri.pri_level = td->td_priority;
756 		kp->ki_pri.pri_native = td->td_base_pri;
757 		kp->ki_lastcpu = td->td_lastcpu;
758 		kp->ki_oncpu = td->td_oncpu;
759 		kp->ki_tdflags = td->td_flags;
760 		kp->ki_pcb = td->td_pcb;
761 		kp->ki_kstack = (void *)td->td_kstack;
762 		kp->ki_pctcpu = sched_pctcpu(td);
763 
764 		/* Things in the kse */
765 		if (ke)
766 			kp->ki_rqindex = ke->ke_rqindex;
767 		else
768 			kp->ki_rqindex = 0;
769 
770 	} else {
771 		kp->ki_stat = SZOMB;
772 	}
773 	mtx_unlock_spin(&sched_lock);
774 	sp = NULL;
775 	tp = NULL;
776 	if (p->p_pgrp) {
777 		kp->ki_pgid = p->p_pgrp->pg_id;
778 		kp->ki_jobc = p->p_pgrp->pg_jobc;
779 		sp = p->p_pgrp->pg_session;
780 
781 		if (sp != NULL) {
782 			kp->ki_sid = sp->s_sid;
783 			SESS_LOCK(sp);
784 			strlcpy(kp->ki_login, sp->s_login,
785 			    sizeof(kp->ki_login));
786 			if (sp->s_ttyvp)
787 				kp->ki_kiflag |= KI_CTTY;
788 			if (SESS_LEADER(p))
789 				kp->ki_kiflag |= KI_SLEADER;
790 			tp = sp->s_ttyp;
791 			SESS_UNLOCK(sp);
792 		}
793 	}
794 	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
795 		kp->ki_tdev = dev2udev(tp->t_dev);
796 		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
797 		if (tp->t_session)
798 			kp->ki_tsid = tp->t_session->s_sid;
799 	} else
800 		kp->ki_tdev = NOUDEV;
801 	if (p->p_comm[0] != '\0') {
802 		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
803 		strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
804 	}
805 	kp->ki_siglist = p->p_siglist;
806 	SIGSETOR(kp->ki_siglist, td->td_siglist);
807 	kp->ki_sigmask = td->td_sigmask;
808 	kp->ki_xstat = p->p_xstat;
809 	kp->ki_acflag = p->p_acflag;
810 	kp->ki_flag = p->p_flag;
811 	/* If jailed(p->p_ucred), emulate the old P_JAILED flag. */
812 	if (jailed(p->p_ucred))
813 		kp->ki_flag |= P_JAILED;
814 	kp->ki_lock = p->p_lock;
815 	if (p->p_pptr)
816 		kp->ki_ppid = p->p_pptr->p_pid;
817 }
818 
819 /*
820  * Locate a zombie process by number
821  */
822 struct proc *
823 zpfind(pid_t pid)
824 {
825 	struct proc *p;
826 
827 	sx_slock(&allproc_lock);
828 	LIST_FOREACH(p, &zombproc, p_list)
829 		if (p->p_pid == pid) {
830 			PROC_LOCK(p);
831 			break;
832 		}
833 	sx_sunlock(&allproc_lock);
834 	return (p);
835 }
836 
837 #define KERN_PROC_ZOMBMASK	0x3
838 #define KERN_PROC_NOTHREADS	0x4
839 
840 /*
841  * Must be called with the process locked and will return with it unlocked.
842  */
843 static int
844 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
845 {
846 	struct thread *td;
847 	struct kinfo_proc kinfo_proc;
848 	int error = 0;
849 	struct proc *np;
850 	pid_t pid = p->p_pid;
851 
852 	PROC_LOCK_ASSERT(p, MA_OWNED);
853 
854 	if (flags & KERN_PROC_NOTHREADS) {
855 		fill_kinfo_proc(p, &kinfo_proc);
856 		PROC_UNLOCK(p);
857 		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
858 				   sizeof(kinfo_proc));
859 		PROC_LOCK(p);
860 	} else {
861 		_PHOLD(p);
862 		FOREACH_THREAD_IN_PROC(p, td) {
863 			fill_kinfo_thread(td, &kinfo_proc);
864 			PROC_UNLOCK(p);
865 			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
866 					   sizeof(kinfo_proc));
867 			PROC_LOCK(p);
868 			if (error)
869 				break;
870 		}
871 		_PRELE(p);
872 	}
873 	PROC_UNLOCK(p);
874 	if (error)
875 		return (error);
876 	if (flags & KERN_PROC_ZOMBMASK)
877 		np = zpfind(pid);
878 	else {
879 		if (pid == 0)
880 			return (0);
881 		np = pfind(pid);
882 	}
883 	if (np == NULL)
884 		return (EAGAIN);
885 	if (np != p) {
886 		PROC_UNLOCK(np);
887 		return (EAGAIN);
888 	}
889 	PROC_UNLOCK(np);
890 	return (0);
891 }
892 
893 static int
894 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
895 {
896 	int *name = (int*) arg1;
897 	u_int namelen = arg2;
898 	struct proc *p;
899 	int flags, doingzomb, oid_number;
900 	int error = 0;
901 
902 	oid_number = oidp->oid_number;
903 	if (oid_number != KERN_PROC_ALL &&
904 	    (oid_number & KERN_PROC_INC_THREAD) == 0)
905 		flags = KERN_PROC_NOTHREADS;
906 	else {
907 		flags = 0;
908 		oid_number &= ~KERN_PROC_INC_THREAD;
909 	}
910 	if (oid_number == KERN_PROC_PID) {
911 		if (namelen != 1)
912 			return (EINVAL);
913 		p = pfind((pid_t)name[0]);
914 		if (!p)
915 			return (ESRCH);
916 		if ((error = p_cansee(curthread, p))) {
917 			PROC_UNLOCK(p);
918 			return (error);
919 		}
920 		error = sysctl_out_proc(p, req, flags);
921 		return (error);
922 	}
923 
924 	switch (oid_number) {
925 	case KERN_PROC_ALL:
926 		if (namelen != 0)
927 			return (EINVAL);
928 		break;
929 	case KERN_PROC_PROC:
930 		if (namelen != 0 && namelen != 1)
931 			return (EINVAL);
932 		break;
933 	default:
934 		if (namelen != 1)
935 			return (EINVAL);
936 		break;
937 	}
938 
939 	if (!req->oldptr) {
940 		/* overestimate by 5 procs */
941 		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
942 		if (error)
943 			return (error);
944 	}
945 	error = sysctl_wire_old_buffer(req, 0);
946 	if (error != 0)
947 		return (error);
948 	sx_slock(&allproc_lock);
949 	for (doingzomb = 0; doingzomb < 2; doingzomb++) {
950 		if (!doingzomb)
951 			p = LIST_FIRST(&allproc);
952 		else
953 			p = LIST_FIRST(&zombproc);
954 		for (; p != 0; p = LIST_NEXT(p, p_list)) {
955 			/*
956 			 * Skip embryonic processes.
957 			 */
958 			mtx_lock_spin(&sched_lock);
959 			if (p->p_state == PRS_NEW) {
960 				mtx_unlock_spin(&sched_lock);
961 				continue;
962 			}
963 			mtx_unlock_spin(&sched_lock);
964 			PROC_LOCK(p);
965 			/*
966 			 * Show a user only appropriate processes.
967 			 */
968 			if (p_cansee(curthread, p)) {
969 				PROC_UNLOCK(p);
970 				continue;
971 			}
972 			/*
973 			 * TODO - make more efficient (see notes below);
974 			 * do this by session.
975 			 */
976 			switch (oid_number) {
977 
978 			case KERN_PROC_PGRP:
979 				/* could do this by traversing pgrp */
980 				if (p->p_pgrp == NULL ||
981 				    p->p_pgrp->pg_id != (pid_t)name[0]) {
982 					PROC_UNLOCK(p);
983 					continue;
984 				}
985 				break;
986 
987 			case KERN_PROC_RGID:
988 				if (p->p_ucred == NULL ||
989 				    p->p_ucred->cr_rgid != (gid_t)name[0]) {
990 					PROC_UNLOCK(p);
991 					continue;
992 				}
993 				break;
994 
995 			case KERN_PROC_SESSION:
996 				if (p->p_session == NULL ||
997 				    p->p_session->s_sid != (pid_t)name[0]) {
998 					PROC_UNLOCK(p);
999 					continue;
1000 				}
1001 				break;
1002 
1003 			case KERN_PROC_TTY:
1004 				if ((p->p_flag & P_CONTROLT) == 0 ||
1005 				    p->p_session == NULL) {
1006 					PROC_UNLOCK(p);
1007 					continue;
1008 				}
1009 				SESS_LOCK(p->p_session);
1010 				if (p->p_session->s_ttyp == NULL ||
1011 				    dev2udev(p->p_session->s_ttyp->t_dev) !=
1012 				    (udev_t)name[0]) {
1013 					SESS_UNLOCK(p->p_session);
1014 					PROC_UNLOCK(p);
1015 					continue;
1016 				}
1017 				SESS_UNLOCK(p->p_session);
1018 				break;
1019 
1020 			case KERN_PROC_UID:
1021 				if (p->p_ucred == NULL ||
1022 				    p->p_ucred->cr_uid != (uid_t)name[0]) {
1023 					PROC_UNLOCK(p);
1024 					continue;
1025 				}
1026 				break;
1027 
1028 			case KERN_PROC_RUID:
1029 				if (p->p_ucred == NULL ||
1030 				    p->p_ucred->cr_ruid != (uid_t)name[0]) {
1031 					PROC_UNLOCK(p);
1032 					continue;
1033 				}
1034 				break;
1035 
1036 			case KERN_PROC_PROC:
1037 				break;
1038 
1039 			default:
1040 				break;
1041 
1042 			}
1043 
1044 			error = sysctl_out_proc(p, req, flags | doingzomb);
1045 			if (error) {
1046 				sx_sunlock(&allproc_lock);
1047 				return (error);
1048 			}
1049 		}
1050 	}
1051 	sx_sunlock(&allproc_lock);
1052 	return (0);
1053 }
1054 
1055 struct pargs *
1056 pargs_alloc(int len)
1057 {
1058 	struct pargs *pa;
1059 
1060 	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
1061 		M_WAITOK);
1062 	pa->ar_ref = 1;
1063 	pa->ar_length = len;
1064 	return (pa);
1065 }
1066 
1067 void
1068 pargs_free(struct pargs *pa)
1069 {
1070 
1071 	FREE(pa, M_PARGS);
1072 }
1073 
1074 void
1075 pargs_hold(struct pargs *pa)
1076 {
1077 
1078 	if (pa == NULL)
1079 		return;
1080 	PARGS_LOCK(pa);
1081 	pa->ar_ref++;
1082 	PARGS_UNLOCK(pa);
1083 }
1084 
1085 void
1086 pargs_drop(struct pargs *pa)
1087 {
1088 
1089 	if (pa == NULL)
1090 		return;
1091 	PARGS_LOCK(pa);
1092 	if (--pa->ar_ref == 0) {
1093 		PARGS_UNLOCK(pa);
1094 		pargs_free(pa);
1095 	} else
1096 		PARGS_UNLOCK(pa);
1097 }
1098 
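/*
 * Usage sketch: consumers of p_args take a hold under the process lock
 * and drop it when done; both pargs_hold() and pargs_drop() accept NULL:
 *
 *	PROC_LOCK(p);
 *	pa = p->p_args;
 *	pargs_hold(pa);
 *	PROC_UNLOCK(p);
 *	... use pa->ar_args and pa->ar_length ...
 *	pargs_drop(pa);
 */
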
1099 /*
1100  * This sysctl allows a process to retrieve the argument list or process
1101  * title for another process without groping around in the address space
1102  * of the other process.  It also allows a process to set its own "process
1103  * title" to a string of its own choice.
1104  */
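/*
 * Userland sketch (illustrative, error handling omitted): the argument
 * list comes back as a sequence of NUL-terminated strings:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid };
 *	char buf[4096];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 4, buf, &len, NULL, 0) == 0)
 *		... buf holds len bytes of argument strings ...
 */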
1105 static int
1106 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1107 {
1108 	int *name = (int*) arg1;
1109 	u_int namelen = arg2;
1110 	struct pargs *newpa, *pa;
1111 	struct proc *p;
1112 	int error = 0;
1113 
1114 	if (namelen != 1)
1115 		return (EINVAL);
1116 
1117 	p = pfind((pid_t)name[0]);
1118 	if (!p)
1119 		return (ESRCH);
1120 
1121 	if ((error = p_cansee(curthread, p)) != 0) {
1122 		PROC_UNLOCK(p);
1123 		return (error);
1124 	}
1125 
1126 	if (req->newptr && curproc != p) {
1127 		PROC_UNLOCK(p);
1128 		return (EPERM);
1129 	}
1130 
1131 	pa = p->p_args;
1132 	pargs_hold(pa);
1133 	PROC_UNLOCK(p);
1134 	if (req->oldptr != NULL && pa != NULL)
1135 		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1136 	pargs_drop(pa);
1137 	if (error != 0 || req->newptr == NULL)
1138 		return (error);
1139 
1140 	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
1141 		return (ENOMEM);
1142 	newpa = pargs_alloc(req->newlen);
1143 	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
1144 	if (error != 0) {
1145 		pargs_free(newpa);
1146 		return (error);
1147 	}
1148 	PROC_LOCK(p);
1149 	pa = p->p_args;
1150 	p->p_args = newpa;
1151 	PROC_UNLOCK(p);
1152 	pargs_drop(pa);
1153 	return (0);
1154 }
1155 
1156 static int
1157 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
1158 {
1159 	struct proc *p;
1160 	char *sv_name;
1161 	int *name;
1162 	int namelen;
1163 	int error;
1164 
1165 	namelen = arg2;
1166 	if (namelen != 1)
1167 		return (EINVAL);
1168 
1169 	name = (int *)arg1;
1170 	if ((p = pfind((pid_t)name[0])) == NULL)
1171 		return (ESRCH);
1172 	if ((error = p_cansee(curthread, p))) {
1173 		PROC_UNLOCK(p);
1174 		return (error);
1175 	}
1176 	sv_name = p->p_sysent->sv_name;
1177 	PROC_UNLOCK(p);
1178 	return (sysctl_handle_string(oidp, sv_name, 0, req));
1179 }
1180 
1181 
1182 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");
1183 
1184 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1185 	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1186 
1187 SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1188 	sysctl_kern_proc, "Process table");
1189 
1190 SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD,
1191 	sysctl_kern_proc, "Process table");
1192 
1193 SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD,
1194 	sysctl_kern_proc, "Process table");
1195 
1196 SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1197 	sysctl_kern_proc, "Process table");
1198 
1199 SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1200 	sysctl_kern_proc, "Process table");
1201 
1202 SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1203 	sysctl_kern_proc, "Process table");
1204 
1205 SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1206 	sysctl_kern_proc, "Process table");
1207 
1208 SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD,
1209 	sysctl_kern_proc, "Return process table, no threads");
1210 
1211 SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
1212 	sysctl_kern_proc_args, "Process argument list");
1213 
1214 SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD,
1215 	sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)");
1216 
1217 SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
1218 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1219 
1220 SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
1221 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1222 
1223 SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD), sid_td,
1224 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1225 
1226 SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
1227 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1228 
1229 SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
1230 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1231 
1232 SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
1233 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1234 
1235 SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
1236 	CTLFLAG_RD, sysctl_kern_proc, "Process table");
1237 
1238 SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
1239 	CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");
1240