xref: /freebsd/sys/kern/kern_proc.c (revision 77b7cdf1999ee965ad494fddd184b18f532ac91a)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
34  * $FreeBSD$
35  */
36 
37 #include "opt_ktrace.h"
38 #include "opt_kstack_pages.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mutex.h>
46 #include <sys/proc.h>
47 #include <sys/kse.h>
48 #include <sys/sched.h>
49 #include <sys/smp.h>
50 #include <sys/sysctl.h>
51 #include <sys/filedesc.h>
52 #include <sys/tty.h>
53 #include <sys/signalvar.h>
54 #include <sys/sx.h>
55 #include <sys/user.h>
56 #include <sys/jail.h>
57 #ifdef KTRACE
58 #include <sys/uio.h>
59 #include <sys/ktrace.h>
60 #endif
61 
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_map.h>
66 #include <vm/uma.h>
67 #include <machine/critical.h>
68 
/* Malloc types for process-management allocations. */
MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static void proc_ctor(void *mem, int size, void *arg);
static void proc_dtor(void *mem, int size, void *arg);
static void proc_init(void *mem, int size);
static void proc_fini(void *mem, int size);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;		/* pid -> proc hash, PIDHASH() */
u_long pidhash;				/* size mask for pidhashtbl */
struct pgrphashhead *pgrphashtbl;	/* pgid -> pgrp hash, PGRPHASH() */
u_long pgrphash;			/* size mask for pgrphashtbl */
struct proclist allproc;		/* list of all live processes */
struct proclist zombproc;		/* list of zombie processes */
struct sx allproc_lock;			/* protects allproc/zombproc/pid hash */
struct sx proctree_lock;		/* protects parent/child/pgrp linkage */
struct mtx pargs_ref_lock;		/* protects struct pargs refcounts */
struct mtx ppeers_lock;
uma_zone_t proc_zone;			/* type-stable proc allocator */
uma_zone_t ithread_zone;

/* Per-thread kernel stack / U-area sizing, read-only via sysctl. */
int kstack_pages = KSTACK_PAGES;
int uarea_pages = UAREA_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

/* The exported kinfo_proc ABI must not change size silently. */
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
107 
108 /*
109  * Initialize global process hashing structures.
110  */
void
procinit()
{

	/* Locks guarding the global process lists and pargs refcounting. */
	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* Hash tables sized relative to the system-wide process limit. */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	/*
	 * UMA_ZONE_NOFREE makes proc memory type-stable: slabs are never
	 * returned to the VM, so a stale proc pointer still points at a
	 * proc (possibly recycled), which the ctor/dtor pair relies on.
	 */
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}
128 
129 /*
130  * Prepare a proc for use.
131  */
static void
proc_ctor(void *mem, int size, void *arg)
{
	struct proc *p;

	/*
	 * Placeholder: no per-allocation setup is needed yet; the real
	 * initialization is done once per slab item in proc_init().
	 */
	p = (struct proc *)mem;
}
139 
140 /*
141  * Reclaim a proc after use.
142  */
/*
 * Runs when a proc is released back to the zone.  Verifies the proc is
 * back to its minimal single-thread state and re-establishes the initial
 * thread/ksegrp/kse linkage for the next user of this memory.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	KASSERT((p->p_numthreads == 1),
	    ("bad number of threads in exiting process"));
	td = FIRST_THREAD_IN_PROC(p);
	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
	kg = FIRST_KSEGRP_IN_PROC(p);
	KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
	ke = FIRST_KSE_IN_KSEGRP(kg);
	KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));

	/* Dispose of an alternate kstack, if it exists.
	 * XXX What if there are more than one thread in the proc?
	 *     The first thread in the proc is special and not
	 *     freed, so you gotta do this here.
	 */
	if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
		pmap_dispose_altkstack(td);

	/*
	 * We want to make sure we know the initial linkages.
	 * so for now tear them down and remake them.
	 * This is probably un-needed as we can probably rely
	 * on the state coming in here from wait4().
	 */
	proc_linkup(p, kg, ke, td);
}
178 
179 /*
180  * Initialize type-stable parts of a proc (when newly created).
181  */
static void
proc_init(void *mem, int size)
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	p = (struct proc *)mem;
	/* Scheduler-private data lives immediately after the proc. */
	p->p_sched = (struct p_sched *)&p[1];
	vm_proc_new(p);
	/* Allocate the initial thread/kse/ksegrp triple and link it in. */
	td = thread_alloc();
	ke = kse_alloc();
	kg = ksegrp_alloc();
	proc_linkup(p, kg, ke, td);
	/*
	 * MTX_DUPOK: two process locks may legitimately be held at once
	 * (e.g. parent and child); bzero first since this memory may be
	 * recycled with stale mutex state.
	 */
	bzero(&p->p_mtx, sizeof(struct mtx));
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
}
200 
201 /*
202  * Tear down type-stable parts of a proc (just before being discarded)
203  */
204 static void
205 proc_fini(void *mem, int size)
206 {
207 	struct proc *p;
208 	struct thread *td;
209 	struct ksegrp *kg;
210 	struct kse *ke;
211 
212 	p = (struct proc *)mem;
213 	KASSERT((p->p_numthreads == 1),
214 	    ("bad number of threads in freeing process"));
215         td = FIRST_THREAD_IN_PROC(p);
216 	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
217         kg = FIRST_KSEGRP_IN_PROC(p);
218 	KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
219         ke = FIRST_KSE_IN_KSEGRP(kg);
220 	KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
221 	vm_proc_dispose(p);
222 	thread_free(td);
223 	ksegrp_free(kg);
224 	kse_free(ke);
225 	mtx_destroy(&p->p_mtx);
226 }
227 
228 /*
229  * Is p an inferior of the current process?
230  */
231 int
232 inferior(p)
233 	register struct proc *p;
234 {
235 
236 	sx_assert(&proctree_lock, SX_LOCKED);
237 	for (; p != curproc; p = p->p_pptr)
238 		if (p->p_pid == 0)
239 			return (0);
240 	return (1);
241 }
242 
243 /*
244  * Locate a process by number
245  */
/*
 * Look up a live process by pid in the pid hash table.
 * Returns the proc locked, or NULL if no such pid is live
 * (zombies are found via zpfind() instead).
 */
struct proc *
pfind(pid)
	register pid_t pid;
{
	register struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, PIDHASH(pid), p_hash)
		if (p->p_pid == pid) {
			/* Lock before dropping allproc so p can't exit. */
			PROC_LOCK(p);
			break;
		}
	/* p is NULL here if the loop ran off the end of the chain. */
	sx_sunlock(&allproc_lock);
	return (p);
}
261 
262 /*
263  * Locate a process group by number.
264  * The caller must hold proctree_lock.
265  */
struct pgrp *
pgfind(pgid)
	register pid_t pgid;
{
	register struct pgrp *pgrp;

	/* proctree_lock (not allproc_lock) protects the pgrp hash. */
	sx_assert(&proctree_lock, SX_LOCKED);

	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
		if (pgrp->pg_id == pgid) {
			/* Returned pgrp is locked; caller must unlock. */
			PGRP_LOCK(pgrp);
			return (pgrp);
		}
	}
	return (NULL);
}
282 
283 /*
284  * Create a new process group.
285  * pgid must be equal to the pid of p.
286  * Begin a new session if required.
287  */
/*
 * Initialize the caller-supplied pgrp (and optional session), insert it
 * into the pgrp hash, and move p into it via doenterpgrp().  Always
 * returns 0; the KASSERTs enforce the preconditions instead.
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));

	/* The requested pgid must not already name a process group. */
	pgrp2 = pgfind(pgid);

	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	/* MTX_DUPOK: doenterpgrp() holds two pgrp locks simultaneously. */
	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		sess->s_count = 1;
		sess->s_ttyvp = NULL;
		sess->s_ttyp = NULL;
		/* Carry the login name over from the old session. */
		bcopy(p->p_session->s_login, sess->s_login,
			    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		/* No new session: join p's existing one, bump its ref. */
		pgrp->pg_session = p->p_session;
		SESS_LOCK(pgrp->pg_session);
		pgrp->pg_session->s_count++;
		SESS_UNLOCK(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);

	doenterpgrp(p, pgrp);

	return (0);
}
354 
355 /*
356  * Move p to an existing process group
357  */
/*
 * Move p into an already-existing process group in the same session.
 * Always returns 0; preconditions are enforced by the assertions:
 * no relevant locks held, same session, and a genuinely different pgrp.
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
		("%s: pgrp's session %p, p->p_session %p.\n",
		__func__,
		pgrp->pg_session,
		p->p_session));
	KASSERT(pgrp != p->p_pgrp,
		("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}
381 
382 /*
383  * Move p to a process group
384  */
/*
 * Common back end for enterpgrp()/enterthispgrp(): unlink p from its
 * old group, link it into pgrp, fix up job-control counts, and delete
 * the old group if p was its last member.
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	/* Both pgrp locks held at once: allowed because of MTX_DUPOK. */
	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	/* Reap the old group if p was its last member. */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}
420 
421 /*
422  * remove process from process group
423  */
/*
 * Remove p from its process group (used on exit).  Deletes the group
 * if p was the last member.  Always returns 0.
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}
442 
443 /*
444  * delete a process group
445  */
/*
 * Free an empty process group: detach SIGIO registrations and the
 * controlling tty, unhash it, drop its session reference (freeing the
 * session on last reference), and free the pgrp itself.
 */
static void
pgdelete(pgrp)
	register struct pgrp *pgrp;
{
	struct session *savesess;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	PGRP_LOCK(pgrp);
	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	LIST_REMOVE(pgrp, pg_hash);
	savesess = pgrp->pg_session;
	SESS_LOCK(savesess);
	savesess->s_count--;
	SESS_UNLOCK(savesess);
	PGRP_UNLOCK(pgrp);
	/*
	 * s_count is re-read unlocked here; safe because new references
	 * require proctree_lock, which we hold exclusively.
	 */
	if (savesess->s_count == 0) {
		mtx_destroy(&savesess->s_mtx);
		FREE(pgrp->pg_session, M_SESSION);
	}
	mtx_destroy(&pgrp->pg_mtx);
	FREE(pgrp, M_PGRP);
}
479 
480 static void
481 pgadjustjobc(pgrp, entering)
482 	struct pgrp *pgrp;
483 	int entering;
484 {
485 
486 	PGRP_LOCK(pgrp);
487 	if (entering)
488 		pgrp->pg_jobc++;
489 	else {
490 		--pgrp->pg_jobc;
491 		if (pgrp->pg_jobc == 0)
492 			orphanpg(pgrp);
493 	}
494 	PGRP_UNLOCK(pgrp);
495 }
496 
497 /*
498  * Adjust pgrp jobc counters when specified process changes process group.
499  * We count the number of processes in each process group that "qualify"
500  * the group for terminal job control (those with a parent in a different
501  * process group of the same session).  If that count reaches zero, the
502  * process group becomes orphaned.  Check both the specified process'
503  * process group and that of its children.
504  * entering == 0 => p is leaving specified group.
505  * entering == 1 => p is entering specified group.
506  */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 *
	 * NB: p is reused as the loop iterator below; the original
	 * process is not referenced again after this point.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		hispgrp = p->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		/* Zombie children no longer count toward job control. */
		PROC_LOCK(p);
		if (p->p_state == PRS_ZOMBIE) {
			PROC_UNLOCK(p);
			continue;
		}
		PROC_UNLOCK(p);
		pgadjustjobc(hispgrp, entering);
	}
}
549 
550 /*
551  * A process group has become orphaned;
552  * if there are any stopped processes in the group,
553  * hang-up all process in that group.
554  */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	register struct proc *p;

	PGRP_LOCK_ASSERT(pg, MA_OWNED);

	/*
	 * First pass: look for any stopped member.  If one is found,
	 * a second pass sends SIGHUP+SIGCONT to every member (POSIX
	 * orphaned-process-group semantics) and we are done; note the
	 * inner loop deliberately reuses p since we return right after.
	 */
	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		PROC_LOCK(p);
		if (P_SHOULDSTOP(p)) {
			PROC_UNLOCK(p);
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				PROC_LOCK(p);
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
				PROC_UNLOCK(p);
			}
			return;
		}
		PROC_UNLOCK(p);
	}
}
578 
579 #include "opt_ddb.h"
580 #ifdef DDB
581 #include <ddb/ddb.h>
582 
/*
 * DDB "show pgrpdump": walk every pgrp hash chain and print each
 * group's identity, session, and member processes.  Runs lockless --
 * acceptable only because DDB has the system stopped.
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	/* pgrphash is a size mask, hence the inclusive <= bound. */
	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
608 #endif /* DDB */
609 
610 /*
611  * Fill in a kinfo_proc structure for the specified process.
612  * Must be called with the target process locked.
613  */
void
fill_kinfo_proc(p, kp)
	struct proc *p;
	struct kinfo_proc *kp;
{
	struct thread *td;
	struct thread *td0;
	struct kse *ke;
	struct ksegrp *kg;
	struct tty *tp;
	struct session *sp;
	struct timeval tv;

	td = FIRST_THREAD_IN_PROC(p);

	bzero(kp, sizeof(*kp));

	/* Identity and kernel-pointer fields. */
	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	mtx_lock(&ktrace_mtx);
	kp->ki_traceflag = p->p_traceflag;
	mtx_unlock(&ktrace_mtx);
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	/* Credentials (may be NULL very early in a process's life). */
	if (p->p_ucred) {
		kp->ki_uid = p->p_ucred->cr_uid;
		kp->ki_ruid = p->p_ucred->cr_ruid;
		kp->ki_svuid = p->p_ucred->cr_svuid;
		/* XXX bde doesn't like KI_NGROUPS */
		kp->ki_ngroups = min(p->p_ucred->cr_ngroups, KI_NGROUPS);
		bcopy(p->p_ucred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = p->p_ucred->cr_rgid;
		kp->ki_svgid = p->p_ucred->cr_svgid;
	}
	if (p->p_procsig) {
		kp->ki_sigignore = p->p_procsig->ps_sigignore;
		kp->ki_sigcatch = p->p_procsig->ps_sigcatch;
	}
	/* Everything down to the matching unlock reads scheduler state. */
	mtx_lock_spin(&sched_lock);
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		/* VM sizes; RSS includes U-area and per-thread kstacks. */
		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		if (p->p_sflag & PS_INMEM)
			kp->ki_rssize += UAREA_PAGES;
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
			if (td0->td_altkstack_obj != NULL)
				kp->ki_rssize += td0->td_altkstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	}
	/* p_stats lives in the swappable U-area; only valid if in memory. */
	if ((p->p_sflag & PS_INMEM) && p->p_stats) {
		kp->ki_start = p->p_stats->p_start;
		timevaladd(&kp->ki_start, &boottime);
		kp->ki_rusage = p->p_stats->p_ru;
		kp->ki_childtime.tv_sec = p->p_stats->p_cru.ru_utime.tv_sec +
		    p->p_stats->p_cru.ru_stime.tv_sec;
		kp->ki_childtime.tv_usec = p->p_stats->p_cru.ru_utime.tv_usec +
		    p->p_stats->p_cru.ru_stime.tv_usec;
	}
	if (p->p_state != PRS_ZOMBIE) {
		if (td == NULL) {
			/* XXXKSE: This should never happen. */
			printf("fill_kinfo_proc(): pid %d has no threads!\n",
			    p->p_pid);
			mtx_unlock_spin(&sched_lock);
			return;
		}
		/* Non-threaded: report the single thread's wait state. */
		if (!(p->p_flag & P_THREADED)) {
			if (td->td_wmesg != NULL) {
				strlcpy(kp->ki_wmesg, td->td_wmesg,
				    sizeof(kp->ki_wmesg));
			}
			if (TD_ON_LOCK(td)) {
				kp->ki_kiflag |= KI_LOCKBLOCK;
				strlcpy(kp->ki_lockname, td->td_lockname,
				    sizeof(kp->ki_lockname));
			}
		}

		/* Collapse thread state into the legacy single state. */
		if (p->p_state == PRS_NORMAL) { /*  XXXKSE very approximate */
			if (TD_ON_RUNQ(td) ||
			    TD_CAN_RUN(td) ||
			    TD_IS_RUNNING(td)) {
				kp->ki_stat = SRUN;
			} else if (P_SHOULDSTOP(p)) {
				kp->ki_stat = SSTOP;
			} else if (TD_IS_SLEEPING(td)) {
				kp->ki_stat = SSLEEP;
			} else if (TD_ON_LOCK(td)) {
				kp->ki_stat = SLOCK;
			} else {
				kp->ki_stat = SWAIT;
			}
		} else {
			kp->ki_stat = SIDL;
		}

		kp->ki_sflag = p->p_sflag;
		kp->ki_swtime = p->p_swtime;
		kp->ki_pid = p->p_pid;
		/* vvv XXXKSE */
		if (!(p->p_flag & P_THREADED)) {
			kg = td->td_ksegrp;
			ke = td->td_kse;
			KASSERT((ke != NULL), ("fill_kinfo_proc: Null KSE"));
			bintime2timeval(&p->p_runtime, &tv);
			kp->ki_runtime =
			    tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;

			/* things in the KSE GROUP */
			kp->ki_estcpu = kg->kg_estcpu;
			kp->ki_slptime = kg->kg_slptime;
			kp->ki_pri.pri_user = kg->kg_user_pri;
			kp->ki_pri.pri_class = kg->kg_pri_class;
			kp->ki_nice = kg->kg_nice;

			/* Things in the thread */
			kp->ki_wchan = td->td_wchan;
			kp->ki_pri.pri_level = td->td_priority;
			kp->ki_pri.pri_native = td->td_base_pri;
			kp->ki_lastcpu = td->td_lastcpu;
			kp->ki_oncpu = td->td_oncpu;
			kp->ki_tdflags = td->td_flags;
			kp->ki_pcb = td->td_pcb;
			kp->ki_kstack = (void *)td->td_kstack;

			/* Things in the kse */
			kp->ki_rqindex = ke->ke_rqindex;
			kp->ki_pctcpu = sched_pctcpu(ke);
		} else {
			/* Threaded: no single CPU/kse answer is meaningful. */
			kp->ki_oncpu = -1;
			kp->ki_lastcpu = -1;
			kp->ki_tdflags = -1;
			/* All the rest are 0 for now */
		}
		/* ^^^ XXXKSE */
	} else {
		kp->ki_stat = SZOMB;
	}
	mtx_unlock_spin(&sched_lock);
	/* Job-control / tty information from the pgrp and session. */
	sp = NULL;
	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = dev2udev(tp->t_dev);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NOUDEV;
	if (p->p_comm[0] != '\0') {
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
		strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
	}
	kp->ki_siglist = p->p_siglist;
	/*
	 * NOTE(review): td is dereferenced unconditionally here; the
	 * NULL check above only covers the non-zombie path.  This
	 * presumes a zombie always retains its last thread -- TODO
	 * confirm against kern_exit/kern_thread.
	 */
	SIGSETOR(kp->ki_siglist, td->td_siglist);
	kp->ki_sigmask = td->td_sigmask;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_flag = p->p_flag;
	/* If jailed(p->p_ucred), emulate the old P_JAILED flag. */
	if (jailed(p->p_ucred))
		kp->ki_flag |= P_JAILED;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}
815 
816 /*
817  * Locate a zombie process by number
818  */
819 struct proc *
820 zpfind(pid_t pid)
821 {
822 	struct proc *p;
823 
824 	sx_slock(&allproc_lock);
825 	LIST_FOREACH(p, &zombproc, p_list)
826 		if (p->p_pid == pid) {
827 			PROC_LOCK(p);
828 			break;
829 		}
830 	sx_sunlock(&allproc_lock);
831 	return (p);
832 }
833 
834 
835 /*
836  * Must be called with the process locked and will return with it unlocked.
837  */
838 static int
839 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int doingzomb)
840 {
841 	struct kinfo_proc kinfo_proc;
842 	int error;
843 	struct proc *np;
844 	pid_t pid = p->p_pid;
845 
846 	PROC_LOCK_ASSERT(p, MA_OWNED);
847 	fill_kinfo_proc(p, &kinfo_proc);
848 	PROC_UNLOCK(p);
849 	error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc, sizeof(kinfo_proc));
850 	if (error)
851 		return (error);
852 	if (doingzomb)
853 		np = zpfind(pid);
854 	else {
855 		if (pid == 0)
856 			return (0);
857 		np = pfind(pid);
858 	}
859 	if (np == NULL)
860 		return EAGAIN;
861 	if (np != p) {
862 		PROC_UNLOCK(np);
863 		return EAGAIN;
864 	}
865 	PROC_UNLOCK(np);
866 	return (0);
867 }
868 
/*
 * Handler for the kern.proc.* sysctls other than "args": dumps either a
 * single process (KERN_PROC_PID) or every visible process, optionally
 * filtered by pgrp, tty, uid, or ruid, as an array of kinfo_proc.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int doingzomb;
	int error = 0;

	/* Single-pid request: no list walk needed. */
	if (oidp->oid_number == KERN_PROC_PID) {
		if (namelen != 1)
			return (EINVAL);
		p = pfind((pid_t)name[0]);
		if (!p)
			return (0);
		/* An invisible pid is reported as empty, not an error. */
		if (p_cansee(curthread, p)) {
			PROC_UNLOCK(p);
			return (0);
		}
		error = sysctl_out_proc(p, req, 0);
		return (error);
	}
	/* KERN_PROC_ALL takes no argument; the filters take exactly one. */
	if (oidp->oid_number == KERN_PROC_ALL && !namelen)
		;
	else if (oidp->oid_number != KERN_PROC_ALL && namelen == 1)
		;
	else
		return (EINVAL);

	/* Size probe: report needed space without copying anything. */
	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			return (error);
	}
	/* Wire the user buffer so copyout can't fault while we hold locks. */
	sysctl_wire_old_buffer(req, 0);
	sx_slock(&allproc_lock);
	/* Pass 0 walks the live list, pass 1 the zombies. */
	for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
		if (!doingzomb)
			p = LIST_FIRST(&allproc);
		else
			p = LIST_FIRST(&zombproc);
		for (; p != 0; p = LIST_NEXT(p, p_list)) {
			/*
			 * Skip embryonic processes.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_state == PRS_NEW) {
				mtx_unlock_spin(&sched_lock);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			/*
			 * Show a user only appropriate processes.
			 */
			if (p_cansee(curthread, p)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oidp->oid_number) {

			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_TTY:
				if ((p->p_flag & P_CONTROLT) == 0 ||
				    p->p_session == NULL) {
					PROC_UNLOCK(p);
					continue;
				}
				SESS_LOCK(p->p_session);
				if (p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (udev_t)name[0]) {
					SESS_UNLOCK(p->p_session);
					PROC_UNLOCK(p);
					continue;
				}
				SESS_UNLOCK(p->p_session);
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;
			}

			/* Unlocks p; EAGAIN here aborts the whole request. */
			error = sysctl_out_proc(p, req, doingzomb);
			if (error) {
				sx_sunlock(&allproc_lock);
				return (error);
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (0);
}
988 
989 struct pargs *
990 pargs_alloc(int len)
991 {
992 	struct pargs *pa;
993 
994 	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
995 		M_WAITOK);
996 	pa->ar_ref = 1;
997 	pa->ar_length = len;
998 	return (pa);
999 }
1000 
/* Release pargs storage; callers use pargs_drop() for refcounting. */
void
pargs_free(struct pargs *pa)
{

	FREE(pa, M_PARGS);
}
1007 
1008 void
1009 pargs_hold(struct pargs *pa)
1010 {
1011 
1012 	if (pa == NULL)
1013 		return;
1014 	PARGS_LOCK(pa);
1015 	pa->ar_ref++;
1016 	PARGS_UNLOCK(pa);
1017 }
1018 
1019 void
1020 pargs_drop(struct pargs *pa)
1021 {
1022 
1023 	if (pa == NULL)
1024 		return;
1025 	PARGS_LOCK(pa);
1026 	if (--pa->ar_ref == 0) {
1027 		PARGS_UNLOCK(pa);
1028 		pargs_free(pa);
1029 	} else
1030 		PARGS_UNLOCK(pa);
1031 }
1032 
1033 /*
1034  * This sysctl allows a process to retrieve the argument list or process
1035  * title for another process without groping around in the address space
1036  * of the other process.  It also allow a process to set its own "process
1037  * title to a string of its own choice.
1038  */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct pargs *newpa, *pa;
	struct proc *p;
	int error = 0;

	if (namelen != 1)
		return (EINVAL);

	/* pfind() returns the process locked, or NULL. */
	p = pfind((pid_t)name[0]);
	if (!p)
		return (0);

	/* Invisible processes read back as empty, not as an error. */
	if ((!ps_argsopen) && p_cansee(curthread, p)) {
		PROC_UNLOCK(p);
		return (0);
	}

	/* Only a process may set its own title. */
	if (req->newptr && curproc != p) {
		PROC_UNLOCK(p);
		return (EPERM);
	}

	/* Hold a reference so we can copy out after dropping the lock. */
	pa = p->p_args;
	pargs_hold(pa);
	PROC_UNLOCK(p);
	if (req->oldptr != NULL && pa != NULL)
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
	pargs_drop(pa);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Setting the title: build the new pargs, then swap it in. */
	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
		return (ENOMEM);
	newpa = pargs_alloc(req->newlen);
	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
	if (error != 0) {
		pargs_free(newpa);
		return (error);
	}
	PROC_LOCK(p);
	pa = p->p_args;
	p->p_args = newpa;
	PROC_UNLOCK(p);
	/* Drop the process's former reference to the old pargs. */
	pargs_drop(pa);
	return (0);
}
1089 
/*
 * kern.proc sysctl tree.  The filtered nodes (pgrp, tty, uid, ruid,
 * pid) all share sysctl_kern_proc, which dispatches on oid_number;
 * "args" is writable by anybody so a process can set its own title.
 */
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");
1112 
1113