/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 * $FreeBSD$
 */

#include "opt_ktrace.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kse.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <machine/critical.h>

MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

static void doenterpgrp(struct proc *, struct pgrp *);

static void pgdelete(struct pgrp *);

static void orphanpg(struct pgrp *pg);

static void proc_ctor(void *mem, int size, void *arg);
static void proc_dtor(void *mem, int size, void *arg);
static void proc_init(void *mem, int size);
static void proc_fini(void *mem, int size);

/*
 * Other process lists
 */
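/*
 * Locking, as used in this file: allproc_lock protects allproc, zombproc
 * and pidhashtbl; proctree_lock protects pgrphashtbl and the parent/child,
 * process group and session linkage.
 */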
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct sx allproc_lock;
struct sx proctree_lock;
struct mtx pargs_ref_lock;
struct mtx ppeers_lock;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;

int kstack_pages = KSTACK_PAGES;
int uarea_pages = UAREA_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");

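/*
 * RANGEOF() yields the size in bytes of the region of "type" spanning
 * from member "start" up to (but not including) member "end".
 */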
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

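/*
 * Catch at compile time any change to struct kinfo_proc that would break
 * the size userland expects (KINFO_PROC_SIZE).
 */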
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);

/*
 * Initialize global process hashing structures.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}

/*
 * Prepare a proc for use.
 */
static void
proc_ctor(void *mem, int size, void *arg)
{
	struct proc *p;

	p = (struct proc *)mem;
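	/* Nothing else to construct here for now. */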
}

/*
 * Reclaim a proc after use.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	KASSERT((p->p_numthreads == 1),
	    ("bad number of threads in exiting process"));
	td = FIRST_THREAD_IN_PROC(p);
	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
	kg = FIRST_KSEGRP_IN_PROC(p);
	KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
	ke = FIRST_KSE_IN_KSEGRP(kg);
	KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));

	/*
	 * Dispose of an alternate kstack, if it exists.
	 * XXX What if there is more than one thread in the proc?
	 *     The first thread in the proc is special and is not
	 *     freed, so the alternate kstack must be disposed of here.
	 */
	if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
		pmap_dispose_altkstack(td);

	/*
	 * We want to make sure we know the initial linkages,
	 * so for now tear them down and remake them.
	 * This is probably unnecessary, as we can probably rely
	 * on the state coming in here from wait4().
	 */
	proc_linkup(p, kg, ke, td);
}

/*
 * Initialize type-stable parts of a proc (when newly created).
 */
static void
proc_init(void *mem, int size)
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	p = (struct proc *)mem;
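	/*
	 * The zone was created with sched_sizeof_proc(), so the scheduler's
	 * per-process data lives directly after the proc structure.
	 */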
	p->p_sched = (struct p_sched *)&p[1];
	vm_proc_new(p);
	td = thread_alloc();
	ke = kse_alloc();
	kg = ksegrp_alloc();
	proc_linkup(p, kg, ke, td);
}

/*
 * Tear down type-stable parts of a proc (just before being discarded)
 */
static void
proc_fini(void *mem, int size)
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	p = (struct proc *)mem;
	KASSERT((p->p_numthreads == 1),
	    ("bad number of threads in freeing process"));
	td = FIRST_THREAD_IN_PROC(p);
	KASSERT((td != NULL), ("proc_fini: bad thread pointer"));
	kg = FIRST_KSEGRP_IN_PROC(p);
	KASSERT((kg != NULL), ("proc_fini: bad kg pointer"));
	ke = FIRST_KSE_IN_KSEGRP(kg);
	KASSERT((ke != NULL), ("proc_fini: bad ke pointer"));
	vm_proc_dispose(p);
	thread_free(td);
	ksegrp_free(kg);
	kse_free(ke);
}

/*
 * Is p an inferior of the current process?
 */
int
inferior(p)
	register struct proc *p;
{

	sx_assert(&proctree_lock, SX_LOCKED);
	for (; p != curproc; p = p->p_pptr)
		if (p->p_pid == 0)
			return (0);
	return (1);
}

/*
 * Locate a process by number
 */
struct proc *
pfind(pid)
	register pid_t pid;
{
	register struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, PIDHASH(pid), p_hash)
		if (p->p_pid == pid) {
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

/*
 * Locate a process group by number.
 * The caller must hold proctree_lock.
 */
struct pgrp *
pgfind(pgid)
	register pid_t pgid;
{
	register struct pgrp *pgrp;

	sx_assert(&proctree_lock, SX_LOCKED);

	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
		if (pgrp->pg_id == pgid) {
			PGRP_LOCK(pgrp);
			return (pgrp);
		}
	}
	return (NULL);
}

/*
 * Create a new process group.
 * pgid must be equal to the pid of p.
 * Begin a new session if required.
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));

	pgrp2 = pgfind(pgid);

	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		sess->s_count = 1;
		sess->s_ttyvp = NULL;
		sess->s_ttyp = NULL;
		bcopy(p->p_session->s_login, sess->s_login,
			    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		pgrp->pg_session = p->p_session;
		SESS_LOCK(pgrp->pg_session);
		pgrp->pg_session->s_count++;
		SESS_UNLOCK(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to an existing process group
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
		("%s: pgrp's session %p, p->p_session %p.\n",
		__func__,
		pgrp->pg_session,
		p->p_session));
	KASSERT(pgrp != p->p_pgrp,
		("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to a process group
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}

/*
 * remove process from process group
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}

/*
 * delete a process group
 */
static void
pgdelete(pgrp)
	register struct pgrp *pgrp;
{
	struct session *savesess;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	PGRP_LOCK(pgrp);
	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	LIST_REMOVE(pgrp, pg_hash);
	savesess = pgrp->pg_session;
	SESS_LOCK(savesess);
	savesess->s_count--;
	SESS_UNLOCK(savesess);
	PGRP_UNLOCK(pgrp);
	if (savesess->s_count == 0) {
		mtx_destroy(&savesess->s_mtx);
		FREE(pgrp->pg_session, M_SESSION);
	}
	mtx_destroy(&pgrp->pg_mtx);
	FREE(pgrp, M_PGRP);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		PGRP_LOCK(pgrp);
		if (entering)
			pgrp->pg_jobc++;
		else {
			--pgrp->pg_jobc;
			if (pgrp->pg_jobc == 0)
				orphanpg(pgrp);
		}
		PGRP_UNLOCK(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		if ((hispgrp = p->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    p->p_state != PRS_ZOMBIE) {
			PGRP_LOCK(hispgrp);
			if (entering)
				hispgrp->pg_jobc++;
			else {
				--hispgrp->pg_jobc;
				if (hispgrp->pg_jobc == 0)
					orphanpg(hispgrp);
			}
			PGRP_UNLOCK(hispgrp);
		}
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	register struct proc *p;

	PGRP_LOCK_ASSERT(pg, MA_OWNED);

	mtx_lock_spin(&sched_lock);
	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (P_SHOULDSTOP(p)) {
			mtx_unlock_spin(&sched_lock);
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				PROC_LOCK(p);
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
				PROC_UNLOCK(p);
			}
			return;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Fill in a kinfo_proc structure for the specified process.
 * Must be called with the target process locked.
 */
void
fill_kinfo_proc(p, kp)
	struct proc *p;
	struct kinfo_proc *kp;
{
	struct thread *td;
	struct kse *ke;
	struct ksegrp *kg;
	struct tty *tp;
	struct session *sp;
	struct timeval tv;

	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	kp->ki_addr = /* p->p_addr; */ 0; /* XXXKSE */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracep;
	mtx_lock(&ktrace_mtx);
	kp->ki_traceflag = p->p_traceflag;
	mtx_unlock(&ktrace_mtx);
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	if (p->p_ucred) {
		kp->ki_uid = p->p_ucred->cr_uid;
		kp->ki_ruid = p->p_ucred->cr_ruid;
		kp->ki_svuid = p->p_ucred->cr_svuid;
		/* XXX bde doesn't like KI_NGROUPS */
		kp->ki_ngroups = min(p->p_ucred->cr_ngroups, KI_NGROUPS);
		bcopy(p->p_ucred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = p->p_ucred->cr_rgid;
		kp->ki_svgid = p->p_ucred->cr_svgid;
	}
	if (p->p_procsig) {
		kp->ki_sigignore = p->p_procsig->ps_sigignore;
		kp->ki_sigcatch = p->p_procsig->ps_sigcatch;
	}
	mtx_lock_spin(&sched_lock);
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		if (p->p_sflag & PS_INMEM)
			kp->ki_rssize += UAREA_PAGES;
		FOREACH_THREAD_IN_PROC(p, td) /* XXXKSE: thread swapout check */
			kp->ki_rssize += KSTACK_PAGES;
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	}
	if ((p->p_sflag & PS_INMEM) && p->p_stats) {
		kp->ki_start = p->p_stats->p_start;
		kp->ki_rusage = p->p_stats->p_ru;
		kp->ki_childtime.tv_sec = p->p_stats->p_cru.ru_utime.tv_sec +
		    p->p_stats->p_cru.ru_stime.tv_sec;
		kp->ki_childtime.tv_usec = p->p_stats->p_cru.ru_utime.tv_usec +
		    p->p_stats->p_cru.ru_stime.tv_usec;
	}
	if (p->p_state != PRS_ZOMBIE) {
		td = FIRST_THREAD_IN_PROC(p);
		if (td == NULL) {
			/* XXXKSE: This should never happen. */
			printf("fill_kinfo_proc(): pid %d has no threads!\n",
			    p->p_pid);
			mtx_unlock_spin(&sched_lock);
			return;
		}
		if (!(p->p_flag & P_THREADED)) {
			if (td->td_wmesg != NULL) {
				strlcpy(kp->ki_wmesg, td->td_wmesg,
				    sizeof(kp->ki_wmesg));
			}
			if (TD_ON_LOCK(td)) {
				kp->ki_kiflag |= KI_LOCKBLOCK;
				strlcpy(kp->ki_lockname, td->td_lockname,
				    sizeof(kp->ki_lockname));
			}
		}

		if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
			if (TD_ON_RUNQ(td) ||
			    TD_CAN_RUN(td) ||
			    TD_IS_RUNNING(td)) {
				kp->ki_stat = SRUN;
			} else if (P_SHOULDSTOP(p)) {
				kp->ki_stat = SSTOP;
			} else if (TD_IS_SLEEPING(td)) {
				kp->ki_stat = SSLEEP;
			} else if (TD_ON_LOCK(td)) {
				kp->ki_stat = SLOCK;
			} else {
				kp->ki_stat = SWAIT;
			}
		} else {
			kp->ki_stat = SIDL;
		}

		kp->ki_sflag = p->p_sflag;
		kp->ki_swtime = p->p_swtime;
		kp->ki_pid = p->p_pid;
		/* vvv XXXKSE */
		if (!(p->p_flag & P_THREADED)) {
			kg = td->td_ksegrp;
			ke = td->td_kse;
			KASSERT((ke != NULL), ("fill_kinfo_proc: Null KSE"));
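			/* Convert the accumulated bintime to microseconds. */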
			bintime2timeval(&p->p_runtime, &tv);
			kp->ki_runtime =
			    tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;

			/* things in the KSE GROUP */
			kp->ki_estcpu = kg->kg_estcpu;
			kp->ki_slptime = kg->kg_slptime;
			kp->ki_pri.pri_user = kg->kg_user_pri;
			kp->ki_pri.pri_class = kg->kg_pri_class;
			kp->ki_nice = kg->kg_nice;

			/* Things in the thread */
			kp->ki_wchan = td->td_wchan;
			kp->ki_pri.pri_level = td->td_priority;
			kp->ki_pri.pri_native = td->td_base_pri;
			kp->ki_lastcpu = td->td_lastcpu;
			kp->ki_tdflags = td->td_flags;
			kp->ki_pcb = td->td_pcb;
			kp->ki_kstack = (void *)td->td_kstack;

			/* Things in the kse */
			kp->ki_rqindex = ke->ke_rqindex;
			kp->ki_oncpu = ke->ke_oncpu;
			kp->ki_pctcpu = sched_pctcpu(ke);
		} else {
			kp->ki_oncpu = -1;
			kp->ki_lastcpu = -1;
			kp->ki_tdflags = -1;
			/* All the rest are 0 for now */
		}
		/* ^^^ XXXKSE */
	} else {
		kp->ki_stat = SZOMB;
	}
	mtx_unlock_spin(&sched_lock);
	sp = NULL;
	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = dev2udev(tp->t_dev);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NOUDEV;
	if (p->p_comm[0] != '\0') {
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
		strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
	}
	kp->ki_siglist = p->p_siglist;
	kp->ki_sigmask = p->p_sigmask;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_flag = p->p_flag;
	/* If jailed(p->p_ucred), emulate the old P_JAILED flag. */
	if (jailed(p->p_ucred))
		kp->ki_flag |= P_JAILED;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}

/*
 * Locate a zombie process by number
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid) {
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}


/*
 * Must be called with the process locked and will return with it unlocked.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int doingzomb)
{
	struct kinfo_proc kinfo_proc;
	int error;
	struct proc *np;
	pid_t pid = p->p_pid;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	fill_kinfo_proc(p, &kinfo_proc);
	PROC_UNLOCK(p);
	error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc, sizeof(kinfo_proc));
	if (error)
		return (error);
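	/*
	 * The process was unlocked during the copyout above; look it up
	 * again and return EAGAIN if it has exited or been replaced.
	 */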
	if (doingzomb)
		np = zpfind(pid);
	else {
		if (pid == 0)
			return (0);
		np = pfind(pid);
	}
	if (np == NULL)
		return (EAGAIN);
	if (np != p) {
		PROC_UNLOCK(np);
		return (EAGAIN);
	}
	PROC_UNLOCK(np);
	return (0);
}

static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	int doingzomb;
	int error = 0;

	if (oidp->oid_number == KERN_PROC_PID) {
		if (namelen != 1)
			return (EINVAL);
		p = pfind((pid_t)name[0]);
		if (!p)
			return (0);
		if (p_cansee(curthread, p)) {
			PROC_UNLOCK(p);
			return (0);
		}
		error = sysctl_out_proc(p, req, 0);
		return (error);
	}
	if (oidp->oid_number == KERN_PROC_ALL && !namelen)
		;
	else if (oidp->oid_number != KERN_PROC_ALL && namelen == 1)
		;
	else
		return (EINVAL);

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			return (error);
	}
	sysctl_wire_old_buffer(req, 0);
	sx_slock(&allproc_lock);
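	/* Pass 0 walks allproc, pass 1 walks zombproc. */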
	for (doingzomb = 0; doingzomb < 2; doingzomb++) {
		if (!doingzomb)
			p = LIST_FIRST(&allproc);
		else
			p = LIST_FIRST(&zombproc);
		for (; p != 0; p = LIST_NEXT(p, p_list)) {
			PROC_LOCK(p);
			/*
			 * Show a user only appropriate processes.
			 */
			if (p_cansee(curthread, p)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * Skip embryonic processes.
			 */
			if (p->p_state == PRS_NEW) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oidp->oid_number) {

			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_TTY:
				if ((p->p_flag & P_CONTROLT) == 0 ||
				    p->p_session == NULL) {
					PROC_UNLOCK(p);
					continue;
				}
				SESS_LOCK(p->p_session);
				if (p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (udev_t)name[0]) {
					SESS_UNLOCK(p->p_session);
					PROC_UNLOCK(p);
					continue;
				}
				SESS_UNLOCK(p->p_session);
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;
			}

			error = sysctl_out_proc(p, req, doingzomb);
			if (error) {
				sx_sunlock(&allproc_lock);
				return (error);
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (0);
}

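/*
 * Allocate a reference-counted process argument block with room for
 * "len" bytes of argument data.
 */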
struct pargs *
pargs_alloc(int len)
{
	struct pargs *pa;

	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
		M_WAITOK);
	pa->ar_ref = 1;
	pa->ar_length = len;
	return (pa);
}

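/*
 * Free an argument block's storage.
 */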
void
pargs_free(struct pargs *pa)
{

	FREE(pa, M_PARGS);
}

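/*
 * Acquire a reference on an argument block; a NULL argument is a no-op.
 */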
void
pargs_hold(struct pargs *pa)
{

	if (pa == NULL)
		return;
	PARGS_LOCK(pa);
	pa->ar_ref++;
	PARGS_UNLOCK(pa);
}

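/*
 * Drop a reference on an argument block, freeing it when the last
 * reference goes away.
 */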
void
pargs_drop(struct pargs *pa)
{

	if (pa == NULL)
		return;
	PARGS_LOCK(pa);
	if (--pa->ar_ref == 0) {
		PARGS_UNLOCK(pa);
		pargs_free(pa);
	} else
		PARGS_UNLOCK(pa);
}

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *pa;
	int error = 0;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (!p)
		return (0);

	if ((!ps_argsopen) && p_cansee(curthread, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	PROC_UNLOCK(p);

	if (req->newptr && curproc != p)
		return (EPERM);

	PROC_LOCK(p);
	pa = p->p_args;
	pargs_hold(pa);
	PROC_UNLOCK(p);
	if (req->oldptr && pa != NULL) {
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
	}
	pargs_drop(pa);
	if (req->newptr == NULL)
		return (error);

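	/*
	 * Detach and release the old argument block before installing the
	 * new one supplied by the caller.
	 */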
	PROC_LOCK(p);
	pa = p->p_args;
	p->p_args = NULL;
	PROC_UNLOCK(p);
	pargs_drop(pa);

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
		return (error);

	pa = pargs_alloc(req->newlen);
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (!error) {
		PROC_LOCK(p);
		p->p_args = pa;
		PROC_UNLOCK(p);
	} else
		pargs_free(pa);
	return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

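/*
 * Userland consumer sketch (illustrative only): a single process's
 * kinfo_proc can be fetched through the KERN_PROC_PID node below with
 * sysctl(3), e.g.:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0)
 *		printf("%s\n", kp.ki_comm);
 */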
SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

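/*
 * The writable args node below is also the hook a process can use to
 * publish a new title for itself (cf. setproctitle(3)).
 */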
SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");