/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sbuf.h>
#include <sys/sysent.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/uma.h>

SDT_PROVIDER_DEFINE(proc);
SDT_PROBE_DEFINE(proc, kernel, ctor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, ctor, return);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, dtor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 3, "struct thread *");
SDT_PROBE_DEFINE(proc, kernel, dtor, return);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 2, "void *");
SDT_PROBE_DEFINE(proc, kernel, init, entry);
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 2, "int");
SDT_PROBE_DEFINE(proc, kernel, init, return);
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 2, "int");

MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp);
static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
    int preferthread);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
static void proc_dtor(void *mem, int size, void *arg);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);
static void pargs_free(struct pargs *pa);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct sx allproc_lock;
struct sx proctree_lock;
struct mtx ppeers_lock;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;

int kstack_pages = KSTACK_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");

CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);

/*
 * Initialize global process hashing structures.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}

/*
 * Prepare a proc for use.
 */
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, ctor, entry, p, size, arg, flags, 0);
	EVENTHANDLER_INVOKE(process_ctor, p);
	SDT_PROBE(proc, kernel, ctor, return, p, size, arg, flags, 0);
	return (0);
}

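/*
 * Note on the UMA lifecycle (a summary, not new mechanism): proc_init
 * runs once when an item is first created for the zone, proc_ctor on
 * every allocation, proc_dtor on every free, and proc_fini only if the
 * zone ever released an item's memory -- which UMA_ZONE_NOFREE above
 * prevents, keeping struct proc storage type-stable.  A rough sketch of
 * the allocation path:
 *
 *	p = uma_zalloc(proc_zone, M_WAITOK);	(init once, then ctor)
 *	...
 *	uma_zfree(proc_zone, p);		(dtor; item stays cached)
 */
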
/*
 * Reclaim a proc after use.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	td = FIRST_THREAD_IN_PROC(p);
	SDT_PROBE(proc, kernel, dtor, entry, p, size, arg, td, 0);
	if (td != NULL) {
#ifdef INVARIANTS
		KASSERT((p->p_numthreads == 1),
		    ("bad number of threads in exiting process"));
		KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
		/* Free all OSD associated to this thread. */
		osd_thread_exit(td);
	}
	EVENTHANDLER_INVOKE(process_dtor, p);
	if (p->p_ksi != NULL)
		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
	SDT_PROBE(proc, kernel, dtor, return, p, size, arg, 0, 0);
}

/*
 * Initialize type-stable parts of a proc (when newly created).
 */
static int
proc_init(void *mem, int size, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, init, entry, p, size, flags, 0, 0);
	p->p_sched = (struct p_sched *)&p[1];
	bzero(&p->p_mtx, sizeof(struct mtx));
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	cv_init(&p->p_pwait, "ppwait");
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	EVENTHANDLER_INVOKE(process_init, p);
	p->p_stats = pstats_alloc();
	SDT_PROBE(proc, kernel, init, return, p, size, flags, 0, 0);
	return (0);
}

/*
 * UMA should ensure that this function is never called.
 * Freeing a proc structure would violate type stability.
 */
static void
proc_fini(void *mem, int size)
{
#ifdef notnow
	struct proc *p;

	p = (struct proc *)mem;
	EVENTHANDLER_INVOKE(process_fini, p);
	pstats_free(p->p_stats);
	thread_free(FIRST_THREAD_IN_PROC(p));
	mtx_destroy(&p->p_mtx);
	if (p->p_ksi != NULL)
		ksiginfo_free(p->p_ksi);
#else
	panic("proc reclaimed");
#endif
}

/*
 * Is p an inferior of the current process?
 */
int
inferior(p)
	register struct proc *p;
{

	sx_assert(&proctree_lock, SX_LOCKED);
	for (; p != curproc; p = p->p_pptr)
		if (p->p_pid == 0)
			return (0);
	return (1);
}

/*
 * Locate a process by number; return only "live" processes -- i.e., neither
 * zombies nor newly born but incompletely initialized processes.  By not
 * returning processes in the PRS_NEW state, we allow callers to avoid
 * testing for that condition to avoid dereferencing p_ucred, et al.
 */
struct proc *
pfind(pid)
	register pid_t pid;
{
	register struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, PIDHASH(pid), p_hash)
		if (p->p_pid == pid) {
			if (p->p_state == PRS_NEW) {
				p = NULL;
				break;
			}
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

/*
 * Locate a process group by number.
 * The caller must hold proctree_lock.
 */
struct pgrp *
pgfind(pgid)
	register pid_t pgid;
{
	register struct pgrp *pgrp;

	sx_assert(&proctree_lock, SX_LOCKED);

	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
		if (pgrp->pg_id == pgid) {
			PGRP_LOCK(pgrp);
			return (pgrp);
		}
	}
	return (NULL);
}

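/*
 * Both lookups above return with the found object locked; the caller is
 * responsible for dropping that lock.  A typical (hypothetical) caller:
 *
 *	p = pfind(pid);
 *	if (p == NULL)
 *		return (ESRCH);
 *	error = p_cansee(curthread, p);
 *	...
 *	PROC_UNLOCK(p);
 */
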
/*
 * Create a new process group.
 * pgid must be equal to the pid of p.
 * Begin a new session if required.
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));

	pgrp2 = pgfind(pgid);

	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		refcount_init(&sess->s_count, 1);
		sess->s_ttyvp = NULL;
		sess->s_ttyp = NULL;
		bcopy(p->p_session->s_login, sess->s_login,
		    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		pgrp->pg_session = p->p_session;
		sess_hold(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to an existing process group
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
	    ("%s: pgrp's session %p, p->p_session %p.\n",
	    __func__,
	    pgrp->pg_session,
	    p->p_session));
	KASSERT(pgrp != p->p_pgrp,
	    ("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}

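/*
 * For orientation (a summary of the callers, which live elsewhere, not
 * code in this file): setsid(2) allocates a fresh pgrp and session and
 * calls enterpgrp() with sess != NULL; setpgid(2) calls enterpgrp()
 * with sess == NULL when creating a new group for the target, and
 * enterthispgrp() when joining a group that already exists in the
 * session.
 */
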
/*
 * Move p to a process group
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}

/*
 * remove process from process group
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}

/*
 * delete a process group
 */
static void
pgdelete(pgrp)
	register struct pgrp *pgrp;
{
	struct session *savesess;
	struct tty *tp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	PGRP_LOCK(pgrp);
	tp = pgrp->pg_session->s_ttyp;
	LIST_REMOVE(pgrp, pg_hash);
	savesess = pgrp->pg_session;
	PGRP_UNLOCK(pgrp);

	/* Remove the reference to the pgrp before deallocating it. */
	if (tp != NULL) {
		tty_lock(tp);
		tty_rel_pgrp(tp, pgrp);
	}

	mtx_destroy(&pgrp->pg_mtx);
	free(pgrp, M_PGRP);
	sess_release(savesess);
}

static void
pgadjustjobc(pgrp, entering)
	struct pgrp *pgrp;
	int entering;
{

	PGRP_LOCK(pgrp);
	if (entering)
		pgrp->pg_jobc++;
	else {
		--pgrp->pg_jobc;
		if (pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}
	PGRP_UNLOCK(pgrp);
}

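/*
 * Rule of thumb for pg_jobc (restating the comment below): a member
 * "qualifies" its group while its parent sits in a different group of
 * the same session, since that parent can still deliver job-control
 * signals to the group.  Example: shell S in group A forks pipeline
 * member P into group B of the same session; P qualifies B, so B's
 * pg_jobc is 1.  Once no member of B has such a parent, pg_jobc drops
 * to 0 and orphanpg() runs.
 */
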
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		hispgrp = p->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		PROC_LOCK(p);
		if (p->p_state == PRS_ZOMBIE) {
			PROC_UNLOCK(p);
			continue;
		}
		PROC_UNLOCK(p);
		pgadjustjobc(hispgrp, entering);
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	register struct proc *p;

	PGRP_LOCK_ASSERT(pg, MA_OWNED);

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		PROC_LOCK(p);
		if (P_SHOULDSTOP(p)) {
			PROC_UNLOCK(p);
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				PROC_LOCK(p);
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
				PROC_UNLOCK(p);
			}
			return;
		}
		PROC_UNLOCK(p);
	}
}

void
sess_hold(struct session *s)
{

	refcount_acquire(&s->s_count);
}

void
sess_release(struct session *s)
{

	if (refcount_release(&s->s_count)) {
		if (s->s_ttyp != NULL) {
			tty_lock(s->s_ttyp);
			tty_rel_sess(s->s_ttyp, s);
		}
		mtx_destroy(&s->s_mtx);
		free(s, M_SESSION);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Calculate the kinfo_proc members which contain process-wide
 * information.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	kp->ki_estcpu = 0;
	kp->ki_pctcpu = 0;
	kp->ki_runtime = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		kp->ki_pctcpu += sched_pctcpu(td);
		kp->ki_runtime += cputick2usec(td->td_runtime);
		kp->ki_estcpu += td->td_estcpu;
		thread_unlock(td);
	}
}

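/*
 * The kinfo_proc routines below fill the structure consumed by ps(1),
 * top(1) and libkvm.  Its layout is part of the userland ABI, which is
 * why the CTASSERT near the top of the file pins
 * sizeof(struct kinfo_proc) to KINFO_PROC_SIZE; growing the structure
 * requires bumping that constant and handling compatibility explicitly.
 */
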
/*
 * Clear kinfo_proc and fill in any information that is common
 * to all threads in the process.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td0;
	struct tty *tp;
	struct session *sp;
	struct ucred *cred;
	struct sigacts *ps;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	kp->ki_addr =/* p->p_addr; */0; /* XXX */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	mtx_lock(&ktrace_mtx);
	kp->ki_traceflag = p->p_traceflag;
	mtx_unlock(&ktrace_mtx);
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	kp->ki_flag = p->p_flag;
	cred = p->p_ucred;
	if (cred) {
		kp->ki_uid = cred->cr_uid;
		kp->ki_ruid = cred->cr_ruid;
		kp->ki_svuid = cred->cr_svuid;
		kp->ki_cr_flags = cred->cr_flags;
		/* XXX bde doesn't like KI_NGROUPS */
		if (cred->cr_ngroups > KI_NGROUPS) {
			kp->ki_ngroups = KI_NGROUPS;
			kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
		} else
			kp->ki_ngroups = cred->cr_ngroups;
		bcopy(cred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = cred->cr_rgid;
		kp->ki_svgid = cred->cr_svgid;
		/* If jailed(cred), emulate the old P_JAILED flag. */
		if (jailed(cred)) {
			kp->ki_flag |= P_JAILED;
			/* If inside the jail, use 0 as a jail ID. */
			if (cred->cr_prison != curthread->td_ucred->cr_prison)
				kp->ki_jid = cred->cr_prison->pr_id;
		}
	}
	ps = p->p_sigacts;
	if (ps) {
		mtx_lock(&ps->ps_mtx);
		kp->ki_sigignore = ps->ps_sigignore;
		kp->ki_sigcatch = ps->ps_sigcatch;
		mtx_unlock(&ps->ps_mtx);
	}
	PROC_SLOCK(p);
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	} else if (p->p_state == PRS_ZOMBIE)
		kp->ki_stat = SZOMB;
	if (kp->ki_flag & P_INMEM)
		kp->ki_sflag = PS_INMEM;
	else
		kp->ki_sflag = 0;
	/* Calculate legacy swtime as seconds since 'swtick'. */
	kp->ki_swtime = (ticks - p->p_swtick) / hz;
	kp->ki_pid = p->p_pid;
	kp->ki_nice = p->p_nice;
	rufetch(p, &kp->ki_rusage);
	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
	PROC_SUNLOCK(p);
	if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
		kp->ki_start = p->p_stats->p_start;
		timevaladd(&kp->ki_start, &boottime);
		PROC_SLOCK(p);
		calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
		PROC_SUNLOCK(p);
		calccru(p, &kp->ki_childutime, &kp->ki_childstime);

		/* Some callers want child-times in a single value */
		kp->ki_childtime = kp->ki_childstime;
		timevaladd(&kp->ki_childtime, &kp->ki_childutime);
	}
	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			/* XXX proctree_lock */
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = tty_udev(tp);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NODEV;
	if (p->p_comm[0] != '\0')
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
	    p->p_sysent->sv_name[0] != '\0')
		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
	kp->ki_siglist = p->p_siglist;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}

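/*
 * Division of labor (a restatement): fill_kinfo_proc_only() writes the
 * per-process fields once, fill_kinfo_thread() overlays per-thread
 * fields for one thread, and fill_kinfo_aggregate() sums CPU figures
 * across all threads.  With 'preferthread' set, the per-thread
 * runtime/%cpu values overwrite the process-wide ones so that each
 * emitted record describes that thread.
 */
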
/*
 * Fill in information that is thread specific.  Must be called with p_slock
 * locked.  If 'preferthread' is set, overwrite certain process-related
 * fields that are maintained for both threads and processes.
 */
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	thread_lock(td);
	if (td->td_wmesg != NULL)
		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
	else
		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
	if (td->td_name[0] != '\0')
		strlcpy(kp->ki_ocomm, td->td_name, sizeof(kp->ki_ocomm));
	if (TD_ON_LOCK(td)) {
		kp->ki_kiflag |= KI_LOCKBLOCK;
		strlcpy(kp->ki_lockname, td->td_lockname,
		    sizeof(kp->ki_lockname));
	} else {
		kp->ki_kiflag &= ~KI_LOCKBLOCK;
		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
	}

	if (p->p_state == PRS_NORMAL) { /* approximate. */
		if (TD_ON_RUNQ(td) ||
		    TD_CAN_RUN(td) ||
		    TD_IS_RUNNING(td)) {
			kp->ki_stat = SRUN;
		} else if (P_SHOULDSTOP(p)) {
			kp->ki_stat = SSTOP;
		} else if (TD_IS_SLEEPING(td)) {
			kp->ki_stat = SSLEEP;
		} else if (TD_ON_LOCK(td)) {
			kp->ki_stat = SLOCK;
		} else {
			kp->ki_stat = SWAIT;
		}
	} else if (p->p_state == PRS_ZOMBIE) {
		kp->ki_stat = SZOMB;
	} else {
		kp->ki_stat = SIDL;
	}

	/* Things in the thread */
	kp->ki_wchan = td->td_wchan;
	kp->ki_pri.pri_level = td->td_priority;
	kp->ki_pri.pri_native = td->td_base_pri;
	kp->ki_lastcpu = td->td_lastcpu;
	kp->ki_oncpu = td->td_oncpu;
	kp->ki_tdflags = td->td_flags;
	kp->ki_tid = td->td_tid;
	kp->ki_numthreads = p->p_numthreads;
	kp->ki_pcb = td->td_pcb;
	kp->ki_kstack = (void *)td->td_kstack;
	kp->ki_slptime = (ticks - td->td_slptick) / hz;
	kp->ki_pri.pri_class = td->td_pri_class;
	kp->ki_pri.pri_user = td->td_user_pri;

	if (preferthread) {
		kp->ki_runtime = cputick2usec(td->td_runtime);
		kp->ki_pctcpu = sched_pctcpu(td);
		kp->ki_estcpu = td->td_estcpu;
	}

	/* We can't get this anymore but ps etc never used it anyway. */
	kp->ki_rqindex = 0;

	SIGSETOR(kp->ki_siglist, td->td_siglist);
	kp->ki_sigmask = td->td_sigmask;
	thread_unlock(td);
}

/*
 * Fill in a kinfo_proc structure for the specified process.
 * Must be called with the target process locked.
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{

	MPASS(FIRST_THREAD_IN_PROC(p) != NULL);

	fill_kinfo_proc_only(p, kp);
	fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
	fill_kinfo_aggregate(p, kp);
}

struct pstats *
pstats_alloc(void)
{

	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
}

/*
 * Copy parts of p_stats; zero the rest of p_stats (statistics).
 */
void
pstats_fork(struct pstats *src, struct pstats *dst)
{

	bzero(&dst->pstat_startzero,
	    __rangeof(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
	    __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
}

void
pstats_free(struct pstats *ps)
{

	free(ps, M_SUBPROC);
}

/*
 * Locate a zombie process by number
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid) {
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

#define KERN_PROC_ZOMBMASK	0x3
#define KERN_PROC_NOTHREADS	0x4

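/*
 * A hedged userland sketch of the consumer side of the handlers below
 * (illustrative only; ps(1) and libkvm do the real work):
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *	struct kinfo_proc kip;
 *	size_t len = sizeof(kip);
 *
 *	if (sysctl(mib, 4, &kip, &len, NULL, 0) == 0)
 *		printf("%d %s\n", (int)kip.ki_pid, kip.ki_comm);
 */
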
/*
 * Must be called with the process locked and will return with it unlocked.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct thread *td;
	struct kinfo_proc kinfo_proc;
	int error = 0;
	struct proc *np;
	pid_t pid = p->p_pid;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS(FIRST_THREAD_IN_PROC(p) != NULL);

	fill_kinfo_proc(p, &kinfo_proc);
	if (flags & KERN_PROC_NOTHREADS)
		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
		    sizeof(kinfo_proc));
	else {
		FOREACH_THREAD_IN_PROC(p, td) {
			fill_kinfo_thread(td, &kinfo_proc, 1);
			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
			    sizeof(kinfo_proc));
			if (error)
				break;
		}
	}
	PROC_UNLOCK(p);
	if (error)
		return (error);
	if (flags & KERN_PROC_ZOMBMASK)
		np = zpfind(pid);
	else {
		if (pid == 0)
			return (0);
		np = pfind(pid);
	}
	if (np == NULL)
		return (ESRCH);
	if (np != p) {
		PROC_UNLOCK(np);
		return (ESRCH);
	}
	PROC_UNLOCK(np);
	return (0);
}

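/*
 * The trailing pfind()/zpfind() pass above re-looks up the pid after
 * the copyout, which may have slept: if the process has meanwhile
 * exited and its slot been reused, np != p and ESRCH tells the caller
 * that the record just emitted may be stale.  KERN_PROC_ZOMBMASK
 * doubles as the "doingzomb" iteration flag passed in from
 * sysctl_kern_proc() below.
 */
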
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int flags, doingzomb, oid_number;
	int error = 0;

	oid_number = oidp->oid_number;
	if (oid_number != KERN_PROC_ALL &&
	    (oid_number & KERN_PROC_INC_THREAD) == 0)
		flags = KERN_PROC_NOTHREADS;
	else {
		flags = 0;
		oid_number &= ~KERN_PROC_INC_THREAD;
	}
	if (oid_number == KERN_PROC_PID) {
		if (namelen != 1)
			return (EINVAL);
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
		p = pfind((pid_t)name[0]);
		if (!p)
			return (ESRCH);
		if ((error = p_cansee(curthread, p))) {
			PROC_UNLOCK(p);
			return (error);
		}
		error = sysctl_out_proc(p, req, flags);
		return (error);
	}

	switch (oid_number) {
	case KERN_PROC_ALL:
		if (namelen != 0)
			return (EINVAL);
		break;
	case KERN_PROC_PROC:
		if (namelen != 0 && namelen != 1)
			return (EINVAL);
		break;
	default:
		if (namelen != 1)
			return (EINVAL);
		break;
	}

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			return (error);
	}
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sx_slock(&allproc_lock);
	for (doingzomb = 0; doingzomb < 2; doingzomb++) {
		if (!doingzomb)
			p = LIST_FIRST(&allproc);
		else
			p = LIST_FIRST(&zombproc);
		for (; p != 0; p = LIST_NEXT(p, p_list)) {
			/*
			 * Skip embryonic processes.
			 */
			PROC_SLOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_SUNLOCK(p);
				continue;
			}
			PROC_SUNLOCK(p);
			PROC_LOCK(p);
			KASSERT(p->p_ucred != NULL,
			    ("process credential is NULL for non-NEW proc"));
			/*
			 * Show a user only appropriate processes.
			 */
			if (p_cansee(curthread, p)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid_number) {

			case KERN_PROC_GID:
				if (p->p_ucred->cr_gid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RGID:
				if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_SESSION:
				if (p->p_session == NULL ||
				    p->p_session->s_sid != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_TTY:
				if ((p->p_flag & P_CONTROLT) == 0 ||
				    p->p_session == NULL) {
					PROC_UNLOCK(p);
					continue;
				}
				/* XXX proctree_lock */
				SESS_LOCK(p->p_session);
				if (p->p_session->s_ttyp == NULL ||
				    tty_udev(p->p_session->s_ttyp) !=
				    (dev_t)name[0]) {
					SESS_UNLOCK(p->p_session);
					PROC_UNLOCK(p);
					continue;
				}
				SESS_UNLOCK(p->p_session);
				break;

			case KERN_PROC_UID:
				if (p->p_ucred->cr_uid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PROC:
				break;

			default:
				break;

			}

			error = sysctl_out_proc(p, req, flags | doingzomb);
			if (error) {
				sx_sunlock(&allproc_lock);
				return (error);
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (0);
}

struct pargs *
pargs_alloc(int len)
{
	struct pargs *pa;

	pa = malloc(sizeof(struct pargs) + len, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = len;
	return (pa);
}

static void
pargs_free(struct pargs *pa)
{

	free(pa, M_PARGS);
}

void
pargs_hold(struct pargs *pa)
{

	if (pa == NULL)
		return;
	refcount_acquire(&pa->ar_ref);
}

void
pargs_drop(struct pargs *pa)
{

	if (pa == NULL)
		return;
	if (refcount_release(&pa->ar_ref))
		pargs_free(pa);
}

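/*
 * pargs holders follow the usual refcount discipline; a typical
 * (hypothetical) reader pins the argument block before dropping the
 * process lock, as the handler below does:
 *
 *	PROC_LOCK(p);
 *	pa = p->p_args;
 *	pargs_hold(pa);
 *	PROC_UNLOCK(p);
 *	... use pa->ar_args ...
 *	pargs_drop(pa);
 */
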
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own process
 * title to a string of its own choice.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct pargs *newpa, *pa;
	struct proc *p;
	int error = 0;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (!p)
		return (ESRCH);

	if ((error = p_cansee(curthread, p)) != 0) {
		PROC_UNLOCK(p);
		return (error);
	}

	if (req->newptr && curproc != p) {
		PROC_UNLOCK(p);
		return (EPERM);
	}

	pa = p->p_args;
	pargs_hold(pa);
	PROC_UNLOCK(p);
	if (req->oldptr != NULL && pa != NULL)
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
	pargs_drop(pa);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
		return (ENOMEM);
	newpa = pargs_alloc(req->newlen);
	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
	if (error != 0) {
		pargs_free(newpa);
		return (error);
	}
	PROC_LOCK(p);
	pa = p->p_args;
	p->p_args = newpa;
	PROC_UNLOCK(p);
	pargs_drop(pa);
	return (0);
}

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	struct vnode *vp;
	char *retbuf, *freebuf;
	int error, vfslocked;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = req->td->td_proc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
		if ((error = p_cansee(curthread, p)) != 0) {
			PROC_UNLOCK(p);
			return (error);
		}
	}

	vp = p->p_textvp;
	if (vp == NULL) {
		if (*pidp != -1)
			PROC_UNLOCK(p);
		return (0);
	}
	vref(vp);
	if (*pidp != -1)
		PROC_UNLOCK(p);
	error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (error)
		return (error);
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	free(freebuf, M_TEMP);
	return (error);
}

static int
sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	char *sv_name;
	int *name;
	int namelen;
	int error;

	namelen = arg2;
	if (namelen != 1)
		return (EINVAL);

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	sv_name = p->p_sysent->sv_name;
	PROC_UNLOCK(p);
	return (sysctl_handle_string(oidp, sv_name, 0, req));
}

#ifdef KINFO_OVMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
#endif

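/*
 * Two flavors of the vm map sysctl follow.  KERN_PROC_OVMMAP keeps the
 * fixed-size struct kinfo_ovmentry records that FreeBSD 7 binaries
 * expect, while KERN_PROC_VMMAP (further below) emits kinfo_vmentry
 * records packed down to the length each one actually uses.
 */
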
#ifdef COMPAT_FREEBSD7
static int
sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_ovmentry *kve;
	struct vattr va;
	struct ucred *cred;
	int error, *name;
	struct vnode *vp;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vm;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &p->p_vmspace->vm_map;	/* XXXRW: More locking required? */
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		int vfslocked;

		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));
		kve->kve_structsize = sizeof(*kve);

		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(map->pmap, addr))
				kve->kve_resident++;
			addr += PAGE_SIZE;
		}

		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		kve->kve_start = (void*)entry->start;
		kve->kve_end = (void*)entry->end;
		kve->kve_offset = (off_t)entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;

		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		kve->kve_fileid = 0;
		kve->kve_fsid = 0;
		freepath = NULL;
		fullpath = "";
		if (lobj) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			case OBJT_SG:
				kve->kve_type = KVME_TYPE_SG;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_UNLOCK(obj);
			if (vp != NULL) {
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				cred = curthread->td_ucred;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY);
				if (VOP_GETATTR(vp, &va, cred) == 0) {
					kve->kve_fileid = va.va_fileid;
					kve->kve_fsid = va.va_fsid;
				}
				vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		error = SYSCTL_OUT(req, kve, sizeof(*kve));
		vm_map_lock_read(map);
		if (error)
			break;
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}
#endif	/* COMPAT_FREEBSD7 */

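/*
 * The unlock/relock dance in the walkers above and below is worth
 * spelling out: the map read lock cannot be held across SYSCTL_OUT
 * (the copyout may fault and sleep), so the lock is dropped for the
 * copyout and retaken, and if the map's timestamp changed while it was
 * unlocked, vm_map_lookup_entry() re-finds the current position so the
 * scan resumes safely after a concurrent map change.
 */
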
#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

static int
sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_vmentry *kve;
	struct vattr va;
	struct ucred *cred;
	int error, *name;
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &vm->vm_map;	/* XXXRW: More locking required? */
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		int vfslocked;

		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));

		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(map->pmap, addr))
				kve->kve_resident++;
			addr += PAGE_SIZE;
		}

		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		kve->kve_start = entry->start;
		kve->kve_end = entry->end;
		kve->kve_offset = entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;

		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		kve->kve_fileid = 0;
		kve->kve_fsid = 0;
		freepath = NULL;
		fullpath = "";
		if (lobj) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			case OBJT_SG:
				kve->kve_type = KVME_TYPE_SG;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_UNLOCK(obj);
			if (vp != NULL) {
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				cred = curthread->td_ucred;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY);
				if (VOP_GETATTR(vp, &va, cred) == 0) {
					kve->kve_fileid = va.va_fileid;
					kve->kve_fsid = va.va_fsid;
				}
				vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) +
		    strlen(kve->kve_path) + 1;
		kve->kve_structsize = roundup(kve->kve_structsize,
		    sizeof(uint64_t));
		error = SYSCTL_OUT(req, kve, kve->kve_structsize);
		vm_map_lock_read(map);
		if (error)
			break;
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}

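/*
 * Because each record above is truncated to kve_structsize bytes,
 * consumers must advance by that field rather than by
 * sizeof(struct kinfo_vmentry).  A hedged userland sketch, with error
 * handling omitted and 'buf'/'len' assumed to hold the sysctl output:
 *
 *	char *bp, *ep;
 *	struct kinfo_vmentry *kv;
 *
 *	for (bp = buf, ep = buf + len; bp < ep; bp += kv->kve_structsize) {
 *		kv = (struct kinfo_vmentry *)(void *)bp;
 *		printf("%#jx-%#jx %s\n", (uintmax_t)kv->kve_start,
 *		    (uintmax_t)kv->kve_end, kv->kve_path);
 *	}
 */
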
#if defined(STACK) || defined(DDB)
static int
sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_kstack *kkstp;
	int error, i, *name, numthreads;
	lwpid_t *lwpidarray;
	struct thread *td;
	struct stack *st;
	struct sbuf sb;
	struct proc *p;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	/* XXXRW: Not clear ESRCH is the right error during proc execve(). */
	if (p->p_flag & P_WEXIT || p->p_flag & P_INEXEC) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);

	kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);
	st = stack_create();

	lwpidarray = NULL;
	numthreads = 0;
	PROC_LOCK(p);
repeat:
	if (numthreads < p->p_numthreads) {
		if (lwpidarray != NULL) {
			free(lwpidarray, M_TEMP);
			lwpidarray = NULL;
		}
		numthreads = p->p_numthreads;
		PROC_UNLOCK(p);
		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
		    M_WAITOK | M_ZERO);
		PROC_LOCK(p);
		goto repeat;
	}
	i = 0;

	/*
	 * XXXRW: During the below loop, execve(2) and countless other sorts
	 * of changes could have taken place.  Should we check to see if the
	 * vmspace has been replaced, or the like, in order to prevent
	 * giving a snapshot that spans, say, execve(2), with some threads
	 * before and some after?  Among other things, the credentials could
	 * have changed, in which case the right to extract debug info might
	 * no longer be assured.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(i < numthreads,
		    ("sysctl_kern_proc_kstack: numthreads"));
		lwpidarray[i] = td->td_tid;
		i++;
	}
	numthreads = i;
	for (i = 0; i < numthreads; i++) {
		td = thread_find(p, lwpidarray[i]);
		if (td == NULL) {
			continue;
		}
		bzero(kkstp, sizeof(*kkstp));
		(void)sbuf_new(&sb, kkstp->kkst_trace,
		    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
		thread_lock(td);
		kkstp->kkst_tid = td->td_tid;
		if (TD_IS_SWAPPED(td))
			kkstp->kkst_state = KKST_STATE_SWAPPED;
		else if (TD_IS_RUNNING(td))
			kkstp->kkst_state = KKST_STATE_RUNNING;
		else {
			kkstp->kkst_state = KKST_STATE_STACKOK;
			stack_save_td(st, td);
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
		stack_sbuf_print(&sb, st);
		sbuf_finish(&sb);
		sbuf_delete(&sb);
		error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));
		PROC_LOCK(p);
		if (error)
			break;
	}
	_PRELE(p);
	PROC_UNLOCK(p);
	if (lwpidarray != NULL)
		free(lwpidarray, M_TEMP);
	stack_destroy(st);
	free(kkstp, M_TEMP);
	return (error);
}
#endif

/*
 * This sysctl allows a process to retrieve the full list of groups from
 * itself or another process.
 */
static int
sysctl_kern_proc_groups(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	struct ucred *cred;
	int error;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = req->td->td_proc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
		if ((error = p_cansee(curthread, p)) != 0) {
			PROC_UNLOCK(p);
			return (error);
		}
	}

	cred = crhold(p->p_ucred);
	if (*pidp != -1)
		PROC_UNLOCK(p);

	error = SYSCTL_OUT(req, cred->cr_groups,
	    cred->cr_ngroups * sizeof(gid_t));
	crfree(cred);
	return (error);
}

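/*
 * The declarations below wire each handler into the sysctl MIB under
 * kern.proc (CTL_KERN, KERN_PROC).  For example, kern.proc.pid maps to
 * sysctl_kern_proc() with oid KERN_PROC_PID, and the *_td variants OR
 * in KERN_PROC_INC_THREAD so the handler emits one record per thread.
 */
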
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT|
	CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc",
	"Return entire process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Return process table, no threads");

static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,
	sysctl_kern_proc_args, "Process argument list");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name,
	"Process syscall vector name (ABI type)");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
	sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc,
	"Return process table, no threads");

#ifdef COMPAT_FREEBSD7
static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries");
#endif

static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries");

#if defined(STACK) || defined(DDB)
static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks");
#endif

static SYSCTL_NODE(_kern_proc, KERN_PROC_GROUPS, groups, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_groups, "Process groups");