1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_compat.h" 36 #include "opt_ddb.h" 37 #include "opt_kdtrace.h" 38 #include "opt_ktrace.h" 39 #include "opt_kstack_pages.h" 40 #include "opt_stack.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/elf.h> 45 #include <sys/exec.h> 46 #include <sys/kernel.h> 47 #include <sys/limits.h> 48 #include <sys/lock.h> 49 #include <sys/loginclass.h> 50 #include <sys/malloc.h> 51 #include <sys/mman.h> 52 #include <sys/mount.h> 53 #include <sys/mutex.h> 54 #include <sys/proc.h> 55 #include <sys/ptrace.h> 56 #include <sys/refcount.h> 57 #include <sys/resourcevar.h> 58 #include <sys/sbuf.h> 59 #include <sys/sysent.h> 60 #include <sys/sched.h> 61 #include <sys/smp.h> 62 #include <sys/stack.h> 63 #include <sys/sysctl.h> 64 #include <sys/filedesc.h> 65 #include <sys/tty.h> 66 #include <sys/signalvar.h> 67 #include <sys/sdt.h> 68 #include <sys/sx.h> 69 #include <sys/user.h> 70 #include <sys/jail.h> 71 #include <sys/vnode.h> 72 #include <sys/eventhandler.h> 73 74 #ifdef DDB 75 #include <ddb/ddb.h> 76 #endif 77 78 #include <vm/vm.h> 79 #include <vm/vm_extern.h> 80 #include <vm/pmap.h> 81 #include <vm/vm_map.h> 82 #include <vm/vm_object.h> 83 #include <vm/vm_page.h> 84 #include <vm/uma.h> 85 86 #ifdef COMPAT_FREEBSD32 87 #include <compat/freebsd32/freebsd32.h> 88 #include <compat/freebsd32/freebsd32_util.h> 89 #endif 90 91 SDT_PROVIDER_DEFINE(proc); 92 SDT_PROBE_DEFINE(proc, kernel, ctor, entry, entry); 93 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 0, "struct proc *"); 94 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 1, "int"); 95 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 2, "void *"); 96 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 3, "int"); 97 SDT_PROBE_DEFINE(proc, kernel, ctor, return, return); 98 SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 0, "struct proc *"); 99 SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 1, "int"); 100 
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 2, "void *"); 101 SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 3, "int"); 102 SDT_PROBE_DEFINE(proc, kernel, dtor, entry, entry); 103 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 0, "struct proc *"); 104 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 1, "int"); 105 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 2, "void *"); 106 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 3, "struct thread *"); 107 SDT_PROBE_DEFINE(proc, kernel, dtor, return, return); 108 SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 0, "struct proc *"); 109 SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 1, "int"); 110 SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 2, "void *"); 111 SDT_PROBE_DEFINE(proc, kernel, init, entry, entry); 112 SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 0, "struct proc *"); 113 SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 1, "int"); 114 SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 2, "int"); 115 SDT_PROBE_DEFINE(proc, kernel, init, return, return); 116 SDT_PROBE_ARGTYPE(proc, kernel, init, return, 0, "struct proc *"); 117 SDT_PROBE_ARGTYPE(proc, kernel, init, return, 1, "int"); 118 SDT_PROBE_ARGTYPE(proc, kernel, init, return, 2, "int"); 119 120 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header"); 121 MALLOC_DEFINE(M_SESSION, "session", "session header"); 122 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures"); 123 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures"); 124 125 static void doenterpgrp(struct proc *, struct pgrp *); 126 static void orphanpg(struct pgrp *pg); 127 static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp); 128 static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp); 129 static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, 130 int preferthread); 131 static void pgadjustjobc(struct pgrp *pgrp, int entering); 132 static void pgdelete(struct pgrp *); 133 static int proc_ctor(void *mem, int size, void *arg, int flags); 134 static void 
proc_dtor(void *mem, int size, void *arg);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);
static void pargs_free(struct pargs *pa);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct sx allproc_lock;
struct sx proctree_lock;
struct mtx ppeers_lock;
uma_zone_t proc_zone;

int kstack_pages = KSTACK_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
    "Kernel stack size in pages");

/* Userland ABI depends on the kinfo_proc layout; catch drift at build time. */
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
#ifdef COMPAT_FREEBSD32
CTASSERT(sizeof(struct kinfo_proc32) == KINFO_PROC32_SIZE);
#endif

/*
 * Initialize global process hashing structures.
 * Sets up the allproc/proctree locks, the pid and pgrp hash tables and
 * the type-stable (UMA_ZONE_NOFREE) proc zone.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* Hash tables are sized relative to the process limit. */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}

/*
 * Prepare a proc for use.
 * Registered as the UMA zone constructor in procinit(); fires each time a
 * proc leaves the zone's free pool, bracketed by SDT ctor probes.
 */
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, ctor , entry, p, size, arg, flags, 0);
	EVENTHANDLER_INVOKE(process_ctor, p);
	SDT_PROBE(proc, kernel, ctor , return, p, size, arg, flags, 0);
	return (0);
}

/*
 * Reclaim a proc after use.
 * Registered as the UMA zone destructor; runs when a proc returns to the
 * zone's free pool (the struct itself survives, the zone is NOFREE).
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	td = FIRST_THREAD_IN_PROC(p);
	SDT_PROBE(proc, kernel, dtor, entry, p, size, arg, td, 0);
	if (td != NULL) {
#ifdef INVARIANTS
		/* Exactly one thread may remain at destruction time. */
		KASSERT((p->p_numthreads == 1),
		    ("bad number of threads in exiting process"));
		KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
		/* Free all OSD associated to this thread. */
		osd_thread_exit(td);
	}
	EVENTHANDLER_INVOKE(process_dtor, p);
	/* The cached SIGCHLD ksiginfo must not still be queued anywhere. */
	if (p->p_ksi != NULL)
		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
	SDT_PROBE(proc, kernel, dtor, return, p, size, arg, 0, 0);
}

/*
 * Initialize type-stable parts of a proc (when newly created).
 * Registered as the UMA zone init hook: runs once per slab item, so the
 * locks and lists set up here persist across ctor/dtor cycles.
 */
static int
proc_init(void *mem, int size, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, init, entry, p, size, flags, 0, 0);
	/* Scheduler-private state lives immediately after the proc. */
	p->p_sched = (struct p_sched *)&p[1];
	bzero(&p->p_mtx, sizeof(struct mtx));
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	cv_init(&p->p_pwait, "ppwait");
	cv_init(&p->p_dbgwait, "dbgwait");
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	EVENTHANDLER_INVOKE(process_init, p);
	p->p_stats = pstats_alloc();
	SDT_PROBE(proc, kernel, init, return, p, size, flags, 0, 0);
	return (0);
}

/*
 * UMA should ensure that this function is never called.
 * Freeing a proc structure would violate type stability.
251 */ 252 static void 253 proc_fini(void *mem, int size) 254 { 255 #ifdef notnow 256 struct proc *p; 257 258 p = (struct proc *)mem; 259 EVENTHANDLER_INVOKE(process_fini, p); 260 pstats_free(p->p_stats); 261 thread_free(FIRST_THREAD_IN_PROC(p)); 262 mtx_destroy(&p->p_mtx); 263 if (p->p_ksi != NULL) 264 ksiginfo_free(p->p_ksi); 265 #else 266 panic("proc reclaimed"); 267 #endif 268 } 269 270 /* 271 * Is p an inferior of the current process? 272 */ 273 int 274 inferior(p) 275 register struct proc *p; 276 { 277 278 sx_assert(&proctree_lock, SX_LOCKED); 279 for (; p != curproc; p = p->p_pptr) 280 if (p->p_pid == 0) 281 return (0); 282 return (1); 283 } 284 285 /* 286 * Locate a process by number; return only "live" processes -- i.e., neither 287 * zombies nor newly born but incompletely initialized processes. By not 288 * returning processes in the PRS_NEW state, we allow callers to avoid 289 * testing for that condition to avoid dereferencing p_ucred, et al. 290 */ 291 struct proc * 292 pfind(pid) 293 register pid_t pid; 294 { 295 register struct proc *p; 296 297 sx_slock(&allproc_lock); 298 LIST_FOREACH(p, PIDHASH(pid), p_hash) 299 if (p->p_pid == pid) { 300 PROC_LOCK(p); 301 if (p->p_state == PRS_NEW) { 302 PROC_UNLOCK(p); 303 p = NULL; 304 } 305 break; 306 } 307 sx_sunlock(&allproc_lock); 308 return (p); 309 } 310 311 /* 312 * Locate a process group by number. 313 * The caller must hold proctree_lock. 314 */ 315 struct pgrp * 316 pgfind(pgid) 317 register pid_t pgid; 318 { 319 register struct pgrp *pgrp; 320 321 sx_assert(&proctree_lock, SX_LOCKED); 322 323 LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) { 324 if (pgrp->pg_id == pgid) { 325 PGRP_LOCK(pgrp); 326 return (pgrp); 327 } 328 } 329 return (NULL); 330 } 331 332 /* 333 * Create a new process group. 334 * pgid must be equal to the pid of p. 335 * Begin a new session if required. 
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));

	/* The caller supplies pgid == p->p_pid, so it must be unused. */
	pgrp2 = pgfind(pgid);

	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		refcount_init(&sess->s_count, 1);
		sess->s_ttyvp = NULL;
		sess->s_ttydp = NULL;
		sess->s_ttyp = NULL;
		/* Inherit the login name from the old session. */
		bcopy(p->p_session->s_login, sess->s_login,
		    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		pgrp->pg_session = p->p_session;
		sess_hold(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to an existing process group.
 * The target pgrp must belong to p's session and differ from p's
 * current group; the shared move logic lives in doenterpgrp().
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
	    ("%s: pgrp's session %p, p->p_session %p.\n",
	    __func__,
	    pgrp->pg_session,
	    p->p_session));
	KASSERT(pgrp != p->p_pgrp,
	    ("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to a process group
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	/* Lock order: new pgrp, old pgrp, then the process itself. */
	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	/* Reap the old group if p was its last member. */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}

/*
 * remove process from process group
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	/* Reap the group if p was its last member. */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}

/*
 * delete a process group
 */
static void
pgdelete(pgrp)
	register struct pgrp *pgrp;
{
	struct session *savesess;
	struct tty *tp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	PGRP_LOCK(pgrp);
	tp = pgrp->pg_session->s_ttyp;
	LIST_REMOVE(pgrp, pg_hash);
	savesess = pgrp->pg_session;
	PGRP_UNLOCK(pgrp);

	/*
	 * Remove the reference to the pgrp before deallocating it.
	 */
	if (tp != NULL) {
		tty_lock(tp);
		tty_rel_pgrp(tp, pgrp);
	}

	mtx_destroy(&pgrp->pg_mtx);
	free(pgrp, M_PGRP);
	/* Drop the session reference the group held. */
	sess_release(savesess);
}

/*
 * Bump or drop a group's job-control eligibility count; a drop to zero
 * means the group just became orphaned.
 */
static void
pgadjustjobc(pgrp, entering)
	struct pgrp *pgrp;
	int entering;
{

	PGRP_LOCK(pgrp);
	if (entering)
		pgrp->pg_jobc++;
	else {
		--pgrp->pg_jobc;
		if (pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}
	PGRP_UNLOCK(pgrp);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		hispgrp = p->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		PROC_LOCK(p);
		/* Zombies no longer qualify a group for job control. */
		if (p->p_state == PRS_ZOMBIE) {
			PROC_UNLOCK(p);
			continue;
		}
		PROC_UNLOCK(p);
		pgadjustjobc(hispgrp, entering);
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang-up all process in that group.
 */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	register struct proc *p;

	PGRP_LOCK_ASSERT(pg, MA_OWNED);

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		PROC_LOCK(p);
		if (P_SHOULDSTOP(p)) {
			PROC_UNLOCK(p);
			/*
			 * One stopped member is enough: restart the scan
			 * (reusing p) and signal every member.
			 */
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				PROC_LOCK(p);
				kern_psignal(p, SIGHUP);
				kern_psignal(p, SIGCONT);
				PROC_UNLOCK(p);
			}
			return;
		}
		PROC_UNLOCK(p);
	}
}

/* Take a reference on a session. */
void
sess_hold(struct session *s)
{

	refcount_acquire(&s->s_count);
}

/*
 * Drop a session reference; the last release detaches the tty and
 * frees the session.
 */
void
sess_release(struct session *s)
{

	if (refcount_release(&s->s_count)) {
		if (s->s_ttyp != NULL) {
			tty_lock(s->s_ttyp);
			tty_rel_sess(s->s_ttyp, s);
		}
		mtx_destroy(&s->s_mtx);
		free(s, M_SESSION);
	}
}

/* NOTE(review): duplicate includes -- opt_ddb.h/ddb.h already appear at the
 * top of the file; harmless but could be dropped. */
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/* DDB command: dump every non-empty pgrp hash chain and its members. */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void
					    *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Calculate the kinfo_proc members which contain process-wide
 * informations.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/* Sum CPU usage over every thread in the process. */
	kp->ki_estcpu = 0;
	kp->ki_pctcpu = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		kp->ki_pctcpu += sched_pctcpu(td);
		kp->ki_estcpu += td->td_estcpu;
		thread_unlock(td);
	}
}

/*
 * Clear kinfo_proc and fill in any information that is common
 * to all threads in the process.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td0;
	struct tty *tp;
	struct session *sp;
	struct ucred *cred;
	struct sigacts *ps;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	kp->ki_addr =/* p->p_addr; */0; /* XXX */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	kp->ki_traceflag = p->p_traceflag;
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	kp->ki_flag = p->p_flag;
	cred = p->p_ucred;
	if (cred) {
		kp->ki_uid = cred->cr_uid;
		kp->ki_ruid = cred->cr_ruid;
		kp->ki_svuid = cred->cr_svuid;
		kp->ki_cr_flags = 0;
		if (cred->cr_flags & CRED_FLAG_CAPMODE)
			kp->ki_cr_flags |= KI_CRF_CAPABILITY_MODE;
		/* XXX bde doesn't like KI_NGROUPS */
		if (cred->cr_ngroups > KI_NGROUPS) {
			/* Truncate and flag the overflow for userland. */
			kp->ki_ngroups = KI_NGROUPS;
			kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
		} else
			kp->ki_ngroups = cred->cr_ngroups;
		bcopy(cred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = cred->cr_rgid;
		kp->ki_svgid = cred->cr_svgid;
		/* If jailed(cred), emulate the old P_JAILED flag. */
		if (jailed(cred)) {
			kp->ki_flag |= P_JAILED;
			/* If inside the jail, use 0 as a jail ID. */
			if (cred->cr_prison != curthread->td_ucred->cr_prison)
				kp->ki_jid = cred->cr_prison->pr_id;
		}
		strlcpy(kp->ki_loginclass, cred->cr_loginclass->lc_name,
		    sizeof(kp->ki_loginclass));
	}
	ps = p->p_sigacts;
	if (ps) {
		mtx_lock(&ps->ps_mtx);
		kp->ki_sigignore = ps->ps_sigignore;
		kp->ki_sigcatch = ps->ps_sigcatch;
		mtx_unlock(&ps->ps_mtx);
	}
	/* VM statistics are only meaningful for fully-formed, live procs. */
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	} else if (p->p_state == PRS_ZOMBIE)
		kp->ki_stat = SZOMB;
	if (kp->ki_flag & P_INMEM)
		kp->ki_sflag = PS_INMEM;
	else
		kp->ki_sflag = 0;
	/* Calculate legacy swtime as seconds since 'swtick'. */
	kp->ki_swtime = (ticks - p->p_swtick) / hz;
	kp->ki_pid = p->p_pid;
	kp->ki_nice = p->p_nice;
	kp->ki_start = p->p_stats->p_start;
	timevaladd(&kp->ki_start, &boottime);
	/* Resource usage snapshot requires the process spin lock. */
	PROC_SLOCK(p);
	rufetch(p, &kp->ki_rusage);
	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
	calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
	PROC_SUNLOCK(p);
	calccru(p, &kp->ki_childutime, &kp->ki_childstime);
	/* Some callers want child times in a single value. */
	kp->ki_childtime = kp->ki_childstime;
	timevaladd(&kp->ki_childtime, &kp->ki_childutime);

	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			/* XXX proctree_lock */
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = tty_udev(tp);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NODEV;
	if (p->p_comm[0] != '\0')
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
	    p->p_sysent->sv_name[0] != '\0')
		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
	kp->ki_siglist = p->p_siglist;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}

/*
 * Fill in information that is thread specific.  Must be called with
 * target process locked.  If 'preferthread' is set, overwrite certain
 * process-related fields that are maintained for both threads and
 * processes.
 */
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
	struct proc *p;

	p = td->td_proc;
	kp->ki_tdaddr = td;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (preferthread)
		PROC_SLOCK(p);
	thread_lock(td);
	if (td->td_wmesg != NULL)
		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
	else
		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
	strlcpy(kp->ki_tdname, td->td_name, sizeof(kp->ki_tdname));
	if (TD_ON_LOCK(td)) {
		kp->ki_kiflag |= KI_LOCKBLOCK;
		strlcpy(kp->ki_lockname, td->td_lockname,
		    sizeof(kp->ki_lockname));
	} else {
		kp->ki_kiflag &= ~KI_LOCKBLOCK;
		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
	}

	/* Map the thread/process state onto the legacy ki_stat codes. */
	if (p->p_state == PRS_NORMAL) { /* approximate. */
		if (TD_ON_RUNQ(td) ||
		    TD_CAN_RUN(td) ||
		    TD_IS_RUNNING(td)) {
			kp->ki_stat = SRUN;
		} else if (P_SHOULDSTOP(p)) {
			kp->ki_stat = SSTOP;
		} else if (TD_IS_SLEEPING(td)) {
			kp->ki_stat = SSLEEP;
		} else if (TD_ON_LOCK(td)) {
			kp->ki_stat = SLOCK;
		} else {
			kp->ki_stat = SWAIT;
		}
	} else if (p->p_state == PRS_ZOMBIE) {
		kp->ki_stat = SZOMB;
	} else {
		kp->ki_stat = SIDL;
	}

	/* Things in the thread */
	kp->ki_wchan = td->td_wchan;
	kp->ki_pri.pri_level = td->td_priority;
	kp->ki_pri.pri_native = td->td_base_pri;
	kp->ki_lastcpu = td->td_lastcpu;
	kp->ki_oncpu = td->td_oncpu;
	kp->ki_tdflags = td->td_flags;
	kp->ki_tid = td->td_tid;
	kp->ki_numthreads = p->p_numthreads;
	kp->ki_pcb = td->td_pcb;
	kp->ki_kstack = (void *)td->td_kstack;
	kp->ki_slptime = (ticks - td->td_slptick) / hz;
	kp->ki_pri.pri_class = td->td_pri_class;
	kp->ki_pri.pri_user = td->td_user_pri;

	if (preferthread) {
		/* Per-thread usage overrides the process-wide numbers. */
		rufetchtd(td, &kp->ki_rusage);
		kp->ki_runtime = cputick2usec(td->td_rux.rux_runtime);
		kp->ki_pctcpu = sched_pctcpu(td);
		kp->ki_estcpu = td->td_estcpu;
	}

	/* We can't get
	   this anymore but ps etc never used it anyway. */
	kp->ki_rqindex = 0;

	if (preferthread)
		kp->ki_siglist = td->td_siglist;
	kp->ki_sigmask = td->td_sigmask;
	thread_unlock(td);
	if (preferthread)
		PROC_SUNLOCK(p);
}

/*
 * Fill in a kinfo_proc structure for the specified process.
 * Must be called with the target process locked.
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{

	MPASS(FIRST_THREAD_IN_PROC(p) != NULL);

	fill_kinfo_proc_only(p, kp);
	fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
	fill_kinfo_aggregate(p, kp);
}

/* Allocate a zeroed pstats block; may sleep. */
struct pstats *
pstats_alloc(void)
{

	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
}

/*
 * Copy parts of p_stats; zero the rest of p_stats (statistics).
 */
void
pstats_fork(struct pstats *src, struct pstats *dst)
{

	bzero(&dst->pstat_startzero,
	    __rangeof(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
	    __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
}

void
pstats_free(struct pstats *ps)
{

	free(ps, M_SUBPROC);
}

/*
 * Locate a zombie process by number; returned locked if found.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid) {
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

/* Internal flags for sysctl_out_proc(). */
#define KERN_PROC_ZOMBMASK	0x3
#define KERN_PROC_NOTHREADS	0x4

#ifdef COMPAT_FREEBSD32

/*
 * This function is typically used to copy out the kernel address, so
 * it can be replaced by assignment of zero.
 */
static inline uint32_t
ptr32_trim(void *ptr)
{
	uintptr_t uptr;

	uptr = (uintptr_t)ptr;
	return ((uptr > UINT_MAX) ?
	    0 : uptr);
}

/* Copy a pointer field from src to dst, truncated to 32 bits. */
#define PTRTRIM_CP(src,dst,fld) \
	do { (dst).fld = ptr32_trim((src).fld); } while (0)

/*
 * Convert a native kinfo_proc into the 32-bit compat layout for
 * 32-bit consumers of the kern.proc sysctls.
 */
static void
freebsd32_kinfo_proc_out(const struct kinfo_proc *ki, struct kinfo_proc32 *ki32)
{
	int i;

	bzero(ki32, sizeof(struct kinfo_proc32));
	ki32->ki_structsize = sizeof(struct kinfo_proc32);
	CP(*ki, *ki32, ki_layout);
	PTRTRIM_CP(*ki, *ki32, ki_args);
	PTRTRIM_CP(*ki, *ki32, ki_paddr);
	PTRTRIM_CP(*ki, *ki32, ki_addr);
	PTRTRIM_CP(*ki, *ki32, ki_tracep);
	PTRTRIM_CP(*ki, *ki32, ki_textvp);
	PTRTRIM_CP(*ki, *ki32, ki_fd);
	PTRTRIM_CP(*ki, *ki32, ki_vmspace);
	PTRTRIM_CP(*ki, *ki32, ki_wchan);
	CP(*ki, *ki32, ki_pid);
	CP(*ki, *ki32, ki_ppid);
	CP(*ki, *ki32, ki_pgid);
	CP(*ki, *ki32, ki_tpgid);
	CP(*ki, *ki32, ki_sid);
	CP(*ki, *ki32, ki_tsid);
	CP(*ki, *ki32, ki_jobc);
	CP(*ki, *ki32, ki_tdev);
	CP(*ki, *ki32, ki_siglist);
	CP(*ki, *ki32, ki_sigmask);
	CP(*ki, *ki32, ki_sigignore);
	CP(*ki, *ki32, ki_sigcatch);
	CP(*ki, *ki32, ki_uid);
	CP(*ki, *ki32, ki_ruid);
	CP(*ki, *ki32, ki_svuid);
	CP(*ki, *ki32, ki_rgid);
	CP(*ki, *ki32, ki_svgid);
	CP(*ki, *ki32, ki_ngroups);
	for (i = 0; i < KI_NGROUPS; i++)
		CP(*ki, *ki32, ki_groups[i]);
	CP(*ki, *ki32, ki_size);
	CP(*ki, *ki32, ki_rssize);
	CP(*ki, *ki32, ki_swrss);
	CP(*ki, *ki32, ki_tsize);
	CP(*ki, *ki32, ki_dsize);
	CP(*ki, *ki32, ki_ssize);
	CP(*ki, *ki32, ki_xstat);
	CP(*ki, *ki32, ki_acflag);
	CP(*ki, *ki32, ki_pctcpu);
	CP(*ki, *ki32, ki_estcpu);
	CP(*ki, *ki32, ki_slptime);
	CP(*ki, *ki32, ki_swtime);
	CP(*ki, *ki32, ki_runtime);
	TV_CP(*ki, *ki32, ki_start);
	TV_CP(*ki, *ki32, ki_childtime);
	CP(*ki, *ki32, ki_flag);
	CP(*ki, *ki32, ki_kiflag);
	CP(*ki, *ki32, ki_traceflag);
	CP(*ki, *ki32, ki_stat);
	CP(*ki, *ki32, ki_nice);
	CP(*ki, *ki32, ki_lock);
	CP(*ki, *ki32, ki_rqindex);
	CP(*ki, *ki32, ki_oncpu);
	CP(*ki, *ki32, ki_lastcpu);
	bcopy(ki->ki_tdname, ki32->ki_tdname, TDNAMLEN + 1);
	bcopy(ki->ki_wmesg, ki32->ki_wmesg, WMESGLEN + 1);
	bcopy(ki->ki_login, ki32->ki_login, LOGNAMELEN + 1);
	bcopy(ki->ki_lockname, ki32->ki_lockname, LOCKNAMELEN + 1);
	bcopy(ki->ki_comm, ki32->ki_comm, COMMLEN + 1);
	bcopy(ki->ki_emul, ki32->ki_emul, KI_EMULNAMELEN + 1);
	bcopy(ki->ki_loginclass, ki32->ki_loginclass, LOGINCLASSLEN + 1);
	CP(*ki, *ki32, ki_cr_flags);
	CP(*ki, *ki32, ki_jid);
	CP(*ki, *ki32, ki_numthreads);
	CP(*ki, *ki32, ki_tid);
	CP(*ki, *ki32, ki_pri);
	freebsd32_rusage_out(&ki->ki_rusage, &ki32->ki_rusage);
	freebsd32_rusage_out(&ki->ki_rusage_ch, &ki32->ki_rusage_ch);
	PTRTRIM_CP(*ki, *ki32, ki_pcb);
	PTRTRIM_CP(*ki, *ki32, ki_kstack);
	PTRTRIM_CP(*ki, *ki32, ki_udata);
	CP(*ki, *ki32, ki_sflag);
	CP(*ki, *ki32, ki_tdflags);
}

/*
 * Copy a kinfo_proc out to the sysctl request buffer, converting to
 * the 32-bit layout when the request comes from a 32-bit process.
 */
static int
sysctl_out_proc_copyout(struct kinfo_proc *ki, struct sysctl_req *req)
{
	struct kinfo_proc32 ki32;
	int error;

	if (req->flags & SCTL_MASK32) {
		freebsd32_kinfo_proc_out(ki, &ki32);
		error = SYSCTL_OUT(req, &ki32, sizeof(struct kinfo_proc32));
	} else
		error = SYSCTL_OUT(req, ki, sizeof(struct kinfo_proc));
	return (error);
}
#else
/* Without COMPAT_FREEBSD32 there is only the native layout. */
static int
sysctl_out_proc_copyout(struct kinfo_proc *ki, struct sysctl_req *req)
{

	return (SYSCTL_OUT(req, ki, sizeof(struct kinfo_proc)));
}
#endif

/*
 * Must be called with the process locked and will return with it unlocked.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct thread *td;
	struct kinfo_proc kinfo_proc;
	int error = 0;
	struct proc *np;
	pid_t pid = p->p_pid;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS(FIRST_THREAD_IN_PROC(p) != NULL);

	fill_kinfo_proc(p, &kinfo_proc);
	if (flags & KERN_PROC_NOTHREADS)
		/* One record for the whole process. */
		error = sysctl_out_proc_copyout(&kinfo_proc, req);
	else {
		/* One record per thread, re-filling the thread fields. */
		FOREACH_THREAD_IN_PROC(p, td) {
			fill_kinfo_thread(td, &kinfo_proc, 1);
			error = sysctl_out_proc_copyout(&kinfo_proc, req);
			if (error)
				break;
		}
	}
	PROC_UNLOCK(p);
	if (error)
		return (error);
	/*
	 * The copyout may have slept (and dropped the lock); re-look the
	 * pid up to verify the process still exists and is the same one.
	 */
	if (flags & KERN_PROC_ZOMBMASK)
		np = zpfind(pid);
	else {
		if (pid == 0)
			return (0);
		np = pfind(pid);
	}
	if (np == NULL)
		return (ESRCH);
	if (np != p) {
		PROC_UNLOCK(np);
		return (ESRCH);
	}
	PROC_UNLOCK(np);
	return (0);
}

/*
 * Handler for the kern.proc.* sysctls: returns kinfo_proc records for
 * all processes, a single pid, or a filtered subset (by gid, pgrp,
 * rgid, session, tty, uid, ruid), optionally one record per thread.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int flags, doingzomb, oid_number;
	int error = 0;

	oid_number = oidp->oid_number;
	if (oid_number != KERN_PROC_ALL &&
	    (oid_number & KERN_PROC_INC_THREAD) == 0)
		flags = KERN_PROC_NOTHREADS;
	else {
		flags = 0;
		oid_number &= ~KERN_PROC_INC_THREAD;
	}
	if (oid_number == KERN_PROC_PID) {
		if (namelen != 1)
			return (EINVAL);
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
		p = pfind((pid_t)name[0]);	/* returns p locked */
		if (!p)
			return (ESRCH);
		if ((error = p_cansee(curthread, p))) {
			PROC_UNLOCK(p);
			return (error);
		}
		/* sysctl_out_proc() unlocks p. */
		error = sysctl_out_proc(p, req, flags);
		return (error);
	}

	/* Validate the name length for the remaining oid variants. */
	switch (oid_number) {
	case KERN_PROC_ALL:
		if (namelen != 0)
			return (EINVAL);
		break;
	case KERN_PROC_PROC:
		if (namelen != 0 && namelen != 1)
			return (EINVAL);
		break;
	default:
		if (namelen != 1)
			return (EINVAL);
		break;
	}

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			return (error);
	}
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sx_slock(&allproc_lock);
	/* Pass 0 walks live processes, pass 1 walks zombies. */
	for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
		if (!doingzomb)
			p = LIST_FIRST(&allproc);
		else
			p = LIST_FIRST(&zombproc);
		for (; p != 0; p = LIST_NEXT(p, p_list)) {
			/*
			 * Skip embryonic processes.
			 */
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_UNLOCK(p);
				continue;
			}
			KASSERT(p->p_ucred != NULL,
			    ("process credential is NULL for non-NEW proc"));
			/*
			 * Show a user only appropriate processes.
			 */
			if (p_cansee(curthread, p)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid_number) {

			case KERN_PROC_GID:
				if (p->p_ucred->cr_gid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RGID:
				if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_SESSION:
				if (p->p_session == NULL ||
				    p->p_session->s_sid != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_TTY:
				if ((p->p_flag & P_CONTROLT) == 0 ||
				    p->p_session == NULL) {
					PROC_UNLOCK(p);
					continue;
				}
				/* XXX proctree_lock */
				SESS_LOCK(p->p_session);
				if (p->p_session->s_ttyp == NULL ||
				    tty_udev(p->p_session->s_ttyp) !=
				    (dev_t)name[0]) {
					SESS_UNLOCK(p->p_session);
					PROC_UNLOCK(p);
					continue;
				}
				SESS_UNLOCK(p->p_session);
				break;

			case KERN_PROC_UID:
				if (p->p_ucred->cr_uid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PROC:
				break;

			default:
				break;

			}

			/*
			 * NOTE(review): this relies on doingzomb (0/1)
			 * coinciding with KERN_PROC_ZOMBMASK on pass 1 —
			 * confirm against the flag's definition.
			 */
			error = sysctl_out_proc(p, req, flags | doingzomb);
			if (error) {
				sx_sunlock(&allproc_lock);
				return (error);
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (0);
}

/*
 * Allocate a new process-arguments structure with room for len bytes of
 * argument data, reference count initialized to 1.
 */
struct pargs *
pargs_alloc(int len)
{
	struct pargs *pa;

	pa = malloc(sizeof(struct pargs) + len, M_PARGS,
	    M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = len;
	return (pa);
}

/* Release the storage of a pargs structure (refcount already zero). */
static void
pargs_free(struct pargs *pa)
{

	free(pa,
	    M_PARGS);
}

/* Acquire a reference on a pargs structure; NULL is tolerated. */
void
pargs_hold(struct pargs *pa)
{

	if (pa == NULL)
		return;
	refcount_acquire(&pa->ar_ref);
}

/* Drop a reference; the structure is freed on the last release. */
void
pargs_drop(struct pargs *pa)
{

	if (pa == NULL)
		return;
	if (refcount_release(&pa->ar_ref))
		pargs_free(pa);
}

/*
 * Read len bytes from the target process p at user address offset into
 * the kernel buffer buf, via proc_rwmem().  Returns 0 or an errno.
 */
static int
proc_read_mem(struct thread *td, struct proc *p, vm_offset_t offset, void* buf,
    size_t len)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = (caddr_t)buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = (ssize_t)len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	return (proc_rwmem(p, &uio));
}

/*
 * Read up to len bytes of a NUL-terminated string at sptr in process p
 * into buf.  buf may hold a full unterminated chunk if the string is
 * longer than len; callers bound their reads accordingly.
 */
static int
proc_read_string(struct thread *td, struct proc *p, const char *sptr, char *buf,
    size_t len)
{
	size_t i;
	int error;

	error = proc_read_mem(td, p, (vm_offset_t)sptr, buf, len);
	/*
	 * Reading the chunk may validly return EFAULT if the string is shorter
	 * than the chunk and is aligned at the end of the page, assuming the
	 * next page is not mapped.  So if EFAULT is returned do a fallback to
	 * one byte read loop.
	 */
	if (error == EFAULT) {
		for (i = 0; i < len; i++, buf++, sptr++) {
			error = proc_read_mem(td, p, (vm_offset_t)sptr, buf, 1);
			if (error != 0)
				return (error);
			if (*buf == '\0')
				break;
		}
		error = 0;
	}
	return (error);
}

#define PROC_AUXV_MAX	256	/* Safety limit on auxv size.
 */

enum proc_vector_type {
	PROC_ARG,	/* argv pointer array */
	PROC_ENV,	/* environment pointer array */
	PROC_AUX,	/* ELF auxiliary vector */
};

#ifdef COMPAT_FREEBSD32
/*
 * 32-bit compat variant of get_proc_vector(): reads the requested vector
 * (argv, envv, or auxv) from a 32-bit target process.  For PROC_ARG and
 * PROC_ENV the 32-bit pointers are widened into a freshly allocated
 * char ** array; for PROC_AUX the raw Elf32_Auxinfo array is returned
 * as-is (cast to char **) and the caller must interpret it accordingly.
 * On success the caller owns *proc_vectorp and must free it with
 * free(.., M_TEMP).
 */
static int
get_proc_vector32(struct thread *td, struct proc *p, char ***proc_vectorp,
    size_t *vsizep, enum proc_vector_type type)
{
	struct freebsd32_ps_strings pss;
	Elf32_Auxinfo aux;
	vm_offset_t vptr, ptr;
	uint32_t *proc_vector32;
	char **proc_vector;
	size_t vsize, size;
	int i, error;

	error = proc_read_mem(td, p, (vm_offset_t)(p->p_sysent->sv_psstrings),
	    &pss, sizeof(pss));
	if (error != 0)
		return (error);
	switch (type) {
	case PROC_ARG:
		vptr = (vm_offset_t)PTRIN(pss.ps_argvstr);
		vsize = pss.ps_nargvstr;
		/* ps_strings lives in user memory: sanity-bound the count. */
		if (vsize > ARG_MAX)
			return (ENOEXEC);
		size = vsize * sizeof(int32_t);
		break;
	case PROC_ENV:
		vptr = (vm_offset_t)PTRIN(pss.ps_envstr);
		vsize = pss.ps_nenvstr;
		if (vsize > ARG_MAX)
			return (ENOEXEC);
		size = vsize * sizeof(int32_t);
		break;
	case PROC_AUX:
		/* The auxv sits just above the env array on the stack. */
		vptr = (vm_offset_t)PTRIN(pss.ps_envstr) +
		    (pss.ps_nenvstr + 1) * sizeof(int32_t);
		if (vptr % 4 != 0)
			return (ENOEXEC);
		/* Walk until AT_NULL to learn the vector size. */
		for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) {
			error = proc_read_mem(td, p, ptr, &aux, sizeof(aux));
			if (error != 0)
				return (error);
			if (aux.a_type == AT_NULL)
				break;
			ptr += sizeof(aux);
		}
		if (aux.a_type != AT_NULL)
			return (ENOEXEC);
		vsize = i + 1;
		size = vsize * sizeof(aux);
		break;
	default:
		KASSERT(0, ("Wrong proc vector type: %d", type));
		return (EINVAL);
	}
	proc_vector32 = malloc(size, M_TEMP, M_WAITOK);
	error = proc_read_mem(td, p, vptr, proc_vector32, size);
	if (error != 0)
		goto done;
	if (type == PROC_AUX) {
		/* Ownership of the raw auxv buffer passes to the caller. */
		*proc_vectorp = (char **)proc_vector32;
		*vsizep = vsize;
		return (0);
	}
	/* Widen the 32-bit user pointers to native pointers. */
	proc_vector = malloc(vsize * sizeof(char *), M_TEMP, M_WAITOK);
	for (i = 0; i < (int)vsize; i++)
		proc_vector[i] = PTRIN(proc_vector32[i]);
	*proc_vectorp = proc_vector;
	*vsizep = vsize;
done:
	free(proc_vector32, M_TEMP);
	return (error);
}
#endif

/*
 * Read the argv, envv, or auxv pointer vector of process p into a
 * freshly allocated array.  Delegates to get_proc_vector32() for 32-bit
 * targets.  On success the caller owns *proc_vectorp (free with M_TEMP).
 */
static int
get_proc_vector(struct thread *td, struct proc *p, char ***proc_vectorp,
    size_t *vsizep, enum proc_vector_type type)
{
	struct ps_strings pss;
	Elf_Auxinfo aux;
	vm_offset_t vptr, ptr;
	char **proc_vector;
	size_t vsize, size;
	int error, i;

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(p, SV_ILP32) != 0)
		return (get_proc_vector32(td, p, proc_vectorp, vsizep, type));
#endif
	error = proc_read_mem(td, p, (vm_offset_t)(p->p_sysent->sv_psstrings),
	    &pss, sizeof(pss));
	if (error != 0)
		return (error);
	switch (type) {
	case PROC_ARG:
		vptr = (vm_offset_t)pss.ps_argvstr;
		vsize = pss.ps_nargvstr;
		if (vsize > ARG_MAX)
			return (ENOEXEC);
		size = vsize * sizeof(char *);
		break;
	case PROC_ENV:
		vptr = (vm_offset_t)pss.ps_envstr;
		vsize = pss.ps_nenvstr;
		if (vsize > ARG_MAX)
			return (ENOEXEC);
		size = vsize * sizeof(char *);
		break;
	case PROC_AUX:
		/*
		 * The aux array is just above env array on the stack. Check
		 * that the address is naturally aligned.
		 */
		vptr = (vm_offset_t)pss.ps_envstr + (pss.ps_nenvstr + 1)
		    * sizeof(char *);
#if __ELF_WORD_SIZE == 64
		if (vptr % sizeof(uint64_t) != 0)
#else
		if (vptr % sizeof(uint32_t) != 0)
#endif
			return (ENOEXEC);
		/*
		 * We count the array size reading the aux vectors from the
		 * stack until AT_NULL vector is returned.  So (to keep the
		 * code simple) we read the process stack twice: the first time
		 * here to find the size and the second time when copying the
		 * vectors to the allocated proc_vector.
		 */
		for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) {
			error = proc_read_mem(td, p, ptr, &aux, sizeof(aux));
			if (error != 0)
				return (error);
			if (aux.a_type == AT_NULL)
				break;
			ptr += sizeof(aux);
		}
		/*
		 * If the PROC_AUXV_MAX entries are iterated over, and we have
		 * not reached AT_NULL, it is most likely we are reading wrong
		 * data: either the process doesn't have auxv array or data has
		 * been modified. Return the error in this case.
		 */
		if (aux.a_type != AT_NULL)
			return (ENOEXEC);
		vsize = i + 1;
		size = vsize * sizeof(aux);
		break;
	default:
		KASSERT(0, ("Wrong proc vector type: %d", type));
		return (EINVAL); /* In case we are built without INVARIANTS. */
	}
	proc_vector = malloc(size, M_TEMP, M_WAITOK);
	if (proc_vector == NULL)
		return (ENOMEM);
	error = proc_read_mem(td, p, vptr, proc_vector, size);
	if (error != 0) {
		free(proc_vector, M_TEMP);
		return (error);
	}
	*proc_vectorp = proc_vector;
	*vsizep = vsize;

	return (0);
}

#define GET_PS_STRINGS_CHUNK_SZ	256	/* Chunk size (bytes) for ps_strings operations. */

/*
 * Copy up to nchr characters of the argv or envv strings of process p
 * into sb, reading the target address space in fixed-size chunks.  Each
 * string is NUL-terminated in the output.  The process must be held.
 */
static int
get_ps_strings(struct thread *td, struct proc *p, struct sbuf *sb,
    enum proc_vector_type type, size_t nchr)
{
	size_t done, len, vsize;
	int error, i;
	char **proc_vector, *sptr;
	char pss_string[GET_PS_STRINGS_CHUNK_SZ];

	PROC_ASSERT_HELD(p);

	/*
	 * We are not going to read more than 2 * (PATH_MAX + ARG_MAX) bytes.
	 */
	if (nchr > 2 * (PATH_MAX + ARG_MAX))
		nchr = 2 * (PATH_MAX + ARG_MAX);

	error = get_proc_vector(td, p, &proc_vector, &vsize, type);
	if (error != 0)
		return (error);
	for (done = 0, i = 0; i < (int)vsize && done < nchr; i++) {
		/*
		 * The program may have scribbled into its argv array, e.g. to
		 * remove some arguments.  If that has happened, break out
		 * before trying to read from NULL.
		 */
		if (proc_vector[i] == NULL)
			break;
		for (sptr = proc_vector[i]; ; sptr += GET_PS_STRINGS_CHUNK_SZ) {
			error = proc_read_string(td, p, sptr, pss_string,
			    sizeof(pss_string));
			if (error != 0)
				goto done;
			len = strnlen(pss_string, GET_PS_STRINGS_CHUNK_SZ);
			if (done + len >= nchr)
				len = nchr - done - 1;
			sbuf_bcat(sb, pss_string, len);
			/* A short chunk means we hit the string's NUL. */
			if (len != GET_PS_STRINGS_CHUNK_SZ)
				break;
			done += GET_PS_STRINGS_CHUNK_SZ;
		}
		/* Emit the terminating NUL for this string. */
		sbuf_bcat(sb, "", 1);
		done += len + 1;
	}
done:
	free(proc_vector, M_TEMP);
	return (error);
}

/*
 * Append up to nchr characters of p's argument strings to sb.
 * NOTE(review): the td parameter is ignored; curthread is passed down
 * instead — confirm whether any caller supplies a different thread.
 */
int
proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb, size_t nchr)
{

	return (get_ps_strings(curthread, p, sb, PROC_ARG, nchr));
}

/*
 * Append up to nchr characters of p's environment strings to sb.
 * NOTE(review): td is ignored here as well; see proc_getargv().
 */
int
proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb, size_t nchr)
{

	return (get_ps_strings(curthread, p, sb, PROC_ENV, nchr));
}

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
1655 */ 1656 static int 1657 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS) 1658 { 1659 int *name = (int*) arg1; 1660 u_int namelen = arg2; 1661 struct pargs *newpa, *pa; 1662 struct proc *p; 1663 struct sbuf sb; 1664 int error = 0, error2; 1665 1666 if (namelen != 1) 1667 return (EINVAL); 1668 1669 p = pfind((pid_t)name[0]); 1670 if (!p) 1671 return (ESRCH); 1672 1673 if ((error = p_cansee(curthread, p)) != 0) { 1674 PROC_UNLOCK(p); 1675 return (error); 1676 } 1677 1678 if (req->newptr && curproc != p) { 1679 PROC_UNLOCK(p); 1680 return (EPERM); 1681 } 1682 1683 pa = p->p_args; 1684 if (pa != NULL) { 1685 pargs_hold(pa); 1686 PROC_UNLOCK(p); 1687 error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length); 1688 pargs_drop(pa); 1689 } else if ((p->p_flag & (P_WEXIT | P_SYSTEM)) == 0) { 1690 _PHOLD(p); 1691 PROC_UNLOCK(p); 1692 sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req); 1693 error = proc_getargv(curthread, p, &sb, req->oldlen); 1694 error2 = sbuf_finish(&sb); 1695 PRELE(p); 1696 sbuf_delete(&sb); 1697 if (error == 0 && error2 != 0) 1698 error = error2; 1699 } else { 1700 PROC_UNLOCK(p); 1701 } 1702 if (error != 0 || req->newptr == NULL) 1703 return (error); 1704 1705 if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) 1706 return (ENOMEM); 1707 newpa = pargs_alloc(req->newlen); 1708 error = SYSCTL_IN(req, newpa->ar_args, req->newlen); 1709 if (error != 0) { 1710 pargs_free(newpa); 1711 return (error); 1712 } 1713 PROC_LOCK(p); 1714 pa = p->p_args; 1715 p->p_args = newpa; 1716 PROC_UNLOCK(p); 1717 pargs_drop(pa); 1718 return (0); 1719 } 1720 1721 /* 1722 * This sysctl allows a process to retrieve environment of another process. 
 */
static int
sysctl_kern_proc_env(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct sbuf sb;
	int error, error2;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);	/* returns p locked */
	if (p == NULL)
		return (ESRCH);
	if ((p->p_flag & P_WEXIT) != 0) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	/* Reading another process's environment requires debug rights. */
	if ((error = p_candebug(curthread, p)) != 0) {
		PROC_UNLOCK(p);
		return (error);
	}
	/* System processes have no user environment to show. */
	if ((p->p_flag & P_SYSTEM) != 0) {
		PROC_UNLOCK(p);
		return (0);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req);
	error = proc_getenvv(curthread, p, &sb, req->oldlen);
	error2 = sbuf_finish(&sb);
	PRELE(p);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}

/*
 * This sysctl allows a process to retrieve ELF auxiliary vector of
 * another process.
 */
static int
sysctl_kern_proc_auxv(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	size_t vsize, size;
	char **auxv;
	int error;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);	/* returns p locked */
	if (p == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	error = p_candebug(curthread, p);
	if (error != 0) {
		PROC_UNLOCK(p);
		return (error);
	}
	if ((p->p_flag & P_SYSTEM) != 0) {
		PROC_UNLOCK(p);
		return (0);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	error = get_proc_vector(curthread, p, &auxv, &vsize, PROC_AUX);
	if (error == 0) {
#ifdef COMPAT_FREEBSD32
		/* 32-bit targets return Elf32_Auxinfo-sized entries. */
		if (SV_PROC_FLAG(p, SV_ILP32) != 0)
			size = vsize * sizeof(Elf32_Auxinfo);
		else
#endif
			size = vsize * sizeof(Elf_Auxinfo);
		PRELE(p);
		error = SYSCTL_OUT(req, auxv, size);
		free(auxv, M_TEMP);
	} else {
		PRELE(p);
	}
	return (error);
}

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	struct vnode *vp;
	char *retbuf, *freebuf;
	int error, vfslocked;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = req->td->td_proc;
	} else {
		p = pfind(*pidp);	/* returns p locked */
		if (p == NULL)
			return (ESRCH);
		if ((error = p_cansee(curthread, p)) != 0) {
			PROC_UNLOCK(p);
			return (error);
		}
	}

	vp = p->p_textvp;
	if (vp == NULL) {
		/* p is only locked when it is not curproc. */
		if (*pidp != -1)
			PROC_UNLOCK(p);
		return (0);
	}
	vref(vp);
	if (*pidp != -1)
		PROC_UNLOCK(p);
	error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (error)
		return (error);
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	free(freebuf, M_TEMP);
	return (error);
}

/* Return the name of the target process's syscall vector (ABI). */
static int
sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	char *sv_name;
	int *name;
	int namelen;
	int error;

	namelen = arg2;
	if (namelen != 1)
		return (EINVAL);

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	/*
	 * sv_name points at static sysentvec data, so it remains valid
	 * after the process lock is dropped.
	 */
	sv_name = p->p_sysent->sv_name;
	PROC_UNLOCK(p);
	return (sysctl_handle_string(oidp, sv_name, 0, req));
}

#ifdef KINFO_OVMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
#endif

#ifdef COMPAT_FREEBSD7
static int
sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_ovmentry *kve;
	struct vattr va;
	struct ucred *cred;
	int error, *name;
	struct vnode *vp;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vm;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &vm->vm_map;
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		int vfslocked;

		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));
		kve->kve_structsize = sizeof(*kve);

		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		/* Count resident pages by probing the pmap page by page. */
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(map->pmap, addr))
				kve->kve_resident++;
			addr += PAGE_SIZE;
		}

		/*
		 * Walk to the bottom of the shadow chain; lobj ends up as
		 * the backing object, locked (obj stays locked too).
		 */
		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		kve->kve_start = (void*)entry->start;
		kve->kve_end = (void*)entry->end;
		kve->kve_offset = (off_t)entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
			kve->kve_flags |= KVME_FLAG_NOCOREDUMP;

		/*
		 * Drop the map lock across the copyout; the timestamp lets
		 * us detect concurrent modification afterwards.
		 */
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		kve->kve_fileid = 0;
		kve->kve_fsid = 0;
		freepath = NULL;
		fullpath = "";
		if (lobj) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			case OBJT_SG:
				kve->kve_type = KVME_TYPE_SG;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_UNLOCK(obj);
			if (vp != NULL) {
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				cred = curthread->td_ucred;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY);
				if (VOP_GETATTR(vp, &va, cred) == 0) {
					kve->kve_fileid = va.va_fileid;
					kve->kve_fsid = va.va_fsid;
				}
				vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		error = SYSCTL_OUT(req, kve, sizeof(*kve));
		vm_map_lock_read(map);
		if (error)
			break;
		/* Map changed while unlocked: re-look up our position. */
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}
#endif	/* COMPAT_FREEBSD7 */

#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

/*
 * Return kinfo_vmentry records describing the VM map of the target
 * process.  Same structure as sysctl_kern_proc_ovmmap() above, but uses
 * pmap_mincore() for residency/superpage info, fills in vnode
 * attributes, and packs each record down to its used size.
 */
static int
sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_vmentry *kve;
	struct vattr va;
	struct ucred *cred;
	int error, *name;
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &vm->vm_map;
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		vm_paddr_t locked_pa;
		int vfslocked, mincoreinfo;

		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));

		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			locked_pa = 0;
			mincoreinfo = pmap_mincore(map->pmap, addr, &locked_pa);
			if (locked_pa != 0)
				vm_page_unlock(PHYS_TO_VM_PAGE(locked_pa));
			if (mincoreinfo & MINCORE_INCORE)
				kve->kve_resident++;
			if (mincoreinfo & MINCORE_SUPER)
				kve->kve_flags |= KVME_FLAG_SUPER;
			addr += PAGE_SIZE;
		}

		/* Walk to the bottom of the shadow chain (see ovmmap). */
		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		kve->kve_start = entry->start;
		kve->kve_end = entry->end;
		kve->kve_offset = entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
			kve->kve_flags |= KVME_FLAG_NOCOREDUMP;

		/* Drop the map lock across the copyout (see ovmmap). */
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		freepath = NULL;
		fullpath = "";
		if (lobj) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			case OBJT_SG:
				kve->kve_type = KVME_TYPE_SG;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_UNLOCK(obj);
			if (vp != NULL) {
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				kve->kve_vn_type = vntype_to_kinfo(vp->v_type);
				cred = curthread->td_ucred;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY);
				if (VOP_GETATTR(vp, &va, cred) == 0) {
					kve->kve_vn_fileid = va.va_fileid;
					kve->kve_vn_fsid = va.va_fsid;
					kve->kve_vn_mode =
					    MAKEIMODE(va.va_type, va.va_mode);
					kve->kve_vn_size = va.va_size;
					kve->kve_vn_rdev = va.va_rdev;
					kve->kve_status = KF_ATTR_VALID;
				}
				vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) +
		    strlen(kve->kve_path) + 1;
		kve->kve_structsize = roundup(kve->kve_structsize,
		    sizeof(uint64_t));
		error = SYSCTL_OUT(req, kve, kve->kve_structsize);
		vm_map_lock_read(map);
		if (error)
			break;
		/* Map changed while unlocked: re-look up our position. */
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}

#if defined(STACK) || defined(DDB)
/*
 * Return kernel stack traces (kinfo_kstack records) for every thread of
 * the target process.
 */
static int
sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_kstack *kkstp;
	int error, i, *name, numthreads;
	lwpid_t *lwpidarray;
	struct thread *td;
	struct stack *st;
	struct sbuf sb;
	struct proc *p;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	/* XXXRW: Not clear ESRCH is the right error during proc execve(). */
	if (p->p_flag & P_WEXIT || p->p_flag & P_INEXEC) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);

	kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);
	st = stack_create();

	lwpidarray = NULL;
	numthreads = 0;
	PROC_LOCK(p);
repeat:
	/*
	 * Size the tid array without holding the proc lock across the
	 * allocation; retry if the thread count grew meanwhile.
	 */
	if (numthreads < p->p_numthreads) {
		if (lwpidarray != NULL) {
			free(lwpidarray, M_TEMP);
			lwpidarray = NULL;
		}
		numthreads = p->p_numthreads;
		PROC_UNLOCK(p);
		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
		    M_WAITOK | M_ZERO);
		PROC_LOCK(p);
		goto repeat;
	}
	i = 0;

	/*
	 * XXXRW: During the below loop, execve(2) and countless other sorts
	 * of changes could have taken place.  Should we check to see if the
	 * vmspace has been replaced, or the like, in order to prevent
	 * giving a snapshot that spans, say, execve(2), with some threads
	 * before and some after?  Among other things, the credentials could
	 * have changed, in which case the right to extract debug info might
	 * no longer be assured.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(i < numthreads,
		    ("sysctl_kern_proc_kstack: numthreads"));
		lwpidarray[i] = td->td_tid;
		i++;
	}
	numthreads = i;
	for (i = 0; i < numthreads; i++) {
		/* The thread may have exited; look it up again by tid. */
		td = thread_find(p, lwpidarray[i]);
		if (td == NULL) {
			continue;
		}
		bzero(kkstp, sizeof(*kkstp));
		(void)sbuf_new(&sb, kkstp->kkst_trace,
		    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
		thread_lock(td);
		kkstp->kkst_tid = td->td_tid;
		if (TD_IS_SWAPPED(td))
			kkstp->kkst_state = KKST_STATE_SWAPPED;
		else if (TD_IS_RUNNING(td))
			kkstp->kkst_state = KKST_STATE_RUNNING;
		else {
			kkstp->kkst_state = KKST_STATE_STACKOK;
			stack_save_td(st, td);
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
		stack_sbuf_print(&sb, st);
		sbuf_finish(&sb);
		sbuf_delete(&sb);
		error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));
		PROC_LOCK(p);
		if (error)
			break;
	}
	_PRELE(p);
	PROC_UNLOCK(p);
	if (lwpidarray != NULL)
		free(lwpidarray, M_TEMP);
	stack_destroy(st);
	free(kkstp, M_TEMP);
	return (error);
}
#endif

/*
 * This sysctl allows a process to retrieve the full list of groups from
 * itself or another process.
2362 */ 2363 static int 2364 sysctl_kern_proc_groups(SYSCTL_HANDLER_ARGS) 2365 { 2366 pid_t *pidp = (pid_t *)arg1; 2367 unsigned int arglen = arg2; 2368 struct proc *p; 2369 struct ucred *cred; 2370 int error; 2371 2372 if (arglen != 1) 2373 return (EINVAL); 2374 if (*pidp == -1) { /* -1 means this process */ 2375 p = req->td->td_proc; 2376 } else { 2377 p = pfind(*pidp); 2378 if (p == NULL) 2379 return (ESRCH); 2380 if ((error = p_cansee(curthread, p)) != 0) { 2381 PROC_UNLOCK(p); 2382 return (error); 2383 } 2384 } 2385 2386 cred = crhold(p->p_ucred); 2387 if (*pidp != -1) 2388 PROC_UNLOCK(p); 2389 2390 error = SYSCTL_OUT(req, cred->cr_groups, 2391 cred->cr_ngroups * sizeof(gid_t)); 2392 crfree(cred); 2393 return (error); 2394 } 2395 2396 /* 2397 * This sysctl allows a process to retrieve the resource limits for 2398 * another process. 2399 */ 2400 static int 2401 sysctl_kern_proc_rlimit(SYSCTL_HANDLER_ARGS) 2402 { 2403 int *name = (int*) arg1; 2404 u_int namelen = arg2; 2405 struct plimit *limp; 2406 struct proc *p; 2407 int error = 0; 2408 2409 if (namelen != 1) 2410 return (EINVAL); 2411 2412 p = pfind((pid_t)name[0]); 2413 if (p == NULL) 2414 return (ESRCH); 2415 2416 if ((error = p_cansee(curthread, p)) != 0) { 2417 PROC_UNLOCK(p); 2418 return (error); 2419 } 2420 2421 /* 2422 * Check the request size. We alow sizes smaller rlimit array for 2423 * backward binary compatibility: the number of resource limits may 2424 * grow. 2425 */ 2426 if (sizeof(limp->pl_rlimit) < req->oldlen) { 2427 PROC_UNLOCK(p); 2428 return (EINVAL); 2429 } 2430 2431 limp = lim_hold(p->p_limit); 2432 PROC_UNLOCK(p); 2433 error = SYSCTL_OUT(req, limp->pl_rlimit, req->oldlen); 2434 lim_free(limp); 2435 return (error); 2436 } 2437 2438 /* 2439 * This sysctl allows a process to retrieve ps_strings structure location of 2440 * another process. 
2441 */ 2442 static int 2443 sysctl_kern_proc_ps_strings(SYSCTL_HANDLER_ARGS) 2444 { 2445 int *name = (int*) arg1; 2446 u_int namelen = arg2; 2447 struct proc *p; 2448 vm_offset_t ps_strings; 2449 int error; 2450 #ifdef COMPAT_FREEBSD32 2451 uint32_t ps_strings32; 2452 #endif 2453 2454 if (namelen != 1) 2455 return (EINVAL); 2456 2457 p = pfind((pid_t)name[0]); 2458 if (p == NULL) 2459 return (ESRCH); 2460 error = p_candebug(curthread, p); 2461 if (error != 0) { 2462 PROC_UNLOCK(p); 2463 return (error); 2464 } 2465 #ifdef COMPAT_FREEBSD32 2466 if ((req->flags & SCTL_MASK32) != 0) { 2467 /* 2468 * We return 0 if the 32 bit emulation request is for a 64 bit 2469 * process. 2470 */ 2471 ps_strings32 = SV_PROC_FLAG(p, SV_ILP32) != 0 ? 2472 PTROUT(p->p_sysent->sv_psstrings) : 0; 2473 PROC_UNLOCK(p); 2474 error = SYSCTL_OUT(req, &ps_strings32, sizeof(ps_strings32)); 2475 return (error); 2476 } 2477 #endif 2478 ps_strings = p->p_sysent->sv_psstrings; 2479 PROC_UNLOCK(p); 2480 error = SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)); 2481 return (error); 2482 } 2483 2484 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table"); 2485 2486 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT| 2487 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc", 2488 "Return entire process table"); 2489 2490 static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE, 2491 sysctl_kern_proc, "Process table"); 2492 2493 static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE, 2494 sysctl_kern_proc, "Process table"); 2495 2496 static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE, 2497 sysctl_kern_proc, "Process table"); 2498 2499 static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD | 2500 CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2501 2502 static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE, 2503 sysctl_kern_proc, "Process table"); 2504 2505 
static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE, 2506 sysctl_kern_proc, "Process table"); 2507 2508 static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE, 2509 sysctl_kern_proc, "Process table"); 2510 2511 static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE, 2512 sysctl_kern_proc, "Process table"); 2513 2514 static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE, 2515 sysctl_kern_proc, "Return process table, no threads"); 2516 2517 static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, 2518 CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, 2519 sysctl_kern_proc_args, "Process argument list"); 2520 2521 static SYSCTL_NODE(_kern_proc, KERN_PROC_ENV, env, 2522 CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, 2523 sysctl_kern_proc_env, "Process environment"); 2524 2525 static SYSCTL_NODE(_kern_proc, KERN_PROC_AUXV, auxv, 2526 CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, 2527 sysctl_kern_proc_auxv, "Process ELF auxiliary vector"); 2528 2529 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD | 2530 CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path"); 2531 2532 static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD | 2533 CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name, 2534 "Process syscall vector name (ABI type)"); 2535 2536 static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td, 2537 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2538 2539 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td, 2540 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2541 2542 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td, 2543 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2544 2545 static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD), 2546 sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, 
sysctl_kern_proc, "Process table"); 2547 2548 static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td, 2549 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2550 2551 static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td, 2552 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2553 2554 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td, 2555 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2556 2557 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td, 2558 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); 2559 2560 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td, 2561 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, 2562 "Return process table, no threads"); 2563 2564 #ifdef COMPAT_FREEBSD7 2565 static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD | 2566 CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries"); 2567 #endif 2568 2569 static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD | 2570 CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries"); 2571 2572 #if defined(STACK) || defined(DDB) 2573 static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD | 2574 CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks"); 2575 #endif 2576 2577 static SYSCTL_NODE(_kern_proc, KERN_PROC_GROUPS, groups, CTLFLAG_RD | 2578 CTLFLAG_MPSAFE, sysctl_kern_proc_groups, "Process groups"); 2579 2580 static SYSCTL_NODE(_kern_proc, KERN_PROC_RLIMIT, rlimit, CTLFLAG_RD | 2581 CTLFLAG_MPSAFE, sysctl_kern_proc_rlimit, "Process resource limits"); 2582 2583 static SYSCTL_NODE(_kern_proc, KERN_PROC_PS_STRINGS, ps_strings, 2584 CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, 2585 sysctl_kern_proc_ps_strings, "Process ps_strings location"); 2586