1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
 *
 * @(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Kernel build option headers (DDB, DTrace SDT, ktrace, kstack sizing). */
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sbuf.h>
#include <sys/sysent.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/uma.h>

/*
 * DTrace SDT probes marking entry/return of the proc UMA-zone lifecycle
 * callbacks (proc_ctor, proc_dtor, proc_init).  Argument types mirror the
 * callbacks' parameters.
 */
SDT_PROVIDER_DEFINE(proc);
SDT_PROBE_DEFINE(proc, kernel, ctor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, ctor, return);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, dtor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 3, "struct thread *");
SDT_PROBE_DEFINE(proc, kernel, dtor, return);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 2, "void *");
SDT_PROBE_DEFINE(proc, kernel, init, entry);
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 2, "int");
SDT_PROBE_DEFINE(proc, kernel, init, return);
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 2, "int");

/* malloc(9) types for process, pgrp, and session allocations. */
MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

/* Forward declarations for file-local helpers defined below. */
static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
    int preferthread);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
static void proc_dtor(void *mem, int size, void *arg);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;		/* PID -> proc hash chains */
u_long pidhash;				/* size/mask filled in by hashinit() */
struct pgrphashhead *pgrphashtbl;	/* pgid -> pgrp hash chains */
u_long pgrphash;			/* size/mask filled in by hashinit() */
struct proclist allproc;		/* all live processes */
struct proclist zombproc;		/* zombie processes awaiting reap */
struct sx allproc_lock;			/* guards allproc/zombproc traversal */
struct sx proctree_lock;		/* guards parent/child/pgrp/session links */
struct mtx ppeers_lock;			/* guards p_peers chains */
uma_zone_t proc_zone;			/* type-stable zone for struct proc */
uma_zone_t ithread_zone;

/* Kernel stack size in pages; read-only sysctl kern.kstack_pages. */
int kstack_pages = KSTACK_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");

/* The exported kinfo_proc ABI size must never drift. */
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);

/*
 * Initialize global process hashing structures.
 * Called once at boot: sets up the locks, the allproc/zombproc lists, the
 * pid and pgrp hash tables, and the type-stable proc UMA zone.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* Hash tables sized at one bucket per four processes. */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	/* UMA_ZONE_NOFREE: proc memory is type-stable, never returned. */
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}

/*
 * Prepare a proc for use.
 * UMA zone constructor: fires the ctor SDT probe and the process_ctor
 * eventhandler chain.  Always succeeds.
 */
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, ctor, entry, p, size, arg, flags, 0);
	EVENTHANDLER_INVOKE(process_ctor, p);
	SDT_PROBE(proc, kernel, ctor, return, p, size, arg, flags, 0);
	return (0);
}

/*
 * Reclaim a proc after use.
 * UMA zone destructor: sanity-checks the exiting process under INVARIANTS,
 * releases a kernel thread's alternate kstack if present, and runs the
 * process_dtor eventhandler chain.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	td = FIRST_THREAD_IN_PROC(p);
	SDT_PROBE(proc, kernel, dtor, entry, p, size, arg, td, 0);
	if (td != NULL) {
#ifdef INVARIANTS
		KASSERT((p->p_numthreads == 1),
		    ("bad number of threads in exiting process"));
		KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
		/*
		 * Dispose of an alternate kstack, if it exists.
		 * XXX What if there are more than one thread in the proc?
		 * The first thread in the proc is special and not
		 * freed, so you gotta do this here.
		 */
		if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
			vm_thread_dispose_altkstack(td);
	}
	EVENTHANDLER_INVOKE(process_dtor, p);
	/* A queued SIGCHLD ksiginfo must not outlive its process. */
	if (p->p_ksi != NULL)
		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
	SDT_PROBE(proc, kernel, dtor, return, p, size, arg, 0, 0);
}

/*
 * Initialize type-stable parts of a proc (when newly created).
 * UMA zone init: runs once per slab item, not per allocation.  Sets up the
 * per-process locks, the thread list, the scheduler extension area that
 * follows the proc in memory, and the pstats allocation.
 */
static int
proc_init(void *mem, int size, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, init, entry, p, size, flags, 0, 0);
	/* Scheduler-private data lives directly after the proc structure. */
	p->p_sched = (struct p_sched *)&p[1];
	bzero(&p->p_mtx, sizeof(struct mtx));
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	EVENTHANDLER_INVOKE(process_init, p);
	p->p_stats = pstats_alloc();
	SDT_PROBE(proc, kernel, init, return, p, size, flags, 0, 0);
	return (0);
}

/*
 * UMA should ensure that this function is never called.
 * Freeing a proc structure would violate type stability (the zone is
 * created with UMA_ZONE_NOFREE), so reaching here is a panic.
 */
static void
proc_fini(void *mem, int size)
{
#ifdef notnow
	struct proc *p;

	p = (struct proc *)mem;
	EVENTHANDLER_INVOKE(process_fini, p);
	pstats_free(p->p_stats);
	thread_free(FIRST_THREAD_IN_PROC(p));
	mtx_destroy(&p->p_mtx);
	if (p->p_ksi != NULL)
		ksiginfo_free(p->p_ksi);
#else
	panic("proc reclaimed");
#endif
}

/*
 * Is p an inferior (descendant) of the current process?
 * Walks the parent chain from p toward init; caller must hold
 * proctree_lock to keep p_pptr links stable.
 */
int
inferior(p)
	register struct proc *p;
{

	sx_assert(&proctree_lock, SX_LOCKED);
	for (; p != curproc; p = p->p_pptr)
		if (p->p_pid == 0)
			return (0);
	return (1);
}

/*
 * Locate a process by number; return only "live" processes -- i.e., neither
 * zombies nor newly born but incompletely initialized processes.
By not 276 * returning processes in the PRS_NEW state, we allow callers to avoid 277 * testing for that condition to avoid dereferencing p_ucred, et al. 278 */ 279 struct proc * 280 pfind(pid) 281 register pid_t pid; 282 { 283 register struct proc *p; 284 285 sx_slock(&allproc_lock); 286 LIST_FOREACH(p, PIDHASH(pid), p_hash) 287 if (p->p_pid == pid) { 288 if (p->p_state == PRS_NEW) { 289 p = NULL; 290 break; 291 } 292 PROC_LOCK(p); 293 break; 294 } 295 sx_sunlock(&allproc_lock); 296 return (p); 297 } 298 299 /* 300 * Locate a process group by number. 301 * The caller must hold proctree_lock. 302 */ 303 struct pgrp * 304 pgfind(pgid) 305 register pid_t pgid; 306 { 307 register struct pgrp *pgrp; 308 309 sx_assert(&proctree_lock, SX_LOCKED); 310 311 LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) { 312 if (pgrp->pg_id == pgid) { 313 PGRP_LOCK(pgrp); 314 return (pgrp); 315 } 316 } 317 return (NULL); 318 } 319 320 /* 321 * Create a new process group. 322 * pgid must be equal to the pid of p. 323 * Begin a new session if required. 
 *
 * Caller must hold proctree_lock exclusively and supply a freshly
 * allocated pgrp (and session, if starting one); ownership of both is
 * taken over by this function.  Always returns 0.
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));

	/* The pgid must not already name an existing group. */
	pgrp2 = pgfind(pgid);

	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session: p becomes the session leader, losing any
		 * controlling terminal; the login name is inherited from
		 * p's previous session.
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		mtx_lock(&Giant);       /* XXX TTY */
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		sess->s_count = 1;
		sess->s_ttyvp = NULL;
		sess->s_ttyp = NULL;
		bcopy(p->p_session->s_login, sess->s_login,
		    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		/* Joining p's existing session: take a session reference. */
		mtx_lock(&Giant);       /* XXX TTY */
		pgrp->pg_session = p->p_session;
		SESS_LOCK(pgrp->pg_session);
		pgrp->pg_session->s_count++;
		SESS_UNLOCK(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);
	mtx_unlock(&Giant);       /* XXX TTY */

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to an existing process group in the same session.
 * Caller must hold proctree_lock exclusively and none of the pgrp/proc/
 * session locks (asserted below).  Always returns 0.
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
		("%s: pgrp's session %p, p->p_session %p.\n",
		__func__,
		pgrp->pg_session,
		p->p_session));
	KASSERT(pgrp != p->p_pgrp,
		("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to a process group.
 * Common tail for enterpgrp()/enterthispgrp(): fixes up job-control
 * counts, relinks p onto the new group's member list, and deletes the
 * old group if p was its last member.
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	mtx_lock(&Giant);       /* XXX TTY */
	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	mtx_unlock(&Giant);       /* XXX TTY */
	/* Reap the old group if p was its last member. */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}

/*
 * remove process from process group
 * Caller must hold proctree_lock exclusively.  Deletes the group if p
 * was its last member.  Always returns 0.
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	mtx_lock(&Giant);       /* XXX TTY */
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	mtx_unlock(&Giant);       /* XXX TTY */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}

/*
 * delete a process group
 * Detaches the (now empty) group from its tty and hash chain, drops its
 * session reference, and frees it.  Caller holds proctree_lock exclusively
 * and no pgrp/session locks.
 */
static void
pgdelete(pgrp)
	register struct pgrp *pgrp;
{
	struct session *savesess;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	mtx_lock(&Giant);       /* XXX TTY */
	PGRP_LOCK(pgrp);
	/* If we are the tty's foreground group, detach from the tty. */
	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	LIST_REMOVE(pgrp, pg_hash);
	savesess = pgrp->pg_session;
	SESSRELE(savesess);
	PGRP_UNLOCK(pgrp);
	mtx_destroy(&pgrp->pg_mtx);
	FREE(pgrp, M_PGRP);
	mtx_unlock(&Giant);       /* XXX TTY */
}

/*
 * Adjust a group's job-control eligibility count by one; when it drops
 * to zero the group has become orphaned and orphanpg() is invoked.
 */
static void
pgadjustjobc(pgrp, entering)
	struct pgrp *pgrp;
	int entering;
{

	PGRP_LOCK(pgrp);
	if (entering)
		pgrp->pg_jobc++;
	else {
		--pgrp->pg_jobc;
		if (pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}
	PGRP_UNLOCK(pgrp);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		hispgrp = p->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		/* Zombies no longer count toward job control. */
		PROC_LOCK(p);
		if (p->p_state == PRS_ZOMBIE) {
			PROC_UNLOCK(p);
			continue;
		}
		PROC_UNLOCK(p);
		pgadjustjobc(hispgrp, entering);
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang-up all processes in that group (SIGHUP then SIGCONT).
 * Called with the pgrp locked (asserted below).
 */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	register struct proc *p;

	PGRP_LOCK_ASSERT(pg, MA_OWNED);

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		PROC_LOCK(p);
		if (P_SHOULDSTOP(p)) {
			PROC_UNLOCK(p);
			/* At least one member is stopped: signal them all. */
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				PROC_LOCK(p);
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
				PROC_UNLOCK(p);
			}
			return;
		}
		PROC_UNLOCK(p);
	}
}

/*
 * Drop a reference on a session; free it (and release its tty) when the
 * last reference goes away.
 */
void
sessrele(struct session *s)
{
	int i;

	SESS_LOCK(s);
	i = --s->s_count;
	SESS_UNLOCK(s);
	if (i == 0) {
		if (s->s_ttyp != NULL)
			ttyrel(s->s_ttyp);
		mtx_destroy(&s->s_mtx);
		FREE(s, M_SESSION);
	}
}

/* NOTE(review): opt_ddb.h/ddb.h are already included at the top of the
 * file; this duplicate include pair is harmless and kept as-is. */
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * DDB "show pgrpdump" command: dump every process-group hash bucket with
 * its groups and their member processes.  Runs lockless (debugger context).
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Clear kinfo_proc and fill in any information that is common
 * to all threads in the process.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td0;
	struct tty *tp;
	struct session *sp;
	struct ucred *cred;
	struct sigacts *ps;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	kp->ki_addr =/* p->p_addr; */0; /* XXX */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	mtx_lock(&ktrace_mtx);
	kp->ki_traceflag = p->p_traceflag;
	mtx_unlock(&ktrace_mtx);
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	kp->ki_flag = p->p_flag;
	cred = p->p_ucred;
	if (cred) {
		kp->ki_uid = cred->cr_uid;
		kp->ki_ruid = cred->cr_ruid;
		kp->ki_svuid = cred->cr_svuid;
		/* XXX bde doesn't like KI_NGROUPS */
		kp->ki_ngroups = min(cred->cr_ngroups, KI_NGROUPS);
		bcopy(cred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = cred->cr_rgid;
		kp->ki_svgid = cred->cr_svgid;
		/* If jailed(cred), emulate the old P_JAILED flag. */
		if (jailed(cred)) {
			kp->ki_flag |= P_JAILED;
			/* If inside a jail, use 0 as a jail ID. */
			if (!jailed(curthread->td_ucred))
				kp->ki_jid = cred->cr_prison->pr_id;
		}
	}
	ps = p->p_sigacts;
	if (ps) {
		mtx_lock(&ps->ps_mtx);
		kp->ki_sigignore = ps->ps_sigignore;
		kp->ki_sigcatch = ps->ps_sigcatch;
		mtx_unlock(&ps->ps_mtx);
	}
	PROC_SLOCK(p);
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		/* Kernel stacks count toward the resident set. */
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
			if (td0->td_altkstack_obj != NULL)
				kp->ki_rssize += td0->td_altkstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	} else if (p->p_state == PRS_ZOMBIE)
		kp->ki_stat = SZOMB;
	if (kp->ki_flag & P_INMEM)
		kp->ki_sflag = PS_INMEM;
	else
		kp->ki_sflag = 0;
	/* Calculate legacy swtime as seconds since 'swtick'. */
	kp->ki_swtime = (ticks - p->p_swtick) / hz;
	kp->ki_pid = p->p_pid;
	kp->ki_nice = p->p_nice;
	rufetch(p, &kp->ki_rusage);
	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
	PROC_SUNLOCK(p);
	if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
		kp->ki_start = p->p_stats->p_start;
		timevaladd(&kp->ki_start, &boottime);
		PROC_SLOCK(p);
		calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
		PROC_SUNLOCK(p);
		calccru(p, &kp->ki_childutime, &kp->ki_childstime);

		/* Some callers want child-times in a single value */
		kp->ki_childtime = kp->ki_childstime;
		timevaladd(&kp->ki_childtime, &kp->ki_childutime);
	}
	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = dev2udev(tp->t_dev);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NODEV;
	if (p->p_comm[0] != '\0')
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
	    p->p_sysent->sv_name[0] != '\0')
		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
	kp->ki_siglist = p->p_siglist;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}

/*
 * Fill in information that is thread specific.  Must be called with p_slock
 * locked.
 * If 'preferthread' is set, overwrite certain process-related
 * fields that are maintained for both threads and processes.
 */
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	thread_lock(td);
	/* Sleep wait message, thread name, and lock-block info. */
	if (td->td_wmesg != NULL)
		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
	else
		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
	if (td->td_name[0] != '\0')
		strlcpy(kp->ki_ocomm, td->td_name, sizeof(kp->ki_ocomm));
	if (TD_ON_LOCK(td)) {
		kp->ki_kiflag |= KI_LOCKBLOCK;
		strlcpy(kp->ki_lockname, td->td_lockname,
		    sizeof(kp->ki_lockname));
	} else {
		kp->ki_kiflag &= ~KI_LOCKBLOCK;
		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
	}

	/* Derive the legacy one-letter state from proc + thread state. */
	if (p->p_state == PRS_NORMAL) { /* approximate. */
		if (TD_ON_RUNQ(td) ||
		    TD_CAN_RUN(td) ||
		    TD_IS_RUNNING(td)) {
			kp->ki_stat = SRUN;
		} else if (P_SHOULDSTOP(p)) {
			kp->ki_stat = SSTOP;
		} else if (TD_IS_SLEEPING(td)) {
			kp->ki_stat = SSLEEP;
		} else if (TD_ON_LOCK(td)) {
			kp->ki_stat = SLOCK;
		} else {
			kp->ki_stat = SWAIT;
		}
	} else if (p->p_state == PRS_ZOMBIE) {
		kp->ki_stat = SZOMB;
	} else {
		kp->ki_stat = SIDL;
	}

	/* Things in the thread */
	kp->ki_wchan = td->td_wchan;
	kp->ki_pri.pri_level = td->td_priority;
	kp->ki_pri.pri_native = td->td_base_pri;
	kp->ki_lastcpu = td->td_lastcpu;
	kp->ki_oncpu = td->td_oncpu;
	kp->ki_tdflags = td->td_flags;
	kp->ki_tid = td->td_tid;
	kp->ki_numthreads = p->p_numthreads;
	kp->ki_pcb = td->td_pcb;
	kp->ki_kstack = (void *)td->td_kstack;
	kp->ki_pctcpu = sched_pctcpu(td);
	kp->ki_estcpu = td->td_estcpu;
	kp->ki_slptime = (ticks - td->td_slptick) / hz;
	kp->ki_pri.pri_class = td->td_pri_class;
	kp->ki_pri.pri_user = td->td_user_pri;

	/* Report per-thread runtime instead of whole-process runtime. */
	if (preferthread)
		kp->ki_runtime = cputick2usec(td->td_runtime);

	/* We can't get this anymore but ps etc never used it anyway. */
	kp->ki_rqindex = 0;

	SIGSETOR(kp->ki_siglist, td->td_siglist);
	kp->ki_sigmask = td->td_sigmask;
	thread_unlock(td);
}

/*
 * Fill in a kinfo_proc structure for the specified process.
 * Must be called with the target process locked.  Thread fields come
 * from the first thread, if any.
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{

	fill_kinfo_proc_only(p, kp);
	if (FIRST_THREAD_IN_PROC(p) != NULL)
		fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
}

/*
 * Allocate a zeroed pstats structure (may sleep).
 */
struct pstats *
pstats_alloc(void)
{

	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
}

/*
 * Copy parts of p_stats; zero the rest of p_stats (statistics).
 */
void
pstats_fork(struct pstats *src, struct pstats *dst)
{

	bzero(&dst->pstat_startzero,
	    __rangeof(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
	    __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
}

/*
 * Release a pstats structure allocated by pstats_alloc().
 */
void
pstats_free(struct pstats *ps)
{

	free(ps, M_SUBPROC);
}

/*
 * Locate a zombie process by number.
 * On success the process is returned LOCKED (caller must PROC_UNLOCK).
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid) {
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

/* Flag bits for sysctl_out_proc(). */
#define KERN_PROC_ZOMBMASK	0x3	/* nonzero => p came from zombproc */
#define KERN_PROC_NOTHREADS	0x4	/* emit one record, not one per thread */

/*
 * Must be called with the process locked and will return with it unlocked.
 *
 * Copies one kinfo_proc record per process (or per thread) out to the
 * sysctl request, then re-looks the pid up to detect whether the process
 * disappeared while we were unlocked; EAGAIN tells the caller to restart.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct thread *td;
	struct kinfo_proc kinfo_proc;
	int error = 0;
	struct proc *np;
	pid_t pid = p->p_pid;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	fill_kinfo_proc_only(p, &kinfo_proc);
	if (flags & KERN_PROC_NOTHREADS) {
		/* Single record; thread fields from the first thread. */
		if (FIRST_THREAD_IN_PROC(p) != NULL)
			fill_kinfo_thread(FIRST_THREAD_IN_PROC(p),
			    &kinfo_proc, 0);
		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
		    sizeof(kinfo_proc));
	} else {
		/* One record per thread, preferring per-thread values. */
		if (FIRST_THREAD_IN_PROC(p) != NULL)
			FOREACH_THREAD_IN_PROC(p, td) {
				fill_kinfo_thread(td, &kinfo_proc, 1);
				error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
				    sizeof(kinfo_proc));
				if (error)
					break;
			}
		else
			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
			    sizeof(kinfo_proc));
	}
	PROC_UNLOCK(p);
	if (error)
		return (error);
	/* Revalidate: the pid must still name the same (live/zombie) proc. */
	if (flags & KERN_PROC_ZOMBMASK)
		np = zpfind(pid);
	else {
		if (pid == 0)
			return (0);
		np = pfind(pid);
	}
	if (np == NULL)
		return EAGAIN;
	if (np != p) {
		PROC_UNLOCK(np);
		return EAGAIN;
	}
	PROC_UNLOCK(np);
	return (0);
}

/*
 * Handler for the kern.proc.* sysctls: either a single-pid lookup, or a
 * filtered walk of allproc and zombproc (by gid, pgrp, session, tty, uid,
 * ruid, rgid, or all/proc).  KERN_PROC_INC_THREAD in the oid selects
 * per-thread records.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int flags, doingzomb, oid_number;
	int error = 0;

	oid_number = oidp->oid_number;
	if (oid_number != KERN_PROC_ALL &&
	    (oid_number & KERN_PROC_INC_THREAD) == 0)
		flags = KERN_PROC_NOTHREADS;
	else {
		flags = 0;
		oid_number &= ~KERN_PROC_INC_THREAD;
	}
	if (oid_number == KERN_PROC_PID) {
		/* Fast path: exactly one pid requested. */
		if (namelen != 1)
			return (EINVAL);
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
		p = pfind((pid_t)name[0]);
		if (!p)
			return (ESRCH);
		if ((error = p_cansee(curthread, p))) {
			PROC_UNLOCK(p);
			return (error);
		}
		error = sysctl_out_proc(p, req, flags);
		return (error);
	}

	/* Validate the name length for the remaining (filter) oids. */
	switch (oid_number) {
	case KERN_PROC_ALL:
		if (namelen != 0)
			return (EINVAL);
		break;
	case KERN_PROC_PROC:
		if (namelen != 0 && namelen != 1)
			return (EINVAL);
		break;
	default:
		if (namelen != 1)
			return (EINVAL);
		break;
	}

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			return (error);
	}
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sx_slock(&allproc_lock);
	/* Pass 0 walks allproc; pass 1 walks zombproc. */
	for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
		if (!doingzomb)
			p = LIST_FIRST(&allproc);
		else
			p = LIST_FIRST(&zombproc);
		for (; p != 0; p = LIST_NEXT(p, p_list)) {
			/*
			 * Skip embryonic processes.
			 */
			PROC_SLOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_SUNLOCK(p);
				continue;
			}
			PROC_SUNLOCK(p);
			PROC_LOCK(p);
			KASSERT(p->p_ucred != NULL,
			    ("process credential is NULL for non-NEW proc"));
			/*
			 * Show a user only appropriate processes.
			 */
			if (p_cansee(curthread, p)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid_number) {

			case KERN_PROC_GID:
				if (p->p_ucred->cr_gid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RGID:
				if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_SESSION:
				if (p->p_session == NULL ||
				    p->p_session->s_sid != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_TTY:
				if ((p->p_flag & P_CONTROLT) == 0 ||
				    p->p_session == NULL) {
					PROC_UNLOCK(p);
					continue;
				}
				SESS_LOCK(p->p_session);
				if (p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (dev_t)name[0]) {
					SESS_UNLOCK(p->p_session);
					PROC_UNLOCK(p);
					continue;
				}
				SESS_UNLOCK(p->p_session);
				break;

			case KERN_PROC_UID:
				if (p->p_ucred->cr_uid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PROC:
				break;

			default:
				break;

			}

			/* doingzomb doubles as the ZOMBMASK flag bit. */
			error = sysctl_out_proc(p, req, flags | doingzomb);
			if (error) {
				sx_sunlock(&allproc_lock);
				return (error);
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (0);
}

/*
 * Allocate a pargs structure with room for 'len' bytes of argument
 * data, reference count initialized to 1.  May sleep.
 */
struct pargs *
pargs_alloc(int len)
{
	struct pargs *pa;

	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
		M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = len;
	return (pa);
}

/*
 * Free a pargs structure (callers normally go through pargs_drop()).
 */
void
pargs_free(struct pargs *pa)
{

	FREE(pa, M_PARGS);
}

1187 void 1188 pargs_hold(struct pargs *pa) 1189 { 1190 1191 if (pa == NULL) 1192 return; 1193 refcount_acquire(&pa->ar_ref); 1194 } 1195 1196 void 1197 pargs_drop(struct pargs *pa) 1198 { 1199 1200 if (pa == NULL) 1201 return; 1202 if (refcount_release(&pa->ar_ref)) 1203 pargs_free(pa); 1204 } 1205 1206 /* 1207 * This sysctl allows a process to retrieve the argument list or process 1208 * title for another process without groping around in the address space 1209 * of the other process. It also allow a process to set its own "process 1210 * title to a string of its own choice. 1211 */ 1212 static int 1213 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS) 1214 { 1215 int *name = (int*) arg1; 1216 u_int namelen = arg2; 1217 struct pargs *newpa, *pa; 1218 struct proc *p; 1219 int error = 0; 1220 1221 if (namelen != 1) 1222 return (EINVAL); 1223 1224 p = pfind((pid_t)name[0]); 1225 if (!p) 1226 return (ESRCH); 1227 1228 if ((error = p_cansee(curthread, p)) != 0) { 1229 PROC_UNLOCK(p); 1230 return (error); 1231 } 1232 1233 if (req->newptr && curproc != p) { 1234 PROC_UNLOCK(p); 1235 return (EPERM); 1236 } 1237 1238 pa = p->p_args; 1239 pargs_hold(pa); 1240 PROC_UNLOCK(p); 1241 if (req->oldptr != NULL && pa != NULL) 1242 error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length); 1243 pargs_drop(pa); 1244 if (error != 0 || req->newptr == NULL) 1245 return (error); 1246 1247 if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) 1248 return (ENOMEM); 1249 newpa = pargs_alloc(req->newlen); 1250 error = SYSCTL_IN(req, newpa->ar_args, req->newlen); 1251 if (error != 0) { 1252 pargs_free(newpa); 1253 return (error); 1254 } 1255 PROC_LOCK(p); 1256 pa = p->p_args; 1257 p->p_args = newpa; 1258 PROC_UNLOCK(p); 1259 pargs_drop(pa); 1260 return (0); 1261 } 1262 1263 /* 1264 * This sysctl allows a process to retrieve the path of the executable for 1265 * itself or another process. 
1266 */ 1267 static int 1268 sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS) 1269 { 1270 pid_t *pidp = (pid_t *)arg1; 1271 unsigned int arglen = arg2; 1272 struct proc *p; 1273 struct vnode *vp; 1274 char *retbuf, *freebuf; 1275 int error; 1276 1277 if (arglen != 1) 1278 return (EINVAL); 1279 if (*pidp == -1) { /* -1 means this process */ 1280 p = req->td->td_proc; 1281 } else { 1282 p = pfind(*pidp); 1283 if (p == NULL) 1284 return (ESRCH); 1285 if ((error = p_cansee(curthread, p)) != 0) { 1286 PROC_UNLOCK(p); 1287 return (error); 1288 } 1289 } 1290 1291 vp = p->p_textvp; 1292 if (vp == NULL) { 1293 if (*pidp != -1) 1294 PROC_UNLOCK(p); 1295 return (0); 1296 } 1297 vref(vp); 1298 if (*pidp != -1) 1299 PROC_UNLOCK(p); 1300 error = vn_fullpath(req->td, vp, &retbuf, &freebuf); 1301 vrele(vp); 1302 if (error) 1303 return (error); 1304 error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1); 1305 free(freebuf, M_TEMP); 1306 return (error); 1307 } 1308 1309 static int 1310 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS) 1311 { 1312 struct proc *p; 1313 char *sv_name; 1314 int *name; 1315 int namelen; 1316 int error; 1317 1318 namelen = arg2; 1319 if (namelen != 1) 1320 return (EINVAL); 1321 1322 name = (int *)arg1; 1323 if ((p = pfind((pid_t)name[0])) == NULL) 1324 return (ESRCH); 1325 if ((error = p_cansee(curthread, p))) { 1326 PROC_UNLOCK(p); 1327 return (error); 1328 } 1329 sv_name = p->p_sysent->sv_name; 1330 PROC_UNLOCK(p); 1331 return (sysctl_handle_string(oidp, sv_name, 0, req)); 1332 } 1333 1334 static int 1335 sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS) 1336 { 1337 vm_map_entry_t entry, tmp_entry; 1338 unsigned int last_timestamp; 1339 char *fullpath, *freepath; 1340 struct kinfo_vmentry *kve; 1341 int error, *name; 1342 struct vnode *vp; 1343 struct proc *p; 1344 vm_map_t map; 1345 1346 name = (int *)arg1; 1347 if ((p = pfind((pid_t)name[0])) == NULL) 1348 return (ESRCH); 1349 if (p->p_flag & P_WEXIT) { 1350 PROC_UNLOCK(p); 1351 return (ESRCH); 1352 } 1353 if 
((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	/* Hold the process so its vmspace survives the unlocked scan. */
	_PHOLD(p);
	PROC_UNLOCK(p);

	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &p->p_vmspace->vm_map;	/* XXXRW: More locking required? */
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		int vfslocked;

		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));
		kve->kve_structsize = sizeof(*kve);

		/*
		 * Pages are counted as private only when the top object
		 * has a single shadow reference.  The top object's lock
		 * is held until the counts are copied out below.
		 */
		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		/* Count resident pages by probing the pmap page by page. */
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(map->pmap, addr))
				kve->kve_resident++;
			addr += PAGE_SIZE;
		}

		/*
		 * Walk to the bottom of the backing-object chain, holding
		 * only the lock of the object currently being examined
		 * (plus obj's own lock, taken above).
		 */
		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		freepath = NULL;
		fullpath = "";
		if (lobj) {
			/* Classify the bottom object; grab its vnode. */
			vp = NULL;
			switch(lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			/* Counts come from the top object, still locked. */
			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
1434 VM_OBJECT_UNLOCK(obj); 1435 if (vp != NULL) { 1436 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 1437 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1438 vn_fullpath(curthread, vp, &fullpath, 1439 &freepath); 1440 vput(vp); 1441 VFS_UNLOCK_GIANT(vfslocked); 1442 } 1443 } else { 1444 kve->kve_type = KVME_TYPE_NONE; 1445 kve->kve_ref_count = 0; 1446 kve->kve_shadow_count = 0; 1447 } 1448 1449 kve->kve_start = (void*)entry->start; 1450 kve->kve_end = (void*)entry->end; 1451 1452 if (entry->protection & VM_PROT_READ) 1453 kve->kve_protection |= KVME_PROT_READ; 1454 if (entry->protection & VM_PROT_WRITE) 1455 kve->kve_protection |= KVME_PROT_WRITE; 1456 if (entry->protection & VM_PROT_EXECUTE) 1457 kve->kve_protection |= KVME_PROT_EXEC; 1458 1459 if (entry->eflags & MAP_ENTRY_COW) 1460 kve->kve_flags |= KVME_FLAG_COW; 1461 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) 1462 kve->kve_flags |= KVME_FLAG_NEEDS_COPY; 1463 1464 strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path)); 1465 if (freepath != NULL) 1466 free(freepath, M_TEMP); 1467 1468 last_timestamp = map->timestamp; 1469 vm_map_unlock_read(map); 1470 error = SYSCTL_OUT(req, kve, sizeof(*kve)); 1471 vm_map_lock_read(map); 1472 if (error) 1473 break; 1474 if (last_timestamp + 1 != map->timestamp) { 1475 vm_map_lookup_entry(map, addr - 1, &tmp_entry); 1476 entry = tmp_entry; 1477 } 1478 } 1479 vm_map_unlock_read(map); 1480 PRELE(p); 1481 free(kve, M_TEMP); 1482 return (error); 1483 } 1484 1485 #if defined(STACK) || defined(DDB) 1486 static int 1487 sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS) 1488 { 1489 struct kinfo_kstack *kkstp; 1490 int error, i, *name, numthreads; 1491 lwpid_t *lwpidarray; 1492 struct thread *td; 1493 struct stack *st; 1494 struct sbuf sb; 1495 struct proc *p; 1496 1497 name = (int *)arg1; 1498 if ((p = pfind((pid_t)name[0])) == NULL) 1499 return (ESRCH); 1500 /* XXXRW: Not clear ESRCH is the right error during proc execve(). 
*/
	if (p->p_flag & P_WEXIT || p->p_flag & P_INEXEC) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	/* Hold the process across the unlocked allocations below. */
	_PHOLD(p);
	PROC_UNLOCK(p);

	kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);
	st = stack_create();

	/*
	 * Snapshot the thread count, then allocate the tid array with the
	 * proc lock dropped.  If threads were created meanwhile and the
	 * array is now too small, free it and try again.
	 */
	lwpidarray = NULL;
	numthreads = 0;
	PROC_LOCK(p);
repeat:
	if (numthreads < p->p_numthreads) {
		if (lwpidarray != NULL) {
			free(lwpidarray, M_TEMP);
			lwpidarray = NULL;
		}
		numthreads = p->p_numthreads;
		PROC_UNLOCK(p);
		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
		    M_WAITOK | M_ZERO);
		PROC_LOCK(p);
		goto repeat;
	}
	i = 0;

	/*
	 * XXXRW: During the below loop, execve(2) and countless other sorts
	 * of changes could have taken place.  Should we check to see if the
	 * vmspace has been replaced, or the like, in order to prevent
	 * giving a snapshot that spans, say, execve(2), with some threads
	 * before and some after?  Among other things, the credentials could
	 * have changed, in which case the right to extract debug info might
	 * no longer be assured.
*/
	/* Record the tid of every thread currently in the process. */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(i < numthreads,
		    ("sysctl_kern_proc_kstack: numthreads"));
		lwpidarray[i] = td->td_tid;
		i++;
	}
	numthreads = i;
	for (i = 0; i < numthreads; i++) {
		/* The thread may have exited since the snapshot. */
		td = thread_find(p, lwpidarray[i]);
		if (td == NULL) {
			continue;
		}
		bzero(kkstp, sizeof(*kkstp));
		(void)sbuf_new(&sb, kkstp->kkst_trace,
		    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
		thread_lock(td);
		kkstp->kkst_tid = td->td_tid;
		/*
		 * A stack can only be captured for a thread that is
		 * neither swapped out nor currently running.
		 */
		if (TD_IS_SWAPPED(td))
			kkstp->kkst_state = KKST_STATE_SWAPPED;
		else if (TD_IS_RUNNING(td))
			kkstp->kkst_state = KKST_STATE_RUNNING;
		else {
			kkstp->kkst_state = KKST_STATE_STACKOK;
			stack_save_td(st, td);
		}
		thread_unlock(td);
		/* Drop the proc lock around the copyout, then retake it. */
		PROC_UNLOCK(p);
		stack_sbuf_print(&sb, st);
		sbuf_finish(&sb);
		sbuf_delete(&sb);
		error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));
		PROC_LOCK(p);
		if (error)
			break;
	}
	_PRELE(p);
	PROC_UNLOCK(p);
	if (lwpidarray != NULL)
		free(lwpidarray, M_TEMP);
	stack_destroy(st);
	free(kkstp, M_TEMP);
	return (error);
}
#endif

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

/* Per-selector nodes, all served by sysctl_kern_proc(). */
static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static
SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
    sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD,
    sysctl_kern_proc, "Return process table, no threads");

/* Writable by anybody: a process may set its own title. */
static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
    CTLFLAG_RW | CTLFLAG_ANYBODY,
    sysctl_kern_proc_args, "Process argument list");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD,
    sysctl_kern_proc_pathname, "Process executable path");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD,
    sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)");

/* Same selectors with KERN_PROC_INC_THREAD: one record per thread. */
static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
    CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
    CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
    CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
    sid_td, CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
    CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
    CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
    CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD),
pid_td,
    CTLFLAG_RD, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
    CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");

static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD,
    sysctl_kern_proc_vmmap, "Process vm map entries");

/* Kernel stack traces are only available with STACK or DDB configured. */
#if defined(STACK) || defined(DDB)
static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD,
    sysctl_kern_proc_kstack, "Process kernel stacks");
#endif