1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
 *
 *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Kernel configuration option headers (generated by config(8)). */
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sbuf.h>
#include <sys/sysent.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/uma.h>

/*
 * DTrace SDT probes for the proc UMA-zone lifecycle hooks below
 * (proc_ctor/proc_dtor/proc_init).  Argument 0 is always the
 * struct proc being operated on.
 */
SDT_PROVIDER_DEFINE(proc);
SDT_PROBE_DEFINE(proc, kernel, ctor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, ctor, return);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, dtor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 3, "struct thread *");
SDT_PROBE_DEFINE(proc, kernel, dtor, return);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 2, "void *");
SDT_PROBE_DEFINE(proc, kernel, init, entry);
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 2, "int");
SDT_PROBE_DEFINE(proc, kernel, init, return);
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 2, "int");

MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

/* Forward declarations of file-local helpers. */
static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
    int preferthread);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
static void proc_dtor(void *mem, int size, void *arg);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);
static void pargs_free(struct pargs *pa);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct sx allproc_lock;
struct sx proctree_lock;
struct mtx ppeers_lock;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;

/* Size of a kernel thread stack, in pages; read-only via sysctl. */
int kstack_pages = KSTACK_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");

CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);

/*
 * Initialize global process hashing structures.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	/*
	 * UMA_ZONE_NOFREE keeps struct proc type-stable: the zone never
	 * returns memory, so proc_fini() should never run (it panics).
	 */
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}

/*
 * Prepare a proc for use.
 * UMA zone constructor; fires the SDT ctor probes and the
 * process_ctor eventhandler hook.
 */
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, ctor , entry, p, size, arg, flags, 0);
	EVENTHANDLER_INVOKE(process_ctor, p);
	SDT_PROBE(proc, kernel, ctor , return, p, size, arg, flags, 0);
	return (0);
}

/*
 * Reclaim a proc after use.
 * UMA zone destructor; releases per-thread OSD and any alternate
 * kstack of the (single remaining) thread before the proc is cached.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	td = FIRST_THREAD_IN_PROC(p);
	SDT_PROBE(proc, kernel, dtor, entry, p, size, arg, td, 0);
	if (td != NULL) {
#ifdef INVARIANTS
		KASSERT((p->p_numthreads == 1),
		    ("bad number of threads in exiting process"));
		KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
		/* Free all OSD associated to this thread. */
		osd_thread_exit(td);

		/* Dispose of an alternate kstack, if it exists.
		 * XXX What if there are more than one thread in the proc?
		 *     The first thread in the proc is special and not
		 *     freed, so you gotta do this here.
		 */
		if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
			vm_thread_dispose_altkstack(td);
	}
	EVENTHANDLER_INVOKE(process_dtor, p);
	if (p->p_ksi != NULL)
		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
	SDT_PROBE(proc, kernel, dtor, return, p, size, arg, 0, 0);
}

/*
 * Initialize type-stable parts of a proc (when newly created).
 * Runs once per slab allocation; sets up the embedded locks,
 * condvar, thread list and per-process scheduler area.
 */
static int
proc_init(void *mem, int size, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, init, entry, p, size, flags, 0, 0);
	/* Scheduler-private data lives immediately after the proc. */
	p->p_sched = (struct p_sched *)&p[1];
	bzero(&p->p_mtx, sizeof(struct mtx));
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	cv_init(&p->p_pwait, "ppwait");
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	EVENTHANDLER_INVOKE(process_init, p);
	p->p_stats = pstats_alloc();
	SDT_PROBE(proc, kernel, init, return, p, size, flags, 0, 0);
	return (0);
}

/*
 * UMA should ensure that this function is never called.
 * Freeing a proc structure would violate type stability.
 */
static void
proc_fini(void *mem, int size)
{
#ifdef notnow
	struct proc *p;

	p = (struct proc *)mem;
	EVENTHANDLER_INVOKE(process_fini, p);
	pstats_free(p->p_stats);
	thread_free(FIRST_THREAD_IN_PROC(p));
	mtx_destroy(&p->p_mtx);
	if (p->p_ksi != NULL)
		ksiginfo_free(p->p_ksi);
#else
	panic("proc reclaimed");
#endif
}

/*
 * Is p an inferior of the current process?
 * Walks the parent chain from p toward curproc; pid 0 terminates the
 * walk (we fell off the tree without meeting curproc).
 */
int
inferior(p)
	register struct proc *p;
{

	sx_assert(&proctree_lock, SX_LOCKED);
	for (; p != curproc; p = p->p_pptr)
		if (p->p_pid == 0)
			return (0);
	return (1);
}

/*
 * Locate a process by number; return only "live" processes -- i.e., neither
 * zombies nor newly born but incompletely initialized processes.  By not
 * returning processes in the PRS_NEW state, we allow callers to avoid
 * testing for that condition to avoid dereferencing p_ucred, et al.
 *
 * On success the process is returned locked (PROC_LOCK held).
 */
struct proc *
pfind(pid)
	register pid_t pid;
{
	register struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, PIDHASH(pid), p_hash)
		if (p->p_pid == pid) {
			if (p->p_state == PRS_NEW) {
				p = NULL;
				break;
			}
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

/*
 * Locate a process group by number.
 * The caller must hold proctree_lock.
 * On success the pgrp is returned locked (PGRP_LOCK held).
 */
struct pgrp *
pgfind(pgid)
	register pid_t pgid;
{
	register struct pgrp *pgrp;

	sx_assert(&proctree_lock, SX_LOCKED);

	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
		if (pgrp->pg_id == pgid) {
			PGRP_LOCK(pgrp);
			return (pgrp);
		}
	}
	return (NULL);
}

/*
 * Create a new process group.
 * pgid must be equal to the pid of p.
 * Begin a new session if required.
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));

	pgrp2 = pgfind(pgid);

	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		refcount_init(&sess->s_count, 1);
		sess->s_ttyvp = NULL;
		sess->s_ttyp = NULL;
		/* Inherit the login name from the old session. */
		bcopy(p->p_session->s_login, sess->s_login,
		    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		pgrp->pg_session = p->p_session;
		sess_hold(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to an existing process group
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
	    ("%s: pgrp's session %p, p->p_session %p.\n",
	    __func__,
	    pgrp->pg_session,
	    p->p_session));
	KASSERT(pgrp != p->p_pgrp,
	    ("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to a process group
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	/* Reap the old pgrp if p was its last member. */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}

/*
 * remove process from process group
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}

/*
 * delete a process group
 */
static void
pgdelete(pgrp)
	register struct pgrp *pgrp;
{
	struct session *savesess;
	struct tty *tp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	PGRP_LOCK(pgrp);
	tp = pgrp->pg_session->s_ttyp;
	LIST_REMOVE(pgrp, pg_hash);
	savesess = pgrp->pg_session;
	PGRP_UNLOCK(pgrp);

	/* Remove the reference to the pgrp before deallocating it. */
	if (tp != NULL) {
		tty_lock(tp);
		tty_rel_pgrp(tp, pgrp);
	}

	mtx_destroy(&pgrp->pg_mtx);
	free(pgrp, M_PGRP);
	sess_release(savesess);
}

/*
 * Adjust pg_jobc of a single pgrp up or down; when it drops to zero
 * the group has become orphaned and orphanpg() is invoked.
 */
static void
pgadjustjobc(pgrp, entering)
	struct pgrp *pgrp;
	int entering;
{

	PGRP_LOCK(pgrp);
	if (entering)
		pgrp->pg_jobc++;
	else {
		--pgrp->pg_jobc;
		if (pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}
	PGRP_UNLOCK(pgrp);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		hispgrp = p->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		PROC_LOCK(p);
		if (p->p_state == PRS_ZOMBIE) {
			PROC_UNLOCK(p);
			continue;
		}
		PROC_UNLOCK(p);
		pgadjustjobc(hispgrp, entering);
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang-up all process in that group.
 */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	register struct proc *p;

	PGRP_LOCK_ASSERT(pg, MA_OWNED);

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		PROC_LOCK(p);
		if (P_SHOULDSTOP(p)) {
			PROC_UNLOCK(p);
			/* At least one member is stopped: signal them all. */
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				PROC_LOCK(p);
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
				PROC_UNLOCK(p);
			}
			return;
		}
		PROC_UNLOCK(p);
	}
}

/* Acquire a reference on a session. */
void
sess_hold(struct session *s)
{

	refcount_acquire(&s->s_count);
}

/*
 * Drop a session reference; on the last release detach the session
 * from its tty and free it.
 */
void
sess_release(struct session *s)
{

	if (refcount_release(&s->s_count)) {
		if (s->s_ttyp != NULL) {
			tty_lock(s->s_ttyp);
			tty_rel_sess(s->s_ttyp, s);
		}
		mtx_destroy(&s->s_mtx);
		free(s, M_SESSION);
	}
}

/* NOTE(review): opt_ddb.h and ddb/ddb.h are also included at the top of
 * the file; this re-include is redundant but harmless (header guards). */
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/* DDB command: dump every non-empty pgrp hash chain and its members. */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Clear kinfo_proc and fill in any information that is common
 * to all threads in the process.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td0;
	struct tty *tp;
	struct session *sp;
	struct ucred *cred;
	struct sigacts *ps;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	kp->ki_addr =/* p->p_addr; */0; /* XXX */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	mtx_lock(&ktrace_mtx);
	kp->ki_traceflag = p->p_traceflag;
	mtx_unlock(&ktrace_mtx);
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	kp->ki_flag = p->p_flag;
	cred = p->p_ucred;
	if (cred) {
		kp->ki_uid = cred->cr_uid;
		kp->ki_ruid = cred->cr_ruid;
		kp->ki_svuid = cred->cr_svuid;
		/* XXX bde doesn't like KI_NGROUPS */
		kp->ki_ngroups = min(cred->cr_ngroups, KI_NGROUPS);
		bcopy(cred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = cred->cr_rgid;
		kp->ki_svgid = cred->cr_svgid;
		/* If jailed(cred), emulate the old P_JAILED flag. */
		if (jailed(cred)) {
			kp->ki_flag |= P_JAILED;
			/* If inside a jail, use 0 as a jail ID. */
			if (!jailed(curthread->td_ucred))
				kp->ki_jid = cred->cr_prison->pr_id;
		}
	}
	ps = p->p_sigacts;
	if (ps) {
		mtx_lock(&ps->ps_mtx);
		kp->ki_sigignore = ps->ps_sigignore;
		kp->ki_sigcatch = ps->ps_sigcatch;
		mtx_unlock(&ps->ps_mtx);
	}
	PROC_SLOCK(p);
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		/* Count swapped-in kernel stacks toward the RSS. */
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
			if (td0->td_altkstack_obj != NULL)
				kp->ki_rssize += td0->td_altkstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	} else if (p->p_state == PRS_ZOMBIE)
		kp->ki_stat = SZOMB;
	if (kp->ki_flag & P_INMEM)
		kp->ki_sflag = PS_INMEM;
	else
		kp->ki_sflag = 0;
	/* Calculate legacy swtime as seconds since 'swtick'. */
	kp->ki_swtime = (ticks - p->p_swtick) / hz;
	kp->ki_pid = p->p_pid;
	kp->ki_nice = p->p_nice;
	rufetch(p, &kp->ki_rusage);
	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
	PROC_SUNLOCK(p);
	if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
		kp->ki_start = p->p_stats->p_start;
		timevaladd(&kp->ki_start, &boottime);
		PROC_SLOCK(p);
		calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
		PROC_SUNLOCK(p);
		calccru(p, &kp->ki_childutime, &kp->ki_childstime);

		/* Some callers want child-times in a single value */
		kp->ki_childtime = kp->ki_childstime;
		timevaladd(&kp->ki_childtime, &kp->ki_childutime);
	}
	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			/* XXX proctree_lock */
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = tty_udev(tp);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NODEV;
	if (p->p_comm[0] != '\0')
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
	    p->p_sysent->sv_name[0] != '\0')
		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
	kp->ki_siglist = p->p_siglist;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}

/*
 * Fill in information that is thread specific.  Must be called with p_slock
 * locked.
 * If 'preferthread' is set, overwrite certain process-related
 * fields that are maintained for both threads and processes.
 */
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	thread_lock(td);
	if (td->td_wmesg != NULL)
		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
	else
		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
	if (td->td_name[0] != '\0')
		strlcpy(kp->ki_ocomm, td->td_name, sizeof(kp->ki_ocomm));
	if (TD_ON_LOCK(td)) {
		kp->ki_kiflag |= KI_LOCKBLOCK;
		strlcpy(kp->ki_lockname, td->td_lockname,
		    sizeof(kp->ki_lockname));
	} else {
		kp->ki_kiflag &= ~KI_LOCKBLOCK;
		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
	}

	/* Derive the legacy one-letter run state for this thread. */
	if (p->p_state == PRS_NORMAL) { /* approximate. */
		if (TD_ON_RUNQ(td) ||
		    TD_CAN_RUN(td) ||
		    TD_IS_RUNNING(td)) {
			kp->ki_stat = SRUN;
		} else if (P_SHOULDSTOP(p)) {
			kp->ki_stat = SSTOP;
		} else if (TD_IS_SLEEPING(td)) {
			kp->ki_stat = SSLEEP;
		} else if (TD_ON_LOCK(td)) {
			kp->ki_stat = SLOCK;
		} else {
			kp->ki_stat = SWAIT;
		}
	} else if (p->p_state == PRS_ZOMBIE) {
		kp->ki_stat = SZOMB;
	} else {
		kp->ki_stat = SIDL;
	}

	/* Things in the thread */
	kp->ki_wchan = td->td_wchan;
	kp->ki_pri.pri_level = td->td_priority;
	kp->ki_pri.pri_native = td->td_base_pri;
	kp->ki_lastcpu = td->td_lastcpu;
	kp->ki_oncpu = td->td_oncpu;
	kp->ki_tdflags = td->td_flags;
	kp->ki_tid = td->td_tid;
	kp->ki_numthreads = p->p_numthreads;
	kp->ki_pcb = td->td_pcb;
	kp->ki_kstack = (void *)td->td_kstack;
	kp->ki_pctcpu = sched_pctcpu(td);
	kp->ki_estcpu = td->td_estcpu;
	kp->ki_slptime = (ticks - td->td_slptick) / hz;
	kp->ki_pri.pri_class = td->td_pri_class;
	kp->ki_pri.pri_user = td->td_user_pri;

	/* Per-thread runtime overrides the process total if requested. */
	if (preferthread)
		kp->ki_runtime = cputick2usec(td->td_runtime);

	/* We can't get this anymore but ps etc never used it anyway. */
	kp->ki_rqindex = 0;

	SIGSETOR(kp->ki_siglist, td->td_siglist);
	kp->ki_sigmask = td->td_sigmask;
	thread_unlock(td);
}

/*
 * Fill in a kinfo_proc structure for the specified process.
 * Must be called with the target process locked.
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{

	fill_kinfo_proc_only(p, kp);
	if (FIRST_THREAD_IN_PROC(p) != NULL)
		fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
}

/* Allocate a zeroed pstats structure (may sleep). */
struct pstats *
pstats_alloc(void)
{

	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
}

/*
 * Copy parts of p_stats; zero the rest of p_stats (statistics).
 */
void
pstats_fork(struct pstats *src, struct pstats *dst)
{

	bzero(&dst->pstat_startzero,
	    __rangeof(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
	    __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
}

void
pstats_free(struct pstats *ps)
{

	free(ps, M_SUBPROC);
}

/*
 * Locate a zombie process by number
 * On success the process is returned locked (PROC_LOCK held).
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid) {
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

/*
 * Local flag bits for sysctl_out_proc(); ZOMBMASK overlaps the
 * 'doingzomb' loop index OR-ed in by sysctl_kern_proc().
 */
#define KERN_PROC_ZOMBMASK	0x3
#define KERN_PROC_NOTHREADS	0x4

/*
 * Must be called with the process locked and will return with it unlocked.
952 */ 953 static int 954 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags) 955 { 956 struct thread *td; 957 struct kinfo_proc kinfo_proc; 958 int error = 0; 959 struct proc *np; 960 pid_t pid = p->p_pid; 961 962 PROC_LOCK_ASSERT(p, MA_OWNED); 963 964 fill_kinfo_proc_only(p, &kinfo_proc); 965 if (flags & KERN_PROC_NOTHREADS) { 966 if (FIRST_THREAD_IN_PROC(p) != NULL) 967 fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), 968 &kinfo_proc, 0); 969 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc, 970 sizeof(kinfo_proc)); 971 } else { 972 if (FIRST_THREAD_IN_PROC(p) != NULL) 973 FOREACH_THREAD_IN_PROC(p, td) { 974 fill_kinfo_thread(td, &kinfo_proc, 1); 975 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc, 976 sizeof(kinfo_proc)); 977 if (error) 978 break; 979 } 980 else 981 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc, 982 sizeof(kinfo_proc)); 983 } 984 PROC_UNLOCK(p); 985 if (error) 986 return (error); 987 if (flags & KERN_PROC_ZOMBMASK) 988 np = zpfind(pid); 989 else { 990 if (pid == 0) 991 return (0); 992 np = pfind(pid); 993 } 994 if (np == NULL) 995 return (ESRCH); 996 if (np != p) { 997 PROC_UNLOCK(np); 998 return (ESRCH); 999 } 1000 PROC_UNLOCK(np); 1001 return (0); 1002 } 1003 1004 static int 1005 sysctl_kern_proc(SYSCTL_HANDLER_ARGS) 1006 { 1007 int *name = (int*) arg1; 1008 u_int namelen = arg2; 1009 struct proc *p; 1010 int flags, doingzomb, oid_number; 1011 int error = 0; 1012 1013 oid_number = oidp->oid_number; 1014 if (oid_number != KERN_PROC_ALL && 1015 (oid_number & KERN_PROC_INC_THREAD) == 0) 1016 flags = KERN_PROC_NOTHREADS; 1017 else { 1018 flags = 0; 1019 oid_number &= ~KERN_PROC_INC_THREAD; 1020 } 1021 if (oid_number == KERN_PROC_PID) { 1022 if (namelen != 1) 1023 return (EINVAL); 1024 error = sysctl_wire_old_buffer(req, 0); 1025 if (error) 1026 return (error); 1027 p = pfind((pid_t)name[0]); 1028 if (!p) 1029 return (ESRCH); 1030 if ((error = p_cansee(curthread, p))) { 1031 PROC_UNLOCK(p); 1032 return (error); 1033 } 1034 error = 
sysctl_out_proc(p, req, flags); 1035 return (error); 1036 } 1037 1038 switch (oid_number) { 1039 case KERN_PROC_ALL: 1040 if (namelen != 0) 1041 return (EINVAL); 1042 break; 1043 case KERN_PROC_PROC: 1044 if (namelen != 0 && namelen != 1) 1045 return (EINVAL); 1046 break; 1047 default: 1048 if (namelen != 1) 1049 return (EINVAL); 1050 break; 1051 } 1052 1053 if (!req->oldptr) { 1054 /* overestimate by 5 procs */ 1055 error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5); 1056 if (error) 1057 return (error); 1058 } 1059 error = sysctl_wire_old_buffer(req, 0); 1060 if (error != 0) 1061 return (error); 1062 sx_slock(&allproc_lock); 1063 for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) { 1064 if (!doingzomb) 1065 p = LIST_FIRST(&allproc); 1066 else 1067 p = LIST_FIRST(&zombproc); 1068 for (; p != 0; p = LIST_NEXT(p, p_list)) { 1069 /* 1070 * Skip embryonic processes. 1071 */ 1072 PROC_SLOCK(p); 1073 if (p->p_state == PRS_NEW) { 1074 PROC_SUNLOCK(p); 1075 continue; 1076 } 1077 PROC_SUNLOCK(p); 1078 PROC_LOCK(p); 1079 KASSERT(p->p_ucred != NULL, 1080 ("process credential is NULL for non-NEW proc")); 1081 /* 1082 * Show a user only appropriate processes. 1083 */ 1084 if (p_cansee(curthread, p)) { 1085 PROC_UNLOCK(p); 1086 continue; 1087 } 1088 /* 1089 * TODO - make more efficient (see notes below). 1090 * do by session. 
1091 */ 1092 switch (oid_number) { 1093 1094 case KERN_PROC_GID: 1095 if (p->p_ucred->cr_gid != (gid_t)name[0]) { 1096 PROC_UNLOCK(p); 1097 continue; 1098 } 1099 break; 1100 1101 case KERN_PROC_PGRP: 1102 /* could do this by traversing pgrp */ 1103 if (p->p_pgrp == NULL || 1104 p->p_pgrp->pg_id != (pid_t)name[0]) { 1105 PROC_UNLOCK(p); 1106 continue; 1107 } 1108 break; 1109 1110 case KERN_PROC_RGID: 1111 if (p->p_ucred->cr_rgid != (gid_t)name[0]) { 1112 PROC_UNLOCK(p); 1113 continue; 1114 } 1115 break; 1116 1117 case KERN_PROC_SESSION: 1118 if (p->p_session == NULL || 1119 p->p_session->s_sid != (pid_t)name[0]) { 1120 PROC_UNLOCK(p); 1121 continue; 1122 } 1123 break; 1124 1125 case KERN_PROC_TTY: 1126 if ((p->p_flag & P_CONTROLT) == 0 || 1127 p->p_session == NULL) { 1128 PROC_UNLOCK(p); 1129 continue; 1130 } 1131 /* XXX proctree_lock */ 1132 SESS_LOCK(p->p_session); 1133 if (p->p_session->s_ttyp == NULL || 1134 tty_udev(p->p_session->s_ttyp) != 1135 (dev_t)name[0]) { 1136 SESS_UNLOCK(p->p_session); 1137 PROC_UNLOCK(p); 1138 continue; 1139 } 1140 SESS_UNLOCK(p->p_session); 1141 break; 1142 1143 case KERN_PROC_UID: 1144 if (p->p_ucred->cr_uid != (uid_t)name[0]) { 1145 PROC_UNLOCK(p); 1146 continue; 1147 } 1148 break; 1149 1150 case KERN_PROC_RUID: 1151 if (p->p_ucred->cr_ruid != (uid_t)name[0]) { 1152 PROC_UNLOCK(p); 1153 continue; 1154 } 1155 break; 1156 1157 case KERN_PROC_PROC: 1158 break; 1159 1160 default: 1161 break; 1162 1163 } 1164 1165 error = sysctl_out_proc(p, req, flags | doingzomb); 1166 if (error) { 1167 sx_sunlock(&allproc_lock); 1168 return (error); 1169 } 1170 } 1171 } 1172 sx_sunlock(&allproc_lock); 1173 return (0); 1174 } 1175 1176 struct pargs * 1177 pargs_alloc(int len) 1178 { 1179 struct pargs *pa; 1180 1181 pa = malloc(sizeof(struct pargs) + len, M_PARGS, 1182 M_WAITOK); 1183 refcount_init(&pa->ar_ref, 1); 1184 pa->ar_length = len; 1185 return (pa); 1186 } 1187 1188 static void 1189 pargs_free(struct pargs *pa) 1190 { 1191 1192 free(pa, 
M_PARGS);
}

/*
 * Bump the reference count on a pargs structure; a NULL argument is a
 * no-op.
 */
void
pargs_hold(struct pargs *pa)
{

	if (pa == NULL)
		return;
	refcount_acquire(&pa->ar_ref);
}

/*
 * Drop one reference on a pargs structure, freeing it when the last
 * reference goes away; a NULL argument is a no-op.
 */
void
pargs_drop(struct pargs *pa)
{

	if (pa == NULL)
		return;
	if (refcount_release(&pa->ar_ref))
		pargs_free(pa);
}

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own
 * "process title" to a string of its own choice.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct pargs *newpa, *pa;
	struct proc *p;
	int error = 0;

	/* The only name component is the target pid. */
	if (namelen != 1)
		return (EINVAL);

	/* pfind() returns the process locked on success. */
	p = pfind((pid_t)name[0]);
	if (!p)
		return (ESRCH);

	if ((error = p_cansee(curthread, p)) != 0) {
		PROC_UNLOCK(p);
		return (error);
	}

	/* Only the process itself may replace its own argument block. */
	if (req->newptr && curproc != p) {
		PROC_UNLOCK(p);
		return (EPERM);
	}

	/*
	 * Take a reference on the current argument block so it can be
	 * copied out without the process lock held.
	 */
	pa = p->p_args;
	pargs_hold(pa);
	PROC_UNLOCK(p);
	if (req->oldptr != NULL && pa != NULL)
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
	pargs_drop(pa);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Refuse argument blocks larger than the cache limit. */
	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
		return (ENOMEM);
	newpa = pargs_alloc(req->newlen);
	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
	if (error != 0) {
		pargs_free(newpa);
		return (error);
	}
	/*
	 * NOTE(review): "p" was unlocked above; re-locking here looks
	 * safe only because this path requires curproc == p, so the
	 * process cannot have exited -- confirm.
	 */
	PROC_LOCK(p);
	pa = p->p_args;
	p->p_args = newpa;
	PROC_UNLOCK(p);
	pargs_drop(pa);
	return (0);
}

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	struct vnode *vp;
	char *retbuf, *freebuf;
	int error, vfslocked;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = req->td->td_proc;
	} else {
		/* pfind() returns the process locked on success. */
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
		if ((error = p_cansee(curthread, p)) != 0) {
			PROC_UNLOCK(p);
			return (error);
		}
	}

	/* A process with no text vnode yields an empty reply, not an error. */
	vp = p->p_textvp;
	if (vp == NULL) {
		if (*pidp != -1)
			PROC_UNLOCK(p);
		return (0);
	}
	/* Hold the vnode across the path lookup; drop the proc lock first. */
	vref(vp);
	if (*pidp != -1)
		PROC_UNLOCK(p);
	error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (error)
		return (error);
	/* Copy out the path including its NUL terminator. */
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	free(freebuf, M_TEMP);
	return (error);
}

/*
 * Return the name of the syscall vector (ABI) used by the given process.
 */
static int
sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	char *sv_name;
	int *name;
	int namelen;
	int error;

	namelen = arg2;
	if (namelen != 1)
		return (EINVAL);

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	sv_name = p->p_sysent->sv_name;
	PROC_UNLOCK(p);
	return (sysctl_handle_string(oidp, sv_name, 0, req));
}

/* Catch mismatches between the declared ABI size and the real struct. */
#ifdef KINFO_OVMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
#endif

#ifdef COMPAT_FREEBSD7
/*
 * Return the FreeBSD 7 format vm map entries of the given process
 * (KERN_PROC_OVMMAP).
 */
static int
sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_ovmentry *kve;
	struct vattr va;
	struct ucred
*cred; 1358 int error, *name; 1359 struct vnode *vp; 1360 struct proc *p; 1361 vm_map_t map; 1362 struct vmspace *vm; 1363 1364 name = (int *)arg1; 1365 if ((p = pfind((pid_t)name[0])) == NULL) 1366 return (ESRCH); 1367 if (p->p_flag & P_WEXIT) { 1368 PROC_UNLOCK(p); 1369 return (ESRCH); 1370 } 1371 if ((error = p_candebug(curthread, p))) { 1372 PROC_UNLOCK(p); 1373 return (error); 1374 } 1375 _PHOLD(p); 1376 PROC_UNLOCK(p); 1377 vm = vmspace_acquire_ref(p); 1378 if (vm == NULL) { 1379 PRELE(p); 1380 return (ESRCH); 1381 } 1382 kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK); 1383 1384 map = &p->p_vmspace->vm_map; /* XXXRW: More locking required? */ 1385 vm_map_lock_read(map); 1386 for (entry = map->header.next; entry != &map->header; 1387 entry = entry->next) { 1388 vm_object_t obj, tobj, lobj; 1389 vm_offset_t addr; 1390 int vfslocked; 1391 1392 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 1393 continue; 1394 1395 bzero(kve, sizeof(*kve)); 1396 kve->kve_structsize = sizeof(*kve); 1397 1398 kve->kve_private_resident = 0; 1399 obj = entry->object.vm_object; 1400 if (obj != NULL) { 1401 VM_OBJECT_LOCK(obj); 1402 if (obj->shadow_count == 1) 1403 kve->kve_private_resident = 1404 obj->resident_page_count; 1405 } 1406 kve->kve_resident = 0; 1407 addr = entry->start; 1408 while (addr < entry->end) { 1409 if (pmap_extract(map->pmap, addr)) 1410 kve->kve_resident++; 1411 addr += PAGE_SIZE; 1412 } 1413 1414 for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) { 1415 if (tobj != obj) 1416 VM_OBJECT_LOCK(tobj); 1417 if (lobj != obj) 1418 VM_OBJECT_UNLOCK(lobj); 1419 lobj = tobj; 1420 } 1421 1422 kve->kve_start = (void*)entry->start; 1423 kve->kve_end = (void*)entry->end; 1424 kve->kve_offset = (off_t)entry->offset; 1425 1426 if (entry->protection & VM_PROT_READ) 1427 kve->kve_protection |= KVME_PROT_READ; 1428 if (entry->protection & VM_PROT_WRITE) 1429 kve->kve_protection |= KVME_PROT_WRITE; 1430 if (entry->protection & VM_PROT_EXECUTE) 1431 kve->kve_protection |= 
KVME_PROT_EXEC; 1432 1433 if (entry->eflags & MAP_ENTRY_COW) 1434 kve->kve_flags |= KVME_FLAG_COW; 1435 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) 1436 kve->kve_flags |= KVME_FLAG_NEEDS_COPY; 1437 1438 last_timestamp = map->timestamp; 1439 vm_map_unlock_read(map); 1440 1441 kve->kve_fileid = 0; 1442 kve->kve_fsid = 0; 1443 freepath = NULL; 1444 fullpath = ""; 1445 if (lobj) { 1446 vp = NULL; 1447 switch (lobj->type) { 1448 case OBJT_DEFAULT: 1449 kve->kve_type = KVME_TYPE_DEFAULT; 1450 break; 1451 case OBJT_VNODE: 1452 kve->kve_type = KVME_TYPE_VNODE; 1453 vp = lobj->handle; 1454 vref(vp); 1455 break; 1456 case OBJT_SWAP: 1457 kve->kve_type = KVME_TYPE_SWAP; 1458 break; 1459 case OBJT_DEVICE: 1460 kve->kve_type = KVME_TYPE_DEVICE; 1461 break; 1462 case OBJT_PHYS: 1463 kve->kve_type = KVME_TYPE_PHYS; 1464 break; 1465 case OBJT_DEAD: 1466 kve->kve_type = KVME_TYPE_DEAD; 1467 break; 1468 default: 1469 kve->kve_type = KVME_TYPE_UNKNOWN; 1470 break; 1471 } 1472 if (lobj != obj) 1473 VM_OBJECT_UNLOCK(lobj); 1474 1475 kve->kve_ref_count = obj->ref_count; 1476 kve->kve_shadow_count = obj->shadow_count; 1477 VM_OBJECT_UNLOCK(obj); 1478 if (vp != NULL) { 1479 vn_fullpath(curthread, vp, &fullpath, 1480 &freepath); 1481 cred = curthread->td_ucred; 1482 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 1483 vn_lock(vp, LK_SHARED | LK_RETRY); 1484 if (VOP_GETATTR(vp, &va, cred) == 0) { 1485 kve->kve_fileid = va.va_fileid; 1486 kve->kve_fsid = va.va_fsid; 1487 } 1488 vput(vp); 1489 VFS_UNLOCK_GIANT(vfslocked); 1490 } 1491 } else { 1492 kve->kve_type = KVME_TYPE_NONE; 1493 kve->kve_ref_count = 0; 1494 kve->kve_shadow_count = 0; 1495 } 1496 1497 strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path)); 1498 if (freepath != NULL) 1499 free(freepath, M_TEMP); 1500 1501 error = SYSCTL_OUT(req, kve, sizeof(*kve)); 1502 vm_map_lock_read(map); 1503 if (error) 1504 break; 1505 if (last_timestamp != map->timestamp) { 1506 vm_map_lookup_entry(map, addr - 1, &tmp_entry); 1507 entry = tmp_entry; 1508 } 
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}
#endif	/* COMPAT_FREEBSD7 */

/* Catch mismatches between the declared ABI size and the real struct. */
#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

/*
 * Return the vm map entries of the given process (KERN_PROC_VMMAP),
 * packing each record down to its used size.
 */
static int
sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_vmentry *kve;
	struct vattr va;
	struct ucred *cred;
	int error, *name;
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;

	name = (int *)arg1;
	/* pfind() returns the process locked on success. */
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	/* Keep the process from exiting while it is unlocked below. */
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &vm->vm_map;	/* XXXRW: More locking required?
 */
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		int vfslocked;

		/* Submaps do not describe user mappings; skip them. */
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));

		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			/* Pages private to this mapping (single shadow). */
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		/* Count resident pages one page at a time. */
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(map->pmap, addr))
				kve->kve_resident++;
			addr += PAGE_SIZE;
		}

		/*
		 * Walk to the bottom of the shadow chain; "lobj" ends up
		 * as the locked backing object that names this mapping.
		 */
		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		kve->kve_start = entry->start;
		kve->kve_end = entry->end;
		kve->kve_offset = entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;

		/*
		 * Drop the map lock across the copyout, remembering the
		 * timestamp so a concurrent modification is detected.
		 */
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		kve->kve_fileid = 0;
		kve->kve_fsid = 0;
		freepath = NULL;
		fullpath = "";
		if (lobj) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type =
KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_UNLOCK(obj);
			/* Resolve the vnode to a path and fetch its ids. */
			if (vp != NULL) {
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				cred = curthread->td_ucred;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY);
				if (VOP_GETATTR(vp, &va, cred) == 0) {
					kve->kve_fileid = va.va_fileid;
					kve->kve_fsid = va.va_fsid;
				}
				vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) +
		    strlen(kve->kve_path) + 1;
		kve->kve_structsize = roundup(kve->kve_structsize,
		    sizeof(uint64_t));
		error = SYSCTL_OUT(req, kve, kve->kve_structsize);
		vm_map_lock_read(map);
		if (error)
			break;
		/* If the map changed while unlocked, re-lookup our place. */
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}

#if defined(STACK) || defined(DDB)
/*
 * Return kernel stack traces for all threads of the given process
 * (KERN_PROC_KSTACK).
 */
static int
sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_kstack *kkstp;
	int error, i, *name, numthreads;
	lwpid_t *lwpidarray;
	struct thread *td;
	struct stack *st;
	struct sbuf sb;
	struct proc *p;

	name = (int *)arg1;
	/* pfind() returns the process locked on success. */
	if ((p = pfind((pid_t)name[0])) ==
NULL)
		return (ESRCH);
	/* XXXRW: Not clear ESRCH is the right error during proc execve(). */
	if (p->p_flag & P_WEXIT || p->p_flag & P_INEXEC) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	/* Keep the process from exiting while it is unlocked below. */
	_PHOLD(p);
	PROC_UNLOCK(p);

	kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);
	st = stack_create();

	lwpidarray = NULL;
	numthreads = 0;
	PROC_LOCK(p);
repeat:
	/*
	 * Size the tid array without holding the proc lock over the
	 * allocation; retry if more threads appeared in the meantime.
	 */
	if (numthreads < p->p_numthreads) {
		if (lwpidarray != NULL) {
			free(lwpidarray, M_TEMP);
			lwpidarray = NULL;
		}
		numthreads = p->p_numthreads;
		PROC_UNLOCK(p);
		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
		    M_WAITOK | M_ZERO);
		PROC_LOCK(p);
		goto repeat;
	}
	i = 0;

	/*
	 * XXXRW: During the below loop, execve(2) and countless other sorts
	 * of changes could have taken place.  Should we check to see if the
	 * vmspace has been replaced, or the like, in order to prevent
	 * giving a snapshot that spans, say, execve(2), with some threads
	 * before and some after?  Among other things, the credentials could
	 * have changed, in which case the right to extract debug info might
	 * no longer be assured.
	 */
	/* Snapshot the tids so threads can be revalidated one at a time. */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(i < numthreads,
		    ("sysctl_kern_proc_kstack: numthreads"));
		lwpidarray[i] = td->td_tid;
		i++;
	}
	numthreads = i;
	for (i = 0; i < numthreads; i++) {
		/* The thread may have exited since the snapshot. */
		td = thread_find(p, lwpidarray[i]);
		if (td == NULL) {
			continue;
		}
		bzero(kkstp, sizeof(*kkstp));
		(void)sbuf_new(&sb, kkstp->kkst_trace,
		    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
		thread_lock(td);
		kkstp->kkst_tid = td->td_tid;
		/*
		 * A stack can only be saved for a thread that is neither
		 * swapped out nor currently running.
		 */
		if (TD_IS_SWAPPED(td))
			kkstp->kkst_state = KKST_STATE_SWAPPED;
		else if (TD_IS_RUNNING(td))
			kkstp->kkst_state = KKST_STATE_RUNNING;
		else {
			kkstp->kkst_state = KKST_STATE_STACKOK;
			stack_save_td(st, td);
		}
		thread_unlock(td);
		/* Drop the proc lock across the copyout. */
		PROC_UNLOCK(p);
		stack_sbuf_print(&sb, st);
		sbuf_finish(&sb);
		sbuf_delete(&sb);
		error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));
		PROC_LOCK(p);
		if (error)
			break;
	}
	_PRELE(p);
	PROC_UNLOCK(p);
	if (lwpidarray != NULL)
		free(lwpidarray, M_TEMP);
	stack_destroy(st);
	free(kkstp, M_TEMP);
	return (error);
}
#endif

/* Root of the kern.proc sysctl tree. */
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT|
	CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc",
	"Return entire process table");

/*
 * Per-filter nodes; each selects processes by the criterion encoded in
 * its oid number and is handled by sysctl_kern_proc().
 */
static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Return process table, no threads");

static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,
	sysctl_kern_proc_args, "Process argument list");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name,
	"Process syscall vector name (ABI type)");

/*
 * The *_td variants mirror the nodes above but include one record per
 * thread (KERN_PROC_INC_THREAD).
 */
static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
	sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc,
	"Return process table, no threads");

#ifdef COMPAT_FREEBSD7
static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries");
#endif

static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries");

#if defined(STACK) || defined(DDB)
static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks");
#endif