/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
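
/*
 * A minimal usage sketch (illustration only, not part of this file's
 * interface): a ps(1)-style consumer of this module typically looks
 * something like
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	int cnt;
 *	struct kinfo_proc *kp = kvm_getprocs(kd, KERN_PROC_PROC, 0, &cnt);
 *	for (int i = 0; kp != NULL && i < cnt; i++)
 *		printf("%5d %s\n", kp[i].ki_pid, kp[i].ki_comm);
 *	kvm_close(kd);
 *
 * Error handling is omitted here; see kvm_open(3) and kvm_getprocs(3)
 * for the full contract.
 */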

#include <sys/param.h>
#define	_WANT_UCRED	/* make ucred.h give us 'struct ucred' */
#include <sys/ucred.h>
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_task.h>
#include <sys/cpuset.h>
#include <sys/user.h>
#include <sys/proc.h>
#define	_WANT_PRISON	/* make jail.h give us 'struct prison' */
#include <sys/jail.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/conf.h>
#define	_WANT_KW_EXITCODE
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <sys/sysctl.h>

#include <errno.h>
#include <limits.h>
#include <memory.h>
#include <paths.h>

#include "kvm_private.h"

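/*
 * Convenience wrapper around kvm_read(): KREAD() evaluates to nonzero
 * ("true, the read failed") when fewer than sizeof(*obj) bytes could be
 * copied from the kernel address 'addr' into 'obj'.
 */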
#define	KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))

static int ticks;
static int hz;
static uint64_t cpu_tick_frequency;

/*
 * From sys/kern/kern_tc.c.  Depends on cpu_tick_frequency, which is
 * read/initialized before this function is ever called.
 */
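/*
 * Note on the arithmetic below: the conversion is split into whole and
 * fractional parts so the intermediate multiplication by 1000000 does not
 * overflow 64 bits for realistic tick counts.  For example, with
 * cpu_tick_frequency = 2000000000 (a 2 GHz tick source) and
 * tick = 5000000000, this computes 2 * 1000000 +
 * (1000000000 * 1000000) / 2000000000 = 2500000 microseconds, i.e. 2.5 s.
 */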
static uint64_t
cputick2usec(uint64_t tick)
{
	if (cpu_tick_frequency == 0)
		return (0);
	return ((tick / cpu_tick_frequency) * 1000000ULL) +
	    ((tick % cpu_tick_frequency) * 1000000ULL) / cpu_tick_frequency;
}

/*
 * Read procs from the memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
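/*
 * 'what' selects a KERN_PROC_* filter (optionally with KERN_PROC_INC_THREAD
 * set) and 'arg' is the pid, uid, gid, pgid, sid or tty device it is matched
 * against.  The return value is the number of kinfo_proc entries written,
 * or -1 on error.
 */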
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp, int maxcnt)
{
	int cnt = 0;
	struct kinfo_proc kinfo_proc, *kp;
	struct pgrp pgrp;
	struct session sess;
	struct cdev t_cdev;
	struct tty tty;
	struct vmspace vmspace;
	struct sigacts sigacts;
#if 0
	struct pstats pstats;
#endif
	struct ucred ucred;
	struct prison pr;
	struct thread mtd;
	struct proc proc;
	struct proc pproc;
	struct sysentvec sysent;
	char svname[KI_EMULNAMELEN];
	struct thread *td = NULL;
	bool first_thread;

	kp = &kinfo_proc;
	kp->ki_structsize = sizeof(kinfo_proc);
	/*
	 * Loop on the processes, then threads within the process if requested.
	 */
	if (what == KERN_PROC_ALL)
		what |= KERN_PROC_INC_THREAD;
	for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) {
		memset(kp, 0, sizeof *kp);
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (proc.p_state == PRS_NEW)
			continue;
		if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
			kp->ki_ruid = ucred.cr_ruid;
			kp->ki_svuid = ucred.cr_svuid;
			kp->ki_rgid = ucred.cr_rgid;
			kp->ki_svgid = ucred.cr_svgid;
			kp->ki_cr_flags = ucred.cr_flags;
			if (ucred.cr_ngroups > KI_NGROUPS) {
				kp->ki_ngroups = KI_NGROUPS;
				kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
			} else
				kp->ki_ngroups = ucred.cr_ngroups;
			kvm_read(kd, (u_long)ucred.cr_groups, kp->ki_groups,
			    kp->ki_ngroups * sizeof(gid_t));
			kp->ki_uid = ucred.cr_uid;
			if (ucred.cr_prison != NULL) {
				if (KREAD(kd, (u_long)ucred.cr_prison, &pr)) {
					_kvm_err(kd, kd->program,
					    "can't read prison at %p",
					    ucred.cr_prison);
					return (-1);
				}
				kp->ki_jid = pr.pr_id;
			}
		}

		switch (what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_GID:
			if (kp->ki_groups[0] != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_RGID:
			if (kp->ki_rgid != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (kp->ki_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (kp->ki_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather kinfo_proc
		 */
		kp->ki_paddr = p;
		kp->ki_addr = 0;	/* XXX uarea */
		/* kp->ki_kstack = proc.p_thread.td_kstack; XXXKSE */
		kp->ki_args = proc.p_args;
		kp->ki_numthreads = proc.p_numthreads;
		kp->ki_tracep = NULL;	/* XXXKIB do not expose ktr_io_params */
		kp->ki_textvp = proc.p_textvp;
		kp->ki_fd = proc.p_fd;
		kp->ki_pd = proc.p_pd;
		kp->ki_vmspace = proc.p_vmspace;
		if (proc.p_sigacts != NULL) {
			if (KREAD(kd, (u_long)proc.p_sigacts, &sigacts)) {
				_kvm_err(kd, kd->program,
				    "can't read sigacts at %p", proc.p_sigacts);
				return (-1);
			}
			kp->ki_sigignore = sigacts.ps_sigignore;
			kp->ki_sigcatch = sigacts.ps_sigcatch;
		}
#if 0
		if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) {
			if (KREAD(kd, (u_long)proc.p_stats, &pstats)) {
				_kvm_err(kd, kd->program,
				    "can't read stats at %x", proc.p_stats);
				return (-1);
			}
			kp->ki_start = pstats.p_start;

			/*
			 * XXX: The times here are probably zero and need
			 * to be calculated from the raw data in p_rux and
			 * p_crux.
			 */
			kp->ki_rusage = pstats.p_ru;
			kp->ki_childstime = pstats.p_cru.ru_stime;
			kp->ki_childutime = pstats.p_cru.ru_utime;
			/* Some callers want child-times in a single value */
			timeradd(&kp->ki_childstime, &kp->ki_childutime,
			    &kp->ki_childtime);
		}
#endif
		if (proc.p_oppid)
			kp->ki_ppid = proc.p_oppid;
		else if (proc.p_pptr) {
			if (KREAD(kd, (u_long)proc.p_pptr, &pproc)) {
				_kvm_err(kd, kd->program,
				    "can't read pproc at %p", proc.p_pptr);
				return (-1);
			}
			kp->ki_ppid = pproc.p_pid;
		} else
			kp->ki_ppid = 0;
		if (proc.p_pgrp == NULL)
			goto nopgrp;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
			    proc.p_pgrp);
			return (-1);
		}
		kp->ki_pgid = pgrp.pg_id;
		kp->ki_jobc = -1;	/* Or calculate?  Arguably not. */
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
			    pgrp.pg_session);
			return (-1);
		}
		kp->ki_sid = sess.s_sid;
		(void)memcpy(kp->ki_login, sess.s_login,
		    sizeof(kp->ki_login));
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			if (tty.t_dev != NULL) {
				if (KREAD(kd, (u_long)tty.t_dev, &t_cdev)) {
					_kvm_err(kd, kd->program,
					    "can't read cdev at %p",
					    tty.t_dev);
					return (-1);
				}
#if 0
				kp->ki_tdev = t_cdev.si_udev;
#else
				kp->ki_tdev = NODEV;
#endif
			}
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %p",
					    tty.t_pgrp);
					return (-1);
				}
				kp->ki_tpgid = pgrp.pg_id;
			} else
				kp->ki_tpgid = -1;
			if (tty.t_session != NULL) {
				if (KREAD(kd, (u_long)tty.t_session, &sess)) {
					_kvm_err(kd, kd->program,
					    "can't read session at %p",
					    tty.t_session);
					return (-1);
				}
				kp->ki_tsid = sess.s_sid;
			}
		} else {
nopgrp:
			kp->ki_tdev = NODEV;
		}

		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&vmspace, sizeof(vmspace));
		kp->ki_size = vmspace.vm_map.size;
		/*
		 * Approximate the kernel's method of calculating
		 * this field.
		 */
#define	pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
		kp->ki_rssize = pmap_resident_count(&vmspace.vm_pmap);
		kp->ki_swrss = vmspace.vm_swrss;
		kp->ki_tsize = vmspace.vm_tsize;
		kp->ki_dsize = vmspace.vm_dsize;
		kp->ki_ssize = vmspace.vm_ssize;

		switch (what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_PGRP:
			if (kp->ki_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_SESSION:
			if (kp->ki_sid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			    kp->ki_tdev != (dev_t)arg)
				continue;
			break;
		}
		if (proc.p_comm[0] != 0)
			strlcpy(kp->ki_comm, proc.p_comm, MAXCOMLEN);
		(void)kvm_read(kd, (u_long)proc.p_sysent, (char *)&sysent,
		    sizeof(sysent));
		(void)kvm_read(kd, (u_long)sysent.sv_name, (char *)&svname,
		    sizeof(svname));
		if (svname[0] != 0)
			strlcpy(kp->ki_emul, svname, KI_EMULNAMELEN);
		kp->ki_runtime = cputick2usec(proc.p_rux.rux_runtime);
		kp->ki_pid = proc.p_pid;
		kp->ki_xstat = KW_EXITCODE(proc.p_xexit, proc.p_xsig);
		kp->ki_acflag = proc.p_acflag;
		kp->ki_lock = proc.p_lock;
		kp->ki_tdev_freebsd11 = kp->ki_tdev;	/* truncate */

		/* Per-thread items; iterate as appropriate. */
		td = TAILQ_FIRST(&proc.p_threads);
		for (first_thread = true; cnt < maxcnt && td != NULL &&
		    (first_thread || (what & KERN_PROC_INC_THREAD));
		    first_thread = false) {
			if (proc.p_state != PRS_ZOMBIE) {
				if (KREAD(kd, (u_long)td, &mtd)) {
					_kvm_err(kd, kd->program,
					    "can't read thread at %p", td);
					return (-1);
				}
				if (what & KERN_PROC_INC_THREAD)
					td = TAILQ_NEXT(&mtd, td_plist);
			} else
				td = NULL;
			if ((proc.p_state != PRS_ZOMBIE) && mtd.td_wmesg)
				(void)kvm_read(kd, (u_long)mtd.td_wmesg,
				    kp->ki_wmesg, WMESGLEN);
			else
				memset(kp->ki_wmesg, 0, WMESGLEN);
			if (proc.p_pgrp == NULL) {
				kp->ki_kiflag = 0;
			} else {
				kp->ki_kiflag = sess.s_ttyvp ? KI_CTTY : 0;
				if (sess.s_leader == p)
					kp->ki_kiflag |= KI_SLEADER;
			}
			if ((proc.p_state != PRS_ZOMBIE) &&
			    (mtd.td_blocked != 0)) {
				kp->ki_kiflag |= KI_LOCKBLOCK;
				if (mtd.td_lockname)
					(void)kvm_read(kd,
					    (u_long)mtd.td_lockname,
					    kp->ki_lockname, LOCKNAMELEN);
				else
					memset(kp->ki_lockname, 0,
					    LOCKNAMELEN);
				kp->ki_lockname[LOCKNAMELEN] = 0;
			} else
				kp->ki_kiflag &= ~KI_LOCKBLOCK;
			kp->ki_siglist = proc.p_siglist;
			if (proc.p_state != PRS_ZOMBIE) {
				SIGSETOR(kp->ki_siglist, mtd.td_siglist);
				kp->ki_sigmask = mtd.td_sigmask;
				kp->ki_swtime = (ticks - proc.p_swtick) / hz;
				kp->ki_flag = proc.p_flag;
				kp->ki_sflag = 0;
				kp->ki_nice = proc.p_nice;
				kp->ki_traceflag = proc.p_traceflag;
				if (proc.p_state == PRS_NORMAL) {
					if (TD_ON_RUNQ(&mtd) ||
					    TD_CAN_RUN(&mtd) ||
					    TD_IS_RUNNING(&mtd)) {
						kp->ki_stat = SRUN;
					} else if (TD_GET_STATE(&mtd) ==
					    TDS_INHIBITED) {
						if (P_SHOULDSTOP(&proc)) {
							kp->ki_stat = SSTOP;
						} else if (
						    TD_IS_SLEEPING(&mtd)) {
							kp->ki_stat = SSLEEP;
						} else if (TD_ON_LOCK(&mtd)) {
							kp->ki_stat = SLOCK;
						} else {
							kp->ki_stat = SWAIT;
						}
					}
				} else {
					kp->ki_stat = SIDL;
				}
				/* Stuff from the thread */
				kp->ki_pri.pri_level = mtd.td_priority;
				kp->ki_pri.pri_native = mtd.td_base_pri;
				kp->ki_lastcpu = mtd.td_lastcpu;
				kp->ki_wchan = mtd.td_wchan;
				kp->ki_oncpu = mtd.td_oncpu;
				if (mtd.td_name[0] != '\0')
					strlcpy(kp->ki_tdname, mtd.td_name,
					    sizeof(kp->ki_tdname));
				else
					memset(kp->ki_tdname, 0,
					    sizeof(kp->ki_tdname));
				kp->ki_pctcpu = 0;
				kp->ki_rqindex = 0;

				/*
				 * Note: legacy fields; values above
				 * MAXCPU_OLD are clamped to MAXCPU_OLD,
				 * and NOCPU maps to NOCPU_OLD.
				 */
				if (mtd.td_lastcpu == NOCPU)
					kp->ki_lastcpu_old = NOCPU_OLD;
				else if (mtd.td_lastcpu > MAXCPU_OLD)
					kp->ki_lastcpu_old = MAXCPU_OLD;
				else
					kp->ki_lastcpu_old = mtd.td_lastcpu;

				if (mtd.td_oncpu == NOCPU)
					kp->ki_oncpu_old = NOCPU_OLD;
				else if (mtd.td_oncpu > MAXCPU_OLD)
					kp->ki_oncpu_old = MAXCPU_OLD;
				else
					kp->ki_oncpu_old = mtd.td_oncpu;
				kp->ki_tid = mtd.td_tid;
			} else {
				memset(&kp->ki_sigmask, 0,
				    sizeof(kp->ki_sigmask));
				kp->ki_stat = SZOMB;
				kp->ki_tid = 0;
			}

			bcopy(&kinfo_proc, bp, sizeof(kinfo_proc));
			++bp;
			++cnt;
		}
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
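/*
 * a_zombproc may be passed as 0 when the kernel image does not provide a
 * separate zombie-process list (the _zombproc symbol lookup in
 * kvm_getprocs() is best-effort); in that case only the allproc list is
 * walked.
 */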
static int
kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
    u_long a_zombproc, int maxcnt)
{
	struct kinfo_proc *bp = kd->procbase;
	int acnt, zcnt = 0;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (a_zombproc != 0) {
		if (KREAD(kd, a_zombproc, &p)) {
			_kvm_err(kd, kd->program, "cannot read zombproc");
			return (-1);
		}
		zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
		if (zcnt < 0)
			zcnt = 0;
	}

	return (acnt + zcnt);
}

struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	size_t size, osize;
	int temp_op;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		temp_op = op & ~KERN_PROC_INC_THREAD;
		st = sysctl(mib,
		    temp_op == KERN_PROC_ALL || temp_op == KERN_PROC_PROC ?
		    3 : 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		/*
		 * We can't continue with a size of 0 because we pass
		 * it to realloc() (via _kvm_realloc()), and passing 0
		 * to realloc() results in undefined behavior.
		 */
		if (size == 0) {
			/*
			 * XXX: We should probably return an invalid,
			 * but non-NULL, pointer here so any client
			 * program trying to dereference it will
			 * crash.  However, both _kvm_freeprocs() and
			 * kvm_close() call free() on kd->procbase if
			 * it isn't NULL, and free()'ing a junk pointer
			 * isn't good, so hand back a real (1-byte)
			 * allocation instead.
			 */
			kd->procbase = _kvm_malloc(kd, 1);
			goto liveout;
		}
		do {
			size += size / 10;
			kd->procbase = (struct kinfo_proc *)
			    _kvm_realloc(kd, kd->procbase, size);
			if (kd->procbase == NULL)
				return (0);
			osize = size;
			st = sysctl(mib, temp_op == KERN_PROC_ALL ||
			    temp_op == KERN_PROC_PROC ? 3 : 4,
			    kd->procbase, &size, NULL, 0);
		} while (st == -1 && errno == ENOMEM && size == osize);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		/*
		 * We have to check the size again because sysctl()
		 * may "round up" oldlenp if oldp is NULL; hence it
		 * might've told us that there was data to get when
		 * there really isn't any.
		 */
		if (size > 0 &&
		    kd->procbase->ki_structsize != sizeof(struct kinfo_proc)) {
			_kvm_err(kd, kd->program,
			    "kinfo_proc size mismatch (expected %zu, got %d)",
			    sizeof(struct kinfo_proc),
			    kd->procbase->ki_structsize);
			return (0);
		}
liveout:
		nprocs = size == 0 ? 0 : size / kd->procbase->ki_structsize;
	} else {
		struct nlist nl[6], *p;
		struct nlist nlz[2];

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_ticks";
		nl[3].n_name = "_hz";
		nl[4].n_name = "_cpu_tick_frequency";
		nl[5].n_name = 0;

		nlz[0].n_name = "_zombproc";
		nlz[1].n_name = 0;

		if (!kd->arch->ka_native(kd)) {
			_kvm_err(kd, kd->program,
			    "cannot read procs from non-native core");
			return (0);
		}

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		(void) kvm_nlist(kd, nlz);	/* attempt to get zombproc */
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		/*
		 * If returning all threads, we don't know how many that
		 * might be.  Presume that there are, on average, no more
		 * than 10 threads per process.
		 */
		if (op == KERN_PROC_ALL || (op & KERN_PROC_INC_THREAD))
			nprocs *= 10;		/* XXX */
		if (KREAD(kd, nl[2].n_value, &ticks)) {
			_kvm_err(kd, kd->program, "can't read ticks");
			return (0);
		}
		if (KREAD(kd, nl[3].n_value, &hz)) {
			_kvm_err(kd, kd->program, "can't read hz");
			return (0);
		}
		if (KREAD(kd, nl[4].n_value, &cpu_tick_frequency)) {
			_kvm_err(kd, kd->program,
			    "can't read cpu_tick_frequency");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == NULL)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nlz[0].n_value, nprocs);
		if (nprocs <= 0) {
			_kvm_freeprocs(kd);
			nprocs = 0;
		}
#ifdef notdef
		else {
			size = nprocs * sizeof(struct kinfo_proc);
			kd->procbase = realloc(kd->procbase, size);
		}
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

void
_kvm_freeprocs(kvm_t *kd)
{

	free(kd->procbase);
	kd->procbase = NULL;
}

void *
_kvm_realloc(kvm_t *kd, void *p, size_t n)
{
	void *np;

	np = reallocf(p, n);
	if (np == NULL)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

/*
 * Get the command args or environment.
 */
static char **
kvm_argv(kvm_t *kd, const struct kinfo_proc *kp, int env, int nchr)
{
	int oid[4];
	int i;
	size_t bufsz;
	static int buflen;
	static char *buf, *p;
	static char **bufp;
	static int argc;
	char **nbufp;

	if (!ISALIVE(kd)) {
		_kvm_err(kd, kd->program,
		    "cannot read user space from dead kernel");
		return (NULL);
	}

	if (nchr == 0 || nchr > ARG_MAX)
		nchr = ARG_MAX;
	if (buflen == 0) {
		buf = malloc(nchr);
		if (buf == NULL) {
			_kvm_err(kd, kd->program, "cannot allocate memory");
			return (NULL);
		}
		argc = 32;
		bufp = malloc(sizeof(char *) * argc);
		if (bufp == NULL) {
			free(buf);
			buf = NULL;
			_kvm_err(kd, kd->program, "cannot allocate memory");
			return (NULL);
		}
		buflen = nchr;
	} else if (nchr > buflen) {
		p = realloc(buf, nchr);
		if (p != NULL) {
			buf = p;
			buflen = nchr;
		}
	}
	oid[0] = CTL_KERN;
	oid[1] = KERN_PROC;
	oid[2] = env ? KERN_PROC_ENV : KERN_PROC_ARGS;
	oid[3] = kp->ki_pid;
	bufsz = buflen;
	if (sysctl(oid, 4, buf, &bufsz, 0, 0) == -1) {
		/*
		 * If the supplied buf is too short to hold the requested
		 * value, the sysctl fails with ENOMEM.  The buf is filled
		 * with the truncated value and the returned bufsz is equal
		 * to the requested len.
		 */
		if (errno != ENOMEM || bufsz != (size_t)buflen)
			return (NULL);
		buf[bufsz - 1] = '\0';
		errno = 0;
	} else if (bufsz == 0)
		return (NULL);
	i = 0;
	p = buf;
	do {
		bufp[i++] = p;
		p += strlen(p) + 1;
		if (i >= argc) {
			argc += argc;
			nbufp = realloc(bufp, sizeof(char *) * argc);
			if (nbufp == NULL)
				return (NULL);
			bufp = nbufp;
		}
	} while (p < buf + bufsz);
	bufp[i++] = 0;
	return (bufp);
}

char **
kvm_getargv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
{
	return (kvm_argv(kd, kp, 0, nchr));
}

char **
kvm_getenvv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
{
	return (kvm_argv(kd, kp, 1, nchr));
}
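
/*
 * A minimal usage sketch for the argv interface (illustrative only; 'kd'
 * must have been opened against the running kernel, since kvm_argv()
 * rejects dead kernels):
 *
 *	char **argv = kvm_getargv(kd, kp, 0);
 *	for (int i = 0; argv != NULL && argv[i] != NULL; i++)
 *		printf("%s%s", i > 0 ? " " : "", argv[i]);
 *
 * The returned vector and the strings it points to live in storage that is
 * static to kvm_argv() and are overwritten by the next call.
 */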