1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Mdb kernel support module. This module is loaded automatically when the
27 * kvm target is initialized. Any global functions declared here are exported
28 * for the resolution of symbols in subsequently loaded modules.
29 *
30 * WARNING: Do not assume that static variables in mdb_ks will be initialized
31 * to zero.
32 */
33
34 #include <mdb/mdb_target.h>
35 #include <mdb/mdb_param.h>
36 #include <mdb/mdb_modapi.h>
37 #include <mdb/mdb_ks.h>
38
39 #include <sys/types.h>
40 #include <sys/procfs.h>
41 #include <sys/proc.h>
42 #include <sys/dnlc.h>
43 #include <sys/autoconf.h>
44 #include <sys/machelf.h>
45 #include <sys/modctl.h>
46 #include <sys/hwconf.h>
47 #include <sys/kobj.h>
48 #include <sys/fs/autofs.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/refstr_impl.h>
51 #include <sys/cpuvar.h>
52 #include <sys/dlpi.h>
53 #include <sys/clock_impl.h>
54 #include <sys/swap.h>
55 #include <errno.h>
56
57 #include <vm/seg_vn.h>
58 #include <vm/page.h>
59
60 #define MDB_PATH_NELEM 256 /* Maximum path components */
61
62 typedef struct mdb_path {
63 size_t mdp_nelem; /* Number of components */
64 uint_t mdp_complete; /* Path completely resolved? */
65 uintptr_t mdp_vnode[MDB_PATH_NELEM]; /* Array of vnode_t addresses */
66 char *mdp_name[MDB_PATH_NELEM]; /* Array of name components */
67 } mdb_path_t;
68
69 static int mdb_autonode2path(uintptr_t, mdb_path_t *);
70 static int mdb_sprintpath(char *, size_t, mdb_path_t *);
71
72 /*
73 * Kernel parameters from <sys/param.h> which we keep in-core:
74 */
75 unsigned long _mdb_ks_pagesize;
76 unsigned int _mdb_ks_pageshift;
77 unsigned long _mdb_ks_pageoffset;
78 unsigned long long _mdb_ks_pagemask;
79 unsigned long _mdb_ks_mmu_pagesize;
80 unsigned int _mdb_ks_mmu_pageshift;
81 unsigned long _mdb_ks_mmu_pageoffset;
82 unsigned long _mdb_ks_mmu_pagemask;
83 uintptr_t _mdb_ks_kernelbase;
84 uintptr_t _mdb_ks_userlimit;
85 uintptr_t _mdb_ks_userlimit32;
86 uintptr_t _mdb_ks_argsbase;
87 unsigned long _mdb_ks_msg_bsize;
88 unsigned long _mdb_ks_defaultstksz;
89 int _mdb_ks_ncpu;
90 int _mdb_ks_ncpu_log2;
91 int _mdb_ks_ncpu_p2;
92
93 /*
94 * In-core copy of DNLC information:
95 */
96 #define MDB_DNLC_HSIZE 1024
97 #define MDB_DNLC_HASH(vp) (((uintptr_t)(vp) >> 3) & (MDB_DNLC_HSIZE - 1))
98 #define MDB_DNLC_NCACHE_SZ(ncp) (sizeof (ncache_t) + (ncp)->namlen)
99 #define MDB_DNLC_MAX_RETRY 4
100
static ncache_t **dnlc_hash;	/* mdb's hash array of dnlc entries */
102
103 /*
104 * copy of page_hash-related data
105 */
106 static int page_hash_loaded;
107 static long mdb_page_hashsz;
108 static uint_t mdb_page_hashsz_shift; /* Needed for PAGE_HASH_FUNC */
109 static uintptr_t mdb_page_hash; /* base address of page hash */
110 #define page_hashsz mdb_page_hashsz
111 #define page_hashsz_shift mdb_page_hashsz_shift
112
113 /*
 * This will be the location of the vnodeops pointer for "autofs_vnodeops".
115 * The pointer still needs to be read with mdb_vread() to get the location
116 * of the vnodeops structure for autofs.
117 */
118 static struct vnodeops *autofs_vnops_ptr;
119
120 /*
121 * STREAMS queue registrations:
122 */
123 typedef struct mdb_qinfo {
124 const mdb_qops_t *qi_ops; /* Address of ops vector */
125 uintptr_t qi_addr; /* Address of qinit structure (key) */
126 struct mdb_qinfo *qi_next; /* Next qinfo in list */
127 } mdb_qinfo_t;
128
129 static mdb_qinfo_t *qi_head; /* Head of qinfo chain */
130
131 /*
132 * Device naming callback structure:
133 */
134 typedef struct nm_query {
135 const char *nm_name; /* Device driver name [in/out] */
136 major_t nm_major; /* Device major number [in/out] */
137 ushort_t nm_found; /* Did we find a match? [out] */
138 } nm_query_t;
139
140 /*
141 * Address-to-modctl callback structure:
142 */
143 typedef struct a2m_query {
144 uintptr_t a2m_addr; /* Virtual address [in] */
145 uintptr_t a2m_where; /* Modctl address [out] */
146 } a2m_query_t;
147
148 /*
149 * Segment-to-mdb_map callback structure:
150 */
151 typedef struct {
152 struct seg_ops *asm_segvn_ops; /* Address of segvn ops [in] */
153 void (*asm_callback)(const struct mdb_map *, void *); /* Callb [in] */
154 void *asm_cbdata; /* Callback data [in] */
155 } asmap_arg_t;
156
157 static void
dnlc_free(void)
159 {
160 ncache_t *ncp, *next;
161 int i;
162
163 if (dnlc_hash == NULL) {
164 return;
165 }
166
167 /*
168 * Free up current dnlc entries
169 */
170 for (i = 0; i < MDB_DNLC_HSIZE; i++) {
171 for (ncp = dnlc_hash[i]; ncp; ncp = next) {
172 next = ncp->hash_next;
173 mdb_free(ncp, MDB_DNLC_NCACHE_SZ(ncp));
174 }
175 }
176 mdb_free(dnlc_hash, MDB_DNLC_HSIZE * sizeof (ncache_t *));
177 dnlc_hash = NULL;
178 }
179
180 char bad_dnlc[] = "inconsistent dnlc chain: %d, ncache va: %p"
181 " - continuing with the rest\n";
182
183 static int
dnlc_load(void)
185 {
186 int i; /* hash index */
187 int retry_cnt = 0;
188 int skip_bad_chains = 0;
189 int nc_hashsz; /* kernel hash array size */
190 uintptr_t nc_hash_addr; /* kernel va of ncache hash array */
191 uintptr_t head; /* kernel va of head of hash chain */
192
193 /*
194 * If we've already cached the DNLC and we're looking at a dump,
195 * our cache is good forever, so don't bother re-loading.
196 */
197 if (dnlc_hash && mdb_prop_postmortem) {
198 return (0);
199 }
200
201 /*
 * For a core dump, retries won't help.
203 * Just print and skip any bad chains.
204 */
205 if (mdb_prop_postmortem) {
206 skip_bad_chains = 1;
207 }
208 retry:
209 if (retry_cnt++ >= MDB_DNLC_MAX_RETRY) {
210 /*
211 * Give up retrying the rapidly changing dnlc.
212 * Just print and skip any bad chains
213 */
214 skip_bad_chains = 1;
215 }
216
217 dnlc_free(); /* Free up the mdb hashed dnlc - if any */
218
219 /*
 * Although nc_hashsz and the location of nc_hash don't currently
 * change, they may do so in the future with a more dynamic dnlc.
222 * So always read these values afresh.
223 */
224 if (mdb_readvar(&nc_hashsz, "nc_hashsz") == -1) {
225 mdb_warn("failed to read nc_hashsz");
226 return (-1);
227 }
228 if (mdb_readvar(&nc_hash_addr, "nc_hash") == -1) {
229 mdb_warn("failed to read nc_hash");
230 return (-1);
231 }
232
233 /*
234 * Allocate the mdb dnlc hash array
235 */
236 dnlc_hash = mdb_zalloc(MDB_DNLC_HSIZE * sizeof (ncache_t *), UM_SLEEP);
237
238 /* for each kernel hash chain */
239 for (i = 0, head = nc_hash_addr; i < nc_hashsz;
240 i++, head += sizeof (nc_hash_t)) {
241 nc_hash_t nch; /* kernel hash chain header */
242 ncache_t *ncp; /* name cache pointer */
243 int hash; /* mdb hash value */
244 uintptr_t nc_va; /* kernel va of next ncache */
245 uintptr_t ncprev_va; /* kernel va of previous ncache */
246 int khash; /* kernel dnlc hash value */
247 uchar_t namelen; /* name length */
248 ncache_t nc; /* name cache entry */
249 int nc_size; /* size of a name cache entry */
250
251 /*
252 * We read each element of the nc_hash array individually
253 * just before we process the entries in its chain. This is
254 * because the chain can change so rapidly on a running system.
255 */
256 if (mdb_vread(&nch, sizeof (nc_hash_t), head) == -1) {
257 mdb_warn("failed to read nc_hash chain header %d", i);
258 dnlc_free();
259 return (-1);
260 }
261
262 ncprev_va = head;
263 nc_va = (uintptr_t)(nch.hash_next);
264 /* for each entry in the chain */
265 while (nc_va != head) {
266 /*
267 * The size of the ncache entries varies
268 * because the name is appended to the structure.
269 * So we read in the structure then re-read
270 * for the structure plus name.
271 */
272 if (mdb_vread(&nc, sizeof (ncache_t), nc_va) == -1) {
273 if (skip_bad_chains) {
274 mdb_warn(bad_dnlc, i, nc_va);
275 break;
276 }
277 goto retry;
278 }
279 nc_size = MDB_DNLC_NCACHE_SZ(&nc);
280 ncp = mdb_alloc(nc_size, UM_SLEEP);
281 if (mdb_vread(ncp, nc_size - 1, nc_va) == -1) {
282 mdb_free(ncp, nc_size);
283 if (skip_bad_chains) {
284 mdb_warn(bad_dnlc, i, nc_va);
285 break;
286 }
287 goto retry;
288 }
289
290 /*
291 * Check for chain consistency
292 */
293 if ((uintptr_t)ncp->hash_prev != ncprev_va) {
294 mdb_free(ncp, nc_size);
295 if (skip_bad_chains) {
296 mdb_warn(bad_dnlc, i, nc_va);
297 break;
298 }
299 goto retry;
300 }
301 /*
302 * Terminate the new name with a null.
303 * Note, we allowed space for this null when
304 * allocating space for the entry.
305 */
306 ncp->name[ncp->namlen] = '\0';
307
308 /*
309 * Validate new entry by re-hashing using the
310 * kernel dnlc hash function and comparing the hash
311 */
312 DNLCHASH(ncp->name, ncp->dp, khash, namelen);
313 if ((namelen != ncp->namlen) ||
314 (khash != ncp->hash)) {
315 mdb_free(ncp, nc_size);
316 if (skip_bad_chains) {
317 mdb_warn(bad_dnlc, i, nc_va);
318 break;
319 }
320 goto retry;
321 }
322
323 /*
324 * Finally put the validated entry into the mdb
325 * hash chains. Reuse the kernel next hash field
326 * for the mdb hash chain pointer.
327 */
328 hash = MDB_DNLC_HASH(ncp->vp);
329 ncprev_va = nc_va;
330 nc_va = (uintptr_t)(ncp->hash_next);
331 ncp->hash_next = dnlc_hash[hash];
332 dnlc_hash[hash] = ncp;
333 }
334 }
335 return (0);
336 }
337
338 /*ARGSUSED*/
339 int
dnlcdump(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
341 {
342 ncache_t *ent;
343 int i;
344
345 if ((flags & DCMD_ADDRSPEC) || argc != 0)
346 return (DCMD_USAGE);
347
348 if (dnlc_load() == -1)
349 return (DCMD_ERR);
350
351 mdb_printf("%<u>%-?s %-?s %-32s%</u>\n", "VP", "DVP", "NAME");
352
353 for (i = 0; i < MDB_DNLC_HSIZE; i++) {
354 for (ent = dnlc_hash[i]; ent != NULL; ent = ent->hash_next) {
355 mdb_printf("%0?p %0?p %s\n",
356 ent->vp, ent->dp, ent->name);
357 }
358 }
359
360 return (DCMD_OK);
361 }
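/*
 * Example: the dcmd above is registered below as "dnlc", so it is invoked
 * from an mdb session without an address or arguments (illustrative sketch
 * only):
 *
 *	> ::dnlc
 *
 * Each output line pairs a cached vnode (VP) with its parent directory
 * vnode (DVP) and the name component stored in the DNLC.
 */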
362
363 static int
mdb_sprintpath(char *buf, size_t len, mdb_path_t *path)
365 {
366 char *s = buf;
367 int i;
368
369 if (len < sizeof ("/..."))
370 return (-1);
371
372 if (!path->mdp_complete) {
373 (void) strcpy(s, "??");
374 s += 2;
375
376 if (path->mdp_nelem == 0)
377 return (-1);
378 }
379
380 if (path->mdp_nelem == 0) {
381 (void) strcpy(s, "/");
382 return (0);
383 }
384
385 for (i = path->mdp_nelem - 1; i >= 0; i--) {
386 /*
387 * Number of bytes left is the distance from where we
388 * are to the end, minus 2 for '/' and '\0'
389 */
390 ssize_t left = (ssize_t)(&buf[len] - s) - 2;
391
392 if (left <= 0)
393 break;
394
395 *s++ = '/';
396 (void) strncpy(s, path->mdp_name[i], left);
397 s[left - 1] = '\0';
398 s += strlen(s);
399
400 if (left < strlen(path->mdp_name[i]))
401 break;
402 }
403
404 if (i >= 0)
405 (void) strcpy(&buf[len - 4], "...");
406
407 return (0);
408 }
409
410 static int
mdb_autonode2path(uintptr_t addr, mdb_path_t *path)
412 {
413 fninfo_t fni;
414 fnnode_t fn;
415
416 vnode_t vn;
417 vfs_t vfs;
418 struct vnodeops *autofs_vnops = NULL;
419
420 /*
421 * "autofs_vnops_ptr" is the address of the pointer to the vnodeops
422 * structure for autofs. We want to read it each time we access
423 * it since autofs could (in theory) be unloaded and reloaded.
424 */
425 if (mdb_vread(&autofs_vnops, sizeof (autofs_vnops),
426 (uintptr_t)autofs_vnops_ptr) == -1)
427 return (-1);
428
429 if (mdb_vread(&vn, sizeof (vn), addr) == -1)
430 return (-1);
431
432 if (autofs_vnops == NULL || vn.v_op != autofs_vnops)
433 return (-1);
434
435 addr = (uintptr_t)vn.v_data;
436
437 if (mdb_vread(&vfs, sizeof (vfs), (uintptr_t)vn.v_vfsp) == -1 ||
438 mdb_vread(&fni, sizeof (fni), (uintptr_t)vfs.vfs_data) == -1 ||
439 mdb_vread(&vn, sizeof (vn), (uintptr_t)fni.fi_rootvp) == -1)
440 return (-1);
441
442 for (;;) {
443 size_t elem = path->mdp_nelem++;
444 char elemstr[MAXNAMELEN];
445 char *c, *p;
446
447 if (elem == MDB_PATH_NELEM) {
448 path->mdp_nelem--;
449 return (-1);
450 }
451
452 if (mdb_vread(&fn, sizeof (fn), addr) != sizeof (fn)) {
453 path->mdp_nelem--;
454 return (-1);
455 }
456
457 if (mdb_readstr(elemstr, sizeof (elemstr),
458 (uintptr_t)fn.fn_name) <= 0) {
459 (void) strcpy(elemstr, "?");
460 }
461
462 c = mdb_alloc(strlen(elemstr) + 1, UM_SLEEP | UM_GC);
463 (void) strcpy(c, elemstr);
464
465 path->mdp_vnode[elem] = (uintptr_t)fn.fn_vnode;
466
467 if (addr == (uintptr_t)fn.fn_parent) {
468 path->mdp_name[elem] = &c[1];
469 path->mdp_complete = TRUE;
470 break;
471 }
472
473 if ((p = strrchr(c, '/')) != NULL)
474 path->mdp_name[elem] = p + 1;
475 else
476 path->mdp_name[elem] = c;
477
478 addr = (uintptr_t)fn.fn_parent;
479 }
480
481 return (0);
482 }
483
484 int
mdb_vnode2path(uintptr_t addr, char *buf, size_t buflen)
486 {
487 uintptr_t rootdir;
488 ncache_t *ent;
489 vnode_t vp;
490 mdb_path_t path;
491
492 /*
493 * Check to see if we have a cached value for this vnode
494 */
495 if (mdb_vread(&vp, sizeof (vp), addr) != -1 &&
496 vp.v_path != NULL &&
497 mdb_readstr(buf, buflen, (uintptr_t)vp.v_path) != -1)
498 return (0);
499
500 if (dnlc_load() == -1)
501 return (-1);
502
503 if (mdb_readvar(&rootdir, "rootdir") == -1) {
504 mdb_warn("failed to read 'rootdir'");
505 return (-1);
506 }
507
508 bzero(&path, sizeof (mdb_path_t));
509 again:
510 if ((addr == NULL) && (path.mdp_nelem == 0)) {
511 /*
512 * 0 elems && complete tells sprintpath to just print "/"
513 */
514 path.mdp_complete = TRUE;
515 goto out;
516 }
517
518 if (addr == rootdir) {
519 path.mdp_complete = TRUE;
520 goto out;
521 }
522
523 for (ent = dnlc_hash[MDB_DNLC_HASH(addr)]; ent; ent = ent->hash_next) {
524 if ((uintptr_t)ent->vp == addr) {
525 if (strcmp(ent->name, "..") == 0 ||
526 strcmp(ent->name, ".") == 0)
527 continue;
528
529 path.mdp_vnode[path.mdp_nelem] = (uintptr_t)ent->vp;
530 path.mdp_name[path.mdp_nelem] = ent->name;
531 path.mdp_nelem++;
532
533 if (path.mdp_nelem == MDB_PATH_NELEM) {
534 path.mdp_nelem--;
535 mdb_warn("path exceeded maximum expected "
536 "elements\n");
537 return (-1);
538 }
539
540 addr = (uintptr_t)ent->dp;
541 goto again;
542 }
543 }
544
545 (void) mdb_autonode2path(addr, &path);
546
547 out:
548 return (mdb_sprintpath(buf, buflen, &path));
549 }
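/*
 * Example: a dcmd that wants a printable path for a vnode address might use
 * mdb_vnode2path() roughly as follows (hypothetical caller, sketch only):
 *
 *	char path[MAXPATHLEN];
 *
 *	if (mdb_vnode2path(addr, path, sizeof (path)) == 0)
 *		mdb_printf("%p %s\n", addr, path);
 *	else
 *		mdb_warn("failed to construct path for vnode %p", addr);
 *
 * Unresolvable leading components are rendered as "??" by mdb_sprintpath().
 */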
550
551
552 uintptr_t
mdb_pid2proc(pid_t pid, proc_t *proc)
554 {
555 int pid_hashsz, hash;
556 uintptr_t paddr, pidhash, procdir;
557 struct pid pidp;
558
559 if (mdb_readvar(&pidhash, "pidhash") == -1)
560 return (NULL);
561
562 if (mdb_readvar(&pid_hashsz, "pid_hashsz") == -1)
563 return (NULL);
564
565 if (mdb_readvar(&procdir, "procdir") == -1)
566 return (NULL);
567
568 hash = pid & (pid_hashsz - 1);
569
570 if (mdb_vread(&paddr, sizeof (paddr),
571 pidhash + (hash * sizeof (paddr))) == -1)
572 return (NULL);
573
574 while (paddr != 0) {
575 if (mdb_vread(&pidp, sizeof (pidp), paddr) == -1)
576 return (NULL);
577
578 if (pidp.pid_id == pid) {
579 uintptr_t procp;
580
581 if (mdb_vread(&procp, sizeof (procp), procdir +
582 (pidp.pid_prslot * sizeof (procp))) == -1)
583 return (NULL);
584
585 if (proc != NULL)
586 (void) mdb_vread(proc, sizeof (proc_t), procp);
587
588 return (procp);
589 }
590 paddr = (uintptr_t)pidp.pid_link;
591 }
592 return (NULL);
593 }
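/*
 * Example: translating a PID into a proc_t address and a local copy of the
 * structure (hypothetical caller, sketch only):
 *
 *	proc_t p;
 *	uintptr_t paddr;
 *
 *	if ((paddr = mdb_pid2proc(pid, &p)) == NULL)
 *		mdb_warn("no process found for PID %d\n", (int)pid);
 *	else
 *		mdb_printf("PID %d: proc_t at %p\n", (int)pid, paddr);
 */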
594
595 int
mdb_cpu2cpuid(uintptr_t cpup)
597 {
598 cpu_t cpu;
599
600 if (mdb_vread(&cpu, sizeof (cpu_t), cpup) != sizeof (cpu_t))
601 return (-1);
602
603 return (cpu.cpu_id);
604 }
605
606 int
mdb_cpuset_find(uintptr_t cpusetp)
608 {
609 ulong_t *cpuset;
610 size_t nr_words = BT_BITOUL(NCPU);
611 size_t sz = nr_words * sizeof (ulong_t);
612 size_t i;
613 int cpu = -1;
614
615 cpuset = mdb_alloc(sz, UM_SLEEP);
616
617 if (mdb_vread((void *)cpuset, sz, cpusetp) != sz)
618 goto out;
619
620 for (i = 0; i < nr_words; i++) {
621 size_t j;
622 ulong_t m;
623
624 for (j = 0, m = 1; j < BT_NBIPUL; j++, m <<= 1) {
625 if (cpuset[i] & m) {
626 cpu = i * BT_NBIPUL + j;
627 goto out;
628 }
629 }
630 }
631
632 out:
633 mdb_free(cpuset, sz);
634 return (cpu);
635 }
636
637 static int
page_hash_load(void)
639 {
640 if (page_hash_loaded) {
641 return (1);
642 }
643
644 if (mdb_readvar(&mdb_page_hashsz, "page_hashsz") == -1) {
645 mdb_warn("unable to read page_hashsz");
646 return (0);
647 }
648 if (mdb_readvar(&mdb_page_hashsz_shift, "page_hashsz_shift") == -1) {
649 mdb_warn("unable to read page_hashsz_shift");
650 return (0);
651 }
652 if (mdb_readvar(&mdb_page_hash, "page_hash") == -1) {
653 mdb_warn("unable to read page_hash");
654 return (0);
655 }
656
657 page_hash_loaded = 1; /* zeroed on state change */
658 return (1);
659 }
660
661 uintptr_t
mdb_page_lookup(uintptr_t vp, u_offset_t offset)
663 {
664 size_t ndx;
665 uintptr_t page_hash_entry, pp;
666
667 if (!page_hash_loaded && !page_hash_load()) {
668 return (NULL);
669 }
670
671 ndx = PAGE_HASH_FUNC(vp, offset);
672 page_hash_entry = mdb_page_hash + ndx * sizeof (uintptr_t);
673
674 if (mdb_vread(&pp, sizeof (pp), page_hash_entry) < 0) {
675 mdb_warn("unable to read page_hash[%ld] (%p)", ndx,
676 page_hash_entry);
677 return (NULL);
678 }
679
680 while (pp != NULL) {
681 page_t page;
682 long nndx;
683
684 if (mdb_vread(&page, sizeof (page), pp) < 0) {
685 mdb_warn("unable to read page_t at %p", pp);
686 return (NULL);
687 }
688
689 if ((uintptr_t)page.p_vnode == vp &&
690 (uint64_t)page.p_offset == offset)
691 return (pp);
692
693 /*
694 * Double check that the pages actually hash to the
695 * bucket we're searching. If not, our version of
696 * PAGE_HASH_FUNC() doesn't match the kernel's, and we're
697 * not going to be able to find the page. The most
 * likely reason for this is that mdb_ks doesn't match the
699 * kernel we're running against.
700 */
701 nndx = PAGE_HASH_FUNC(page.p_vnode, page.p_offset);
702 if (page.p_vnode != NULL && nndx != ndx) {
703 mdb_warn("mdb_page_lookup: mdb_ks PAGE_HASH_FUNC() "
704 "mismatch: in bucket %ld, but page %p hashes to "
705 "bucket %ld\n", ndx, pp, nndx);
706 return (NULL);
707 }
708
709 pp = (uintptr_t)page.p_hash;
710 }
711
712 return (NULL);
713 }
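/*
 * Example: looking up the page_t that caches a given (vnode, offset) pair
 * (hypothetical caller, sketch only):
 *
 *	uintptr_t pp;
 *
 *	if ((pp = mdb_page_lookup(vp_addr, off)) != NULL)
 *		mdb_printf("page_t at %p\n", pp);
 *
 * A return value of NULL means either that no such page is hashed or that
 * the page_hash variables could not be read from the target.
 */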
714
715 char
mdb_vtype2chr(vtype_t type, mode_t mode)
717 {
718 static const char vttab[] = {
719 ' ', /* VNON */
720 ' ', /* VREG */
721 '/', /* VDIR */
722 ' ', /* VBLK */
723 ' ', /* VCHR */
724 '@', /* VLNK */
725 '|', /* VFIFO */
726 '>', /* VDOOR */
727 ' ', /* VPROC */
728 '=', /* VSOCK */
729 ' ', /* VBAD */
730 };
731
732 if (type < 0 || type >= sizeof (vttab) / sizeof (vttab[0]))
733 return ('?');
734
735 if (type == VREG && (mode & 0111) != 0)
736 return ('*');
737
738 return (vttab[type]);
739 }
740
741 struct pfn2page {
742 pfn_t pfn;
743 page_t *pp;
744 };
745
746 /*ARGSUSED*/
747 static int
pfn2page_cb(uintptr_t addr, const struct memseg *msp, void *data)
749 {
750 struct pfn2page *p = data;
751
752 if (p->pfn >= msp->pages_base && p->pfn < msp->pages_end) {
753 p->pp = msp->pages + (p->pfn - msp->pages_base);
754 return (WALK_DONE);
755 }
756
757 return (WALK_NEXT);
758 }
759
760 uintptr_t
mdb_pfn2page(pfn_t pfn)
762 {
763 struct pfn2page arg;
764 struct page page;
765
766 arg.pfn = pfn;
767 arg.pp = NULL;
768
769 if (mdb_walk("memseg", (mdb_walk_cb_t)pfn2page_cb, &arg) == -1) {
770 mdb_warn("pfn2page: can't walk memsegs");
771 return (0);
772 }
773 if (arg.pp == NULL) {
774 mdb_warn("pfn2page: unable to find page_t for pfn %lx\n",
775 pfn);
776 return (0);
777 }
778
779 if (mdb_vread(&page, sizeof (page_t), (uintptr_t)arg.pp) == -1) {
780 mdb_warn("pfn2page: can't read page 0x%lx at %p", pfn, arg.pp);
781 return (0);
782 }
783 if (page.p_pagenum != pfn) {
784 mdb_warn("pfn2page: page_t 0x%p should have PFN 0x%lx, "
785 "but actually has 0x%lx\n", arg.pp, pfn, page.p_pagenum);
786 return (0);
787 }
788
789 return ((uintptr_t)arg.pp);
790 }
791
792 pfn_t
mdb_page2pfn(uintptr_t addr)
794 {
795 struct page page;
796
797 if (mdb_vread(&page, sizeof (page_t), addr) == -1) {
798 mdb_warn("pp2pfn: can't read page at %p", addr);
799 return ((pfn_t)(-1));
800 }
801
802 return (page.p_pagenum);
803 }
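/*
 * Example: round-tripping between a PFN and its page_t (hypothetical
 * caller, sketch only):
 *
 *	uintptr_t pp;
 *
 *	if ((pp = mdb_pfn2page(pfn)) != 0)
 *		mdb_printf("pfn %lx -> page_t %p (pfn %lx)\n",
 *		    pfn, pp, mdb_page2pfn(pp));
 */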
804
805 static int
a2m_walk_modctl(uintptr_t addr, const struct modctl *m, a2m_query_t *a2m)
807 {
808 struct module mod;
809
810 if (m->mod_mp == NULL)
811 return (0);
812
813 if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
814 mdb_warn("couldn't read modctl %p's module", addr);
815 return (0);
816 }
817
818 if (a2m->a2m_addr >= (uintptr_t)mod.text &&
819 a2m->a2m_addr < (uintptr_t)mod.text + mod.text_size)
820 goto found;
821
822 if (a2m->a2m_addr >= (uintptr_t)mod.data &&
823 a2m->a2m_addr < (uintptr_t)mod.data + mod.data_size)
824 goto found;
825
826 return (0);
827
828 found:
829 a2m->a2m_where = addr;
830 return (-1);
831 }
832
833 uintptr_t
mdb_addr2modctl(uintptr_t addr)
835 {
836 a2m_query_t a2m;
837
838 a2m.a2m_addr = addr;
839 a2m.a2m_where = NULL;
840
841 (void) mdb_walk("modctl", (mdb_walk_cb_t)a2m_walk_modctl, &a2m);
842 return (a2m.a2m_where);
843 }
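/*
 * Example: finding which module owns a kernel text or data address
 * (hypothetical caller, sketch only):
 *
 *	uintptr_t mcp;
 *	struct modctl mc;
 *
 *	if ((mcp = mdb_addr2modctl(addr)) != NULL &&
 *	    mdb_vread(&mc, sizeof (mc), mcp) == sizeof (mc))
 *		mdb_printf("%p is in module %s\n", addr, mc.mod_modname);
 */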
844
845 static mdb_qinfo_t *
qi_lookup(uintptr_t qinit_addr)
847 {
848 mdb_qinfo_t *qip;
849
850 for (qip = qi_head; qip != NULL; qip = qip->qi_next) {
851 if (qip->qi_addr == qinit_addr)
852 return (qip);
853 }
854
855 return (NULL);
856 }
857
858 void
mdb_qops_install(const mdb_qops_t *qops, uintptr_t qinit_addr)
860 {
861 mdb_qinfo_t *qip = qi_lookup(qinit_addr);
862
863 if (qip != NULL) {
864 qip->qi_ops = qops;
865 return;
866 }
867
868 qip = mdb_alloc(sizeof (mdb_qinfo_t), UM_SLEEP);
869
870 qip->qi_ops = qops;
871 qip->qi_addr = qinit_addr;
872 qip->qi_next = qi_head;
873
874 qi_head = qip;
875 }
876
877 void
mdb_qops_remove(const mdb_qops_t *qops, uintptr_t qinit_addr)
879 {
880 mdb_qinfo_t *qip, *p = NULL;
881
882 for (qip = qi_head; qip != NULL; p = qip, qip = qip->qi_next) {
883 if (qip->qi_addr == qinit_addr && qip->qi_ops == qops) {
884 if (qi_head == qip)
885 qi_head = qip->qi_next;
886 else
887 p->qi_next = qip->qi_next;
888 mdb_free(qip, sizeof (mdb_qinfo_t));
889 return;
890 }
891 }
892 }
893
894 char *
mdb_qname(const queue_t *q, char *buf, size_t nbytes)
896 {
897 struct module_info mi;
898 struct qinit qi;
899
900 if (mdb_vread(&qi, sizeof (qi), (uintptr_t)q->q_qinfo) == -1) {
901 mdb_warn("failed to read qinit at %p", q->q_qinfo);
902 goto err;
903 }
904
905 if (mdb_vread(&mi, sizeof (mi), (uintptr_t)qi.qi_minfo) == -1) {
906 mdb_warn("failed to read module_info at %p", qi.qi_minfo);
907 goto err;
908 }
909
910 if (mdb_readstr(buf, nbytes, (uintptr_t)mi.mi_idname) <= 0) {
911 mdb_warn("failed to read mi_idname at %p", mi.mi_idname);
912 goto err;
913 }
914
915 return (buf);
916
917 err:
918 (void) mdb_snprintf(buf, nbytes, "???");
919 return (buf);
920 }
921
922 void
mdb_qinfo(const queue_t *q, char *buf, size_t nbytes)
924 {
925 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
926 buf[0] = '\0';
927
928 if (qip != NULL)
929 qip->qi_ops->q_info(q, buf, nbytes);
930 }
931
932 uintptr_t
mdb_qrnext(const queue_t *q)
934 {
935 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
936
937 if (qip != NULL)
938 return (qip->qi_ops->q_rnext(q));
939
940 return (NULL);
941 }
942
943 uintptr_t
mdb_qwnext(const queue_t *q)
945 {
946 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
947
948 if (qip != NULL)
949 return (qip->qi_ops->q_wnext(q));
950
951 return (NULL);
952 }
953
954 uintptr_t
mdb_qrnext_default(const queue_t *q)
956 {
957 return ((uintptr_t)q->q_next);
958 }
959
960 uintptr_t
mdb_qwnext_default(const queue_t *q)
962 {
963 return ((uintptr_t)q->q_next);
964 }
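/*
 * Example: a STREAMS driver dmod can register its own queue ops so that
 * generic queue dcmds can interpret its queues.  A minimal sketch, usually
 * placed in the dmod's _mdb_init(); the "xx_rinit" symbol, "xx_qinfo"
 * function and "xx_qops" vector are hypothetical:
 *
 *	static const mdb_qops_t xx_qops = {
 *		.q_info = xx_qinfo,
 *		.q_rnext = mdb_qrnext_default,
 *		.q_wnext = mdb_qwnext_default
 *	};
 *
 *	GElf_Sym sym;
 *
 *	if (mdb_lookup_by_name("xx_rinit", &sym) == 0)
 *		mdb_qops_install(&xx_qops, (uintptr_t)sym.st_value);
 *
 * The matching mdb_qops_remove() call would normally go in _mdb_fini().
 */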
965
966 /*
 * The following three routines are borrowed from modsubr.c
968 */
969 static int
nm_hash(const char *name)
971 {
972 char c;
973 int hash = 0;
974
975 for (c = *name++; c; c = *name++)
976 hash ^= c;
977
978 return (hash & MOD_BIND_HASHMASK);
979 }
980
981 static uintptr_t
find_mbind(const char *name, uintptr_t *hashtab)
983 {
984 int hashndx;
985 uintptr_t mb;
986 struct bind mb_local;
987 char node_name[MAXPATHLEN + 1];
988
989 hashndx = nm_hash(name);
990 mb = hashtab[hashndx];
991 while (mb) {
992 if (mdb_vread(&mb_local, sizeof (mb_local), mb) == -1) {
993 mdb_warn("failed to read struct bind at %p", mb);
994 return (NULL);
995 }
996 if (mdb_readstr(node_name, sizeof (node_name),
997 (uintptr_t)mb_local.b_name) == -1) {
998 mdb_warn("failed to read node name string at %p",
999 mb_local.b_name);
1000 return (NULL);
1001 }
1002
1003 if (strcmp(name, node_name) == 0)
1004 break;
1005
1006 mb = (uintptr_t)mb_local.b_next;
1007 }
1008 return (mb);
1009 }
1010
1011 int
mdb_name_to_major(const char *name, major_t *major)
1013 {
1014 uintptr_t mbind;
1015 uintptr_t mb_hashtab[MOD_BIND_HASHSIZE];
1016 struct bind mbind_local;
1017
1018
1019 if (mdb_readsym(mb_hashtab, sizeof (mb_hashtab), "mb_hashtab") == -1) {
1020 mdb_warn("failed to read symbol 'mb_hashtab'");
1021 return (-1);
1022 }
1023
1024 if ((mbind = find_mbind(name, mb_hashtab)) != NULL) {
1025 if (mdb_vread(&mbind_local, sizeof (mbind_local), mbind) ==
1026 -1) {
1027 mdb_warn("failed to read mbind struct at %p", mbind);
1028 return (-1);
1029 }
1030
1031 *major = (major_t)mbind_local.b_num;
1032 return (0);
1033 }
1034 return (-1);
1035 }
1036
1037 const char *
mdb_major_to_name(major_t major)
1039 {
1040 static char name[MODMAXNAMELEN + 1];
1041
1042 uintptr_t devnamesp;
1043 struct devnames dn;
1044 uint_t devcnt;
1045
1046 if (mdb_readvar(&devcnt, "devcnt") == -1 || major >= devcnt ||
1047 mdb_readvar(&devnamesp, "devnamesp") == -1)
1048 return (NULL);
1049
1050 if (mdb_vread(&dn, sizeof (struct devnames), devnamesp +
1051 major * sizeof (struct devnames)) != sizeof (struct devnames))
1052 return (NULL);
1053
1054 if (mdb_readstr(name, MODMAXNAMELEN + 1, (uintptr_t)dn.dn_name) == -1)
1055 return (NULL);
1056
1057 return ((const char *)name);
1058 }
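/*
 * Example: mapping between a driver name and its major number in both
 * directions (hypothetical caller; "sd" is just an illustrative name):
 *
 *	const char *nm;
 *	major_t maj;
 *
 *	if (mdb_name_to_major("sd", &maj) == 0 &&
 *	    (nm = mdb_major_to_name(maj)) != NULL)
 *		mdb_printf("%s is major %d\n", nm, maj);
 */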
1059
1060 /*
1061 * Return the name of the driver attached to the dip in drivername.
1062 */
1063 int
mdb_devinfo2driver(uintptr_t dip_addr, char *drivername, size_t namebufsize)
1065 {
1066 struct dev_info devinfo;
1067 char bind_name[MAXPATHLEN + 1];
1068 major_t major;
1069 const char *namestr;
1070
1071
1072 if (mdb_vread(&devinfo, sizeof (devinfo), dip_addr) == -1) {
1073 mdb_warn("failed to read devinfo at %p", dip_addr);
1074 return (-1);
1075 }
1076
1077 if (mdb_readstr(bind_name, sizeof (bind_name),
1078 (uintptr_t)devinfo.devi_binding_name) == -1) {
1079 mdb_warn("failed to read binding name at %p",
1080 devinfo.devi_binding_name);
1081 return (-1);
1082 }
1083
1084 /*
1085 * Many->one relation: various names to one major number
1086 */
1087 if (mdb_name_to_major(bind_name, &major) == -1) {
1088 mdb_warn("failed to translate bind name to major number\n");
1089 return (-1);
1090 }
1091
1092 /*
1093 * One->one relation: one major number corresponds to one driver
1094 */
1095 if ((namestr = mdb_major_to_name(major)) == NULL) {
1096 (void) strncpy(drivername, "???", namebufsize);
1097 return (-1);
1098 }
1099
1100 (void) strncpy(drivername, namestr, namebufsize);
1101 return (0);
1102 }
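/*
 * Example: resolving the driver bound to a devinfo node (hypothetical
 * caller, sketch only):
 *
 *	char drv[MODMAXNAMELEN + 1];
 *
 *	if (mdb_devinfo2driver(dip_addr, drv, sizeof (drv)) == 0)
 *		mdb_printf("%p is bound to driver %s\n", dip_addr, drv);
 */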
1103
1104 /*
 * Find the soft state of the driver attached to this dip (if any), given:
1106 * - the address of a dip (in core)
1107 * - the NAME of the global pointer to the driver's i_ddi_soft_state struct
1108 * - pointer to a pointer to receive the address
1109 */
1110 int
mdb_devinfo2statep(uintptr_t dip_addr, char *soft_statep_name,
1112 uintptr_t *statep)
1113 {
1114 struct dev_info dev_info;
1115
1116
1117 if (mdb_vread(&dev_info, sizeof (dev_info), dip_addr) == -1) {
1118 mdb_warn("failed to read devinfo at %p", dip_addr);
1119 return (-1);
1120 }
1121
1122 return (mdb_get_soft_state_byname(soft_statep_name,
1123 dev_info.devi_instance, statep, NULL, 0));
1124 }
1125
1126 /*
1127 * Returns a pointer to the top of the soft state struct for the instance
1128 * specified (in state_addr), given the address of the global soft state
1129 * pointer and size of the struct. Also fills in the buffer pointed to by
1130 * state_buf_p (if non-NULL) with the contents of the state struct.
1131 */
1132 int
mdb_get_soft_state_byaddr(uintptr_t ssaddr, uint_t instance,
1134 uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
1135 {
1136 struct i_ddi_soft_state ss;
1137 void *statep;
1138
1139
1140 if (mdb_vread(&ss, sizeof (ss), ssaddr) == -1)
1141 return (-1);
1142
1143 if (instance >= ss.n_items)
1144 return (-1);
1145
1146 if (mdb_vread(&statep, sizeof (statep), (uintptr_t)ss.array +
1147 (sizeof (statep) * instance)) == -1)
1148 return (-1);
1149
1150 if (state_addr != NULL)
1151 *state_addr = (uintptr_t)statep;
1152
1153 if (statep == NULL) {
1154 errno = ENOENT;
1155 return (-1);
1156 }
1157
1158 if (state_buf_p != NULL) {
1159
1160 /* Read the state struct into the buffer in local space. */
1161 if (mdb_vread(state_buf_p, sizeof_state,
1162 (uintptr_t)statep) == -1)
1163 return (-1);
1164 }
1165
1166 return (0);
1167 }
1168
1169
1170 /*
1171 * Returns a pointer to the top of the soft state struct for the instance
1172 * specified (in state_addr), given the name of the global soft state pointer
1173 * and size of the struct. Also fills in the buffer pointed to by
1174 * state_buf_p (if non-NULL) with the contents of the state struct.
1175 */
1176 int
mdb_get_soft_state_byname(char *softstatep_name, uint_t instance,
1178 uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
1179 {
1180 uintptr_t ssaddr;
1181
1182 if (mdb_readvar((void *)&ssaddr, softstatep_name) == -1)
1183 return (-1);
1184
1185 return (mdb_get_soft_state_byaddr(ssaddr, instance, state_addr,
1186 state_buf_p, sizeof_state));
1187 }
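/*
 * Example: a driver dmod typically fetches per-instance soft state by the
 * name of its i_ddi_soft_state pointer.  A minimal sketch; the
 * "xx_soft_state" symbol and xx_state_t type are hypothetical:
 *
 *	xx_state_t xx;
 *	uintptr_t statep;
 *
 *	if (mdb_get_soft_state_byname("xx_soft_state", instance, &statep,
 *	    &xx, sizeof (xx)) == 0)
 *		mdb_printf("instance %d state at %p\n", instance, statep);
 */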
1188
1189 static const mdb_dcmd_t dcmds[] = {
1190 { "dnlc", NULL, "print DNLC contents", dnlcdump },
1191 { NULL }
1192 };
1193
1194 static const mdb_modinfo_t modinfo = { MDB_API_VERSION, dcmds };
1195
1196 /*ARGSUSED*/
1197 static void
update_vars(void *arg)
1199 {
1200 GElf_Sym sym;
1201
1202 if (mdb_lookup_by_name("auto_vnodeops", &sym) == 0)
1203 autofs_vnops_ptr = (struct vnodeops *)(uintptr_t)sym.st_value;
1204 else
1205 autofs_vnops_ptr = NULL;
1206
1207 (void) mdb_readvar(&_mdb_ks_pagesize, "_pagesize");
1208 (void) mdb_readvar(&_mdb_ks_pageshift, "_pageshift");
1209 (void) mdb_readvar(&_mdb_ks_pageoffset, "_pageoffset");
1210 (void) mdb_readvar(&_mdb_ks_pagemask, "_pagemask");
1211 (void) mdb_readvar(&_mdb_ks_mmu_pagesize, "_mmu_pagesize");
1212 (void) mdb_readvar(&_mdb_ks_mmu_pageshift, "_mmu_pageshift");
1213 (void) mdb_readvar(&_mdb_ks_mmu_pageoffset, "_mmu_pageoffset");
1214 (void) mdb_readvar(&_mdb_ks_mmu_pagemask, "_mmu_pagemask");
1215 (void) mdb_readvar(&_mdb_ks_kernelbase, "_kernelbase");
1216
1217 (void) mdb_readvar(&_mdb_ks_userlimit, "_userlimit");
1218 (void) mdb_readvar(&_mdb_ks_userlimit32, "_userlimit32");
1219 (void) mdb_readvar(&_mdb_ks_argsbase, "_argsbase");
1220 (void) mdb_readvar(&_mdb_ks_msg_bsize, "_msg_bsize");
1221 (void) mdb_readvar(&_mdb_ks_defaultstksz, "_defaultstksz");
1222 (void) mdb_readvar(&_mdb_ks_ncpu, "_ncpu");
1223 (void) mdb_readvar(&_mdb_ks_ncpu_log2, "_ncpu_log2");
1224 (void) mdb_readvar(&_mdb_ks_ncpu_p2, "_ncpu_p2");
1225
1226 page_hash_loaded = 0; /* invalidate cached page_hash state */
1227 }
1228
1229 const mdb_modinfo_t *
_mdb_init(void)
1231 {
1232 /*
1233 * When used with mdb, mdb_ks is a separate dmod. With kmdb, however,
1234 * mdb_ks is compiled into the debugger module. kmdb cannot
1235 * automatically modunload itself when it exits. If it restarts after
 * a debugger fault, static variables may not be initialized to zero.
1237 * They must be manually reinitialized here.
1238 */
1239 dnlc_hash = NULL;
1240 qi_head = NULL;
1241
1242 mdb_callback_add(MDB_CALLBACK_STCHG, update_vars, NULL);
1243
1244 update_vars(NULL);
1245
1246 return (&modinfo);
1247 }
1248
1249 void
_mdb_fini(void)
1251 {
1252 dnlc_free();
1253 while (qi_head != NULL) {
1254 mdb_qinfo_t *qip = qi_head;
1255 qi_head = qip->qi_next;
1256 mdb_free(qip, sizeof (mdb_qinfo_t));
1257 }
1258 }
1259
1260 /*
1261 * Interface between MDB kproc target and mdb_ks. The kproc target relies
1262 * on looking up and invoking these functions in mdb_ks so that dependencies
1263 * on the current kernel implementation are isolated in mdb_ks.
1264 */
1265
1266 /*
1267 * Given the address of a proc_t, return the p.p_as pointer; return NULL
1268 * if we were unable to read a proc structure from the given address.
1269 */
1270 uintptr_t
mdb_kproc_as(uintptr_t proc_addr)
1272 {
1273 proc_t p;
1274
1275 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p))
1276 return ((uintptr_t)p.p_as);
1277
1278 return (NULL);
1279 }
1280
1281 /*
1282 * Given the address of a proc_t, return the p.p_model value; return
1283 * PR_MODEL_UNKNOWN if we were unable to read a proc structure or if
1284 * the model value does not match one of the two known values.
1285 */
1286 uint_t
mdb_kproc_model(uintptr_t proc_addr)
1288 {
1289 proc_t p;
1290
1291 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p)) {
1292 switch (p.p_model) {
1293 case DATAMODEL_ILP32:
1294 return (PR_MODEL_ILP32);
1295 case DATAMODEL_LP64:
1296 return (PR_MODEL_LP64);
1297 }
1298 }
1299
1300 return (PR_MODEL_UNKNOWN);
1301 }
1302
1303 /*
1304 * Callback function for walking process's segment list. For each segment,
1305 * we fill in an mdb_map_t describing its properties, and then invoke
1306 * the callback function provided by the kproc target.
1307 */
1308 static int
asmap_step(uintptr_t addr, const struct seg *seg, asmap_arg_t *asmp)
1310 {
1311 struct segvn_data svd;
1312 mdb_map_t map;
1313
1314 if (seg->s_ops == asmp->asm_segvn_ops && mdb_vread(&svd,
1315 sizeof (svd), (uintptr_t)seg->s_data) == sizeof (svd)) {
1316
1317 if (svd.vp != NULL) {
1318 if (mdb_vnode2path((uintptr_t)svd.vp, map.map_name,
1319 MDB_TGT_MAPSZ) != 0) {
1320 (void) mdb_snprintf(map.map_name,
1321 MDB_TGT_MAPSZ, "[ vnode %p ]", svd.vp);
1322 }
1323 } else
1324 (void) strcpy(map.map_name, "[ anon ]");
1325
1326 } else {
1327 (void) mdb_snprintf(map.map_name, MDB_TGT_MAPSZ,
1328 "[ seg %p ]", addr);
1329 }
1330
1331 map.map_base = (uintptr_t)seg->s_base;
1332 map.map_size = seg->s_size;
1333 map.map_flags = 0;
1334
1335 asmp->asm_callback((const struct mdb_map *)&map, asmp->asm_cbdata);
1336 return (WALK_NEXT);
1337 }
1338
1339 /*
1340 * Given a process address space, walk its segment list using the seg walker,
1341 * convert the segment data to an mdb_map_t, and pass this information
1342 * back to the kproc target via the given callback function.
1343 */
1344 int
mdb_kproc_asiter(uintptr_t as,
1346 void (*func)(const struct mdb_map *, void *), void *p)
1347 {
1348 asmap_arg_t arg;
1349 GElf_Sym sym;
1350
1351 arg.asm_segvn_ops = NULL;
1352 arg.asm_callback = func;
1353 arg.asm_cbdata = p;
1354
1355 if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
1356 arg.asm_segvn_ops = (struct seg_ops *)(uintptr_t)sym.st_value;
1357
1358 return (mdb_pwalk("seg", (mdb_walk_cb_t)asmap_step, &arg, as));
1359 }
1360
1361 /*
1362 * Copy the auxv array from the given process's u-area into the provided
1363 * buffer. If the buffer is NULL, only return the size of the auxv array
1364 * so the caller knows how much space will be required.
1365 */
1366 int
mdb_kproc_auxv(uintptr_t proc, auxv_t *auxv)
1368 {
1369 if (auxv != NULL) {
1370 proc_t p;
1371
1372 if (mdb_vread(&p, sizeof (p), proc) != sizeof (p))
1373 return (-1);
1374
1375 bcopy(p.p_user.u_auxv, auxv,
1376 sizeof (auxv_t) * __KERN_NAUXV_IMPL);
1377 }
1378
1379 return (__KERN_NAUXV_IMPL);
1380 }
1381
1382 /*
1383 * Given a process address, return the PID.
1384 */
1385 pid_t
mdb_kproc_pid(uintptr_t proc_addr)
1387 {
1388 struct pid pid;
1389 proc_t p;
1390
1391 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p) &&
1392 mdb_vread(&pid, sizeof (pid), (uintptr_t)p.p_pidp) == sizeof (pid))
1393 return (pid.pid_id);
1394
1395 return (-1);
1396 }
1397
1398 /*
1399 * Interface between the MDB kvm target and mdb_ks. The kvm target relies
1400 * on looking up and invoking these functions in mdb_ks so that dependencies
1401 * on the current kernel implementation are isolated in mdb_ks.
1402 */
1403
1404 /*
 * Print a description of the dump's content.  For a curproc dump, this
 * includes determining whether the thread that panicked the given kernel
 * was a kernel thread (panic_thread->t_procp == &p0).
1407 */
1408 void
mdb_dump_print_content(dumphdr_t *dh, pid_t content)
1410 {
1411 GElf_Sym sym;
1412 uintptr_t pt;
1413 uintptr_t procp;
1414 int expcont = 0;
1415 int actcont;
1416
1417 (void) mdb_readvar(&expcont, "dump_conflags");
1418 actcont = dh->dump_flags & DF_CONTENT;
1419
1420 if (actcont == DF_ALL) {
1421 mdb_printf("dump content: all kernel and user pages\n");
1422 return;
1423 } else if (actcont == DF_CURPROC) {
1424 mdb_printf("dump content: kernel pages and pages from "
1425 "PID %d", content);
1426 return;
1427 }
1428
1429 mdb_printf("dump content: kernel pages only\n");
1430 if (!(expcont & DF_CURPROC))
1431 return;
1432
1433 if (mdb_readvar(&pt, "panic_thread") != sizeof (pt) || pt == NULL)
1434 goto kthreadpanic_err;
1435
1436 if (mdb_vread(&procp, sizeof (procp), pt + OFFSETOF(kthread_t,
1437 t_procp)) == -1 || procp == NULL)
1438 goto kthreadpanic_err;
1439
1440 if (mdb_lookup_by_name("p0", &sym) != 0)
1441 goto kthreadpanic_err;
1442
1443 if (procp == (uintptr_t)sym.st_value) {
1444 mdb_printf(" (curproc requested, but a kernel thread "
1445 "panicked)\n");
1446 } else {
1447 mdb_printf(" (curproc requested, but the process that "
1448 "panicked could not be dumped)\n");
1449 }
1450
1451 return;
1452
1453 kthreadpanic_err:
1454 mdb_printf(" (curproc requested, but the process that panicked could "
1455 "not be found)\n");
1456 }
1457
1458 /*
1459 * Determine the process that was saved in a `curproc' dump. This process will
1460 * be recorded as the first element in dump_pids[].
1461 */
1462 int
mdb_dump_find_curproc(void)
1464 {
1465 uintptr_t pidp;
1466 pid_t pid = -1;
1467
1468 if (mdb_readvar(&pidp, "dump_pids") == sizeof (pidp) &&
1469 mdb_vread(&pid, sizeof (pid), pidp) == sizeof (pid) &&
1470 pid > 0)
1471 return (pid);
1472 else
1473 return (-1);
1474 }
1475
1476
1477 /*
 * The following three functions were extracted from sunddi.c
1479 */
1480
1481 /*
1482 * Return core address of root node of devinfo tree
1483 */
1484 static uintptr_t
mdb_ddi_root_node(void)
1486 {
1487 uintptr_t top_devinfo_addr;
1488
1489 /* return (top_devinfo); */
1490 if (mdb_readvar(&top_devinfo_addr, "top_devinfo") == -1) {
1491 mdb_warn("failed to read top_devinfo");
1492 return (NULL);
1493 }
1494 return (top_devinfo_addr);
1495 }
1496
1497 /*
1498 * Return the name of the devinfo node pointed at by 'dip_addr' in the buffer
1499 * pointed at by 'name.'
1500 *
1501 * - dip_addr is a pointer to a dev_info struct in core.
1502 */
1503 static char *
mdb_ddi_deviname(uintptr_t dip_addr, char *name, size_t name_size)
1505 {
1506 uintptr_t addrname;
1507 ssize_t length;
1508 char *local_namep = name;
1509 size_t local_name_size = name_size;
1510 struct dev_info local_dip;
1511
1512
1513 if (dip_addr == mdb_ddi_root_node()) {
1514 if (name_size < 1) {
1515 mdb_warn("failed to get node name: buf too small\n");
1516 return (NULL);
1517 }
1518
1519 *name = '\0';
1520 return (name);
1521 }
1522
1523 if (name_size < 2) {
1524 mdb_warn("failed to get node name: buf too small\n");
1525 return (NULL);
1526 }
1527
1528 local_namep = name;
1529 *local_namep++ = '/';
1530 *local_namep = '\0';
1531 local_name_size--;
1532
1533 if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
1534 mdb_warn("failed to read devinfo struct");
1535 }
1536
1537 length = mdb_readstr(local_namep, local_name_size,
1538 (uintptr_t)local_dip.devi_node_name);
1539 if (length == -1) {
1540 mdb_warn("failed to read node name");
1541 return (NULL);
1542 }
1543 local_namep += length;
1544 local_name_size -= length;
1545 addrname = (uintptr_t)local_dip.devi_addr;
1546
1547 if (addrname != NULL) {
1548
1549 if (local_name_size < 2) {
1550 mdb_warn("not enough room for node address string");
1551 return (name);
1552 }
1553 *local_namep++ = '@';
1554 *local_namep = '\0';
1555 local_name_size--;
1556
1557 length = mdb_readstr(local_namep, local_name_size, addrname);
1558 if (length == -1) {
1559 mdb_warn("failed to read name");
1560 return (NULL);
1561 }
1562 }
1563
1564 return (name);
1565 }
1566
1567 /*
1568 * Generate the full path under the /devices dir to the device entry.
1569 *
1570 * dip is a pointer to a devinfo struct in core (not in local memory).
1571 */
1572 char *
mdb_ddi_pathname(uintptr_t dip_addr, char *path, size_t pathlen)
1574 {
1575 struct dev_info local_dip;
1576 uintptr_t parent_dip;
1577 char *bp;
1578 size_t buf_left;
1579
1580
1581 if (dip_addr == mdb_ddi_root_node()) {
1582 *path = '\0';
1583 return (path);
1584 }
1585
1586
1587 if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
1588 mdb_warn("failed to read devinfo struct");
1589 }
1590
1591 parent_dip = (uintptr_t)local_dip.devi_parent;
1592 (void) mdb_ddi_pathname(parent_dip, path, pathlen);
1593
1594 bp = path + strlen(path);
1595 buf_left = pathlen - strlen(path);
1596 (void) mdb_ddi_deviname(dip_addr, bp, buf_left);
1597 return (path);
1598 }
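/*
 * Example: producing the /devices path for a devinfo node (hypothetical
 * caller, sketch only):
 *
 *	char path[MAXPATHLEN];
 *
 *	(void) mdb_ddi_pathname(dip_addr, path, sizeof (path));
 *	mdb_printf("%p is /devices%s\n", dip_addr, path);
 */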
1599
1600
1601 /*
1602 * Read in the string value of a refstr, which is appended to the end of
1603 * the structure.
1604 */
1605 ssize_t
mdb_read_refstr(uintptr_t refstr_addr, char *str, size_t nbytes)
1607 {
1608 struct refstr *r = (struct refstr *)refstr_addr;
1609
1610 return (mdb_readstr(str, nbytes, (uintptr_t)r->rs_string));
1611 }
1612
1613 /*
1614 * Chase an mblk list by b_next and return the length.
1615 */
1616 int
mdb_mblk_count(const mblk_t *mb)
1618 {
1619 int count;
1620 mblk_t mblk;
1621
1622 if (mb == NULL)
1623 return (0);
1624
1625 count = 1;
1626 while (mb->b_next != NULL) {
1627 count++;
1628 if (mdb_vread(&mblk, sizeof (mblk), (uintptr_t)mb->b_next) ==
1629 -1)
1630 break;
1631 mb = &mblk;
1632 }
1633 return (count);
1634 }
1635
1636 /*
1637 * Write the given MAC address as a printable string in the usual colon-
1638 * separated format. Assumes that buflen is at least 2.
1639 */
1640 void
mdb_mac_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen)
1642 {
1643 int slen;
1644
1645 if (alen == 0 || buflen < 4) {
1646 (void) strcpy(buf, "?");
1647 return;
1648 }
1649 for (;;) {
1650 /*
1651 * If there are more MAC address bytes available, but we won't
1652 * have any room to print them, then add "..." to the string
1653 * instead. See below for the 'magic number' explanation.
1654 */
1655 if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) {
1656 (void) strcpy(buf, "...");
1657 break;
1658 }
1659 slen = mdb_snprintf(buf, buflen, "%02x", *addr++);
1660 buf += slen;
1661 if (--alen == 0)
1662 break;
1663 *buf++ = ':';
1664 buflen -= slen + 1;
1665 /*
1666 * At this point, based on the first 'if' statement above,
1667 * either alen == 1 and buflen >= 3, or alen > 1 and
1668 * buflen >= 4. The first case leaves room for the final "xx"
1669 * number and trailing NUL byte. The second leaves room for at
1670 * least "...". Thus the apparently 'magic' numbers chosen for
1671 * that statement.
1672 */
1673 }
1674 }
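/*
 * Example: formatting a 6-byte Ethernet address (hypothetical caller;
 * "ea" is assumed to be a uint8_t array already read from the target):
 *
 *	char macstr[18];
 *
 *	mdb_mac_addr(ea, 6, macstr, sizeof (macstr));
 *	mdb_printf("%s\n", macstr);
 *
 * 18 bytes covers six "xx" groups, five colons and the terminating NUL.
 */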
1675
1676 /*
1677 * Produce a string that represents a DLPI primitive, or NULL if no such string
1678 * is possible.
1679 */
1680 const char *
mdb_dlpi_prim(int prim)
1682 {
1683 switch (prim) {
1684 case DL_INFO_REQ: return ("DL_INFO_REQ");
1685 case DL_INFO_ACK: return ("DL_INFO_ACK");
1686 case DL_ATTACH_REQ: return ("DL_ATTACH_REQ");
1687 case DL_DETACH_REQ: return ("DL_DETACH_REQ");
1688 case DL_BIND_REQ: return ("DL_BIND_REQ");
1689 case DL_BIND_ACK: return ("DL_BIND_ACK");
1690 case DL_UNBIND_REQ: return ("DL_UNBIND_REQ");
1691 case DL_OK_ACK: return ("DL_OK_ACK");
1692 case DL_ERROR_ACK: return ("DL_ERROR_ACK");
1693 case DL_ENABMULTI_REQ: return ("DL_ENABMULTI_REQ");
1694 case DL_DISABMULTI_REQ: return ("DL_DISABMULTI_REQ");
1695 case DL_PROMISCON_REQ: return ("DL_PROMISCON_REQ");
1696 case DL_PROMISCOFF_REQ: return ("DL_PROMISCOFF_REQ");
1697 case DL_UNITDATA_REQ: return ("DL_UNITDATA_REQ");
1698 case DL_UNITDATA_IND: return ("DL_UNITDATA_IND");
1699 case DL_UDERROR_IND: return ("DL_UDERROR_IND");
1700 case DL_PHYS_ADDR_REQ: return ("DL_PHYS_ADDR_REQ");
1701 case DL_PHYS_ADDR_ACK: return ("DL_PHYS_ADDR_ACK");
1702 case DL_SET_PHYS_ADDR_REQ: return ("DL_SET_PHYS_ADDR_REQ");
1703 case DL_NOTIFY_REQ: return ("DL_NOTIFY_REQ");
1704 case DL_NOTIFY_ACK: return ("DL_NOTIFY_ACK");
1705 case DL_NOTIFY_IND: return ("DL_NOTIFY_IND");
1706 case DL_NOTIFY_CONF: return ("DL_NOTIFY_CONF");
1707 case DL_CAPABILITY_REQ: return ("DL_CAPABILITY_REQ");
1708 case DL_CAPABILITY_ACK: return ("DL_CAPABILITY_ACK");
1709 case DL_CONTROL_REQ: return ("DL_CONTROL_REQ");
1710 case DL_CONTROL_ACK: return ("DL_CONTROL_ACK");
1711 case DL_PASSIVE_REQ: return ("DL_PASSIVE_REQ");
1712 default: return (NULL);
1713 }
1714 }
1715
1716 /*
1717 * mdb_gethrtime() returns the hires system time. This will be the timestamp at
1718 * which we dropped into, if called from, kmdb(1); the core dump's hires time
1719 * if inspecting one; or the running system's hires time if we're inspecting
1720 * a live kernel.
1721 */
1722 hrtime_t
mdb_gethrtime(void)
1724 {
1725 uintptr_t ptr;
1726 GElf_Sym sym;
1727 lbolt_info_t lbi;
1728 hrtime_t ts;
1729
1730 /*
1731 * We first check whether the lbolt info structure has been allocated
1732 * and initialized. If not, lbolt_hybrid will be pointing at
1733 * lbolt_bootstrap.
1734 */
1735 if (mdb_lookup_by_name("lbolt_bootstrap", &sym) == -1)
1736 return (0);
1737
1738 if (mdb_readvar(&ptr, "lbolt_hybrid") == -1)
1739 return (0);
1740
1741 if (ptr == (uintptr_t)sym.st_value)
1742 return (0);
1743
1744 #ifdef _KMDB
1745 if (mdb_readvar(&ptr, "lb_info") == -1)
1746 return (0);
1747
1748 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1749 sizeof (lbolt_info_t))
1750 return (0);
1751
1752 ts = lbi.lbi_debug_ts;
1753 #else
1754 if (mdb_prop_postmortem) {
1755 if (mdb_readvar(&ptr, "lb_info") == -1)
1756 return (0);
1757
1758 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1759 sizeof (lbolt_info_t))
1760 return (0);
1761
1762 ts = lbi.lbi_debug_ts;
1763 } else {
1764 ts = gethrtime();
1765 }
1766 #endif
1767 return (ts);
1768 }
1769
1770 /*
1771 * mdb_get_lbolt() returns the number of clock ticks since system boot.
1772 * Depending on the context in which it's called, the value will be derived
1773 * from different sources per mdb_gethrtime(). If inspecting a panicked
1774 * system, the routine returns the 'panic_lbolt64' variable from the core file.
1775 */
1776 int64_t
mdb_get_lbolt(void)
1778 {
1779 lbolt_info_t lbi;
1780 uintptr_t ptr;
1781 int64_t pl;
1782 hrtime_t ts;
1783 int nsec;
1784
1785 if (mdb_readvar(&pl, "panic_lbolt64") != -1 && pl > 0)
1786 return (pl);
1787
1788 /*
1789 * mdb_gethrtime() will return zero if the lbolt info structure hasn't
1790 * been allocated and initialized yet, or if it fails to read it.
1791 */
1792 if ((ts = mdb_gethrtime()) <= 0)
1793 return (0);
1794
1795 /*
1796 * Load the time spent in kmdb, if any.
1797 */
1798 if (mdb_readvar(&ptr, "lb_info") == -1)
1799 return (0);
1800
1801 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1802 sizeof (lbolt_info_t))
1803 return (0);
1804
1805 if (mdb_readvar(&nsec, "nsec_per_tick") == -1 || nsec == 0) {
1806 mdb_warn("failed to read 'nsec_per_tick'");
1807 return (-1);
1808 }
1809
1810 return ((ts/nsec) - lbi.lbi_debug_time);
1811 }
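/*
 * Example: computing how long ago (in ticks) an event occurred, given a
 * timestamp stored in lbolt units (hypothetical caller; "then" is assumed
 * to have been read from some target structure):
 *
 *	int64_t now = mdb_get_lbolt();
 *
 *	if (now > 0)
 *		mdb_printf("event was %lld ticks ago\n",
 *		    (longlong_t)(now - then));
 */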
1812