/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2019 Joyent, Inc.
 */

/*
 * This part of the file contains the mdb support for dcmds:
 *	::memseg_list
 * and walkers for:
 *	memseg - a memseg list walker for ::memseg_list
 */

#include <sys/types.h>
#include <sys/machparam.h>
#include <sys/controlregs.h>
#include <sys/mach_mmu.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/as.h>

#include <mdb/mdb_modapi.h>
#include <mdb/mdb_target.h>

#include <vm/page.h>
#include <vm/hat_i86.h>

#define	VA_SIGN_BIT (1UL << 47)
#define	VA_LOW_BITS ((1UL << 48) - 1)
#define	VA_SIGN_EXTEND(va) ((((va) & VA_LOW_BITS) ^ VA_SIGN_BIT) - VA_SIGN_BIT)
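/*
 * On x86-64 only the low 48 bits of a virtual address are significant; a
 * canonical address has bits 63:48 equal to bit 47.  VA_SIGN_EXTEND()
 * rebuilds a canonical address from the low 48 bits when a VA is
 * reconstructed from pagetable indices below.
 */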

struct pfn2pp {
	pfn_t pfn;
	page_t *pp;
};

static int do_va2pa(uintptr_t, struct as *, int, physaddr_t *, pfn_t *);
static void init_mmu(void);

int
platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
{
	if (asp == NULL)
		return (DCMD_ERR);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	return (do_va2pa(addr, asp, 0, pap, NULL));
}

/*
 * ::memseg_list dcmd and walker to implement it.
 */
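/*
 * With no address, "> ::memseg_list" walks the global memseg list and
 * formats every entry; with an address, only the struct memseg at that
 * address is shown.
 */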
/*ARGSUSED*/
int
memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memseg ms;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_pwalk_dcmd("memseg", "memseg_list",
		    0, NULL, 0) == -1) {
			mdb_warn("can't walk memseg");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
		    "PAGES", "EPAGES", "BASE", "END");

	if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
		mdb_warn("can't read memseg at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
	    ms.pages, ms.epages, ms.pages_base, ms.pages_end);

	return (DCMD_OK);
}

/*
 * walk the memseg structures
 */
int
memseg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != 0) {
		mdb_warn("memseg only supports global walks\n");
		return (WALK_ERR);
	}

	if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
		mdb_warn("symbol 'memsegs' not found");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
	return (WALK_NEXT);
}

int
memseg_walk_step(mdb_walk_state_t *wsp)
{
	int status;

	if (wsp->walk_addr == 0) {
		return (WALK_DONE);
	}

	if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
	    wsp->walk_addr) == -1) {
		mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
		return (WALK_DONE);
	}

	status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
	    wsp->walk_cbdata);

	wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);

	return (status);
}

void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (struct memseg));
}

/*
 * Now HAT related dcmds.
 */

static struct hat *khat;	/* value of kas.a_hat */
struct hat_mmu_info mmu;
uintptr_t kernelbase;

/*
 * stuff for i86xpv images
 */
static int is_xpv;
static uintptr_t mfn_list_addr;	/* kernel MFN list address */
uintptr_t xen_virt_start;	/* address of mfn_to_pfn[] table */
ulong_t mfn_count;		/* number of pfn's in the MFN list */
pfn_t *mfn_list;		/* local MFN list copy */

/*
 * read mmu parameters from kernel
 */
static void
init_mmu(void)
{
	struct as kas;

	if (mmu.num_level != 0)
		return;

	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
		mdb_warn("Can't use HAT information before mmu_init()\n");
	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
		mdb_warn("Couldn't find kas - kernel's struct as\n");
	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
		mdb_warn("Couldn't find kernelbase\n");
	khat = kas.a_hat;

	/*
	 * Is this a paravirtualized domain image?
	 */
	if (mdb_readsym(&mfn_list_addr, sizeof (mfn_list_addr),
	    "mfn_list") == -1 ||
	    mdb_readsym(&xen_virt_start, sizeof (xen_virt_start),
	    "xen_virt_start") == -1 ||
	    mdb_readsym(&mfn_count, sizeof (mfn_count), "mfn_count") == -1) {
		mfn_list_addr = 0;
	}

	is_xpv = mfn_list_addr != 0;

#ifndef _KMDB
	/*
	 * recreate the local mfn_list
	 */
	if (is_xpv) {
		size_t sz = mfn_count * sizeof (pfn_t);
		mfn_list = mdb_zalloc(sz, UM_SLEEP);

		if (mdb_vread(mfn_list, sz, (uintptr_t)mfn_list_addr) == -1) {
			mdb_warn("Failed to read MFN list\n");
			mdb_free(mfn_list, sz);
			mfn_list = NULL;
		}
	}
#endif
}

void
free_mmu(void)
{
#ifdef __xpv
	if (mfn_list != NULL)
		mdb_free(mfn_list, mfn_count * sizeof (mfn_t));
#endif
}

#ifdef __xpv

#ifdef _KMDB

/*
 * Convert between MFNs and PFNs. Since we're in kmdb we can go directly
 * through the machine to phys mapping and the MFN list.
 */
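/*
 * The translation below is validated both ways: look up the candidate PFN
 * in the hypervisor's machine-to-phys table at xen_virt_start, then read
 * mfn_list[pfn] back and make sure it maps to the original MFN.
 */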

pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
	pfn_t pfn;
	mfn_t tmp;
	pfn_t *pfn_list;

	if (mfn_list_addr == 0)
		return (-(pfn_t)1);

	pfn_list = (pfn_t *)xen_virt_start;
	if (mdb_vread(&pfn, sizeof (pfn), (uintptr_t)(pfn_list + mfn)) == -1)
		return (-(pfn_t)1);

	if (mdb_vread(&tmp, sizeof (tmp),
	    (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
		return (-(pfn_t)1);

	if (pfn >= mfn_count || tmp != mfn)
		return (-(pfn_t)1);

	return (pfn);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	mfn_t mfn;

	init_mmu();

	if (mfn_list_addr == 0 || pfn >= mfn_count)
		return (-(mfn_t)1);

	if (mdb_vread(&mfn, sizeof (mfn),
	    (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
		return (-(mfn_t)1);

	return (mfn);
}

#else /* _KMDB */

/*
 * Convert between MFNs and PFNs. Since a crash dump doesn't include the
 * MFN->PFN translation table (it's part of the hypervisor, not our image)
 * we do the MFN->PFN translation by searching the PFN->MFN (mfn_list)
 * table, if it's there.
 */

pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
	pfn_t pfn;

	init_mmu();

	if (mfn_list == NULL)
		return (-(pfn_t)1);

	for (pfn = 0; pfn < mfn_count; ++pfn) {
		if (mfn_list[pfn] != mfn)
			continue;
		return (pfn);
	}

	return (-(pfn_t)1);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	init_mmu();

	if (mfn_list == NULL || pfn >= mfn_count)
		return (-(mfn_t)1);

	return (mfn_list[pfn]);
}

#endif /* _KMDB */

static paddr_t
mdb_ma_to_pa(uint64_t ma)
{
	pfn_t pfn = mdb_mfn_to_pfn(mmu_btop(ma));
	if (pfn == -(pfn_t)1)
		return (-(paddr_t)1);

	return (mmu_ptob((paddr_t)pfn) | (ma & (MMU_PAGESIZE - 1)));
}

#else /* __xpv */

#define	mdb_ma_to_pa(ma) (ma)
#define	mdb_mfn_to_pfn(mfn) (mfn)
#define	mdb_pfn_to_mfn(pfn) (pfn)

#endif /* __xpv */

/*
 * ::mfntopfn dcmd translates hypervisor machine page number
 * to physical page number
 */
/*ARGSUSED*/
int
mfntopfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("MFN missing\n");
		return (DCMD_USAGE);
	}

	if ((pfn = mdb_mfn_to_pfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", (pfn_t)addr);
		return (DCMD_ERR);
	}

	mdb_printf("%lr\n", pfn);

	return (DCMD_OK);
}

/*
 * ::pfntomfn dcmd translates physical page number to
 * hypervisor machine page number
 */
/*ARGSUSED*/
int
pfntomfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t mfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("PFN missing\n");
		return (DCMD_USAGE);
	}

	if ((mfn = mdb_pfn_to_mfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid pfn %lr\n", (pfn_t)addr);
		return (DCMD_ABORT);
	}

	mdb_printf("%lr\n", mfn);

	if (flags & DCMD_LOOP)
		mdb_set_dot(addr + 1);
	return (DCMD_OK);
}

static pfn_t
pte2mfn(x86pte_t pte, uint_t level)
{
	pfn_t mfn;
	if (level > 0 && (pte & PT_PAGESIZE))
		mfn = mmu_btop(pte & PT_PADDR_LGPG);
	else
		mfn = mmu_btop(pte & PT_PADDR);
	return (mfn);
}

static int
do_pte_dcmd(int level, uint64_t pte)
{
	static char *attr[] = {
	    "wrback", "wrthru", "uncached", "uncached",
	    "wrback", "wrthru", "wrcombine", "uncached"};
	int pat_index = 0;
	pfn_t mfn;

	mdb_printf("pte=0x%llr: ", pte);

	mfn = pte2mfn(pte, level);
	mdb_printf("%s=0x%lr ", is_xpv ? "mfn" : "pfn", mfn);

	if (PTE_GET(pte, mmu.pt_nx))
		mdb_printf("noexec ");

	if (PTE_GET(pte, PT_NOCONSIST))
		mdb_printf("noconsist ");

	if (PTE_GET(pte, PT_NOSYNC))
		mdb_printf("nosync ");

	if (PTE_GET(pte, mmu.pt_global))
		mdb_printf("global ");

	if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
		mdb_printf("largepage ");

	if (level > 0 && PTE_GET(pte, PT_MOD))
		mdb_printf("mod ");

	if (level > 0 && PTE_GET(pte, PT_REF))
		mdb_printf("ref ");

	if (PTE_GET(pte, PT_USER))
		mdb_printf("user ");

	if (PTE_GET(pte, PT_WRITABLE))
		mdb_printf("write ");

	/*
	 * Report non-standard cacheability
	 */
	pat_index = 0;
	if (level > 0) {
		if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
			pat_index += 4;
	} else {
		if (PTE_GET(pte, PT_PAT_4K))
			pat_index += 4;
	}

	if (PTE_GET(pte, PT_NOCACHE))
		pat_index += 2;

	if (PTE_GET(pte, PT_WRITETHRU))
		pat_index += 1;

	if (pat_index != 0)
		mdb_printf("%s", attr[pat_index]);

	if (PTE_GET(pte, PT_VALID) == 0)
		mdb_printf(" !VALID ");

	mdb_printf("\n");
	return (DCMD_OK);
}

/*
 * Print a PTE in more human friendly way. The PTE is assumed to be in
 * a level 0 page table, unless -l specifies another level.
 */
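/*
 * Example, assuming this is registered as the ::pte dcmd (the raw PTE
 * value, not a VA, is supplied as the address):
 *	> <pte-value>::pte -l <level>
 */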
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint64_t level = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'l', MDB_OPT_UINT64, &level, NULL) != argc)
		return (DCMD_USAGE);

	if (level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", level);
		return (DCMD_ERR);
	}

	if (addr == 0)
		return (DCMD_OK);

	return (do_pte_dcmd((int)level, addr));
}

static size_t
va2entry(htable_t *htable, uintptr_t addr)
{
	size_t entry = (addr - htable->ht_vaddr);

	entry >>= mmu.level_shift[htable->ht_level];
	return (entry & (HTABLE_NUM_PTES(htable) - 1));
}

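/*
 * Fetch the PTE for 'addr' from an htable.  For HTABLE_COPIED htables the
 * PTEs live in the hat's hat_copied_ptes[] array, which is already present
 * in our local copy of the hat; otherwise read the PTE from the pagetable
 * at the htable's physical address.
 */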
static x86pte_t
get_pte(hat_t *hat, htable_t *htable, uintptr_t addr)
{
	x86pte_t buf;

	if (htable->ht_flags & HTABLE_COPIED) {
		uintptr_t ptr = (uintptr_t)hat->hat_copied_ptes;
		ptr += va2entry(htable, addr) << mmu.pte_size_shift;
		return (*(x86pte_t *)ptr);
	}

	paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
	paddr += va2entry(htable, addr) << mmu.pte_size_shift;

	if ((mdb_pread(&buf, mmu.pte_size, paddr)) == mmu.pte_size)
		return (buf);

	return (0);
}

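/*
 * Translate a virtual address to a physical address for the given address
 * space (the kernel's if asp is NULL).  For each pagetable level, compute
 * the base VA an htable at that level would cover, search the hat's htable
 * hash for a match, and read the PTE for the address.  The first valid
 * translation found supplies the physical address and (optionally) the MFN;
 * with print_level set, each matching level's htable and PTE are printed.
 */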
static int
do_va2pa(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap,
    pfn_t *mfnp)
{
	struct as as;
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int found = 0;
	x86pte_t pte;
	physaddr_t paddr;

	if (asp != NULL) {
		if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
			mdb_warn("Couldn't read struct as\n");
			return (DCMD_ERR);
		}
		hatp = as.a_hat;
	} else {
		hatp = khat;
	}

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (level = 0; level <= mmu.max_level; ++level) {
		if (level == TOP_LEVEL(&hat))
			base = 0;
		else
			base = addr & mmu.level_mask[level + 1];

		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				if (htable.ht_vaddr != base ||
				    htable.ht_level != level)
					continue;

				pte = get_pte(&hat, &htable, addr);

				if (print_level) {
					mdb_printf("\tlevel=%d htable=0x%p "
					    "pte=0x%llr\n", level, ht, pte);
				}

				if (!PTE_ISVALID(pte)) {
					mdb_printf("Address %p is unmapped.\n",
					    addr);
					return (DCMD_ERR);
				}

				if (found)
					continue;

				if (PTE_IS_LGPG(pte, level))
					paddr = mdb_ma_to_pa(pte &
					    PT_PADDR_LGPG);
				else
					paddr = mdb_ma_to_pa(pte & PT_PADDR);
				paddr += addr & mmu.level_offset[level];
				if (pap != NULL)
					*pap = paddr;
				if (mfnp != NULL)
					*mfnp = pte2mfn(pte, level);
				found = 1;
			}
		}
	}

	if (!found)
		return (DCMD_ERR);
	return (DCMD_OK);
}

int
va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t addrspace;
	char *addrspace_str = NULL;
	int piped = flags & DCMD_PIPE_OUT;
	pfn_t pfn;
	pfn_t mfn;
	int rc;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if (mdb_getopts(argc, argv,
	    'a', MDB_OPT_STR, &addrspace_str, NULL) != argc)
		return (DCMD_USAGE);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	/*
	 * parse the address space
	 */
	if (addrspace_str != NULL)
		addrspace = mdb_strtoull(addrspace_str);
	else
		addrspace = 0;

	rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);

	if (rc != DCMD_OK)
		return (rc);

	if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", mfn);
		return (DCMD_ERR);
	}

	if (piped) {
		mdb_printf("0x%lr\n", pfn);
		return (DCMD_OK);
	}

	mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr, pfn);

	if (is_xpv)
		mdb_printf(" (mfn 0x%lr)", mfn);

	mdb_printf("\n");

	return (DCMD_OK);
}

/*
 * Report all hats that either use the PFN as a pagetable or that map the
 * page.
 */
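/*
 * Walk every hat (khat heads the list) and, for each htable, report two
 * kinds of use: the PFN being the pagetable page backing that htable, and
 * any PTE within the pagetable mapping the PFN.  Kernel addresses are only
 * reported once, against khat.
 */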
static int
do_report_maps(pfn_t pfn)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int entry;
	x86pte_t pte;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * only report kernel addresses once
				 */
				if (hatp != khat &&
				    htable.ht_vaddr >= kernelbase)
					continue;

				/*
				 * Is the PFN a pagetable itself?
				 */
				if (htable.ht_pfn == pfn) {
					mdb_printf("Pagetable for "
					    "hat=%p htable=%p\n", hatp, ht);
					continue;
				}

				/*
				 * otherwise, examine page mappings
				 */
				level = htable.ht_level;
				if (level > mmu.max_page_level)
					continue;
				paddr = mmu_ptob((physaddr_t)htable.ht_pfn);
				for (entry = 0;
				    entry < HTABLE_NUM_PTES(&htable);
				    ++entry) {

					base = htable.ht_vaddr + entry *
					    mmu.level_size[level];

					/*
					 * only report kernel addresses once
					 */
					if (hatp != khat &&
					    base >= kernelbase)
						continue;

					len = mdb_pread(&pte, mmu.pte_size,
					    paddr + entry * mmu.pte_size);
					if (len != mmu.pte_size)
						return (DCMD_ERR);

					if ((pte & PT_VALID) == 0)
						continue;
					if (level == 0 || !(pte & PT_PAGESIZE))
						pte &= PT_PADDR;
					else
						pte &= PT_PADDR_LGPG;
					if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
						continue;
					mdb_printf("hat=%p maps addr=%p\n",
					    hatp, (caddr_t)base);
				}
			}
		}
	}

	return (DCMD_OK);
}

/*
 * Given a PFN as its address argument, print out the uses of it.
 */
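/*
 * Example, assuming this is registered as the ::report_maps dcmd; with -m
 * the address is taken as an MFN rather than a PFN:
 *	> <pfn>::report_maps
 */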
/*ARGSUSED*/
int
report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
		return (DCMD_USAGE);

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_report_maps(pfn));
}

static int
do_ptable_dcmd(pfn_t pfn, uint64_t level)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int entry;
	uintptr_t pagesize;
	x86pte_t pte;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * Is this the PFN for this htable
				 */
				if (htable.ht_pfn == pfn)
					goto found_it;
			}
		}
	}

found_it:
	if (htable.ht_pfn == pfn) {
		mdb_printf("htable=%p\n", ht);
		if (level == (uint64_t)-1) {
			level = htable.ht_level;
		} else if (htable.ht_level != level) {
			mdb_warn("htable has level %d but forcing level %lu\n",
			    htable.ht_level, level);
		}
		base = htable.ht_vaddr;
		pagesize = mmu.level_size[level];
	} else {
		if (level == (uint64_t)-1)
			level = 0;
		mdb_warn("couldn't find matching htable, using level=%lu, "
		    "base address=0x0\n", level);
		base = 0;
		pagesize = mmu.level_size[level];
	}

	paddr = mmu_ptob((physaddr_t)pfn);
	for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
		len = mdb_pread(&pte, mmu.pte_size,
		    paddr + entry * mmu.pte_size);
		if (len != mmu.pte_size)
			return (DCMD_ERR);

		if (pte == 0)
			continue;

		mdb_printf("[%3d] va=0x%p ", entry,
		    VA_SIGN_EXTEND(base + entry * pagesize));
		do_pte_dcmd(level, pte);
	}

	return (DCMD_OK);
}

/*
 * Dump the page table at the given PFN
 */
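/*
 * Example, assuming this is registered as the ::ptable dcmd; -m takes the
 * address as an MFN and -l forces the level used to decode the entries:
 *	> <pfn>::ptable -l <level>
 */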
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;
	uint64_t level = (uint64_t)-1;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag,
	    'l', MDB_OPT_UINT64, &level, NULL) != argc)
		return (DCMD_USAGE);

	if (level != (uint64_t)-1 && level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", level);
		return (DCMD_ERR);
	}

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_ptable_dcmd(pfn, level));
}

static int
do_htables_dcmd(hat_t *hatp)
{
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	int h;

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (h = 0; h < hat.hat_num_hash; ++h) {
		if (mdb_vread(&ht, sizeof (htable_t *),
		    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
			mdb_warn("Couldn't read htable ptr\n");
			return (DCMD_ERR);
		}
		for (; ht != NULL; ht = htable.ht_next) {
			mdb_printf("%p\n", ht);
			if (mdb_vread(&htable, sizeof (htable_t),
			    (uintptr_t)ht) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
		}
	}
	return (DCMD_OK);
}

/*
 * Dump the htables for the given hat
 */
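/*
 * Example, assuming this is registered as the ::htables dcmd (the address
 * of a struct hat is supplied):
 *	> <hat-address>::htables
 */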
/*ARGSUSED*/
int
htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	hat_t *hat;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	hat = (hat_t *)addr;

	return (do_htables_dcmd(hat));
}

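/*
 * Rebuild a virtual address from the per-level table indices accumulated
 * during the walk in ptmap_dcmd, then sign-extend it to a canonical address.
 */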
static uintptr_t
entry2va(size_t *entries)
{
	uintptr_t va = 0;

	for (level_t l = mmu.max_level; l >= 0; l--)
		va += entries[l] << mmu.level_shift[l];

	return (VA_SIGN_EXTEND(va));
}

static void
ptmap_report(size_t *entries, uintptr_t start,
    boolean_t user, boolean_t writable, boolean_t wflag)
{
	uint64_t curva = entry2va(entries);

	mdb_printf("mapped %s,%s range of %lu bytes: %a-%a\n",
	    user ? "user" : "kernel", writable ? "writable" : "read-only",
	    curva - start, start, curva - 1);
	if (wflag && start >= kernelbase)
		(void) mdb_call_dcmd("whatis", start, DCMD_ADDRSPEC, 0, NULL);
}

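/*
 * Given the physical address of a top-level page table, walk the whole
 * page table tree iteratively, descending into lower-level tables and
 * ascending when a table is exhausted, while coalescing runs of valid
 * mappings with identical user/writable attributes into reported ranges.
 * With -w, ::whatis is also run on the start of each kernel range.
 */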
int
ptmap_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	physaddr_t paddrs[MAX_NUM_LEVEL] = { 0, };
	size_t entry[MAX_NUM_LEVEL] = { 0, };
	uintptr_t start = (uintptr_t)-1;
	boolean_t writable = B_FALSE;
	boolean_t user = B_FALSE;
	boolean_t wflag = B_FALSE;
	level_t curlevel;

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'w', MDB_OPT_SETBITS, TRUE, &wflag, NULL) != argc)
		return (DCMD_USAGE);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	curlevel = mmu.max_level;

	paddrs[curlevel] = addr & MMU_PAGEMASK;

	for (;;) {
		physaddr_t pte_addr;
		x86pte_t pte;

		pte_addr = paddrs[curlevel] +
		    (entry[curlevel] << mmu.pte_size_shift);

		if (mdb_pread(&pte, sizeof (pte), pte_addr) != sizeof (pte)) {
			mdb_warn("couldn't read pte at %p", pte_addr);
			return (DCMD_ERR);
		}

		if (PTE_GET(pte, PT_VALID) == 0) {
			if (start != (uintptr_t)-1) {
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = (uintptr_t)-1;
			}
		} else if (curlevel == 0 || PTE_GET(pte, PT_PAGESIZE)) {
			if (start == (uintptr_t)-1) {
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			} else if (user != PTE_GET(pte, PT_USER) ||
			    writable != PTE_GET(pte, PT_WRITABLE)) {
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			}
		} else {
			/* Descend a level. */
			physaddr_t pa = mmu_ptob(pte2mfn(pte, curlevel));
			paddrs[--curlevel] = pa;
			entry[curlevel] = 0;
			continue;
		}

		while (++entry[curlevel] == mmu.ptes_per_table) {
			/* Ascend back up. */
			entry[curlevel] = 0;
			if (curlevel == mmu.max_level) {
				if (start != (uintptr_t)-1) {
					ptmap_report(entry, start,
					    user, writable, wflag);
				}
				goto out;
			}

			curlevel++;
		}
	}

out:
	return (DCMD_OK);
}