xref: /titanic_51/usr/src/cmd/mdb/i86pc/modules/unix/i86mmu.c (revision 45916cd2fec6e79bca5dee0421bd39e3c2910d1e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * This part of the file contains the mdb support for dcmds:
31  *	::memseg_list
32  *	::page_num2pp
33  * and walkers for:
34  *	memseg - a memseg list walker for ::memseg_list
35  *
36  */
37 
38 #include <sys/types.h>
39 #include <sys/machparam.h>
40 #include <sys/controlregs.h>
41 #include <vm/as.h>
42 
43 #include <mdb/mdb_modapi.h>
44 #include <mdb/mdb_target.h>
45 
46 #include <vm/page.h>
47 #include <vm/hat_i86.h>
48 
/*
 * Argument/result pair for the ::page_num2pp memseg walk callback:
 * maps a page frame number to its corresponding page_t address.
 */
struct pfn2pp {
	pfn_t pfn;	/* in: page frame number to look up */
	page_t *pp;	/* out: matching page_t, or NULL if not found */
};
53 
54 static int do_va2pfn(uintptr_t, struct as *, int, physaddr_t *);
55 static void get_mmu(void);
56 
57 int
58 platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
59 {
60 	if (asp == NULL)
61 		return (DCMD_ERR);
62 
63 	/*
64 	 * The kernel has to at least have made it thru mmu_init()
65 	 */
66 	get_mmu();
67 	if (mmu.num_level == 0)
68 		return (DCMD_ERR);
69 
70 	return (do_va2pfn(addr, asp, 0, pap));
71 }
72 
73 
74 /*ARGSUSED*/
75 int
76 page_num2pp_cb(uintptr_t addr, void *ignored, uintptr_t *data)
77 {
78 	struct memseg ms, *msp = &ms;
79 	struct pfn2pp *p = (struct pfn2pp *)data;
80 
81 	if (mdb_vread(msp, sizeof (struct memseg), addr) == -1) {
82 		mdb_warn("can't read memseg at %#lx", addr);
83 		return (DCMD_ERR);
84 	}
85 
86 	if (p->pfn >= msp->pages_base && p->pfn < msp->pages_end) {
87 		p->pp = msp->pages + (p->pfn - msp->pages_base);
88 		return (WALK_DONE);
89 	}
90 
91 	return (WALK_NEXT);
92 }
93 
94 /*
95  * ::page_num2pp dcmd
96  */
97 /*ARGSUSED*/
98 int
99 page_num2pp(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
100 {
101 	struct pfn2pp pfn2pp;
102 	page_t page;
103 
104 	if ((flags & DCMD_ADDRSPEC) == 0) {
105 		mdb_warn("page frame number missing\n");
106 			return (DCMD_USAGE);
107 	}
108 
109 	pfn2pp.pfn = (pfn_t)addr;
110 	pfn2pp.pp = NULL;
111 
112 	if (mdb_walk("memseg", (mdb_walk_cb_t)page_num2pp_cb,
113 	    (void *)&pfn2pp) == -1) {
114 		mdb_warn("can't walk memseg");
115 		return (DCMD_ERR);
116 	}
117 
118 	if (pfn2pp.pp == NULL)
119 		return (DCMD_ERR);
120 
121 	mdb_printf("%x has page at %p\n", pfn2pp.pfn, pfn2pp.pp);
122 
123 	if (mdb_vread(&page, sizeof (page_t),
124 	    (uintptr_t)pfn2pp.pp) == -1) {
125 		mdb_warn("can't read page at %p", &page);
126 		return (DCMD_ERR);
127 	}
128 
129 	if (page.p_pagenum != pfn2pp.pfn) {
130 		mdb_warn("WARNING! Found page structure contains "
131 			"different pagenumber %x\n", page.p_pagenum);
132 	}
133 
134 	return (DCMD_OK);
135 }
136 
137 
138 
139 
140 
141 /*
142  * ::memseg_list dcmd and walker to implement it.
143  */
144 /*ARGSUSED*/
145 int
146 memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
147 {
148 	struct memseg ms;
149 
150 	if (!(flags & DCMD_ADDRSPEC)) {
151 		if (mdb_pwalk_dcmd("memseg", "memseg_list",
152 		    0, NULL, 0) == -1) {
153 			mdb_warn("can't walk memseg");
154 			return (DCMD_ERR);
155 		}
156 		return (DCMD_OK);
157 	}
158 
159 	if (DCMD_HDRSPEC(flags))
160 		mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
161 			"PAGES", "EPAGES", "BASE", "END");
162 
163 	if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
164 		mdb_warn("can't read memseg at %#lx", addr);
165 		return (DCMD_ERR);
166 	}
167 
168 	mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
169 		ms.pages, ms.epages, ms.pages_base, ms.pages_end);
170 
171 	return (DCMD_OK);
172 }
173 
174 /*
175  * walk the memseg structures
176  */
177 int
178 memseg_walk_init(mdb_walk_state_t *wsp)
179 {
180 	if (wsp->walk_addr != NULL) {
181 		mdb_warn("memseg only supports global walks\n");
182 		return (WALK_ERR);
183 	}
184 
185 	if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
186 		mdb_warn("symbol 'memsegs' not found");
187 		return (WALK_ERR);
188 	}
189 
190 	wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
191 	return (WALK_NEXT);
192 
193 }
194 
195 int
196 memseg_walk_step(mdb_walk_state_t *wsp)
197 {
198 	int status;
199 
200 	if (wsp->walk_addr == 0) {
201 		return (WALK_DONE);
202 	}
203 
204 	if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
205 	    wsp->walk_addr) == -1) {
206 		mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
207 		return (WALK_DONE);
208 	}
209 
210 	status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
211 	    wsp->walk_cbdata);
212 
213 	wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);
214 
215 	return (status);
216 }
217 
/*
 * memseg walk finalizer: release the scratch buffer allocated by
 * memseg_walk_init().
 */
void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (struct memseg));
}
223 
224 /*
225  * HAT related dcmds:
226  *
227  * ::pte [-p XXXXXXXX] [-l 0/1/2/3]
228  *
229  * dcmd that interprets the -p argument as a page table entry and
230  * prints it in more human readable form. The PTE is assumed to be in
231  * a level 0 page table, unless -l specifies another level.
232  *
233  * ::vatopfn [-v] [-a as]
234  *
235  * Given a virtual address, returns the PFN, if any, mapped at the address.
236  * -v shows the intermediate htable/page table entries used to resolve the
237  * mapping. By default the virtual address is assumed to be in the kernel's
238  * address space.  -a is used to specify a different address space.
239  */
240 
struct hat *khat;		/* value of kas.a_hat */
struct hat_mmu_info mmu;	/* copy of the kernel's "mmu" description */
uintptr_t kernelbase;		/* addresses >= this belong to the kernel */
244 
245 /*
246  * read mmu parameters from kernel
247  */
248 static void
249 get_mmu(void)
250 {
251 	struct as kas;
252 
253 	if (mmu.num_level != 0)
254 		return;
255 
256 	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
257 		mdb_warn("Can't use HAT information before mmu_init()\n");
258 	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
259 		mdb_warn("Couldn't find kas - kernel's struct as\n");
260 	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
261 		mdb_warn("Couldn't find kernelbase\n");
262 	khat = kas.a_hat;
263 }
264 
265 /*
266  * Print a PTE in more human friendly way. The PTE is assumed to be in
267  * a level 0 page table, unless -l specifies another level.
268  *
269  * The PTE value can be specified as the -p option, since on a 32 bit kernel
270  * with PAE running it's larger than a uintptr_t.
271  */
272 static int
273 do_pte_dcmd(int level, uint64_t pte)
274 {
275 	static char *attr[] = {
276 	    "wrback", "wrthru", "uncached", "uncached",
277 	    "wrback", "wrthru", "wrcombine", "uncached"};
278 	int pat_index = 0;
279 
280 	mdb_printf("PTE=%llx: ", pte);
281 	if (PTE_GET(pte, mmu.pt_nx))
282 		mdb_printf("noexec ");
283 
284 	mdb_printf("page=0x%llx ", PTE2PFN(pte, level));
285 
286 	if (PTE_GET(pte, PT_NOCONSIST))
287 		mdb_printf("noconsist ");
288 
289 	if (PTE_GET(pte, PT_NOSYNC))
290 		mdb_printf("nosync ");
291 
292 	if (PTE_GET(pte, mmu.pt_global))
293 		mdb_printf("global ");
294 
295 	if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
296 		mdb_printf("largepage ");
297 
298 	if (level > 0 && PTE_GET(pte, PT_MOD))
299 		mdb_printf("mod ");
300 
301 	if (level > 0 && PTE_GET(pte, PT_REF))
302 		mdb_printf("ref ");
303 
304 	if (PTE_GET(pte, PT_USER))
305 		mdb_printf("user ");
306 
307 	if (PTE_GET(pte, PT_WRITABLE))
308 		mdb_printf("write ");
309 
310 	/*
311 	 * Report non-standard cacheability
312 	 */
313 	pat_index = 0;
314 	if (level > 0) {
315 		if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
316 			pat_index += 4;
317 	} else {
318 		if (PTE_GET(pte, PT_PAT_4K))
319 			pat_index += 4;
320 	}
321 
322 	if (PTE_GET(pte, PT_NOCACHE))
323 		pat_index += 2;
324 
325 	if (PTE_GET(pte, PT_WRITETHRU))
326 		pat_index += 1;
327 
328 	if (pat_index != 0)
329 		mdb_printf("%s", attr[pat_index]);
330 
331 	if (PTE_GET(pte, PT_VALID) == 0)
332 		mdb_printf(" !VALID ");
333 
334 	mdb_printf("\n");
335 	return (DCMD_OK);
336 }
337 
338 /*
339  * Print a PTE in more human friendly way. The PTE is assumed to be in
340  * a level 0 page table, unless -l specifies another level.
341  *
342  * The PTE value can be specified as the -p option, since on a 32 bit kernel
343  * with PAE running it's larger than a uintptr_t.
344  */
345 /*ARGSUSED*/
346 int
347 pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
348 {
349 	int level = 0;
350 	uint64_t pte = 0;
351 	char *level_str = NULL;
352 	char *pte_str = NULL;
353 
354 	/*
355 	 * The kernel has to at least have made it thru mmu_init()
356 	 */
357 	get_mmu();
358 	if (mmu.num_level == 0)
359 		return (DCMD_ERR);
360 
361 	if (mdb_getopts(argc, argv,
362 	    'p', MDB_OPT_STR, &pte_str,
363 	    'l', MDB_OPT_STR, &level_str) != argc)
364 		return (DCMD_USAGE);
365 
366 	/*
367 	 * parse the PTE to decode, if it's 0, we don't do anything
368 	 */
369 	if (pte_str != NULL) {
370 		pte = mdb_strtoull(pte_str);
371 	} else {
372 		if ((flags & DCMD_ADDRSPEC) == 0)
373 			return (DCMD_USAGE);
374 		pte = addr;
375 	}
376 	if (pte == 0)
377 		return (DCMD_OK);
378 
379 	/*
380 	 * parse the level if supplied
381 	 */
382 	if (level_str != NULL) {
383 		level = mdb_strtoull(level_str);
384 		if (level < 0 || level > mmu.max_level)
385 			return (DCMD_ERR);
386 	}
387 
388 	return (do_pte_dcmd(level, pte));
389 }
390 
391 static int
392 do_va2pfn(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap)
393 {
394 	struct as as;
395 	struct hat *hatp;
396 	struct hat hat;
397 	htable_t *ht;
398 	htable_t htable;
399 	uintptr_t base;
400 	int h;
401 	int level;
402 	int found = 0;
403 	x86pte_t pte;
404 	x86pte_t buf;
405 	x86pte32_t *pte32 = (x86pte32_t *)&buf;
406 	physaddr_t paddr;
407 	size_t len;
408 
409 	if (asp != NULL) {
410 		if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
411 			mdb_warn("Couldn't read struct as\n");
412 			return (DCMD_ERR);
413 		}
414 		hatp = as.a_hat;
415 	} else {
416 		hatp = khat;
417 	}
418 
419 	/*
420 	 * read the hat and its hash table
421 	 */
422 	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
423 		mdb_warn("Couldn't read struct hat\n");
424 		return (DCMD_ERR);
425 	}
426 
427 	/*
428 	 * read the htable hashtable
429 	 */
430 	*pap = 0;
431 	for (level = 0; level <= mmu.max_level; ++level) {
432 		if (level == mmu.max_level)
433 			base = 0;
434 		else
435 			base = addr & mmu.level_mask[level + 1];
436 
437 		for (h = 0; h < hat.hat_num_hash; ++h) {
438 			if (mdb_vread(&ht, sizeof (htable_t *),
439 			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
440 				mdb_warn("Couldn't read htable\n");
441 				return (DCMD_ERR);
442 			}
443 			for (; ht != NULL; ht = htable.ht_next) {
444 				if (mdb_vread(&htable, sizeof (htable_t),
445 				    (uintptr_t)ht) == -1) {
446 					mdb_warn("Couldn't read htable\n");
447 					return (DCMD_ERR);
448 				}
449 				if (htable.ht_vaddr != base ||
450 				    htable.ht_level != level)
451 					continue;
452 
453 				/*
454 				 * found - read the page table entry
455 				 */
456 				paddr = htable.ht_pfn << MMU_PAGESHIFT;
457 				paddr += ((addr - base) >>
458 				    mmu.level_shift[level]) <<
459 				    mmu.pte_size_shift;
460 				len = mdb_pread(&buf, mmu.pte_size, paddr);
461 				if (len != mmu.pte_size)
462 					return (DCMD_ERR);
463 				if (mmu.pte_size == sizeof (x86pte_t))
464 					pte = buf;
465 				else
466 					pte = *pte32;
467 
468 				if (!found) {
469 					if (PTE_IS_LGPG(pte, level))
470 						paddr = pte & PT_PADDR_LGPG;
471 					else
472 						paddr = pte & PT_PADDR;
473 					paddr += addr & mmu.level_offset[level];
474 					*pap = paddr;
475 					found = 1;
476 				}
477 				if (print_level == 0)
478 					continue;
479 				mdb_printf("\tlevel=%d htable=%p pte=%llx\n",
480 				    level, ht, pte);
481 			}
482 		}
483 	}
484 
485 done:
486 	if (!found)
487 		return (DCMD_ERR);
488 	return (DCMD_OK);
489 }
490 
491 int
492 va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
493 {
494 	uintptr_t addrspace;
495 	char *addrspace_str = NULL;
496 	uint64_t physaddr;
497 	int rc;
498 
499 	/*
500 	 * The kernel has to at least have made it thru mmu_init()
501 	 */
502 	get_mmu();
503 	if (mmu.num_level == 0)
504 		return (DCMD_ERR);
505 
506 	if (mdb_getopts(argc, argv,
507 	    'a', MDB_OPT_STR, &addrspace_str) != argc)
508 		return (DCMD_USAGE);
509 
510 	if ((flags & DCMD_ADDRSPEC) == 0)
511 		return (DCMD_USAGE);
512 
513 	/*
514 	 * parse the address space
515 	 */
516 	if (addrspace_str != NULL)
517 		addrspace = mdb_strtoull(addrspace_str);
518 	else
519 		addrspace = 0;
520 
521 	rc = do_va2pfn(addr, (struct as *)addrspace, 1, &physaddr);
522 
523 	if (rc == DCMD_OK)
524 		mdb_printf("Virtual %p maps Physical %llx\n", addr, physaddr);
525 
526 	return (rc);
527 }
528 
529 /*
530  * Report all hat's that either use PFN as a page table or that map the page.
531  */
532 static int
533 do_report_maps(pfn_t pfn)
534 {
535 	struct hat *hatp, *end;
536 	struct hat hat;
537 	htable_t *ht;
538 	htable_t htable;
539 	uintptr_t base;
540 	int h;
541 	int level;
542 	int entry;
543 	x86pte_t pte;
544 	x86pte_t buf;
545 	x86pte32_t *pte32 = (x86pte32_t *)&buf;
546 	physaddr_t paddr;
547 	size_t len;
548 	int count;
549 
550 	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)khat) == -1) {
551 		mdb_warn("Couldn't read khat\n");
552 		return (DCMD_ERR);
553 	}
554 
555 	end = hat.hat_next;
556 
557 	/*
558 	 * The hats are kept in a circular list with khat at the head, but
559 	 * not part of the list proper. Accordingly, we know when we pass
560 	 * knat.hat_next a second time that we've iterated through every
561 	 * hat structure.
562 	 */
563 	for (hatp = khat, count = 0; hatp != end || count++ == 0;
564 	    hatp = hat.hat_next) {
565 		/*
566 		 * read the hat and its hash table
567 		 */
568 		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
569 			mdb_warn("Couldn't read struct hat\n");
570 			return (DCMD_ERR);
571 		}
572 
573 		/*
574 		 * read the htable hashtable
575 		 */
576 		paddr = 0;
577 		for (h = 0; h < hat.hat_num_hash; ++h) {
578 			if (mdb_vread(&ht, sizeof (htable_t *),
579 			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
580 				mdb_warn("Couldn't read htable\n");
581 				return (DCMD_ERR);
582 			}
583 			for (; ht != NULL; ht = htable.ht_next) {
584 				if (mdb_vread(&htable, sizeof (htable_t),
585 				    (uintptr_t)ht) == -1) {
586 					mdb_warn("Couldn't read htable\n");
587 					return (DCMD_ERR);
588 				}
589 
590 				/*
591 				 * only report kernel addresses once
592 				 */
593 				if (hatp != khat &&
594 				    htable.ht_vaddr >= kernelbase)
595 					continue;
596 
597 				/*
598 				 * Is the PFN a pagetable itself?
599 				 */
600 				if (htable.ht_pfn == pfn) {
601 					mdb_printf("Pagetable for "
602 					    "hat=%p htable=%p\n", hatp, ht);
603 					continue;
604 				}
605 
606 				/*
607 				 * otherwise, examine page mappings
608 				 */
609 				level = htable.ht_level;
610 				if (level > mmu.max_page_level)
611 					continue;
612 				paddr = htable.ht_pfn << MMU_PAGESHIFT;
613 				for (entry = 0; entry < htable.ht_num_ptes;
614 				    ++entry) {
615 
616 					base = htable.ht_vaddr + entry *
617 					    mmu.level_size[level];
618 
619 					/*
620 					 * only report kernel addresses once
621 					 */
622 					if (hatp != khat &&
623 					    base >= kernelbase)
624 						continue;
625 
626 					len = mdb_pread(&buf, mmu.pte_size,
627 					    paddr + entry * mmu.pte_size);
628 					if (len != mmu.pte_size)
629 						return (DCMD_ERR);
630 					if (mmu.pte_size == sizeof (x86pte_t))
631 						pte = buf;
632 					else
633 						pte = *pte32;
634 
635 					if ((pte & PT_VALID) == 0)
636 						continue;
637 					if (level == 0 || !(pte & PT_PAGESIZE))
638 						pte &= PT_PADDR;
639 					else
640 						pte &= PT_PADDR_LGPG;
641 					if ((pte >> MMU_PAGESHIFT) != pfn)
642 						continue;
643 					mdb_printf("hat=%p maps addr=%p\n",
644 						hatp, (caddr_t)base);
645 				}
646 			}
647 		}
648 	}
649 
650 done:
651 	return (DCMD_OK);
652 }
653 
654 /*
655  * given a PFN as its address argument, prints out the uses of it
656  */
657 /*ARGSUSED*/
658 int
659 report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
660 {
661 	/*
662 	 * The kernel has to at least have made it thru mmu_init()
663 	 */
664 	get_mmu();
665 	if (mmu.num_level == 0)
666 		return (DCMD_ERR);
667 
668 	if ((flags & DCMD_ADDRSPEC) == 0)
669 		return (DCMD_USAGE);
670 
671 	return (do_report_maps((pfn_t)addr));
672 }
673 
674 /*
675  * Dump the page table at the given PFN
676  */
677 static int
678 do_ptable_dcmd(pfn_t pfn)
679 {
680 	struct hat *hatp, *end;
681 	struct hat hat;
682 	htable_t *ht;
683 	htable_t htable;
684 	uintptr_t base;
685 	int h;
686 	int level;
687 	int entry;
688 	uintptr_t pagesize;
689 	x86pte_t pte;
690 	x86pte_t buf;
691 	x86pte32_t *pte32 = (x86pte32_t *)&buf;
692 	physaddr_t paddr;
693 	size_t len;
694 	int count;
695 
696 	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)khat) == -1) {
697 		mdb_warn("Couldn't read khat\n");
698 		return (DCMD_ERR);
699 	}
700 
701 	end = hat.hat_next;
702 
703 	/*
704 	 * The hats are kept in a circular list with khat at the head, but
705 	 * not part of the list proper. Accordingly, we know when we pass
706 	 * knat.hat_next a second time that we've iterated through every
707 	 * hat structure.
708 	 */
709 	for (hatp = khat, count = 0; hatp != end || count++ == 0;
710 	    hatp = hat.hat_next) {
711 		/*
712 		 * read the hat and its hash table
713 		 */
714 		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
715 			mdb_warn("Couldn't read struct hat\n");
716 			return (DCMD_ERR);
717 		}
718 
719 		/*
720 		 * read the htable hashtable
721 		 */
722 		paddr = 0;
723 		for (h = 0; h < hat.hat_num_hash; ++h) {
724 			if (mdb_vread(&ht, sizeof (htable_t *),
725 			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
726 				mdb_warn("Couldn't read htable\n");
727 				return (DCMD_ERR);
728 			}
729 			for (; ht != NULL; ht = htable.ht_next) {
730 				if (mdb_vread(&htable, sizeof (htable_t),
731 				    (uintptr_t)ht) == -1) {
732 					mdb_warn("Couldn't read htable\n");
733 					return (DCMD_ERR);
734 				}
735 
736 				/*
737 				 * Is this the PFN for this htable
738 				 */
739 				if (htable.ht_pfn == pfn)
740 					goto found_it;
741 			}
742 		}
743 	}
744 
745 found_it:
746 	if (htable.ht_pfn == pfn) {
747 		mdb_printf("htable=%p\n", ht);
748 		level = htable.ht_level;
749 		base = htable.ht_vaddr;
750 		pagesize = mmu.level_size[level];
751 	} else {
752 		mdb_printf("Unknown pagetable - assuming level/addr 0");
753 		level = 0;	/* assume level == 0 for PFN */
754 		base = 0;
755 		pagesize = MMU_PAGESIZE;
756 	}
757 
758 	paddr = pfn << MMU_PAGESHIFT;
759 	for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
760 		len = mdb_pread(&buf, mmu.pte_size,
761 		    paddr + entry * mmu.pte_size);
762 		if (len != mmu.pte_size)
763 			return (DCMD_ERR);
764 		if (mmu.pte_size == sizeof (x86pte_t))
765 			pte = buf;
766 		else
767 			pte = *pte32;
768 
769 		if (pte == 0)
770 			continue;
771 
772 		mdb_printf("[%3d] va=%p ", entry, base + entry * pagesize);
773 		do_pte_dcmd(level, pte);
774 	}
775 
776 done:
777 	return (DCMD_OK);
778 }
779 
780 /*
781  * given a PFN as its address argument, prints out the uses of it
782  */
783 /*ARGSUSED*/
784 int
785 ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
786 {
787 	/*
788 	 * The kernel has to at least have made it thru mmu_init()
789 	 */
790 	get_mmu();
791 	if (mmu.num_level == 0)
792 		return (DCMD_ERR);
793 
794 	if ((flags & DCMD_ADDRSPEC) == 0)
795 		return (DCMD_USAGE);
796 
797 	return (do_ptable_dcmd((pfn_t)addr));
798 }
799