xref: /titanic_41/usr/src/cmd/mdb/i86pc/modules/unix/i86mmu.c (revision 890e8ff10cfc85bc7d33064a9a30c3e8477b4813)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * This part of the file contains the mdb support for dcmds:
30  *	::memseg_list
31  *	::page_num2pp
32  * and walkers for:
33  *	memseg - a memseg list walker for ::memseg_list
34  *
35  */
36 
37 #include <sys/types.h>
38 #include <sys/machparam.h>
39 #include <sys/controlregs.h>
40 #include <vm/as.h>
41 
42 #include <mdb/mdb_modapi.h>
43 #include <mdb/mdb_target.h>
44 
45 #include <vm/page.h>
46 #include <vm/hat_i86.h>
47 
/*
 * Argument/result pair threaded through the memseg walk done by the
 * ::page_num2pp dcmd (see page_num2pp_cb()).
 */
struct pfn2pp {
	pfn_t pfn;	/* input: page frame number to look up */
	page_t *pp;	/* output: matching page_t address, NULL if none */
};

static int do_va2pfn(uintptr_t, struct as *, int, physaddr_t *);
static void get_mmu(void);
55 
56 int
57 platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
58 {
59 	if (asp == NULL)
60 		return (DCMD_ERR);
61 
62 	/*
63 	 * The kernel has to at least have made it thru mmu_init()
64 	 */
65 	get_mmu();
66 	if (mmu.num_level == 0)
67 		return (DCMD_ERR);
68 
69 	return (do_va2pfn(addr, asp, 0, pap));
70 }
71 
72 
73 /*ARGSUSED*/
74 int
75 page_num2pp_cb(uintptr_t addr, void *ignored, uintptr_t *data)
76 {
77 	struct memseg ms, *msp = &ms;
78 	struct pfn2pp *p = (struct pfn2pp *)data;
79 
80 	if (mdb_vread(msp, sizeof (struct memseg), addr) == -1) {
81 		mdb_warn("can't read memseg at %#lx", addr);
82 		return (DCMD_ERR);
83 	}
84 
85 	if (p->pfn >= msp->pages_base && p->pfn < msp->pages_end) {
86 		p->pp = msp->pages + (p->pfn - msp->pages_base);
87 		return (WALK_DONE);
88 	}
89 
90 	return (WALK_NEXT);
91 }
92 
93 /*
94  * ::page_num2pp dcmd
95  */
96 /*ARGSUSED*/
97 int
98 page_num2pp(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
99 {
100 	struct pfn2pp pfn2pp;
101 	page_t page;
102 
103 	if ((flags & DCMD_ADDRSPEC) == 0) {
104 		mdb_warn("page frame number missing\n");
105 			return (DCMD_USAGE);
106 	}
107 
108 	pfn2pp.pfn = (pfn_t)addr;
109 	pfn2pp.pp = NULL;
110 
111 	if (mdb_walk("memseg", (mdb_walk_cb_t)page_num2pp_cb,
112 	    (void *)&pfn2pp) == -1) {
113 		mdb_warn("can't walk memseg");
114 		return (DCMD_ERR);
115 	}
116 
117 	if (pfn2pp.pp == NULL)
118 		return (DCMD_ERR);
119 
120 	mdb_printf("%x has page at %p\n", pfn2pp.pfn, pfn2pp.pp);
121 
122 	if (mdb_vread(&page, sizeof (page_t),
123 	    (uintptr_t)pfn2pp.pp) == -1) {
124 		mdb_warn("can't read page at %p", &page);
125 		return (DCMD_ERR);
126 	}
127 
128 	if (page.p_pagenum != pfn2pp.pfn) {
129 		mdb_warn("WARNING! Found page structure contains "
130 			"different pagenumber %x\n", page.p_pagenum);
131 	}
132 
133 	return (DCMD_OK);
134 }
135 
136 
137 
138 
139 
140 /*
141  * ::memseg_list dcmd and walker to implement it.
142  */
143 /*ARGSUSED*/
144 int
145 memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
146 {
147 	struct memseg ms;
148 
149 	if (!(flags & DCMD_ADDRSPEC)) {
150 		if (mdb_pwalk_dcmd("memseg", "memseg_list",
151 		    0, NULL, 0) == -1) {
152 			mdb_warn("can't walk memseg");
153 			return (DCMD_ERR);
154 		}
155 		return (DCMD_OK);
156 	}
157 
158 	if (DCMD_HDRSPEC(flags))
159 		mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
160 			"PAGES", "EPAGES", "BASE", "END");
161 
162 	if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
163 		mdb_warn("can't read memseg at %#lx", addr);
164 		return (DCMD_ERR);
165 	}
166 
167 	mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
168 		ms.pages, ms.epages, ms.pages_base, ms.pages_end);
169 
170 	return (DCMD_OK);
171 }
172 
173 /*
174  * walk the memseg structures
175  */
176 int
177 memseg_walk_init(mdb_walk_state_t *wsp)
178 {
179 	if (wsp->walk_addr != NULL) {
180 		mdb_warn("memseg only supports global walks\n");
181 		return (WALK_ERR);
182 	}
183 
184 	if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
185 		mdb_warn("symbol 'memsegs' not found");
186 		return (WALK_ERR);
187 	}
188 
189 	wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
190 	return (WALK_NEXT);
191 
192 }
193 
194 int
195 memseg_walk_step(mdb_walk_state_t *wsp)
196 {
197 	int status;
198 
199 	if (wsp->walk_addr == 0) {
200 		return (WALK_DONE);
201 	}
202 
203 	if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
204 	    wsp->walk_addr) == -1) {
205 		mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
206 		return (WALK_DONE);
207 	}
208 
209 	status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
210 	    wsp->walk_cbdata);
211 
212 	wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);
213 
214 	return (status);
215 }
216 
/*
 * Clean up after a memseg walk: free the scratch buffer allocated in
 * memseg_walk_init().
 */
void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (struct memseg));
}
222 
223 /*
224  * HAT related dcmds:
225  *
226  * ::pte [-p XXXXXXXX] [-l 0/1/2/3]
227  *
228  * dcmd that interprets the -p argument as a page table entry and
229  * prints it in more human readable form. The PTE is assumed to be in
230  * a level 0 page table, unless -l specifies another level.
231  *
 * ::vatopfn [-a as]
 *
 * Given a virtual address, returns the PFN, if any, mapped at the address.
 * The intermediate htable/page table entries used to resolve the mapping
 * are always shown.  By default the virtual address is assumed to be in
 * the kernel's address space; -a is used to specify a different address
 * space.
238  */
239 
struct hat *khat;		/* value of kas.a_hat */
struct hat_mmu_info mmu;	/* target's MMU geometry, set up by mmu_init() */
uintptr_t kernelbase;		/* base of the kernel's address range */
243 
244 /*
245  * read mmu parameters from kernel
246  */
247 static void
248 get_mmu(void)
249 {
250 	struct as kas;
251 
252 	if (mmu.num_level != 0)
253 		return;
254 
255 	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
256 		mdb_warn("Can't use HAT information before mmu_init()\n");
257 	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
258 		mdb_warn("Couldn't find kas - kernel's struct as\n");
259 	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
260 		mdb_warn("Couldn't find kernelbase\n");
261 	khat = kas.a_hat;
262 }
263 
264 /*
265  * Print a PTE in more human friendly way. The PTE is assumed to be in
266  * a level 0 page table, unless -l specifies another level.
267  *
268  * The PTE value can be specified as the -p option, since on a 32 bit kernel
269  * with PAE running it's larger than a uintptr_t.
270  */
271 static int
272 do_pte_dcmd(int level, uint64_t pte)
273 {
274 	static char *attr[] = {
275 	    "wrback", "wrthru", "uncached", "uncached",
276 	    "wrback", "wrthru", "wrcombine", "uncached"};
277 	int pat_index = 0;
278 
279 	mdb_printf("PTE=%llx: ", pte);
280 	if (PTE_GET(pte, mmu.pt_nx))
281 		mdb_printf("noexec ");
282 
283 	mdb_printf("page=0x%llx ", PTE2PFN(pte, level));
284 
285 	if (PTE_GET(pte, PT_NOCONSIST))
286 		mdb_printf("noconsist ");
287 
288 	if (PTE_GET(pte, PT_NOSYNC))
289 		mdb_printf("nosync ");
290 
291 	if (PTE_GET(pte, mmu.pt_global))
292 		mdb_printf("global ");
293 
294 	if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
295 		mdb_printf("largepage ");
296 
297 	if (level > 0 && PTE_GET(pte, PT_MOD))
298 		mdb_printf("mod ");
299 
300 	if (level > 0 && PTE_GET(pte, PT_REF))
301 		mdb_printf("ref ");
302 
303 	if (PTE_GET(pte, PT_USER))
304 		mdb_printf("user ");
305 
306 	if (PTE_GET(pte, PT_WRITABLE))
307 		mdb_printf("write ");
308 
309 	/*
310 	 * Report non-standard cacheability
311 	 */
312 	pat_index = 0;
313 	if (level > 0) {
314 		if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
315 			pat_index += 4;
316 	} else {
317 		if (PTE_GET(pte, PT_PAT_4K))
318 			pat_index += 4;
319 	}
320 
321 	if (PTE_GET(pte, PT_NOCACHE))
322 		pat_index += 2;
323 
324 	if (PTE_GET(pte, PT_WRITETHRU))
325 		pat_index += 1;
326 
327 	if (pat_index != 0)
328 		mdb_printf("%s", attr[pat_index]);
329 
330 	if (PTE_GET(pte, PT_VALID) == 0)
331 		mdb_printf(" !VALID ");
332 
333 	mdb_printf("\n");
334 	return (DCMD_OK);
335 }
336 
337 /*
338  * Print a PTE in more human friendly way. The PTE is assumed to be in
339  * a level 0 page table, unless -l specifies another level.
340  *
341  * The PTE value can be specified as the -p option, since on a 32 bit kernel
342  * with PAE running it's larger than a uintptr_t.
343  */
344 /*ARGSUSED*/
345 int
346 pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
347 {
348 	int level = 0;
349 	uint64_t pte = 0;
350 	char *level_str = NULL;
351 	char *pte_str = NULL;
352 
353 	/*
354 	 * The kernel has to at least have made it thru mmu_init()
355 	 */
356 	get_mmu();
357 	if (mmu.num_level == 0)
358 		return (DCMD_ERR);
359 
360 	if (mdb_getopts(argc, argv,
361 	    'p', MDB_OPT_STR, &pte_str,
362 	    'l', MDB_OPT_STR, &level_str) != argc)
363 		return (DCMD_USAGE);
364 
365 	/*
366 	 * parse the PTE to decode, if it's 0, we don't do anything
367 	 */
368 	if (pte_str != NULL) {
369 		pte = mdb_strtoull(pte_str);
370 	} else {
371 		if ((flags & DCMD_ADDRSPEC) == 0)
372 			return (DCMD_USAGE);
373 		pte = addr;
374 	}
375 	if (pte == 0)
376 		return (DCMD_OK);
377 
378 	/*
379 	 * parse the level if supplied
380 	 */
381 	if (level_str != NULL) {
382 		level = mdb_strtoull(level_str);
383 		if (level < 0 || level > mmu.max_level)
384 			return (DCMD_ERR);
385 	}
386 
387 	return (do_pte_dcmd(level, pte));
388 }
389 
390 static int
391 do_va2pfn(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap)
392 {
393 	struct as as;
394 	struct hat *hatp;
395 	struct hat hat;
396 	htable_t *ht;
397 	htable_t htable;
398 	uintptr_t base;
399 	int h;
400 	int level;
401 	int found = 0;
402 	x86pte_t pte;
403 	x86pte_t buf;
404 	x86pte32_t *pte32 = (x86pte32_t *)&buf;
405 	physaddr_t paddr;
406 	size_t len;
407 
408 	if (asp != NULL) {
409 		if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
410 			mdb_warn("Couldn't read struct as\n");
411 			return (DCMD_ERR);
412 		}
413 		hatp = as.a_hat;
414 	} else {
415 		hatp = khat;
416 	}
417 
418 	/*
419 	 * read the hat and its hash table
420 	 */
421 	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
422 		mdb_warn("Couldn't read struct hat\n");
423 		return (DCMD_ERR);
424 	}
425 
426 	/*
427 	 * read the htable hashtable
428 	 */
429 	*pap = 0;
430 	for (level = 0; level <= mmu.max_level; ++level) {
431 		if (level == mmu.max_level)
432 			base = 0;
433 		else
434 			base = addr & mmu.level_mask[level + 1];
435 
436 		for (h = 0; h < hat.hat_num_hash; ++h) {
437 			if (mdb_vread(&ht, sizeof (htable_t *),
438 			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
439 				mdb_warn("Couldn't read htable\n");
440 				return (DCMD_ERR);
441 			}
442 			for (; ht != NULL; ht = htable.ht_next) {
443 				if (mdb_vread(&htable, sizeof (htable_t),
444 				    (uintptr_t)ht) == -1) {
445 					mdb_warn("Couldn't read htable\n");
446 					return (DCMD_ERR);
447 				}
448 				if (htable.ht_vaddr != base ||
449 				    htable.ht_level != level)
450 					continue;
451 
452 				/*
453 				 * found - read the page table entry
454 				 */
455 				paddr = htable.ht_pfn << MMU_PAGESHIFT;
456 				paddr += ((addr - base) >>
457 				    mmu.level_shift[level]) <<
458 				    mmu.pte_size_shift;
459 				len = mdb_pread(&buf, mmu.pte_size, paddr);
460 				if (len != mmu.pte_size)
461 					return (DCMD_ERR);
462 				if (mmu.pte_size == sizeof (x86pte_t))
463 					pte = buf;
464 				else
465 					pte = *pte32;
466 
467 				if (!found) {
468 					if (PTE_IS_LGPG(pte, level))
469 						paddr = pte & PT_PADDR_LGPG;
470 					else
471 						paddr = pte & PT_PADDR;
472 					paddr += addr & mmu.level_offset[level];
473 					*pap = paddr;
474 					found = 1;
475 				}
476 				if (print_level == 0)
477 					continue;
478 				mdb_printf("\tlevel=%d htable=%p pte=%llx\n",
479 				    level, ht, pte);
480 			}
481 		}
482 	}
483 
484 done:
485 	if (!found)
486 		return (DCMD_ERR);
487 	return (DCMD_OK);
488 }
489 
490 int
491 va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
492 {
493 	uintptr_t addrspace;
494 	char *addrspace_str = NULL;
495 	uint64_t physaddr;
496 	int rc;
497 
498 	/*
499 	 * The kernel has to at least have made it thru mmu_init()
500 	 */
501 	get_mmu();
502 	if (mmu.num_level == 0)
503 		return (DCMD_ERR);
504 
505 	if (mdb_getopts(argc, argv,
506 	    'a', MDB_OPT_STR, &addrspace_str) != argc)
507 		return (DCMD_USAGE);
508 
509 	if ((flags & DCMD_ADDRSPEC) == 0)
510 		return (DCMD_USAGE);
511 
512 	/*
513 	 * parse the address space
514 	 */
515 	if (addrspace_str != NULL)
516 		addrspace = mdb_strtoull(addrspace_str);
517 	else
518 		addrspace = 0;
519 
520 	rc = do_va2pfn(addr, (struct as *)addrspace, 1, &physaddr);
521 
522 	if (rc == DCMD_OK)
523 		mdb_printf("Virtual %p maps Physical %llx\n", addr, physaddr);
524 
525 	return (rc);
526 }
527 
528 /*
529  * Report all hat's that either use PFN as a page table or that map the page.
530  */
531 static int
532 do_report_maps(pfn_t pfn)
533 {
534 	struct hat *hatp;
535 	struct hat hat;
536 	htable_t *ht;
537 	htable_t htable;
538 	uintptr_t base;
539 	int h;
540 	int level;
541 	int entry;
542 	x86pte_t pte;
543 	x86pte_t buf;
544 	x86pte32_t *pte32 = (x86pte32_t *)&buf;
545 	physaddr_t paddr;
546 	size_t len;
547 
548 	/*
549 	 * The hats are kept in a list with khat at the head.
550 	 */
551 	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
552 		/*
553 		 * read the hat and its hash table
554 		 */
555 		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
556 			mdb_warn("Couldn't read struct hat\n");
557 			return (DCMD_ERR);
558 		}
559 
560 		/*
561 		 * read the htable hashtable
562 		 */
563 		paddr = 0;
564 		for (h = 0; h < hat.hat_num_hash; ++h) {
565 			if (mdb_vread(&ht, sizeof (htable_t *),
566 			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
567 				mdb_warn("Couldn't read htable\n");
568 				return (DCMD_ERR);
569 			}
570 			for (; ht != NULL; ht = htable.ht_next) {
571 				if (mdb_vread(&htable, sizeof (htable_t),
572 				    (uintptr_t)ht) == -1) {
573 					mdb_warn("Couldn't read htable\n");
574 					return (DCMD_ERR);
575 				}
576 
577 				/*
578 				 * only report kernel addresses once
579 				 */
580 				if (hatp != khat &&
581 				    htable.ht_vaddr >= kernelbase)
582 					continue;
583 
584 				/*
585 				 * Is the PFN a pagetable itself?
586 				 */
587 				if (htable.ht_pfn == pfn) {
588 					mdb_printf("Pagetable for "
589 					    "hat=%p htable=%p\n", hatp, ht);
590 					continue;
591 				}
592 
593 				/*
594 				 * otherwise, examine page mappings
595 				 */
596 				level = htable.ht_level;
597 				if (level > mmu.max_page_level)
598 					continue;
599 				paddr = htable.ht_pfn << MMU_PAGESHIFT;
600 				for (entry = 0; entry < htable.ht_num_ptes;
601 				    ++entry) {
602 
603 					base = htable.ht_vaddr + entry *
604 					    mmu.level_size[level];
605 
606 					/*
607 					 * only report kernel addresses once
608 					 */
609 					if (hatp != khat &&
610 					    base >= kernelbase)
611 						continue;
612 
613 					len = mdb_pread(&buf, mmu.pte_size,
614 					    paddr + entry * mmu.pte_size);
615 					if (len != mmu.pte_size)
616 						return (DCMD_ERR);
617 					if (mmu.pte_size == sizeof (x86pte_t))
618 						pte = buf;
619 					else
620 						pte = *pte32;
621 
622 					if ((pte & PT_VALID) == 0)
623 						continue;
624 					if (level == 0 || !(pte & PT_PAGESIZE))
625 						pte &= PT_PADDR;
626 					else
627 						pte &= PT_PADDR_LGPG;
628 					if ((pte >> MMU_PAGESHIFT) != pfn)
629 						continue;
630 					mdb_printf("hat=%p maps addr=%p\n",
631 						hatp, (caddr_t)base);
632 				}
633 			}
634 		}
635 	}
636 
637 done:
638 	return (DCMD_OK);
639 }
640 
641 /*
642  * given a PFN as its address argument, prints out the uses of it
643  */
644 /*ARGSUSED*/
645 int
646 report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
647 {
648 	/*
649 	 * The kernel has to at least have made it thru mmu_init()
650 	 */
651 	get_mmu();
652 	if (mmu.num_level == 0)
653 		return (DCMD_ERR);
654 
655 	if ((flags & DCMD_ADDRSPEC) == 0)
656 		return (DCMD_USAGE);
657 
658 	return (do_report_maps((pfn_t)addr));
659 }
660 
661 /*
662  * Dump the page table at the given PFN
663  */
664 static int
665 do_ptable_dcmd(pfn_t pfn)
666 {
667 	struct hat *hatp;
668 	struct hat hat;
669 	htable_t *ht;
670 	htable_t htable;
671 	uintptr_t base;
672 	int h;
673 	int level;
674 	int entry;
675 	uintptr_t pagesize;
676 	x86pte_t pte;
677 	x86pte_t buf;
678 	x86pte32_t *pte32 = (x86pte32_t *)&buf;
679 	physaddr_t paddr;
680 	size_t len;
681 
682 	/*
683 	 * The hats are kept in a list with khat at the head.
684 	 */
685 	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
686 		/*
687 		 * read the hat and its hash table
688 		 */
689 		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
690 			mdb_warn("Couldn't read struct hat\n");
691 			return (DCMD_ERR);
692 		}
693 
694 		/*
695 		 * read the htable hashtable
696 		 */
697 		paddr = 0;
698 		for (h = 0; h < hat.hat_num_hash; ++h) {
699 			if (mdb_vread(&ht, sizeof (htable_t *),
700 			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
701 				mdb_warn("Couldn't read htable\n");
702 				return (DCMD_ERR);
703 			}
704 			for (; ht != NULL; ht = htable.ht_next) {
705 				if (mdb_vread(&htable, sizeof (htable_t),
706 				    (uintptr_t)ht) == -1) {
707 					mdb_warn("Couldn't read htable\n");
708 					return (DCMD_ERR);
709 				}
710 
711 				/*
712 				 * Is this the PFN for this htable
713 				 */
714 				if (htable.ht_pfn == pfn)
715 					goto found_it;
716 			}
717 		}
718 	}
719 
720 found_it:
721 	if (htable.ht_pfn == pfn) {
722 		mdb_printf("htable=%p\n", ht);
723 		level = htable.ht_level;
724 		base = htable.ht_vaddr;
725 		pagesize = mmu.level_size[level];
726 	} else {
727 		mdb_printf("Unknown pagetable - assuming level/addr 0");
728 		level = 0;	/* assume level == 0 for PFN */
729 		base = 0;
730 		pagesize = MMU_PAGESIZE;
731 	}
732 
733 	paddr = pfn << MMU_PAGESHIFT;
734 	for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
735 		len = mdb_pread(&buf, mmu.pte_size,
736 		    paddr + entry * mmu.pte_size);
737 		if (len != mmu.pte_size)
738 			return (DCMD_ERR);
739 		if (mmu.pte_size == sizeof (x86pte_t))
740 			pte = buf;
741 		else
742 			pte = *pte32;
743 
744 		if (pte == 0)
745 			continue;
746 
747 		mdb_printf("[%3d] va=%p ", entry, base + entry * pagesize);
748 		do_pte_dcmd(level, pte);
749 	}
750 
751 done:
752 	return (DCMD_OK);
753 }
754 
755 /*
756  * given a PFN as its address argument, prints out the uses of it
757  */
758 /*ARGSUSED*/
759 int
760 ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
761 {
762 	/*
763 	 * The kernel has to at least have made it thru mmu_init()
764 	 */
765 	get_mmu();
766 	if (mmu.num_level == 0)
767 		return (DCMD_ERR);
768 
769 	if ((flags & DCMD_ADDRSPEC) == 0)
770 		return (DCMD_USAGE);
771 
772 	return (do_ptable_dcmd((pfn_t)addr));
773 }
774