/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This part of the file contains the mdb support for dcmds:
 *	::memseg_list
 * and walkers for:
 *	memseg - a memseg list walker for ::memseg_list
 */

#include <sys/types.h>
#include <sys/machparam.h>
#include <sys/controlregs.h>
#include <sys/mach_mmu.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/as.h>

#include <mdb/mdb_modapi.h>
#include <mdb/mdb_target.h>

#include <vm/page.h>
#include <vm/hat_i86.h>

struct pfn2pp {
	pfn_t pfn;
	page_t *pp;
};

static int do_va2pa(uintptr_t, struct as *, int, physaddr_t *, pfn_t *);
static void init_mmu(void);

int
platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
{
	if (asp == NULL)
		return (DCMD_ERR);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	return (do_va2pa(addr, asp, 0, pap, NULL));
}

/*
 * ::memseg_list dcmd and walker to implement it.
 */
/*ARGSUSED*/
int
memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memseg ms;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_pwalk_dcmd("memseg", "memseg_list",
		    0, NULL, 0) == -1) {
			mdb_warn("can't walk memseg");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
		    "PAGES", "EPAGES", "BASE", "END");

	if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
		mdb_warn("can't read memseg at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
	    ms.pages, ms.epages, ms.pages_base, ms.pages_end);

	return (DCMD_OK);
}

/*
 * walk the memseg structures
 */
int
memseg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != NULL) {
		mdb_warn("memseg only supports global walks\n");
		return (WALK_ERR);
	}

	if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
		mdb_warn("symbol 'memsegs' not found");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
	return (WALK_NEXT);
}

int
memseg_walk_step(mdb_walk_state_t *wsp)
{
	int status;

	if (wsp->walk_addr == 0) {
		return (WALK_DONE);
	}

	if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
	    wsp->walk_addr) == -1) {
		mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
		return (WALK_DONE);
	}

	status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
	    wsp->walk_cbdata);

	wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);

	return (status);
}

void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (struct memseg));
}
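
/*
 * Illustrative usage of the dcmd and walker above (a sketch only; real
 * addresses and field widths depend on the target):
 *
 *	> ::memseg_list			! walk and print every memseg
 *	> <memseg addr>::memseg_list	! print a single memseg
 *	> ::walk memseg | ::memseg_list	! equivalent to the first form
 *
 * Each output line shows the memseg's address, its page_t range
 * (PAGES/EPAGES) and its pfn range (BASE/END), matching the header
 * printed by memseg_list() above.
 */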

/*
 * Now HAT related dcmds.
 */

static struct hat *khat;	/* value of kas.a_hat */
struct hat_mmu_info mmu;
uintptr_t kernelbase;

/*
 * stuff for i86xpv images
 */
static int is_xpv;
static uintptr_t mfn_list_addr;	/* kernel MFN list address */
uintptr_t xen_virt_start;	/* address of mfn_to_pfn[] table */
ulong_t mfn_count;		/* number of pfn's in the MFN list */
pfn_t *mfn_list;		/* local MFN list copy */

/*
 * read mmu parameters from kernel
 */
static void
init_mmu(void)
{
	struct as kas;

	if (mmu.num_level != 0)
		return;

	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
		mdb_warn("Can't use HAT information before mmu_init()\n");
	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
		mdb_warn("Couldn't find kas - kernel's struct as\n");
	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
		mdb_warn("Couldn't find kernelbase\n");
	khat = kas.a_hat;

	/*
	 * Is this a paravirtualized domain image?
	 */
	if (mdb_readsym(&mfn_list_addr, sizeof (mfn_list_addr),
	    "mfn_list") == -1 ||
	    mdb_readsym(&xen_virt_start, sizeof (xen_virt_start),
	    "xen_virt_start") == -1 ||
	    mdb_readsym(&mfn_count, sizeof (mfn_count), "mfn_count") == -1) {
		mfn_list_addr = NULL;
	}

	is_xpv = mfn_list_addr != NULL;

#ifndef _KMDB
	/*
	 * recreate the local mfn_list
	 */
	if (is_xpv) {
		size_t sz = mfn_count * sizeof (pfn_t);
		mfn_list = mdb_zalloc(sz, UM_SLEEP);

		if (mdb_vread(mfn_list, sz, (uintptr_t)mfn_list_addr) == -1) {
			mdb_warn("Failed to read MFN list\n");
			mdb_free(mfn_list, sz);
			mfn_list = NULL;
		}
	}
#endif
}

void
free_mmu(void)
{
#ifdef __xpv
	if (mfn_list != NULL)
		mdb_free(mfn_list, mfn_count * sizeof (mfn_t));
#endif
}

#ifdef __xpv

#ifdef _KMDB

/*
 * Convert between MFNs and PFNs.  Since we're in kmdb we can go directly
 * through the machine to phys mapping and the MFN list.
 */

pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
	pfn_t pfn;
	mfn_t tmp;
	pfn_t *pfn_list;

	if (mfn_list_addr == NULL)
		return (-(pfn_t)1);

	pfn_list = (pfn_t *)xen_virt_start;
	if (mdb_vread(&pfn, sizeof (pfn), (uintptr_t)(pfn_list + mfn)) == -1)
		return (-(pfn_t)1);

	if (mdb_vread(&tmp, sizeof (tmp),
	    (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
		return (-(pfn_t)1);

	if (pfn >= mfn_count || tmp != mfn)
		return (-(pfn_t)1);

	return (pfn);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	mfn_t mfn;

	init_mmu();

	if (mfn_list_addr == NULL || pfn >= mfn_count)
		return (-(mfn_t)1);

	if (mdb_vread(&mfn, sizeof (mfn),
	    (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
		return (-(mfn_t)1);

	return (mfn);
}

#else /* _KMDB */

/*
 * Convert between MFNs and PFNs.  Since a crash dump doesn't include the
 * MFN->PFN translation table (it's part of the hypervisor, not our image)
 * we do the MFN->PFN translation by searching the PFN->MFN (mfn_list)
 * table, if it's there.
 */

pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
	pfn_t pfn;

	init_mmu();

	if (mfn_list == NULL)
		return (-(pfn_t)1);

	for (pfn = 0; pfn < mfn_count; ++pfn) {
		if (mfn_list[pfn] != mfn)
			continue;
		return (pfn);
	}

	return (-(pfn_t)1);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	init_mmu();

	if (mfn_list == NULL || pfn >= mfn_count)
		return (-(mfn_t)1);

	return (mfn_list[pfn]);
}

#endif /* _KMDB */

static paddr_t
mdb_ma_to_pa(uint64_t ma)
{
	pfn_t pfn = mdb_mfn_to_pfn(mmu_btop(ma));
	if (pfn == -(pfn_t)1)
		return (-(paddr_t)1);

	return (mmu_ptob((paddr_t)pfn) | (ma & (MMU_PAGESIZE - 1)));
}

#else /* __xpv */

#define	mdb_ma_to_pa(ma) (ma)
#define	mdb_mfn_to_pfn(mfn) (mfn)
#define	mdb_pfn_to_mfn(pfn) (pfn)

#endif /* __xpv */

/*
 * ::mfntopfn dcmd translates hypervisor machine page number
 * to physical page number
 */
/*ARGSUSED*/
int
mfntopfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("MFN missing\n");
		return (DCMD_USAGE);
	}

	if ((pfn = mdb_mfn_to_pfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", (pfn_t)addr);
		return (DCMD_ERR);
	}

	mdb_printf("%lr\n", pfn);

	return (DCMD_OK);
}

/*
 * ::pfntomfn dcmd translates physical page number to
 * hypervisor machine page number
 */
/*ARGSUSED*/
int
pfntomfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t mfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("PFN missing\n");
		return (DCMD_USAGE);
	}

	if ((mfn = mdb_pfn_to_mfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid pfn %lr\n", (pfn_t)addr);
		return (DCMD_ABORT);
	}

	mdb_printf("%lr\n", mfn);

	if (flags & DCMD_LOOP)
		mdb_set_dot(addr + 1);
	return (DCMD_OK);
}
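
/*
 * Illustrative usage of the two dcmds above.  They are only meaningful on
 * i86xpv targets, where PFNs and MFNs differ (on bare metal the conversion
 * macros above are the identity); the values shown are examples:
 *
 *	> 200::pfntomfn		! print the MFN backing PFN 0x200
 *	> 1a2b3::mfntopfn	! print the PFN that owns MFN 0x1a2b3
 *
 * Since pfntomfn_dcmd() advances dot when DCMD_LOOP is set, a run of PFNs
 * can be translated with the usual repeat syntax, e.g. 200,10::pfntomfn.
 */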

static pfn_t
pte2mfn(x86pte_t pte, uint_t level)
{
	pfn_t mfn;
	if (level > 0 && (pte & PT_PAGESIZE))
		mfn = mmu_btop(pte & PT_PADDR_LGPG);
	else
		mfn = mmu_btop(pte & PT_PADDR);
	return (mfn);
}

/*
 * Print a PTE in a more human-friendly way.  The PTE is assumed to be in
 * a level 0 page table, unless -l specifies another level.
 *
 * The PTE value can be specified with the -p option, since on a 32-bit
 * kernel running with PAE it is larger than a uintptr_t.
 */
static int
do_pte_dcmd(int level, uint64_t pte)
{
	static char *attr[] = {
	    "wrback", "wrthru", "uncached", "uncached",
	    "wrback", "wrthru", "wrcombine", "uncached"};
	int pat_index = 0;
	pfn_t mfn;

	mdb_printf("pte=%llr: ", pte);
	if (PTE_GET(pte, mmu.pt_nx))
		mdb_printf("noexec ");

	mfn = pte2mfn(pte, level);
	mdb_printf("%s=0x%lr ", is_xpv ? "mfn" : "pfn", mfn);

	if (PTE_GET(pte, PT_NOCONSIST))
		mdb_printf("noconsist ");

	if (PTE_GET(pte, PT_NOSYNC))
		mdb_printf("nosync ");

	if (PTE_GET(pte, mmu.pt_global))
		mdb_printf("global ");

	if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
		mdb_printf("largepage ");

	if (level > 0 && PTE_GET(pte, PT_MOD))
		mdb_printf("mod ");

	if (level > 0 && PTE_GET(pte, PT_REF))
		mdb_printf("ref ");

	if (PTE_GET(pte, PT_USER))
		mdb_printf("user ");

	if (PTE_GET(pte, PT_WRITABLE))
		mdb_printf("write ");

	/*
	 * Report non-standard cacheability
	 */
	pat_index = 0;
	if (level > 0) {
		if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
			pat_index += 4;
	} else {
		if (PTE_GET(pte, PT_PAT_4K))
			pat_index += 4;
	}

	if (PTE_GET(pte, PT_NOCACHE))
		pat_index += 2;

	if (PTE_GET(pte, PT_WRITETHRU))
		pat_index += 1;

	if (pat_index != 0)
		mdb_printf("%s", attr[pat_index]);

	if (PTE_GET(pte, PT_VALID) == 0)
		mdb_printf(" !VALID ");

	mdb_printf("\n");
	return (DCMD_OK);
}

/*
 * Print a PTE in a more human-friendly way.  The PTE is assumed to be in
 * a level 0 page table, unless -l specifies another level.
 *
 * The PTE value can be specified with the -p option, since on a 32-bit
 * kernel running with PAE it is larger than a uintptr_t.
 */
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	int level = 0;
	uint64_t pte = 0;
	char *level_str = NULL;
	char *pte_str = NULL;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if (mdb_getopts(argc, argv,
	    'p', MDB_OPT_STR, &pte_str,
	    'l', MDB_OPT_STR, &level_str, NULL) != argc)
		return (DCMD_USAGE);

	/*
	 * parse the PTE to decode; if it's 0, we don't do anything
	 */
	if (pte_str != NULL) {
		pte = mdb_strtoull(pte_str);
	} else {
		if ((flags & DCMD_ADDRSPEC) == 0)
			return (DCMD_USAGE);
		pte = addr;
	}
	if (pte == 0)
		return (DCMD_OK);

	/*
	 * parse the level if supplied
	 */
	if (level_str != NULL) {
		level = mdb_strtoull(level_str);
		if (level < 0 || level > mmu.max_level)
			return (DCMD_ERR);
	}

	return (do_pte_dcmd(level, pte));
}

static size_t
va2entry(htable_t *htable, uintptr_t addr)
{
	size_t entry = (addr - htable->ht_vaddr);

	entry >>= mmu.level_shift[htable->ht_level];
	return (entry & (HTABLE_NUM_PTES(htable) - 1));
}

static x86pte_t
get_pte(hat_t *hat, htable_t *htable, uintptr_t addr)
{
	x86pte_t buf;
	x86pte32_t *pte32 = (x86pte32_t *)&buf;
	size_t len;

	if (htable->ht_flags & HTABLE_VLP) {
		uintptr_t ptr = (uintptr_t)hat->hat_vlp_ptes;
		ptr += va2entry(htable, addr) << mmu.pte_size_shift;
		len = mdb_vread(&buf, mmu.pte_size, ptr);
	} else {
		paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
		paddr += va2entry(htable, addr) << mmu.pte_size_shift;
		len = mdb_pread(&buf, mmu.pte_size, paddr);
	}

	if (len != mmu.pte_size)
		return (0);

	if (mmu.pte_size == sizeof (x86pte_t))
		return (buf);
	return (*pte32);
}

static int
do_va2pa(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap,
    pfn_t *mfnp)
{
	struct as as;
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int found = 0;
	x86pte_t pte;
	physaddr_t paddr;

	if (asp != NULL) {
		if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
			mdb_warn("Couldn't read struct as\n");
			return (DCMD_ERR);
		}
		hatp = as.a_hat;
	} else {
		hatp = khat;
	}

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (level = 0; level <= mmu.max_level; ++level) {
		if (level == TOP_LEVEL(&hat))
			base = 0;
		else
			base = addr & mmu.level_mask[level + 1];

		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				if (htable.ht_vaddr != base ||
				    htable.ht_level != level)
					continue;

				pte = get_pte(&hat, &htable, addr);

				if (print_level) {
					mdb_printf("\tlevel=%d htable=%p "
					    "pte=%llr\n", level, ht, pte);
				}

				if (!PTE_ISVALID(pte)) {
					mdb_printf("Address %p is unmapped.\n",
					    addr);
					return (DCMD_ERR);
				}

				if (found)
					continue;

				if (PTE_IS_LGPG(pte, level))
					paddr = mdb_ma_to_pa(pte &
					    PT_PADDR_LGPG);
				else
					paddr = mdb_ma_to_pa(pte & PT_PADDR);
				paddr += addr & mmu.level_offset[level];
				if (pap != NULL)
					*pap = paddr;
				if (mfnp != NULL)
					*mfnp = pte2mfn(pte, level);
				found = 1;
			}
		}
	}

done:
	if (!found)
		return (DCMD_ERR);
	return (DCMD_OK);
}

int
va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t addrspace;
	char *addrspace_str = NULL;
	int piped = flags & DCMD_PIPE_OUT;
	pfn_t pfn;
	pfn_t mfn;
	int rc;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if (mdb_getopts(argc, argv,
	    'a', MDB_OPT_STR, &addrspace_str, NULL) != argc)
		return (DCMD_USAGE);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	/*
	 * parse the address space
	 */
	if (addrspace_str != NULL)
		addrspace = mdb_strtoull(addrspace_str);
	else
		addrspace = 0;

	rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);

	if (rc != DCMD_OK)
		return (rc);

	if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", mfn);
		return (DCMD_ERR);
	}

	if (piped) {
		mdb_printf("0x%lr\n", pfn);
		return (DCMD_OK);
	}

	mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr, pfn);

	if (is_xpv)
		mdb_printf(" (mfn 0x%lr)", mfn);

	mdb_printf("\n");

	return (DCMD_OK);
}
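
/*
 * Illustrative usage of va2pfn_dcmd(); the registered dcmd name is assumed
 * here to be ::vatopfn (the dcmd table lives elsewhere in this module):
 *
 *	> fffffffffbc30000::vatopfn		! kernel VA, uses kas
 *	> 8048000::vatopfn -a <struct as *>	! user VA in the given as
 *
 * When the output is piped only the PFN is printed, so it can feed a
 * PFN-consuming dcmd directly, e.g.
 *
 *	> fffffffffbc30000::vatopfn | ::report_maps
 *
 * (again assuming the registered names match the function names here).
 */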

/*
 * Report all hats that either use the PFN as a page table or that map
 * the page.
 */
static int
do_report_maps(pfn_t pfn)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int entry;
	x86pte_t pte;
	x86pte_t buf;
	x86pte32_t *pte32 = (x86pte32_t *)&buf;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * only report kernel addresses once
				 */
				if (hatp != khat &&
				    htable.ht_vaddr >= kernelbase)
					continue;

				/*
				 * Is the PFN a pagetable itself?
				 */
				if (htable.ht_pfn == pfn) {
					mdb_printf("Pagetable for "
					    "hat=%p htable=%p\n", hatp, ht);
					continue;
				}

				/*
				 * otherwise, examine page mappings
				 */
				level = htable.ht_level;
				if (level > mmu.max_page_level)
					continue;
				paddr = mmu_ptob((physaddr_t)htable.ht_pfn);
				for (entry = 0;
				    entry < HTABLE_NUM_PTES(&htable);
				    ++entry) {

					base = htable.ht_vaddr + entry *
					    mmu.level_size[level];

					/*
					 * only report kernel addresses once
					 */
					if (hatp != khat &&
					    base >= kernelbase)
						continue;

					len = mdb_pread(&buf, mmu.pte_size,
					    paddr + entry * mmu.pte_size);
					if (len != mmu.pte_size)
						return (DCMD_ERR);
					if (mmu.pte_size == sizeof (x86pte_t))
						pte = buf;
					else
						pte = *pte32;

					if ((pte & PT_VALID) == 0)
						continue;
					if (level == 0 || !(pte & PT_PAGESIZE))
						pte &= PT_PADDR;
					else
						pte &= PT_PADDR_LGPG;
					if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
						continue;
					mdb_printf("hat=%p maps addr=%p\n",
					    hatp, (caddr_t)base);
				}
			}
		}
	}

done:
	return (DCMD_OK);
}

/*
 * given a PFN as its address argument, print out all uses of it
 */
/*ARGSUSED*/
int
report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
		return (DCMD_USAGE);

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_report_maps(pfn));
}
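
/*
 * Illustrative usage of the dcmd above (registered name assumed to match
 * report_maps_dcmd(); the PFN is an example only):
 *
 *	> 1b0f3::report_maps		! who maps or uses PFN 0x1b0f3
 *	> 1b0f3::report_maps -m		! same, but treat the value as an MFN
 *
 * The output lists pagetables that live at the PFN ("Pagetable for ...")
 * and hat/virtual-address pairs that map it ("hat=... maps addr=...").
 */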

static int
do_ptable_dcmd(pfn_t pfn)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int entry;
	uintptr_t pagesize;
	x86pte_t pte;
	x86pte_t buf;
	x86pte32_t *pte32 = (x86pte32_t *)&buf;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * Is this the PFN for this htable?
				 */
				if (htable.ht_pfn == pfn)
					goto found_it;
			}
		}
	}

found_it:
	if (htable.ht_pfn == pfn) {
		mdb_printf("htable=%p\n", ht);
		level = htable.ht_level;
		base = htable.ht_vaddr;
		pagesize = mmu.level_size[level];
	} else {
		mdb_printf("Unknown pagetable - assuming level/addr 0\n");
		level = 0;	/* assume level == 0 for PFN */
		base = 0;
		pagesize = MMU_PAGESIZE;
	}

	paddr = mmu_ptob((physaddr_t)pfn);
	for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
		len = mdb_pread(&buf, mmu.pte_size,
		    paddr + entry * mmu.pte_size);
		if (len != mmu.pte_size)
			return (DCMD_ERR);
		if (mmu.pte_size == sizeof (x86pte_t))
			pte = buf;
		else
			pte = *pte32;

		if (pte == 0)
			continue;

		mdb_printf("[%3d] va=%p ", entry, base + entry * pagesize);
		do_pte_dcmd(level, pte);
	}

done:
	return (DCMD_OK);
}

/*
 * Dump the page table at the given PFN
 */
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
		return (DCMD_USAGE);

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_ptable_dcmd(pfn));
}

static int
do_htables_dcmd(hat_t *hatp)
{
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	int h;

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (h = 0; h < hat.hat_num_hash; ++h) {
		if (mdb_vread(&ht, sizeof (htable_t *),
		    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
			mdb_warn("Couldn't read htable ptr\n");
			return (DCMD_ERR);
		}
		for (; ht != NULL; ht = htable.ht_next) {
			mdb_printf("%p\n", ht);
			if (mdb_vread(&htable, sizeof (htable_t),
			    (uintptr_t)ht) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
		}
	}
	return (DCMD_OK);
}

/*
 * Dump the htables for the given hat
 */
/*ARGSUSED*/
int
htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	hat_t *hat;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	hat = (hat_t *)addr;

	return (do_htables_dcmd(hat));
}
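
/*
 * Illustrative usage of the two dcmds above (names assumed to match
 * ptable_dcmd() and htables_dcmd(); addresses are examples only):
 *
 *	> 1b0f3::ptable			! decode every PTE in the pagetable
 *					! at PFN 0x1b0f3
 *	> 1b0f3::ptable -m		! same, but the value is an MFN
 *	> ffffff01d12fc560::htables	! list every htable hashed under the
 *					! hat_t at that address
 *
 * A hat_t pointer for ::htables can be obtained from a process, e.g. via
 * ::print proc_t p_as->a_hat on a proc_t address.
 */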