/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2019 Leandro Lupori
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <vm/vm.h>

#include <kvm.h>

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/powerpc/include/minidump.h"
#include "kvm_private.h"
#include "kvm_powerpc64.h"

/*
 * PowerPC64 HPT machine dependent routines for kvm and minidumps.
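 *
 * Translation path implemented by this file: an effective address (EA) is
 * matched against the SLB entries reconstructed by slb_init() to obtain a
 * VSID; the VSID and the EA page bits form the virtual address (VA); the VA
 * is hashed to locate a PTEG in the dumped hash table; and the matching PTE
 * yields the real (physical) address.  Direct-map (DMAP) addresses are
 * translated without consulting the hash table.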
 *
 * Address Translation parameters:
 *
 * b = 12 (SLB base page size: 4 KB)
 * b = 24 (SLB base page size: 16 MB)
 * p = 12 (page size: 4 KB)
 * p = 24 (page size: 16 MB)
 * s = 28 (segment size: 256 MB)
 */

/* Large (huge) page params */
#define	LP_PAGE_SHIFT		24
#define	LP_PAGE_SIZE		(1ULL << LP_PAGE_SHIFT)
#define	LP_PAGE_MASK		0x00ffffffULL

/* SLB */

#define	SEGMENT_LENGTH		0x10000000ULL

#define	round_seg(x)		roundup2((uint64_t)(x), SEGMENT_LENGTH)

/* Virtual real-mode VSID in LPARs */
#define	VSID_VRMA		0x1ffffffULL

#define	SLBV_L			0x0000000000000100ULL	/* Large page selector */
#define	SLBV_CLASS		0x0000000000000080ULL	/* Class selector */
#define	SLBV_LP_MASK		0x0000000000000030ULL
#define	SLBV_VSID_MASK		0x3ffffffffffff000ULL	/* Virtual SegID mask */
#define	SLBV_VSID_SHIFT		12

#define	SLBE_B_MASK		0x0000000006000000ULL
#define	SLBE_B_256MB		0x0000000000000000ULL
#define	SLBE_VALID		0x0000000008000000ULL	/* SLB entry valid */
#define	SLBE_INDEX_MASK		0x0000000000000fffULL	/* SLB index mask */
#define	SLBE_ESID_MASK		0xfffffffff0000000ULL	/* Effective SegID mask */
#define	SLBE_ESID_SHIFT		28

/* PTE */

#define	LPTEH_VSID_SHIFT	12
#define	LPTEH_AVPN_MASK		0xffffffffffffff80ULL
#define	LPTEH_B_MASK		0xc000000000000000ULL
#define	LPTEH_B_256MB		0x0000000000000000ULL
#define	LPTEH_BIG		0x0000000000000004ULL	/* 4KB/16MB page */
#define	LPTEH_HID		0x0000000000000002ULL
#define	LPTEH_VALID		0x0000000000000001ULL

#define	LPTEL_RPGN		0xfffffffffffff000ULL
#define	LPTEL_LP_MASK		0x00000000000ff000ULL
#define	LPTEL_NOEXEC		0x0000000000000004ULL

/* Supervisor (U: RW, S: RW) */
#define	LPTEL_BW		0x0000000000000002ULL

/* Both Read Only (U: RO, S: RO) */
#define	LPTEL_BR		0x0000000000000003ULL

#define	LPTEL_RW		LPTEL_BW
#define	LPTEL_RO		LPTEL_BR

/*
 * PTE AVA field manipulation macros.
 *
 * AVA[0:54] = PTEH[2:56]
 * AVA[VSID] = AVA[0:49]  = PTEH[2:51]
 * AVA[PAGE] = AVA[50:54] = PTEH[52:56]
 */
#define	PTEH_AVA_VSID_MASK	0x3ffffffffffff000UL
#define	PTEH_AVA_VSID_SHIFT	12
#define	PTEH_AVA_VSID(p) \
	(((p) & PTEH_AVA_VSID_MASK) >> PTEH_AVA_VSID_SHIFT)

#define	PTEH_AVA_PAGE_MASK	0x0000000000000f80UL
#define	PTEH_AVA_PAGE_SHIFT	7
#define	PTEH_AVA_PAGE(p) \
	(((p) & PTEH_AVA_PAGE_MASK) >> PTEH_AVA_PAGE_SHIFT)

/* Masks to obtain the Physical Address from PTE low 64-bit word. */
#define	PTEL_PA_MASK		0x0ffffffffffff000UL
#define	PTEL_LP_PA_MASK		0x0fffffffff000000UL

#define	PTE_HASH_MASK		0x0000007fffffffffUL

/*
 * Number of AVA/VA page bits to shift right, in order to leave only the
 * ones that should be considered.
 *
 * q = MIN(54, 77-b)	(PowerISA v2.07B, 5.7.7.3)
 * n = q + 1 - 50	(VSID size in bits)
 * s(ava) = 5 - n
 * s(va) = (28 - b) - n
 *
 * q: bit number of lower limit of VA/AVA bits to compare
 * n: number of AVA/VA page bits to compare
 * s: shift amount
 * 28 - b: VA page size in bits
 */
#define	AVA_PAGE_SHIFT(b)	(5 - (MIN(54, 77-(b)) + 1 - 50))
#define	VA_PAGE_SHIFT(b)	(28 - (b) - (MIN(54, 77-(b)) + 1 - 50))
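
/*
 * Worked example (the values follow directly from the definitions above):
 * for b = 12, q = 54 and n = 5, so AVA_PAGE_SHIFT(12) == 0 and
 * VA_PAGE_SHIFT(12) == 11; for b = 24, q = 53 and n = 4, so
 * AVA_PAGE_SHIFT(24) == 1 and VA_PAGE_SHIFT(24) == 0.
 */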

/* Kernel ESID -> VSID mapping */
#define	KERNEL_VSID_BIT	0x0000001000000000UL /* Bit set in all kernel VSIDs */
#define	KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
				* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
				KERNEL_VSID_BIT)

/* Types */

typedef uint64_t	ppc64_physaddr_t;

typedef struct {
	uint64_t slbv;
	uint64_t slbe;
} ppc64_slb_entry_t;

typedef struct {
	uint64_t pte_hi;
	uint64_t pte_lo;
} ppc64_pt_entry_t;

struct hpt_data {
	ppc64_slb_entry_t *slbs;
	uint32_t slbsize;
};


static void
slb_fill(ppc64_slb_entry_t *slb, uint64_t ea, uint64_t i)
{
	uint64_t esid;

	esid = ea >> SLBE_ESID_SHIFT;
	slb->slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | i;
}

static int
slb_init(kvm_t *kd)
{
	struct minidumphdr *hdr;
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	uint32_t slbsize;
	uint64_t ea, i, maxmem;

	hdr = &kd->vmst->hdr;
	data = PPC64_MMU_DATA(kd);

	/* Alloc SLBs */
	maxmem = hdr->bitmapsize * 8 * PPC64_PAGE_SIZE;
	slbsize = round_seg(hdr->kernend + 1 - hdr->kernbase + maxmem) /
	    SEGMENT_LENGTH * sizeof(ppc64_slb_entry_t);
	data->slbs = _kvm_malloc(kd, slbsize);
	if (data->slbs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate slbs");
		return (-1);
	}
	data->slbsize = slbsize;

	dprintf("%s: maxmem=0x%jx, segs=%jd, slbsize=0x%jx\n",
	    __func__, (uintmax_t)maxmem,
	    (uintmax_t)slbsize / sizeof(ppc64_slb_entry_t), (uintmax_t)slbsize);

	/*
	 * Generate needed SLB entries.
	 *
	 * When translating addresses from EA to VA to PA, the needed SLB
	 * entry could be generated on the fly, but this is not the case
	 * for the walk_pages method, which needs to search the SLB entry
	 * by VSID in order to find the EA corresponding to a PTE.
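	 * (This is done by slb_vsid_search() and get_ea() below.)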
	 */

	/* VM area */
	for (ea = hdr->kernbase, i = 0, slb = data->slbs;
	    ea < hdr->kernend; ea += SEGMENT_LENGTH, i++, slb++)
		slb_fill(slb, ea, i);

	/* DMAP area */
	for (ea = hdr->dmapbase;
	    ea < MIN(hdr->dmapend, hdr->dmapbase + maxmem);
	    ea += SEGMENT_LENGTH, i++, slb++) {
		slb_fill(slb, ea, i);
		if (hdr->hw_direct_map)
			slb->slbv |= SLBV_L;
	}

	return (0);
}

static void
ppc64mmu_hpt_cleanup(kvm_t *kd)
{
	struct hpt_data *data;

	if (kd->vmst == NULL)
		return;

	data = PPC64_MMU_DATA(kd);
	free(data->slbs);
	free(data);
	PPC64_MMU_DATA(kd) = NULL;
}

static int
ppc64mmu_hpt_init(kvm_t *kd)
{
	struct hpt_data *data;

	/* Alloc MMU data */
	data = _kvm_malloc(kd, sizeof(*data));
	if (data == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate MMU data");
		return (-1);
	}
	data->slbs = NULL;
	PPC64_MMU_DATA(kd) = data;

	if (slb_init(kd) == -1)
		goto failed;

	return (0);

failed:
	ppc64mmu_hpt_cleanup(kd);
	return (-1);
}

static ppc64_slb_entry_t *
slb_search(kvm_t *kd, kvaddr_t ea)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		if ((slb->slbe & SLBE_VALID) == 0)
			continue;

		/* Compare 36-bit ESID of EA with segment one (64-s) */
		if ((slb->slbe & SLBE_ESID_MASK) != (ea & SLBE_ESID_MASK))
			continue;

		/* Match found */
		dprintf("SEG#%02d: slbv=0x%016jx, slbe=0x%016jx\n",
		    i, (uintmax_t)slb->slbv, (uintmax_t)slb->slbe);
		break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program, "%s: segment not found for EA 0x%jx",
		    __func__, (uintmax_t)ea);
		return (NULL);
	}
	return (slb);
}

static ppc64_pt_entry_t
pte_get(kvm_t *kd, u_long ptex)
{
	ppc64_pt_entry_t pte, *p;

	p = _kvm_pmap_get(kd, ptex, sizeof(pte));
	pte.pte_hi = be64toh(p->pte_hi);
	pte.pte_lo = be64toh(p->pte_lo);
	return (pte);
}

static int
pte_search(kvm_t *kd, ppc64_slb_entry_t *slb, uint64_t hid, kvaddr_t ea,
    ppc64_pt_entry_t *p)
{
	uint64_t hash, hmask;
	uint64_t pteg, ptex;
	uint64_t va_vsid, va_page;
	int b;
	int ava_pg_shift, va_pg_shift;
	ppc64_pt_entry_t pte;

	/*
	 * Get VA:
	 *
	 * va(78) = va_vsid(50) || va_page(s-b) || offset(b)
	 *
	 * va_vsid: 50-bit VSID (78-s)
	 * va_page: (s-b)-bit VA page
	 */
	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;
	va_vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	va_page = (ea & ~SLBE_ESID_MASK) >> b;

	dprintf("%s: hid=0x%jx, ea=0x%016jx, b=%d, va_vsid=0x%010jx, "
	    "va_page=0x%04jx\n",
	    __func__, (uintmax_t)hid, (uintmax_t)ea, b,
	    (uintmax_t)va_vsid, (uintmax_t)va_page);

	/*
	 * Get hash:
	 *
	 * Primary hash: va_vsid(11:49) ^ va_page(s-b)
	 * Secondary hash: ~primary_hash
	 */
	hash = (va_vsid & PTE_HASH_MASK) ^ va_page;
	if (hid)
		hash = ~hash & PTE_HASH_MASK;

	/*
	 * Get PTEG:
	 *
	 * pteg = (hash(0:38) & hmask) << 3
	 *
	 * hmask (hash mask): mask generated from HTABSIZE || 11*0b1
	 * hmask = number_of_ptegs - 1
	 */
	hmask = kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) - 1;
	pteg = (hash & hmask) << 3;
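
	/*
	 * For example, assuming a 256 MB hash table (pmapsize = 0x10000000,
	 * an illustrative value, not one taken from a real dump): it holds
	 * 0x200000 PTEGs of 8 PTEs (128 bytes) each, so hmask = 0x1fffff.
	 */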

	ava_pg_shift = AVA_PAGE_SHIFT(b);
	va_pg_shift = VA_PAGE_SHIFT(b);

	dprintf("%s: hash=0x%010jx, hmask=0x%010jx, (hash & hmask)=0x%010jx, "
	    "pteg=0x%011jx, ava_pg_shift=%d, va_pg_shift=%d\n",
	    __func__, (uintmax_t)hash, (uintmax_t)hmask,
	    (uintmax_t)(hash & hmask), (uintmax_t)pteg,
	    ava_pg_shift, va_pg_shift);

	/* Search PTEG */
	for (ptex = pteg; ptex < pteg + 8; ptex++) {
		pte = pte_get(kd, ptex);

		/* Check H, V and B */
		if ((pte.pte_hi & LPTEH_HID) != hid ||
		    (pte.pte_hi & LPTEH_VALID) == 0 ||
		    (pte.pte_hi & LPTEH_B_MASK) != LPTEH_B_256MB)
			continue;

		/* Compare AVA with VA */
		if (PTEH_AVA_VSID(pte.pte_hi) != va_vsid ||
		    (PTEH_AVA_PAGE(pte.pte_hi) >> ava_pg_shift) !=
		    (va_page >> va_pg_shift))
			continue;

		/*
		 * Check if PTE[L] matches SLBV[L].
		 *
		 * Note: this check ignores PTE[LP], as does the kernel.
		 */
		if (b == PPC64_PAGE_SHIFT) {
			if (pte.pte_hi & LPTEH_BIG)
				continue;
		} else if ((pte.pte_hi & LPTEH_BIG) == 0)
			continue;

		/* Match found */
		dprintf("%s: PTE found: ptex=0x%jx, pteh=0x%016jx, "
		    "ptel=0x%016jx\n",
		    __func__, (uintmax_t)ptex, (uintmax_t)pte.pte_hi,
		    (uintmax_t)pte.pte_lo);
		break;
	}

	/* Not found? */
	if (ptex == pteg + 8) {
		/* Try secondary hash */
		if (hid == 0)
			return (pte_search(kd, slb, LPTEH_HID, ea, p));
		else {
			_kvm_err(kd, kd->program,
			    "%s: pte not found", __func__);
			return (-1);
		}
	}

	/* PTE found */
	*p = pte;
	return (0);
}

static int
pte_lookup(kvm_t *kd, kvaddr_t ea, ppc64_pt_entry_t *pte)
{
	ppc64_slb_entry_t *slb;

	/* First, find SLB */
	if ((slb = slb_search(kd, ea)) == NULL)
		return (-1);

	/* Next, find PTE */
	return (pte_search(kd, slb, 0, ea, pte));
}

static int
ppc64mmu_hpt_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct minidumphdr *hdr;
	struct vmstate *vm;
	ppc64_pt_entry_t pte;
	ppc64_physaddr_t pgoff, pgpa;
	off_t ptoff;
	int err;

	vm = kd->vmst;
	hdr = &vm->hdr;
	pgoff = va & PPC64_PAGE_MASK;

	dprintf("%s: va=0x%016jx\n", __func__, (uintmax_t)va);

	/*
	 * A common use case of libkvm is to first find a symbol address
	 * from the kernel image and then use kvatop to translate it, in
	 * order to fetch its corresponding data.
	 *
	 * The problem is that, in the PowerPC64 case, the addresses of
	 * relocated data won't match those in the kernel image. This is
	 * handled here by adding the relocation offset to those addresses.
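	 * The offset is the difference between the address the kernel was
	 * actually loaded at (startkernel) and its link-time base address
	 * (PPC64_KERNBASE).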
	 */
	if (va < hdr->dmapbase)
		va += hdr->startkernel - PPC64_KERNBASE;

	/* Handle DMAP */
	if (va >= hdr->dmapbase && va <= hdr->dmapend) {
		pgpa = (va & ~hdr->dmapbase) & ~PPC64_PAGE_MASK;
		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "direct map address 0x%jx not in minidump",
			    __func__, (uintmax_t)va);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	/* Translate VA to PA */
	} else if (va >= hdr->kernbase) {
		if ((err = pte_lookup(kd, va, &pte)) == -1) {
			_kvm_err(kd, kd->program,
			    "%s: pte not valid", __func__);
			goto invalid;
		}

		if (pte.pte_hi & LPTEH_BIG)
			pgpa = (pte.pte_lo & PTEL_LP_PA_MASK) |
			    (va & ~PPC64_PAGE_MASK & LP_PAGE_MASK);
		else
			pgpa = pte.pte_lo & PTEL_PA_MASK;
		dprintf("%s: pgpa=0x%016jx\n", __func__, (uintmax_t)pgpa);

		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "physical address 0x%jx not in minidump",
			    __func__, (uintmax_t)pgpa);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	} else {
		_kvm_err(kd, kd->program,
		    "%s: virtual address 0x%jx not minidumped",
		    __func__, (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static vm_prot_t
entry_to_prot(ppc64_pt_entry_t *pte)
{
	vm_prot_t prot = VM_PROT_READ;

	if (pte->pte_lo & LPTEL_RW)
		prot |= VM_PROT_WRITE;
	/* LPTEL_NOEXEC set means the page is not executable. */
	if ((pte->pte_lo & LPTEL_NOEXEC) == 0)
		prot |= VM_PROT_EXECUTE;
	return (prot);
}

static ppc64_slb_entry_t *
slb_vsid_search(kvm_t *kd, uint64_t vsid)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);
	vsid <<= SLBV_VSID_SHIFT;

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		/* Check if valid and compare VSID */
		if ((slb->slbe & SLBE_VALID) &&
		    (slb->slbv & SLBV_VSID_MASK) == vsid)
			break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program,
		    "%s: segment not found for VSID 0x%jx",
		    __func__, (uintmax_t)vsid >> SLBV_VSID_SHIFT);
		return (NULL);
	}
	return (slb);
}

static u_long
get_ea(kvm_t *kd, ppc64_pt_entry_t *pte, u_long ptex)
{
	ppc64_slb_entry_t *slb;
	uint64_t ea, hash, vsid;
	int b, shift;

	/* Find SLB */
	vsid = PTEH_AVA_VSID(pte->pte_hi);
	if ((slb = slb_vsid_search(kd, vsid)) == NULL)
		return (~0UL);

	/* Get ESID part of EA */
	ea = slb->slbe & SLBE_ESID_MASK;

	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;

	/*
	 * If there are fewer than 64K PTEGs (16-bit), the upper bits of
	 * the EA page must be obtained from PTEH's AVA.
	 */
	if (kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) <
	    0x10000U) {
		/*
		 * Add 0 to 5 EA bits, right after VSID.
		 * b == 12: 5 bits
		 * b == 24: 4 bits
		 */
		shift = AVA_PAGE_SHIFT(b);
		ea |= (PTEH_AVA_PAGE(pte->pte_hi) >> shift) <<
		    (SLBE_ESID_SHIFT - 5 + shift);
	}

	/*
	 * Get VA page from hash and add to EA.
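	 *
	 * The primary hash is (vsid & PTE_HASH_MASK) ^ va_page, so the
	 * VA page can be recovered as hash ^ (vsid & PTE_HASH_MASK).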
	 */
	hash = (ptex & ~7) >> 3;
	if (pte->pte_hi & LPTEH_HID)
		hash = ~hash & PTE_HASH_MASK;
	ea |= ((hash ^ (vsid & PTE_HASH_MASK)) << b) & ~SLBE_ESID_MASK;
	return (ea);
}

static int
ppc64mmu_hpt_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm;
	int ret;
	unsigned int pagesz;
	u_long dva, pa, va;
	u_long ptex, nptes;
	uint64_t vsid;

	ret = 0;
	vm = kd->vmst;
	nptes = vm->hdr.pmapsize / sizeof(ppc64_pt_entry_t);

	/* Walk through PTEs */
	for (ptex = 0; ptex < nptes; ptex++) {
		ppc64_pt_entry_t pte = pte_get(kd, ptex);

		if ((pte.pte_hi & LPTEH_VALID) == 0)
			continue;

		/* Skip non-kernel related pages, as well as VRMA ones */
		vsid = PTEH_AVA_VSID(pte.pte_hi);
		if ((vsid & KERNEL_VSID_BIT) == 0 ||
		    (vsid >> PPC64_PAGE_SHIFT) == VSID_VRMA)
			continue;

		/* Retrieve page's VA (EA in PPC64 terminology) */
		if ((va = get_ea(kd, &pte, ptex)) == ~0UL)
			goto out;

		/* Get PA and page size */
		if (pte.pte_hi & LPTEH_BIG) {
			pa = pte.pte_lo & PTEL_LP_PA_MASK;
			pagesz = LP_PAGE_SIZE;
		} else {
			pa = pte.pte_lo & PTEL_PA_MASK;
			pagesz = PPC64_PAGE_SIZE;
		}

		/* Get DMAP address */
		dva = vm->hdr.dmapbase + pa;

		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    entry_to_prot(&pte), pagesz, 0))
			goto out;
	}
	ret = 1;

out:
	return (ret);
}


static struct ppc64_mmu_ops ops = {
	.init = ppc64mmu_hpt_init,
	.cleanup = ppc64mmu_hpt_cleanup,
	.kvatop = ppc64mmu_hpt_kvatop,
	.walk_pages = ppc64mmu_hpt_walk_pages,
};
struct ppc64_mmu_ops *ppc64_mmu_ops_hpt = &ops;