xref: /freebsd/lib/libkvm/kvm_minidump_powerpc64_hpt.c (revision a134ebd6e63f658f2d3d04ac0c60d23bcaa86dd7)
/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2019 Leandro Lupori
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <vm/vm.h>

#include <kvm.h>

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/powerpc/include/minidump.h"
#include "kvm_private.h"
#include "kvm_powerpc64.h"

/*
 * PowerPC64 HPT machine dependent routines for kvm and minidumps.
 *
 * Address Translation parameters:
 *
 * b = 12 (SLB base page size: 4 KB)
 * b = 24 (SLB base page size: 16 MB)
 * p = 12 (page size: 4 KB)
 * p = 24 (page size: 16 MB)
 * s = 28 (segment size: 256 MB)
 */
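
/*
 * Address translation in this file works in two steps, mirroring the
 * hardware lookup: an SLB entry synthesized by slb_init() maps the EA's
 * segment to a VSID, and the resulting VA is then looked up in the copy
 * of the hashed page table (HPT) stored in the minidump.
 */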

/* Large (huge) page params */
#define	LP_PAGE_SHIFT		24
#define	LP_PAGE_SIZE		(1ULL << LP_PAGE_SHIFT)
#define	LP_PAGE_MASK		0x00ffffffULL

/* SLB */

#define	SEGMENT_LENGTH		0x10000000ULL

#define	round_seg(x)		roundup2((uint64_t)(x), SEGMENT_LENGTH)

/* Virtual real-mode VSID in LPARs */
#define	VSID_VRMA		0x1ffffffULL

#define	SLBV_L			0x0000000000000100ULL /* Large page selector */
#define	SLBV_CLASS		0x0000000000000080ULL /* Class selector */
#define	SLBV_LP_MASK		0x0000000000000030ULL
#define	SLBV_VSID_MASK		0x3ffffffffffff000ULL /* Virtual SegID mask */
#define	SLBV_VSID_SHIFT		12

#define	SLBE_B_MASK		0x0000000006000000ULL
#define	SLBE_B_256MB		0x0000000000000000ULL
#define	SLBE_VALID		0x0000000008000000ULL /* SLB entry valid */
#define	SLBE_INDEX_MASK		0x0000000000000fffULL /* SLB index mask */
#define	SLBE_ESID_MASK		0xfffffffff0000000ULL /* Effective SegID mask */
#define	SLBE_ESID_SHIFT		28

/* PTE */

#define	LPTEH_VSID_SHIFT	12
#define	LPTEH_AVPN_MASK		0xffffffffffffff80ULL
#define	LPTEH_B_MASK		0xc000000000000000ULL
#define	LPTEH_B_256MB		0x0000000000000000ULL
#define	LPTEH_BIG		0x0000000000000004ULL	/* 4KB/16MB page */
#define	LPTEH_HID		0x0000000000000002ULL
#define	LPTEH_VALID		0x0000000000000001ULL

#define	LPTEL_RPGN		0xfffffffffffff000ULL
#define	LPTEL_LP_MASK		0x00000000000ff000ULL
#define	LPTEL_NOEXEC		0x0000000000000004ULL

/* Both Read/Write   (U: RW, S: RW) */
#define	LPTEL_BW		0x0000000000000002ULL

/* Both Read Only    (U: RO, S: RO) */
#define	LPTEL_BR		0x0000000000000003ULL

#define	LPTEL_RW		LPTEL_BW
#define	LPTEL_RO		LPTEL_BR

/*
 * PTE AVA field manipulation macros.
 *
 * AVA[0:54] = PTEH[2:56]
 * AVA[VSID] = AVA[0:49] = PTEH[2:51]
 * AVA[PAGE] = AVA[50:54] = PTEH[52:56]
 */
#define	PTEH_AVA_VSID_MASK	0x3ffffffffffff000UL
#define	PTEH_AVA_VSID_SHIFT	12
#define	PTEH_AVA_VSID(p) \
	(((p) & PTEH_AVA_VSID_MASK) >> PTEH_AVA_VSID_SHIFT)

#define	PTEH_AVA_PAGE_MASK	0x0000000000000f80UL
#define	PTEH_AVA_PAGE_SHIFT	7
#define	PTEH_AVA_PAGE(p) \
	(((p) & PTEH_AVA_PAGE_MASK) >> PTEH_AVA_PAGE_SHIFT)

/* Masks to obtain the Physical Address from PTE low 64-bit word. */
#define	PTEL_PA_MASK		0x0ffffffffffff000UL
#define	PTEL_LP_PA_MASK		0x0fffffffff000000UL

#define	PTE_HASH_MASK		0x0000007fffffffffUL

/*
 * Number of AVA/VA page bits to shift right, in order to leave only the
 * ones that should be considered.
 *
 * q = MIN(54, 77-b) (PowerISA v2.07B, 5.7.7.3)
 * n = q + 1 - 50 (VSID size in bits)
 * s(ava) = 5 - n
 * s(va) = (28 - b) - n
 *
 * q: bit number of lower limit of VA/AVA bits to compare
 * n: number of AVA/VA page bits to compare
 * s: shift amount
 * 28 - b: VA page size in bits
 */
#define	AVA_PAGE_SHIFT(b)	(5 - (MIN(54, 77-(b)) + 1 - 50))
#define	VA_PAGE_SHIFT(b)	(28 - (b) - (MIN(54, 77-(b)) + 1 - 50))
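
/*
 * For example, with b = 12: q = 54 and n = 5, so AVA_PAGE_SHIFT(12) == 0
 * and VA_PAGE_SHIFT(12) == 11; with b = 24: q = 53 and n = 4, so
 * AVA_PAGE_SHIFT(24) == 1 and VA_PAGE_SHIFT(24) == 0.
 */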

/* Kernel ESID -> VSID mapping */
#define	KERNEL_VSID_BIT	0x0000001000000000UL /* Bit set in all kernel VSIDs */
#define	KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
				* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
				KERNEL_VSID_BIT)
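
/*
 * KERNEL_VSID() above is intended to mirror the hash-based ESID-to-VSID
 * mapping the kernel uses for kernel addresses, so that the SLB entries
 * synthesized in slb_init() match the translations the dumped kernel used.
 */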

/* Types */

typedef uint64_t	ppc64_physaddr_t;

typedef struct {
	uint64_t slbv;
	uint64_t slbe;
} ppc64_slb_entry_t;

typedef struct {
	uint64_t pte_hi;
	uint64_t pte_lo;
} ppc64_pt_entry_t;
struct hpt_data {
	ppc64_slb_entry_t *slbs;
	uint32_t slbsize;
};


static void
slb_fill(ppc64_slb_entry_t *slb, uint64_t ea, uint64_t i)
{
	uint64_t esid;

	esid = ea >> SLBE_ESID_SHIFT;
	slb->slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | i;
}

static int
slb_init(kvm_t *kd)
{
	struct minidumphdr *hdr;
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	uint32_t slbsize;
	uint64_t ea, i, maxmem;

	hdr = &kd->vmst->hdr;
	data = PPC64_MMU_DATA(kd);

	/* Alloc SLBs */
	maxmem = hdr->bitmapsize * 8 * PPC64_PAGE_SIZE;
	slbsize = round_seg(hdr->kernend + 1 - hdr->kernbase + maxmem) /
	    SEGMENT_LENGTH * sizeof(ppc64_slb_entry_t);
	data->slbs = _kvm_malloc(kd, slbsize);
	if (data->slbs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate slbs");
		return (-1);
	}
	data->slbsize = slbsize;

	dprintf("%s: maxmem=0x%jx, segs=%jd, slbsize=0x%jx\n",
	    __func__, (uintmax_t)maxmem,
	    (uintmax_t)slbsize / sizeof(ppc64_slb_entry_t), (uintmax_t)slbsize);

	/*
	 * Generate the needed SLB entries.
	 *
	 * When translating addresses from EA to VA to PA, the needed SLB
	 * entry could be generated on the fly, but that is not an option
	 * for the walk_pages method, which needs to look up the SLB entry
	 * by VSID in order to recover the EA from a PTE.
	 */

	/* VM area */
	for (ea = hdr->kernbase, i = 0, slb = data->slbs;
	    ea < hdr->kernend; ea += SEGMENT_LENGTH, i++, slb++)
		slb_fill(slb, ea, i);

	/* DMAP area */
	for (ea = hdr->dmapbase;
	    ea < MIN(hdr->dmapend, hdr->dmapbase + maxmem);
	    ea += SEGMENT_LENGTH, i++, slb++) {
		slb_fill(slb, ea, i);
		if (hdr->hw_direct_map)
			slb->slbv |= SLBV_L;
	}

	return (0);
}

static void
ppc64mmu_hpt_cleanup(kvm_t *kd)
{
	struct hpt_data *data;

	if (kd->vmst == NULL)
		return;

	data = PPC64_MMU_DATA(kd);
	free(data->slbs);
	free(data);
	PPC64_MMU_DATA(kd) = NULL;
}

static int
ppc64mmu_hpt_init(kvm_t *kd)
{
	struct hpt_data *data;
	struct minidumphdr *hdr;

	hdr = &kd->vmst->hdr;

	/* Alloc MMU data */
	data = _kvm_malloc(kd, sizeof(*data));
	if (data == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate MMU data");
		return (-1);
	}
	data->slbs = NULL;
	PPC64_MMU_DATA(kd) = data;

	if (slb_init(kd) == -1)
		goto failed;

	return (0);

failed:
	ppc64mmu_hpt_cleanup(kd);
	return (-1);
}

static ppc64_slb_entry_t *
slb_search(kvm_t *kd, kvaddr_t ea)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		if ((slb->slbe & SLBE_VALID) == 0)
			continue;

		/* Compare 36-bit ESID of EA with segment one (64-s) */
		if ((slb->slbe & SLBE_ESID_MASK) != (ea & SLBE_ESID_MASK))
			continue;

		/* Match found */
		dprintf("SEG#%02d: slbv=0x%016jx, slbe=0x%016jx\n",
		    i, (uintmax_t)slb->slbv, (uintmax_t)slb->slbe);
		break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program, "%s: segment not found for EA 0x%jx",
		    __func__, (uintmax_t)ea);
		return (NULL);
	}
	return (slb);
}

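/*
 * Fetch the PTE at index 'ptex' from the copy of the HPT stored in the
 * minidump, converting it from its big-endian, in-memory format.
 */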
static ppc64_pt_entry_t
pte_get(kvm_t *kd, u_long ptex)
{
	ppc64_pt_entry_t pte, *p;

	p = _kvm_pmap_get(kd, ptex, sizeof(pte));
	pte.pte_hi = be64toh(p->pte_hi);
	pte.pte_lo = be64toh(p->pte_lo);
	return (pte);
}

static int
pte_search(kvm_t *kd, ppc64_slb_entry_t *slb, uint64_t hid, kvaddr_t ea,
    ppc64_pt_entry_t *p)
{
	uint64_t hash, hmask;
	uint64_t pteg, ptex;
	uint64_t va_vsid, va_page;
	int b;
	int ava_pg_shift, va_pg_shift;
	ppc64_pt_entry_t pte;

	/*
	 * Get VA:
	 *
	 * va(78) = va_vsid(50) || va_page(s-b) || offset(b)
	 *
	 * va_vsid: 50-bit VSID (78-s)
	 * va_page: (s-b)-bit VA page
	 */
	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;
	va_vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	va_page = (ea & ~SLBE_ESID_MASK) >> b;

	dprintf("%s: hid=0x%jx, ea=0x%016jx, b=%d, va_vsid=0x%010jx, "
	    "va_page=0x%04jx\n",
	    __func__, (uintmax_t)hid, (uintmax_t)ea, b,
	    (uintmax_t)va_vsid, (uintmax_t)va_page);

	/*
	 * Get hash:
	 *
	 * Primary hash: va_vsid(11:49) ^ va_page(s-b)
	 * Secondary hash: ~primary_hash
	 */
	hash = (va_vsid & PTE_HASH_MASK) ^ va_page;
	if (hid)
		hash = ~hash & PTE_HASH_MASK;

	/*
	 * Get PTEG:
	 *
	 * pteg = (hash(0:38) & hmask) << 3
	 *
	 * hmask (hash mask): mask generated from HTABSIZE || 11*0b1
	 * hmask = number_of_ptegs - 1
	 */
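	/* Each PTEG holds 8 PTEs of 16 bytes, i.e. pmapsize / 128 PTEGs. */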
	hmask = kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) - 1;
	pteg = (hash & hmask) << 3;

	ava_pg_shift = AVA_PAGE_SHIFT(b);
	va_pg_shift = VA_PAGE_SHIFT(b);

	dprintf("%s: hash=0x%010jx, hmask=0x%010jx, (hash & hmask)=0x%010jx, "
	    "pteg=0x%011jx, ava_pg_shift=%d, va_pg_shift=%d\n",
	    __func__, (uintmax_t)hash, (uintmax_t)hmask,
	    (uintmax_t)(hash & hmask), (uintmax_t)pteg,
	    ava_pg_shift, va_pg_shift);

	/* Search PTEG */
	for (ptex = pteg; ptex < pteg + 8; ptex++) {
		pte = pte_get(kd, ptex);

		/* Check H, V and B */
		if ((pte.pte_hi & LPTEH_HID) != hid ||
		    (pte.pte_hi & LPTEH_VALID) == 0 ||
		    (pte.pte_hi & LPTEH_B_MASK) != LPTEH_B_256MB)
			continue;

		/* Compare AVA with VA */
		if (PTEH_AVA_VSID(pte.pte_hi) != va_vsid ||
		    (PTEH_AVA_PAGE(pte.pte_hi) >> ava_pg_shift) !=
		    (va_page >> va_pg_shift))
			continue;

		/*
		 * Check if PTE[L] matches SLBV[L].
		 *
		 * Note: this check ignores PTE[LP], as does the kernel.
		 */
		if (b == PPC64_PAGE_SHIFT) {
			if (pte.pte_hi & LPTEH_BIG)
				continue;
		} else if ((pte.pte_hi & LPTEH_BIG) == 0)
			continue;

		/* Match found */
		dprintf("%s: PTE found: ptex=0x%jx, pteh=0x%016jx, "
		    "ptel=0x%016jx\n",
		    __func__, (uintmax_t)ptex, (uintmax_t)pte.pte_hi,
		    (uintmax_t)pte.pte_lo);
		break;
	}

	/* Not found? */
	if (ptex == pteg + 8) {
		/* Try secondary hash */
		if (hid == 0)
			return (pte_search(kd, slb, LPTEH_HID, ea, p));
		else {
			_kvm_err(kd, kd->program,
			    "%s: pte not found", __func__);
			return (-1);
		}
	}

	/* PTE found */
	*p = pte;
	return (0);
}

static int
pte_lookup(kvm_t *kd, kvaddr_t ea, ppc64_pt_entry_t *pte)
{
	ppc64_slb_entry_t *slb;

	/* First, find SLB */
	if ((slb = slb_search(kd, ea)) == NULL)
		return (-1);

	/* Next, find PTE */
	return (pte_search(kd, slb, 0, ea, pte));
}

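/*
 * Translate a kernel virtual address for the minidump read path. On
 * success, *pa receives the offset returned by _kvm_pt_find() plus the
 * offset within the page, and the number of bytes valid from there to the
 * end of the page is returned; 0 is returned on failure.
 */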
static int
ppc64mmu_hpt_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct minidumphdr *hdr;
	struct vmstate *vm;
	ppc64_pt_entry_t pte;
	ppc64_physaddr_t pgoff, pgpa;
	off_t ptoff;
	int err;

	vm = kd->vmst;
	hdr = &vm->hdr;
	pgoff = va & PPC64_PAGE_MASK;

	dprintf("%s: va=0x%016jx\n", __func__, (uintmax_t)va);

	/*
	 * A common use case of libkvm is to first find a symbol address
	 * in the kernel image and then use kvatop to translate it, in
	 * order to fetch its corresponding data.
	 *
	 * The problem is that, in the PowerPC64 case, the addresses of
	 * relocated data won't match those in the kernel image. This is
	 * handled here by adding the relocation offset to those addresses.
	 */
	if (va < hdr->dmapbase)
		va += hdr->startkernel - PPC64_KERNBASE;

	/* Handle DMAP */
	if (va >= hdr->dmapbase && va <= hdr->dmapend) {
		pgpa = (va & ~hdr->dmapbase) & ~PPC64_PAGE_MASK;
		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "direct map address 0x%jx not in minidump",
			    __func__, (uintmax_t)va);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	/* Translate VA to PA */
	} else if (va >= hdr->kernbase) {
		if ((err = pte_lookup(kd, va, &pte)) == -1) {
			_kvm_err(kd, kd->program,
			    "%s: pte not valid", __func__);
			goto invalid;
		}

		if (pte.pte_hi & LPTEH_BIG)
			pgpa = (pte.pte_lo & PTEL_LP_PA_MASK) |
			    (va & ~PPC64_PAGE_MASK & LP_PAGE_MASK);
		else
			pgpa = pte.pte_lo & PTEL_PA_MASK;
		dprintf("%s: pgpa=0x%016jx\n", __func__, (uintmax_t)pgpa);

		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "physical address 0x%jx not in minidump",
			    __func__, (uintmax_t)pgpa);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	} else {
		_kvm_err(kd, kd->program,
		    "%s: virtual address 0x%jx not minidumped",
		    __func__, (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static vm_prot_t
entry_to_prot(ppc64_pt_entry_t *pte)
{
	vm_prot_t prot = VM_PROT_READ;

	if (pte->pte_lo & LPTEL_RW)
		prot |= VM_PROT_WRITE;
	/* The page is executable only when the no-execute bit is clear. */
	if ((pte->pte_lo & LPTEL_NOEXEC) == 0)
		prot |= VM_PROT_EXECUTE;
	return (prot);
}

static ppc64_slb_entry_t *
slb_vsid_search(kvm_t *kd, uint64_t vsid)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);
	vsid <<= SLBV_VSID_SHIFT;

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		/* Check if valid and compare VSID */
		if ((slb->slbe & SLBE_VALID) &&
		    (slb->slbv & SLBV_VSID_MASK) == vsid)
			break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program,
		    "%s: segment not found for VSID 0x%jx",
		    __func__, (uintmax_t)vsid >> SLBV_VSID_SHIFT);
		return (NULL);
	}
	return (slb);
}

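/*
 * Reconstruct the EA mapped by a PTE: take the ESID bits from the SLB entry
 * that matches the PTE's VSID, and recover the page bits from the AVA field
 * and the PTEG index, by inverting the hash function.
 */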
static u_long
get_ea(kvm_t *kd, ppc64_pt_entry_t *pte, u_long ptex)
{
	ppc64_slb_entry_t *slb;
	uint64_t ea, hash, vsid;
	int b, shift;

	/* Find SLB */
	vsid = PTEH_AVA_VSID(pte->pte_hi);
	if ((slb = slb_vsid_search(kd, vsid)) == NULL)
		return (~0UL);

	/* Get ESID part of EA */
	ea = slb->slbe & SLBE_ESID_MASK;

	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;

	/*
	 * If there are fewer than 64K PTEGs (i.e. the hash index is narrower
	 * than 16 bits), the upper bits of the EA page must be obtained from
	 * the AVA field in PTEH.
	 */
	if (kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) <
	    0x10000U) {
		/*
		 * Add 0 to 5 EA bits, right after the VSID.
		 * b == 12: 5 bits
		 * b == 24: 4 bits
		 */
		shift = AVA_PAGE_SHIFT(b);
		ea |= (PTEH_AVA_PAGE(pte->pte_hi) >> shift) <<
		    (SLBE_ESID_SHIFT - 5 + shift);
	}

	/*
	 * Get the VA page from the hash and add it to the EA.
	 *
	 * Since hash = (vsid & PTE_HASH_MASK) ^ va_page (primary hash),
	 * va_page can be recovered as hash ^ (vsid & PTE_HASH_MASK),
	 * after undoing the secondary hash when LPTEH_HID is set.
	 */
	hash = (ptex & ~7) >> 3;
	if (pte->pte_hi & LPTEH_HID)
		hash = ~hash & PTE_HASH_MASK;
	ea |= ((hash ^ (vsid & PTE_HASH_MASK)) << b) & ~SLBE_ESID_MASK;
	return (ea);
}

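/*
 * Enumerate kernel mappings by scanning every PTE of the HPT stored in the
 * minidump, invoking the callback for each valid, kernel-related entry.
 */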
static int
ppc64mmu_hpt_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm;
	int ret;
	unsigned int pagesz;
	u_long dva, pa, va;
	u_long ptex, nptes;
	uint64_t vsid;

	ret = 0;
	vm = kd->vmst;
	nptes = vm->hdr.pmapsize / sizeof(ppc64_pt_entry_t);

	/* Walk through PTEs */
	for (ptex = 0; ptex < nptes; ptex++) {
		ppc64_pt_entry_t pte = pte_get(kd, ptex);
		if ((pte.pte_hi & LPTEH_VALID) == 0)
			continue;

		/* Skip non-kernel related pages, as well as VRMA ones */
		vsid = PTEH_AVA_VSID(pte.pte_hi);
		if ((vsid & KERNEL_VSID_BIT) == 0 ||
		    (vsid >> PPC64_PAGE_SHIFT) == VSID_VRMA)
			continue;

		/* Retrieve the page's VA (called EA in PowerPC64 terminology) */
		if ((va = get_ea(kd, &pte, ptex)) == ~0UL)
			goto out;

		/* Get PA and page size */
		if (pte.pte_hi & LPTEH_BIG) {
			pa = pte.pte_lo & PTEL_LP_PA_MASK;
			pagesz = LP_PAGE_SIZE;
		} else {
			pa = pte.pte_lo & PTEL_PA_MASK;
			pagesz = PPC64_PAGE_SIZE;
		}

		/* Get DMAP address */
		dva = vm->hdr.dmapbase + pa;

		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    entry_to_prot(&pte), pagesz, 0))
			goto out;
	}
	ret = 1;

out:
	return (ret);
}


static struct ppc64_mmu_ops ops = {
	.init		= ppc64mmu_hpt_init,
	.cleanup	= ppc64mmu_hpt_cleanup,
	.kvatop		= ppc64mmu_hpt_kvatop,
	.walk_pages	= ppc64mmu_hpt_walk_pages,
};
struct ppc64_mmu_ops *ppc64_mmu_ops_hpt = &ops;