/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2019 Leandro Lupori
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <vm/vm.h>

#include <kvm.h>

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/powerpc/include/minidump.h"
#include "kvm_private.h"
#include "kvm_powerpc64.h"

/*
 * PowerPC64 HPT machine dependent routines for kvm and minidumps.
 *
 * Address Translation parameters:
 *
 * b = 12 (SLB base page size: 4 KB)
 * b = 24 (SLB base page size: 16 MB)
 * p = 12 (page size: 4 KB)
 * p = 24 (page size: 16 MB)
 * s = 28 (segment size: 256 MB)
 */
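/*
 * Rough outline of the lookup implemented below (informal summary):
 * an effective address (EA) is matched against a synthetic SLB
 * (slb_search) to obtain its VSID; the VSID and VA page number are
 * hashed to select a PTEG in the dumped HPT (pte_search); the matching
 * PTE then yields the real page number used to locate the page data
 * in the minidump (ppc64mmu_hpt_kvatop).
 */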

/* Large (huge) page params */
#define	LP_PAGE_SHIFT		24
#define	LP_PAGE_SIZE		(1ULL << LP_PAGE_SHIFT)
#define	LP_PAGE_MASK		0x00ffffffULL

/* SLB */

#define	SEGMENT_LENGTH		0x10000000ULL

#define	round_seg(x)		roundup2((uint64_t)(x), SEGMENT_LENGTH)

/* Virtual real-mode VSID in LPARs */
#define	VSID_VRMA		0x1ffffffULL

#define	SLBV_L			0x0000000000000100ULL /* Large page selector */
#define	SLBV_CLASS		0x0000000000000080ULL /* Class selector */
#define	SLBV_LP_MASK		0x0000000000000030ULL
#define	SLBV_VSID_MASK		0x3ffffffffffff000ULL /* Virtual SegID mask */
#define	SLBV_VSID_SHIFT		12

#define	SLBE_B_MASK		0x0000000006000000ULL
#define	SLBE_B_256MB		0x0000000000000000ULL
#define	SLBE_VALID		0x0000000008000000ULL /* SLB entry valid */
#define	SLBE_INDEX_MASK		0x0000000000000fffULL /* SLB index mask */
#define	SLBE_ESID_MASK		0xfffffffff0000000ULL /* Effective SegID mask */
#define	SLBE_ESID_SHIFT		28

/* PTE */

#define	LPTEH_VSID_SHIFT	12
#define	LPTEH_AVPN_MASK		0xffffffffffffff80ULL
#define	LPTEH_B_MASK		0xc000000000000000ULL
#define	LPTEH_B_256MB		0x0000000000000000ULL
#define	LPTEH_BIG		0x0000000000000004ULL	/* 4KB/16MB page */
#define	LPTEH_HID		0x0000000000000002ULL
#define	LPTEH_VALID		0x0000000000000001ULL

#define	LPTEL_RPGN		0xfffffffffffff000ULL
#define	LPTEL_LP_MASK		0x00000000000ff000ULL
#define	LPTEL_NOEXEC		0x0000000000000004ULL

/* Supervisor        (U: RW, S: RW) */
#define	LPTEL_BW		0x0000000000000002ULL

/* Both Read Only    (U: RO, S: RO) */
#define	LPTEL_BR		0x0000000000000003ULL

#define	LPTEL_RW		LPTEL_BW
#define	LPTEL_RO		LPTEL_BR

/*
 * PTE AVA field manipulation macros.
 *
 * AVA[0:54] = PTEH[2:56]
 * AVA[VSID] = AVA[0:49] = PTEH[2:51]
 * AVA[PAGE] = AVA[50:54] = PTEH[52:56]
 */
#define	PTEH_AVA_VSID_MASK	0x3ffffffffffff000UL
#define	PTEH_AVA_VSID_SHIFT	12
#define	PTEH_AVA_VSID(p) \
	(((p) & PTEH_AVA_VSID_MASK) >> PTEH_AVA_VSID_SHIFT)

#define	PTEH_AVA_PAGE_MASK	0x0000000000000f80UL
#define	PTEH_AVA_PAGE_SHIFT	7
#define	PTEH_AVA_PAGE(p) \
	(((p) & PTEH_AVA_PAGE_MASK) >> PTEH_AVA_PAGE_SHIFT)

/* Masks to obtain the Physical Address from PTE low 64-bit word. */
#define	PTEL_PA_MASK		0x0ffffffffffff000UL
#define	PTEL_LP_PA_MASK		0x0fffffffff000000UL

#define	PTE_HASH_MASK		0x0000007fffffffffUL

/*
 * Number of AVA/VA page bits to shift right, in order to leave only the
 * ones that should be considered.
 *
 * q = MIN(54, 77-b) (PowerISA v2.07B, 5.7.7.3)
 * n = q + 1 - 50 (VSID size in bits)
 * s(ava) = 5 - n
 * s(va) = (28 - b) - n
 *
 * q: bit number of lower limit of VA/AVA bits to compare
 * n: number of AVA/VA page bits to compare
 * s: shift amount
 * 28 - b: VA page size in bits
 */
#define	AVA_PAGE_SHIFT(b)	(5 - (MIN(54, 77-(b)) + 1 - 50))
#define	VA_PAGE_SHIFT(b)	(28 - (b) - (MIN(54, 77-(b)) + 1 - 50))
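/*
 * E.g., evaluating the formulas above:
 * b = 12: q = 54, n = 5, s(ava) = 0, s(va) = 11
 * b = 24: q = 53, n = 4, s(ava) = 1, s(va) = 0
 */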

/* Kernel ESID -> VSID mapping */
#define	KERNEL_VSID_BIT	0x0000001000000000UL /* Bit set in all kernel VSIDs */
#define	KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
				* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
				KERNEL_VSID_BIT)
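/*
 * Note: KERNEL_VSID is meant to mirror the kernel's own ESID -> VSID
 * mapping for kernel segments: the 36-bit ESID is rotated left by 8,
 * multiplied by a hash constant, truncated below KERNEL_VSID_BIT, and
 * KERNEL_VSID_BIT is forced on.  walk_pages relies on that bit being
 * set to tell kernel PTEs apart from user ones.
 */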

/* Types */

typedef uint64_t	ppc64_physaddr_t;

typedef struct {
	uint64_t slbv;
	uint64_t slbe;
} ppc64_slb_entry_t;

typedef struct {
	uint64_t pte_hi;
	uint64_t pte_lo;
} ppc64_pt_entry_t;

struct hpt_data {
	ppc64_slb_entry_t *slbs;
	uint32_t slbsize;
};


static void
slb_fill(ppc64_slb_entry_t *slb, uint64_t ea, uint64_t i)
{
	uint64_t esid;

	esid = ea >> SLBE_ESID_SHIFT;
	slb->slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | i;
}

static int
slb_init(kvm_t *kd)
{
	struct minidumphdr *hdr;
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	uint32_t slbsize;
	uint64_t ea, i, maxmem;

	hdr = &kd->vmst->hdr;
	data = PPC64_MMU_DATA(kd);

	/* Alloc SLBs */
	maxmem = hdr->bitmapsize * 8 * PPC64_PAGE_SIZE;
	slbsize = round_seg(hdr->kernend + 1 - hdr->kernbase + maxmem) /
	    SEGMENT_LENGTH * sizeof(ppc64_slb_entry_t);
	data->slbs = _kvm_malloc(kd, slbsize);
	if (data->slbs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate slbs");
		return (-1);
	}
	data->slbsize = slbsize;

	dprintf("%s: maxmem=0x%jx, segs=%jd, slbsize=0x%jx\n",
	    __func__, (uintmax_t)maxmem,
	    (uintmax_t)slbsize / sizeof(ppc64_slb_entry_t), (uintmax_t)slbsize);

	/*
	 * Generate the needed SLB entries.
	 *
	 * When translating addresses from EA to VA to PA, the needed SLB
	 * entry could be generated on the fly, but that does not work for
	 * the walk_pages method, which must search the SLB entries by
	 * VSID in order to recover the EA that corresponds to a PTE.
	 */

	/* VM area */
	for (ea = hdr->kernbase, i = 0, slb = data->slbs;
	    ea < hdr->kernend; ea += SEGMENT_LENGTH, i++, slb++)
		slb_fill(slb, ea, i);

	/* DMAP area */
	for (ea = hdr->dmapbase;
	    ea < MIN(hdr->dmapend, hdr->dmapbase + maxmem);
	    ea += SEGMENT_LENGTH, i++, slb++) {
		slb_fill(slb, ea, i);
		if (hdr->hw_direct_map)
			slb->slbv |= SLBV_L;
	}

	return (0);
}

static void
ppc64mmu_hpt_cleanup(kvm_t *kd)
{
	struct hpt_data *data;

	if (kd->vmst == NULL)
		return;

	data = PPC64_MMU_DATA(kd);
	free(data->slbs);
	free(data);
	PPC64_MMU_DATA(kd) = NULL;
}

static int
ppc64mmu_hpt_init(kvm_t *kd)
{
	struct hpt_data *data;
	struct minidumphdr *hdr;

	hdr = &kd->vmst->hdr;

	/* Alloc MMU data */
	data = _kvm_malloc(kd, sizeof(*data));
	if (data == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate MMU data");
		return (-1);
	}
	data->slbs = NULL;
	PPC64_MMU_DATA(kd) = data;

	if (slb_init(kd) == -1)
		goto failed;

	return (0);

failed:
	ppc64mmu_hpt_cleanup(kd);
	return (-1);
}

static ppc64_slb_entry_t *
slb_search(kvm_t *kd, kvaddr_t ea)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		if ((slb->slbe & SLBE_VALID) == 0)
			continue;

		/* Compare the 36-bit (64-s) ESID of the EA with this segment's */
		if ((slb->slbe & SLBE_ESID_MASK) != (ea & SLBE_ESID_MASK))
			continue;

		/* Match found */
		dprintf("SEG#%02d: slbv=0x%016jx, slbe=0x%016jx\n",
		    i, (uintmax_t)slb->slbv, (uintmax_t)slb->slbe);
		break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program, "%s: segment not found for EA 0x%jx",
		    __func__, (uintmax_t)ea);
		return (NULL);
	}
	return (slb);
}

static ppc64_pt_entry_t
pte_get(kvm_t *kd, u_long ptex)
{
	ppc64_pt_entry_t pte, *p;

	p = _kvm_pmap_get(kd, ptex, sizeof(pte));
	pte.pte_hi = be64toh(p->pte_hi);
	pte.pte_lo = be64toh(p->pte_lo);
	return (pte);
}

static int
pte_search(kvm_t *kd, ppc64_slb_entry_t *slb, uint64_t hid, kvaddr_t ea,
    ppc64_pt_entry_t *p)
{
	uint64_t hash, hmask;
	uint64_t pteg, ptex;
	uint64_t va_vsid, va_page;
	int b;
	int ava_pg_shift, va_pg_shift;
	ppc64_pt_entry_t pte;

	/*
	 * Get VA:
	 *
	 * va(78) = va_vsid(50) || va_page(s-b) || offset(b)
	 *
	 * va_vsid: 50-bit VSID (78-s)
	 * va_page: (s-b)-bit VA page
	 */
	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;
	va_vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	va_page = (ea & ~SLBE_ESID_MASK) >> b;

	dprintf("%s: hid=0x%jx, ea=0x%016jx, b=%d, va_vsid=0x%010jx, "
	    "va_page=0x%04jx\n",
	    __func__, (uintmax_t)hid, (uintmax_t)ea, b,
	    (uintmax_t)va_vsid, (uintmax_t)va_page);

	/*
	 * Get hash:
	 *
	 * Primary hash: va_vsid(11:49) ^ va_page(s-b)
	 * Secondary hash: ~primary_hash
	 */
	hash = (va_vsid & PTE_HASH_MASK) ^ va_page;
	if (hid)
		hash = ~hash & PTE_HASH_MASK;

	/*
	 * Get PTEG:
	 *
	 * pteg = (hash(0:38) & hmask) << 3
	 *
	 * hmask (hash mask): mask generated from HTABSIZE || 11*0b1
	 * hmask = number_of_ptegs - 1
	 */
	hmask = kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) - 1;
	pteg = (hash & hmask) << 3;
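	/*
	 * E.g., for a (hypothetical) 64 MB HPT: pmapsize = 0x4000000,
	 * which holds 0x80000 PTEGs, so hmask = 0x7ffff and pteg is the
	 * index of the first of the 8 PTEs in the selected group.
	 */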

	ava_pg_shift = AVA_PAGE_SHIFT(b);
	va_pg_shift = VA_PAGE_SHIFT(b);

	dprintf("%s: hash=0x%010jx, hmask=0x%010jx, (hash & hmask)=0x%010jx, "
	    "pteg=0x%011jx, ava_pg_shift=%d, va_pg_shift=%d\n",
	    __func__, (uintmax_t)hash, (uintmax_t)hmask,
	    (uintmax_t)(hash & hmask), (uintmax_t)pteg,
	    ava_pg_shift, va_pg_shift);

	/* Search PTEG */
	for (ptex = pteg; ptex < pteg + 8; ptex++) {
		pte = pte_get(kd, ptex);

		/* Check H, V and B */
		if ((pte.pte_hi & LPTEH_HID) != hid ||
		    (pte.pte_hi & LPTEH_VALID) == 0 ||
		    (pte.pte_hi & LPTEH_B_MASK) != LPTEH_B_256MB)
			continue;

		/* Compare AVA with VA */
		if (PTEH_AVA_VSID(pte.pte_hi) != va_vsid ||
		    (PTEH_AVA_PAGE(pte.pte_hi) >> ava_pg_shift) !=
		    (va_page >> va_pg_shift))
			continue;

		/*
		 * Check if PTE[L] matches SLBV[L].
		 *
		 * Note: this check ignores PTE[LP], as does the kernel.
		 */
		if (b == PPC64_PAGE_SHIFT) {
			if (pte.pte_hi & LPTEH_BIG)
				continue;
		} else if ((pte.pte_hi & LPTEH_BIG) == 0)
			continue;

		/* Match found */
		dprintf("%s: PTE found: ptex=0x%jx, pteh=0x%016jx, "
		    "ptel=0x%016jx\n",
		    __func__, (uintmax_t)ptex, (uintmax_t)pte.pte_hi,
		    (uintmax_t)pte.pte_lo);
		break;
	}

	/* Not found? */
	if (ptex == pteg + 8) {
		/* Try secondary hash */
		if (hid == 0)
			return (pte_search(kd, slb, LPTEH_HID, ea, p));
		else {
			_kvm_err(kd, kd->program,
			    "%s: pte not found", __func__);
			return (-1);
		}
	}

	/* PTE found */
	*p = pte;
	return (0);
}

static int
pte_lookup(kvm_t *kd, kvaddr_t ea, ppc64_pt_entry_t *pte)
{
	ppc64_slb_entry_t *slb;

	/* First, find SLB */
	if ((slb = slb_search(kd, ea)) == NULL)
		return (-1);

	/* Next, find PTE */
	return (pte_search(kd, slb, 0, ea, pte));
}

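/*
 * Note on the convention used below (shared by the libkvm minidump
 * backends): on success, *pa receives the offset of the page data in
 * the minidump file plus the intra-page offset, and the return value
 * is the number of contiguous bytes valid at that offset; 0 is
 * returned on failure.
 */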
static int
ppc64mmu_hpt_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct minidumphdr *hdr;
	struct vmstate *vm;
	ppc64_pt_entry_t pte;
	ppc64_physaddr_t pgoff, pgpa;
	off_t ptoff;
	int err;

	vm = kd->vmst;
	hdr = &vm->hdr;
	pgoff = va & PPC64_PAGE_MASK;

	dprintf("%s: va=0x%016jx\n", __func__, (uintmax_t)va);

	/*
	 * A common use case of libkvm is to first find a symbol address
	 * from the kernel image and then use kvatop to translate it and
	 * to be able to fetch its corresponding data.
	 *
	 * The problem is that, in the PowerPC64 case, the addresses of
	 * relocated data won't match those in the kernel image. This is
	 * handled here by adding the relocation offset to those addresses.
	 */
	if (va < hdr->dmapbase)
		va += hdr->startkernel - PPC64_KERNBASE;

	/* Handle DMAP */
	if (va >= hdr->dmapbase && va <= hdr->dmapend) {
		pgpa = (va & ~hdr->dmapbase) & ~PPC64_PAGE_MASK;
		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "direct map address 0x%jx not in minidump",
			    __func__, (uintmax_t)va);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	/* Translate VA to PA */
	} else if (va >= hdr->kernbase) {
		if ((err = pte_lookup(kd, va, &pte)) == -1) {
			_kvm_err(kd, kd->program,
			    "%s: pte not valid", __func__);
			goto invalid;
		}

		if (pte.pte_hi & LPTEH_BIG)
			pgpa = (pte.pte_lo & PTEL_LP_PA_MASK) |
			    (va & ~PPC64_PAGE_MASK & LP_PAGE_MASK);
		else
			pgpa = pte.pte_lo & PTEL_PA_MASK;
		dprintf("%s: pgpa=0x%016jx\n", __func__, (uintmax_t)pgpa);

		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "physical address 0x%jx not in minidump",
			    __func__, (uintmax_t)pgpa);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	} else {
		_kvm_err(kd, kd->program,
		    "%s: virtual address 0x%jx not minidumped",
		    __func__, (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static vm_prot_t
entry_to_prot(ppc64_pt_entry_t *pte)
{
	vm_prot_t prot = VM_PROT_READ;

	if (pte->pte_lo & LPTEL_RW)
		prot |= VM_PROT_WRITE;
	if ((pte->pte_lo & LPTEL_NOEXEC) == 0)
		prot |= VM_PROT_EXECUTE;
	return (prot);
}

static ppc64_slb_entry_t *
slb_vsid_search(kvm_t *kd, uint64_t vsid)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);
	vsid <<= SLBV_VSID_SHIFT;

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		/* Check if valid and compare VSID */
		if ((slb->slbe & SLBE_VALID) &&
		    (slb->slbv & SLBV_VSID_MASK) == vsid)
			break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program,
		    "%s: segment not found for VSID 0x%jx",
		    __func__, (uintmax_t)vsid >> SLBV_VSID_SHIFT);
		return (NULL);
	}
	return (slb);
}

static u_long
get_ea(kvm_t *kd, ppc64_pt_entry_t *pte, u_long ptex)
{
	ppc64_slb_entry_t *slb;
	uint64_t ea, hash, vsid;
	int b, shift;

	/* Find SLB */
	vsid = PTEH_AVA_VSID(pte->pte_hi);
	if ((slb = slb_vsid_search(kd, vsid)) == NULL)
		return (~0UL);

	/* Get ESID part of EA */
	ea = slb->slbe & SLBE_ESID_MASK;

	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;

	/*
	 * If there are fewer than 64K PTEGs, the hash (modulo the number
	 * of PTEGs) does not preserve the upper EA page bits, so they
	 * must be recovered from the PTE's AVA page field.
	 */
	if (kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) <
	    0x10000U) {
		/*
		 * Add 0 to 5 EA bits, right after VSID.
		 * b == 12: 5 bits
		 * b == 24: 4 bits
		 */
		shift = AVA_PAGE_SHIFT(b);
		ea |= (PTEH_AVA_PAGE(pte->pte_hi) >> shift) <<
		    (SLBE_ESID_SHIFT - 5 + shift);
	}

	/* Get VA page from hash and add to EA. */
	hash = (ptex & ~7) >> 3;
	if (pte->pte_hi & LPTEH_HID)
		hash = ~hash & PTE_HASH_MASK;
	ea |= ((hash ^ (vsid & PTE_HASH_MASK)) << b) & ~SLBE_ESID_MASK;
	return (ea);
}

static int
ppc64mmu_hpt_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm;
	int ret;
	unsigned int pagesz;
	u_long dva, pa, va;
	u_long ptex, nptes;
	uint64_t vsid;

	ret = 0;
	vm = kd->vmst;
	nptes = vm->hdr.pmapsize / sizeof(ppc64_pt_entry_t);

	/* Walk through PTEs */
	for (ptex = 0; ptex < nptes; ptex++) {
		ppc64_pt_entry_t pte = pte_get(kd, ptex);
		if ((pte.pte_hi & LPTEH_VALID) == 0)
			continue;

		/* Skip non-kernel related pages, as well as VRMA ones */
		vsid = PTEH_AVA_VSID(pte.pte_hi);
		if ((vsid & KERNEL_VSID_BIT) == 0 ||
		    (vsid >> PPC64_PAGE_SHIFT) == VSID_VRMA)
			continue;

		/* Retrieve the page's VA (EA in PPC64 terminology) */
		if ((va = get_ea(kd, &pte, ptex)) == ~0UL)
			goto out;

		/* Get PA and page size */
		if (pte.pte_hi & LPTEH_BIG) {
			pa = pte.pte_lo & PTEL_LP_PA_MASK;
			pagesz = LP_PAGE_SIZE;
		} else {
			pa = pte.pte_lo & PTEL_PA_MASK;
			pagesz = PPC64_PAGE_SIZE;
		}

		/* Get DMAP address */
		dva = vm->hdr.dmapbase + pa;

		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    entry_to_prot(&pte), pagesz, 0))
			goto out;
	}
	ret = 1;

out:
	return (ret);
}


static struct ppc64_mmu_ops ops = {
	.init		= ppc64mmu_hpt_init,
	.cleanup	= ppc64mmu_hpt_cleanup,
	.kvatop		= ppc64mmu_hpt_kvatop,
	.walk_pages	= ppc64mmu_hpt_walk_pages,
};
struct ppc64_mmu_ops *ppc64_mmu_ops_hpt = &ops;
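
/*
 * Illustrative usage sketch (consumer side, not part of this file):
 * reading a kernel variable from a powerpc64 minidump ends up going
 * through the kvatop routine above.  Paths and the symbol name are
 * placeholders.
 *
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <kvm.h>
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct kvm_nlist nl[] = {
 *		{ .n_name = "_physmem" },
 *		{ .n_name = NULL },
 *	};
 *	long physmem;
 *
 *	kvm_t *kd = kvm_open2("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    O_RDONLY, errbuf, NULL);
 *	if (kd != NULL && kvm_nlist2(kd, nl) == 0)
 *		kvm_read2(kd, nl[0].n_value, &physmem, sizeof(physmem));
 */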