/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2019 Leandro Lupori
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
 */

#include <sys/param.h>
#include <vm/vm.h>

#include <kvm.h>

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/powerpc/include/minidump.h"
#include "kvm_private.h"
#include "kvm_powerpc64.h"

/*
 * PowerPC64 HPT machine dependent routines for kvm and minidumps.
 *
 * Address Translation parameters:
 *
 * b = 12 (SLB base page size: 4 KB)
 * b = 24 (SLB base page size: 16 MB)
 * p = 12 (page size: 4 KB)
 * p = 24 (page size: 16 MB)
 * s = 28 (segment size: 256 MB)
 */
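
/*
 * For illustration: with s = 28 and b = 12, an effective address (EA)
 * decomposes as
 *
 *   ESID   = EA >> 28                  (36-bit segment selector)
 *   page   = (EA & 0x0fffffff) >> 12   (16-bit VA page index)
 *   offset = EA & 0xfff                (12-bit byte offset)
 *
 * With b = 24 the page index shrinks to 4 bits and the byte offset grows
 * to 24 bits.  This is the split that slb_fill() and pte_search() below
 * operate on.
 */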

/* Large (huge) page params */
#define LP_PAGE_SHIFT           24
#define LP_PAGE_SIZE            (1ULL << LP_PAGE_SHIFT)
#define LP_PAGE_MASK            0x00ffffffULL

/* SLB */

#define SEGMENT_LENGTH          0x10000000ULL

#define round_seg(x)            roundup2((uint64_t)(x), SEGMENT_LENGTH)

/* Virtual real-mode VSID in LPARs */
#define VSID_VRMA               0x1ffffffULL

#define SLBV_L                  0x0000000000000100ULL /* Large page selector */
#define SLBV_CLASS              0x0000000000000080ULL /* Class selector */
#define SLBV_LP_MASK            0x0000000000000030ULL
#define SLBV_VSID_MASK          0x3ffffffffffff000ULL /* Virtual SegID mask */
#define SLBV_VSID_SHIFT         12

#define SLBE_B_MASK             0x0000000006000000ULL
#define SLBE_B_256MB            0x0000000000000000ULL
#define SLBE_VALID              0x0000000008000000ULL /* SLB entry valid */
#define SLBE_INDEX_MASK         0x0000000000000fffULL /* SLB index mask */
#define SLBE_ESID_MASK          0xfffffffff0000000ULL /* Effective SegID mask */
#define SLBE_ESID_SHIFT         28

/* PTE */

#define LPTEH_VSID_SHIFT        12
#define LPTEH_AVPN_MASK         0xffffffffffffff80ULL
#define LPTEH_B_MASK            0xc000000000000000ULL
#define LPTEH_B_256MB           0x0000000000000000ULL
#define LPTEH_BIG               0x0000000000000004ULL /* 4KB/16MB page */
#define LPTEH_HID               0x0000000000000002ULL
#define LPTEH_VALID             0x0000000000000001ULL

#define LPTEL_RPGN              0xfffffffffffff000ULL
#define LPTEL_LP_MASK           0x00000000000ff000ULL
#define LPTEL_NOEXEC            0x0000000000000004ULL

/* Supervisor (U: RW, S: RW) */
#define LPTEL_BW                0x0000000000000002ULL

/* Both Read Only (U: RO, S: RO) */
#define LPTEL_BR                0x0000000000000003ULL

#define LPTEL_RW                LPTEL_BW
#define LPTEL_RO                LPTEL_BR

/*
 * PTE AVA field manipulation macros.
 *
 * AVA[0:54] = PTEH[2:56]
 * AVA[VSID] = AVA[0:49]  = PTEH[2:51]
 * AVA[PAGE] = AVA[50:54] = PTEH[52:56]
 */
#define PTEH_AVA_VSID_MASK      0x3ffffffffffff000UL
#define PTEH_AVA_VSID_SHIFT     12
#define PTEH_AVA_VSID(p) \
        (((p) & PTEH_AVA_VSID_MASK) >> PTEH_AVA_VSID_SHIFT)

#define PTEH_AVA_PAGE_MASK      0x0000000000000f80UL
#define PTEH_AVA_PAGE_SHIFT     7
#define PTEH_AVA_PAGE(p) \
        (((p) & PTEH_AVA_PAGE_MASK) >> PTEH_AVA_PAGE_SHIFT)
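
/*
 * For illustration, using a made-up (hypothetical) PTE high word
 * pte_hi = 0x0000123456789a80:
 *
 *   PTEH_AVA_VSID(pte_hi) = (pte_hi & 0x3ffffffffffff000) >> 12 = 0x123456789
 *   PTEH_AVA_PAGE(pte_hi) = (pte_hi & 0xf80) >> 7               = 0x15
 */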

/* Masks to obtain the Physical Address from PTE low 64-bit word. */
#define PTEL_PA_MASK            0x0ffffffffffff000UL
#define PTEL_LP_PA_MASK         0x0fffffffff000000UL

#define PTE_HASH_MASK           0x0000007fffffffffUL

/*
 * Number of AVA/VA page bits to shift right, in order to leave only the
 * ones that should be considered.
 *
 * q = MIN(54, 77-b)    (PowerISA v2.07B, 5.7.7.3)
 * n = q + 1 - 50       (VSID size in bits)
 * s(ava) = 5 - n
 * s(va) = (28 - b) - n
 *
 * q: bit number of lower limit of VA/AVA bits to compare
 * n: number of AVA/VA page bits to compare
 * s: shift amount
 * 28 - b: VA page size in bits
 */
#define AVA_PAGE_SHIFT(b)       (5 - (MIN(54, 77-(b)) + 1 - 50))
#define VA_PAGE_SHIFT(b)        (28 - (b) - (MIN(54, 77-(b)) + 1 - 50))
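
/*
 * Worked out for the two page sizes used here:
 *
 *   b = 12: q = MIN(54, 65) = 54, n = 5  =>  AVA_PAGE_SHIFT = 0, VA_PAGE_SHIFT = 11
 *   b = 24: q = MIN(54, 53) = 53, n = 4  =>  AVA_PAGE_SHIFT = 1, VA_PAGE_SHIFT = 0
 */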

/* Kernel ESID -> VSID mapping */
#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
#define KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
                            * 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
                            KERNEL_VSID_BIT)

/* Types */

typedef uint64_t ppc64_physaddr_t;

typedef struct {
        uint64_t slbv;
        uint64_t slbe;
} ppc64_slb_entry_t;

typedef struct {
        uint64_t pte_hi;
        uint64_t pte_lo;
} ppc64_pt_entry_t;

struct hpt_data {
        ppc64_slb_entry_t *slbs;
        uint32_t slbsize;
};

static void
slb_fill(ppc64_slb_entry_t *slb, uint64_t ea, uint64_t i)
{
        uint64_t esid;

        esid = ea >> SLBE_ESID_SHIFT;
        slb->slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
        slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | i;
}

static int
slb_init(kvm_t *kd)
{
        struct minidumphdr *hdr;
        struct hpt_data *data;
        ppc64_slb_entry_t *slb;
        uint32_t slbsize;
        uint64_t ea, i, maxmem;

        hdr = &kd->vmst->hdr;
        data = PPC64_MMU_DATA(kd);

        /* Alloc SLBs */
        maxmem = hdr->bitmapsize * 8 * PPC64_PAGE_SIZE;
        slbsize = round_seg(hdr->kernend + 1 - hdr->kernbase + maxmem) /
            SEGMENT_LENGTH * sizeof(ppc64_slb_entry_t);
        data->slbs = _kvm_malloc(kd, slbsize);
        if (data->slbs == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate slbs");
                return (-1);
        }
        data->slbsize = slbsize;

        dprintf("%s: maxmem=0x%jx, segs=%jd, slbsize=0x%jx\n",
            __func__, (uintmax_t)maxmem,
            (uintmax_t)slbsize / sizeof(ppc64_slb_entry_t), (uintmax_t)slbsize);

        /*
         * Generate the needed SLB entries.
         *
         * When translating addresses from EA to VA to PA, the needed SLB
         * entry could be generated on the fly, but this is not the case
         * for the walk_pages method, which needs to search the SLB entries
         * by VSID in order to find the EA corresponding to a PTE.
         */

        /* VM area */
        for (ea = hdr->kernbase, i = 0, slb = data->slbs;
            ea < hdr->kernend; ea += SEGMENT_LENGTH, i++, slb++)
                slb_fill(slb, ea, i);

        /* DMAP area */
        for (ea = hdr->dmapbase;
            ea < MIN(hdr->dmapend, hdr->dmapbase + maxmem);
            ea += SEGMENT_LENGTH, i++, slb++) {
                slb_fill(slb, ea, i);
                if (hdr->hw_direct_map)
                        slb->slbv |= SLBV_L;
        }

        return (0);
}

static void
ppc64mmu_hpt_cleanup(kvm_t *kd)
{
        struct hpt_data *data;

        if (kd->vmst == NULL)
                return;

        data = PPC64_MMU_DATA(kd);
        free(data->slbs);
        free(data);
        PPC64_MMU_DATA(kd) = NULL;
}

static int
ppc64mmu_hpt_init(kvm_t *kd)
{
        struct hpt_data *data;

        /* Alloc MMU data */
        data = _kvm_malloc(kd, sizeof(*data));
        if (data == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate MMU data");
                return (-1);
        }
        data->slbs = NULL;
        PPC64_MMU_DATA(kd) = data;

        if (slb_init(kd) == -1)
                goto failed;

        return (0);

failed:
        ppc64mmu_hpt_cleanup(kd);
        return (-1);
}

static ppc64_slb_entry_t *
slb_search(kvm_t *kd, kvaddr_t ea)
{
        struct hpt_data *data;
        ppc64_slb_entry_t *slb;
        int i, n;

        data = PPC64_MMU_DATA(kd);
        slb = data->slbs;
        n = data->slbsize / sizeof(ppc64_slb_entry_t);

        /* SLB search */
        for (i = 0; i < n; i++, slb++) {
                if ((slb->slbe & SLBE_VALID) == 0)
                        continue;

                /* Compare the 36-bit (64-s) ESID of the EA with the segment's */
                if ((slb->slbe & SLBE_ESID_MASK) != (ea & SLBE_ESID_MASK))
                        continue;

                /* Match found */
                dprintf("SEG#%02d: slbv=0x%016jx, slbe=0x%016jx\n",
                    i, (uintmax_t)slb->slbv, (uintmax_t)slb->slbe);
                break;
        }

        /* SLB not found */
        if (i == n) {
                _kvm_err(kd, kd->program, "%s: segment not found for EA 0x%jx",
                    __func__, (uintmax_t)ea);
                return (NULL);
        }
        return (slb);
}

static ppc64_pt_entry_t
pte_get(kvm_t *kd, u_long ptex)
{
        ppc64_pt_entry_t pte, *p;

        p = _kvm_pmap_get(kd, ptex, sizeof(pte));
        pte.pte_hi = be64toh(p->pte_hi);
        pte.pte_lo = be64toh(p->pte_lo);
        return (pte);
}

static int
pte_search(kvm_t *kd, ppc64_slb_entry_t *slb, uint64_t hid, kvaddr_t ea,
    ppc64_pt_entry_t *p)
{
        uint64_t hash, hmask;
        uint64_t pteg, ptex;
        uint64_t va_vsid, va_page;
        int b;
        int ava_pg_shift, va_pg_shift;
        ppc64_pt_entry_t pte;

        /*
         * Get VA:
         *
         * va(78) = va_vsid(50) || va_page(s-b) || offset(b)
         *
         * va_vsid: 50-bit VSID (78-s)
         * va_page: (s-b)-bit VA page
         */
        b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;
        va_vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
        va_page = (ea & ~SLBE_ESID_MASK) >> b;

        dprintf("%s: hid=0x%jx, ea=0x%016jx, b=%d, va_vsid=0x%010jx, "
            "va_page=0x%04jx\n",
            __func__, (uintmax_t)hid, (uintmax_t)ea, b,
            (uintmax_t)va_vsid, (uintmax_t)va_page);

        /*
         * Get hash:
         *
         * Primary hash: va_vsid(11:49) ^ va_page(s-b)
         * Secondary hash: ~primary_hash
         */
        hash = (va_vsid & PTE_HASH_MASK) ^ va_page;
        if (hid)
                hash = ~hash & PTE_HASH_MASK;

        /*
         * Get PTEG:
         *
         * pteg = (hash(0:38) & hmask) << 3
         *
         * hmask (hash mask): mask generated from HTABSIZE || 11*0b1
         * hmask = number_of_ptegs - 1
         */
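        /*
         * For illustration, with a hypothetical 16 MB hash table
         * (hdr.pmapsize = 0x1000000): each PTEG holds 8 PTEs of 16 bytes,
         * so there are 0x1000000 / 128 = 0x20000 PTEGs and hmask = 0x1ffff.
         * A primary hash of 0x7fff012345 then selects PTEG index 0x12345,
         * i.e. PTEs 0x91a28 through 0x91a2f in the search loop below.
         */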
        hmask = kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) - 1;
        pteg = (hash & hmask) << 3;

        ava_pg_shift = AVA_PAGE_SHIFT(b);
        va_pg_shift = VA_PAGE_SHIFT(b);

        dprintf("%s: hash=0x%010jx, hmask=0x%010jx, (hash & hmask)=0x%010jx, "
            "pteg=0x%011jx, ava_pg_shift=%d, va_pg_shift=%d\n",
            __func__, (uintmax_t)hash, (uintmax_t)hmask,
            (uintmax_t)(hash & hmask), (uintmax_t)pteg,
            ava_pg_shift, va_pg_shift);

        /* Search PTEG */
        for (ptex = pteg; ptex < pteg + 8; ptex++) {
                pte = pte_get(kd, ptex);

                /* Check H, V and B */
                if ((pte.pte_hi & LPTEH_HID) != hid ||
                    (pte.pte_hi & LPTEH_VALID) == 0 ||
                    (pte.pte_hi & LPTEH_B_MASK) != LPTEH_B_256MB)
                        continue;

                /* Compare AVA with VA */
                if (PTEH_AVA_VSID(pte.pte_hi) != va_vsid ||
                    (PTEH_AVA_PAGE(pte.pte_hi) >> ava_pg_shift) !=
                    (va_page >> va_pg_shift))
                        continue;

                /*
                 * Check if PTE[L] matches SLBV[L].
                 *
                 * Note: this check ignores PTE[LP], as does the kernel.
                 */
                if (b == PPC64_PAGE_SHIFT) {
                        if (pte.pte_hi & LPTEH_BIG)
                                continue;
                } else if ((pte.pte_hi & LPTEH_BIG) == 0)
                        continue;

                /* Match found */
                dprintf("%s: PTE found: ptex=0x%jx, pteh=0x%016jx, "
                    "ptel=0x%016jx\n",
                    __func__, (uintmax_t)ptex, (uintmax_t)pte.pte_hi,
                    (uintmax_t)pte.pte_lo);
                break;
        }

        /* Not found? */
        if (ptex == pteg + 8) {
                /* Try secondary hash */
                if (hid == 0)
                        return (pte_search(kd, slb, LPTEH_HID, ea, p));
                else {
                        _kvm_err(kd, kd->program,
                            "%s: pte not found", __func__);
                        return (-1);
                }
        }

        /* PTE found */
        *p = pte;
        return (0);
}

static int
pte_lookup(kvm_t *kd, kvaddr_t ea, ppc64_pt_entry_t *pte)
{
        ppc64_slb_entry_t *slb;

        /* First, find SLB */
        if ((slb = slb_search(kd, ea)) == NULL)
                return (-1);

        /* Next, find PTE */
        return (pte_search(kd, slb, 0, ea, pte));
}

static int
ppc64mmu_hpt_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
        struct minidumphdr *hdr;
        struct vmstate *vm;
        ppc64_pt_entry_t pte;
        ppc64_physaddr_t pgoff, pgpa;
        off_t ptoff;
        int err;

        vm = kd->vmst;
        hdr = &vm->hdr;
        pgoff = va & PPC64_PAGE_MASK;

        dprintf("%s: va=0x%016jx\n", __func__, (uintmax_t)va);

        /*
         * A common use case of libkvm is to first find a symbol address
         * in the kernel image and then use kvatop to translate it, in order
         * to fetch its corresponding data.
         *
         * The problem is that, on PowerPC64, the addresses of relocated
         * data won't match those in the kernel image.  This is handled here
         * by adding the relocation offset to those addresses.
         */
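        /*
         * For example, assuming the kernel was relocated by 64 MB
         * (hdr->startkernel == PPC64_KERNBASE + 0x4000000, a hypothetical
         * offset), a symbol address taken from the static kernel image
         * gets 0x4000000 added to it before the HPT lookup below.
         */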
        if (va < hdr->dmapbase)
                va += hdr->startkernel - PPC64_KERNBASE;

        /* Handle DMAP */
        if (va >= hdr->dmapbase && va <= hdr->dmapend) {
                pgpa = (va & ~hdr->dmapbase) & ~PPC64_PAGE_MASK;
                ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
                if (ptoff == -1) {
                        _kvm_err(kd, kd->program, "%s: "
                            "direct map address 0x%jx not in minidump",
                            __func__, (uintmax_t)va);
                        goto invalid;
                }
                *pa = ptoff + pgoff;
                return (PPC64_PAGE_SIZE - pgoff);
        /* Translate VA to PA */
        } else if (va >= hdr->kernbase) {
                if ((err = pte_lookup(kd, va, &pte)) == -1) {
                        _kvm_err(kd, kd->program,
                            "%s: pte not valid", __func__);
                        goto invalid;
                }

                if (pte.pte_hi & LPTEH_BIG)
                        pgpa = (pte.pte_lo & PTEL_LP_PA_MASK) |
                            (va & ~PPC64_PAGE_MASK & LP_PAGE_MASK);
                else
                        pgpa = pte.pte_lo & PTEL_PA_MASK;
                dprintf("%s: pgpa=0x%016jx\n", __func__, (uintmax_t)pgpa);

                ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
                if (ptoff == -1) {
                        _kvm_err(kd, kd->program, "%s: "
                            "physical address 0x%jx not in minidump",
                            __func__, (uintmax_t)pgpa);
                        goto invalid;
                }
                *pa = ptoff + pgoff;
                return (PPC64_PAGE_SIZE - pgoff);
        } else {
                _kvm_err(kd, kd->program,
                    "%s: virtual address 0x%jx not minidumped",
                    __func__, (uintmax_t)va);
                goto invalid;
        }

invalid:
        _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
        return (0);
}

static vm_prot_t
entry_to_prot(ppc64_pt_entry_t *pte)
{
        vm_prot_t prot = VM_PROT_READ;

        if (pte->pte_lo & LPTEL_RW)
                prot |= VM_PROT_WRITE;
        /* The page is executable only when the no-execute (N) bit is clear. */
        if ((pte->pte_lo & LPTEL_NOEXEC) == 0)
                prot |= VM_PROT_EXECUTE;
        return (prot);
}

static ppc64_slb_entry_t *
slb_vsid_search(kvm_t *kd, uint64_t vsid)
{
        struct hpt_data *data;
        ppc64_slb_entry_t *slb;
        int i, n;

        data = PPC64_MMU_DATA(kd);
        slb = data->slbs;
        n = data->slbsize / sizeof(ppc64_slb_entry_t);
        vsid <<= SLBV_VSID_SHIFT;

        /* SLB search */
        for (i = 0; i < n; i++, slb++) {
                /* Check if valid and compare VSID */
                if ((slb->slbe & SLBE_VALID) &&
                    (slb->slbv & SLBV_VSID_MASK) == vsid)
                        break;
        }

        /* SLB not found */
        if (i == n) {
                _kvm_err(kd, kd->program,
                    "%s: segment not found for VSID 0x%jx",
                    __func__, (uintmax_t)vsid >> SLBV_VSID_SHIFT);
                return (NULL);
        }
        return (slb);
}

static u_long
get_ea(kvm_t *kd, ppc64_pt_entry_t *pte, u_long ptex)
{
        ppc64_slb_entry_t *slb;
        uint64_t ea, hash, vsid;
        int b, shift;

        /* Find SLB */
        vsid = PTEH_AVA_VSID(pte->pte_hi);
        if ((slb = slb_vsid_search(kd, vsid)) == NULL)
                return (~0UL);

        /* Get ESID part of EA */
        ea = slb->slbe & SLBE_ESID_MASK;

        b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;

        /*
         * If there are less than 64K PTEGs (16-bit), the upper bits of
         * the EA page must be obtained from PTEH's AVA.
         */
        if (kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) <
            0x10000U) {
                /*
                 * Add 0 to 5 EA bits, right after VSID.
                 * b == 12: 5 bits
                 * b == 24: 4 bits
                 */
                shift = AVA_PAGE_SHIFT(b);
                ea |= (PTEH_AVA_PAGE(pte->pte_hi) >> shift) <<
                    (SLBE_ESID_SHIFT - 5 + shift);
        }

        /*
         * Get the VA page from the hash and add it to the EA: since the
         * primary hash is vsid ^ va_page, va_page is recovered as hash ^ vsid.
         */
        hash = (ptex & ~7) >> 3;
        if (pte->pte_hi & LPTEH_HID)
                hash = ~hash & PTE_HASH_MASK;
        ea |= ((hash ^ (vsid & PTE_HASH_MASK)) << b) & ~SLBE_ESID_MASK;
        return (ea);
}

static int
ppc64mmu_hpt_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
        struct vmstate *vm;
        int ret;
        unsigned int pagesz;
        u_long dva, pa, va;
        u_long ptex, nptes;
        uint64_t vsid;

        ret = 0;
        vm = kd->vmst;
        nptes = vm->hdr.pmapsize / sizeof(ppc64_pt_entry_t);

        /* Walk through PTEs */
        for (ptex = 0; ptex < nptes; ptex++) {
                ppc64_pt_entry_t pte = pte_get(kd, ptex);
                if ((pte.pte_hi & LPTEH_VALID) == 0)
                        continue;

                /* Skip non-kernel related pages, as well as VRMA ones */
                vsid = PTEH_AVA_VSID(pte.pte_hi);
                if ((vsid & KERNEL_VSID_BIT) == 0 ||
                    (vsid >> PPC64_PAGE_SHIFT) == VSID_VRMA)
                        continue;

                /* Retrieve the page's VA (EA in PPC64 terminology) */
                if ((va = get_ea(kd, &pte, ptex)) == ~0UL)
                        goto out;

                /* Get PA and page size */
                if (pte.pte_hi & LPTEH_BIG) {
                        pa = pte.pte_lo & PTEL_LP_PA_MASK;
                        pagesz = LP_PAGE_SIZE;
                } else {
                        pa = pte.pte_lo & PTEL_PA_MASK;
                        pagesz = PPC64_PAGE_SIZE;
                }

                /* Get DMAP address */
                dva = vm->hdr.dmapbase + pa;

                if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
                    entry_to_prot(&pte), pagesz, 0))
                        goto out;
        }
        ret = 1;

out:
        return (ret);
}


static struct ppc64_mmu_ops ops = {
        .init = ppc64mmu_hpt_init,
        .cleanup = ppc64mmu_hpt_cleanup,
        .kvatop = ppc64mmu_hpt_kvatop,
        .walk_pages = ppc64mmu_hpt_walk_pages,
};
struct ppc64_mmu_ops *ppc64_mmu_ops_hpt = &ops;