/*-
 * SPDX-License-Identifier: BSD-2-Clause AND BSD-4-Clause
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"

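/*
 * PowerPC ordering primitives used below: ptesync orders prior page table
 * stores before subsequent accesses and tlbie operations, tlbsync waits
 * for TLB invalidations to be visible to other processors, sync is a full
 * memory barrier, and eieio enforces ordering between the two halves of a
 * PTE update.
 */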
#define PTESYNC()   __asm __volatile("ptesync");
#define TLBSYNC()   __asm __volatile("tlbsync; ptesync");
#define SYNC()      __asm __volatile("sync");
#define EIEIO()     __asm __volatile("eieio");

#define VSID_HASH_MASK  0x0000007fffffffffULL

/* POWER9 only permits a 64k partition table size. */
#define PART_SIZE   0x10000

/* Actual page sizes (to be used with tlbie, when L=0) */
#define AP_4K       0x00
#define AP_16M      0x80

#define LPTE_KERNEL_VSID_BIT    (KERNEL_VSID_BIT << \
                                (16 - (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)))

/* Abbreviated Virtual Address Page - high bits */
#define LPTE_AVA_PGNHI_MASK     0x0000000000000F80ULL
#define LPTE_AVA_PGNHI_SHIFT    7

/* Effective Address Page - low bits */
#define EA_PAGELO_MASK          0x7ffULL
#define EA_PAGELO_SHIFT         11

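/*
 * CPU quirks detected at bootstrap: moea64_crop_tlbie is set on CPUs that
 * require the upper 16 bits of the tlbie operand to be cleared, and
 * moea64_need_lock is set on CPUs that require tlbie sequences to be
 * issued by only one processor at a time.
 */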
static bool moea64_crop_tlbie;
static bool moea64_need_lock;

/*
 * The tlbie instruction has two forms: an old one used by PowerISA
 * 2.03 and prior, and a newer one used by PowerISA 2.06 and later.
 * We need to support both.
 */
static __inline void
TLBIE(uint64_t vpn, uint64_t oldptehi)
{
#ifndef __powerpc64__
    register_t vpn_hi, vpn_lo;
    register_t msr;
    register_t scratch, intr;
#endif

    static volatile u_int tlbie_lock = 0;
    bool need_lock = moea64_need_lock;

    vpn <<= ADDR_PIDX_SHFT;

    /* Hobo spinlock: we need stronger guarantees than mutexes provide */
    if (need_lock) {
        while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
        isync(); /* Flush instruction queue once lock acquired */

        if (moea64_crop_tlbie) {
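            /* These CPUs cannot accept a tlbie operand with the
             * upper 16 bits set, so crop them off. */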
            vpn &= ~(0xffffULL << 48);
#ifdef __powerpc64__
            if ((oldptehi & LPTE_BIG) != 0)
                __asm __volatile("tlbie %0, 1" :: "r"(vpn) :
                    "memory");
            else
                __asm __volatile("tlbie %0, 0" :: "r"(vpn) :
                    "memory");
            __asm __volatile("eieio; tlbsync; ptesync" :::
                "memory");
            goto done;
#endif
        }
    }

#ifdef __powerpc64__
    /*
     * If this page has LPTE_BIG set and is from userspace, then
     * it must be a superpage with 4KB base/16MB actual page size.
     */
    if ((oldptehi & LPTE_BIG) != 0 &&
        (oldptehi & LPTE_KERNEL_VSID_BIT) == 0)
        vpn |= AP_16M;

    /*
     * Explicitly clobber r0.  The tlbie instruction has two forms: an old
     * one used by PowerISA 2.03 and prior, and a newer one used by PowerISA
     * 2.06 (maybe 2.05?) and later.  We need to support both, and it just
     * so happens that since we use 4k pages we can simply zero out r0, and
     * clobber it, and the assembler will interpret the single-operand form
     * of tlbie as having RB set, and everything else as 0.  The RS operand
     * in the newer form is in the same position as the L (page size) bit of
     * the old form, so as long as RS is 0, we're good on both sides.
     */
    __asm __volatile("li 0, 0 \n tlbie %0, 0" :: "r"(vpn) : "r0", "memory");
    __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
done:

#else
    vpn_hi = (uint32_t)(vpn >> 32);
    vpn_lo = (uint32_t)vpn;

    intr = intr_disable();
    __asm __volatile("\
        mfmsr %0; \
        mr %1, %0; \
        insrdi %1,%5,1,0; \
        mtmsrd %1; isync; \
        \
        sld %1,%2,%4; \
        or %1,%1,%3; \
        tlbie %1; \
        \
        mtmsrd %0; isync; \
        eieio; \
        tlbsync; \
        ptesync;"
    : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
    : "memory");
    intr_restore(intr);
#endif

    /* No barriers or special ops -- taken care of by ptesync above */
    if (need_lock)
        tlbie_lock = 0;
}

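/*
 * DISABLE_TRANS clears MSR[DR] so that data accesses go untranslated
 * (real mode); ENABLE_TRANS restores the saved MSR value.
 */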
#define DISABLE_TRANS(msr)  msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)   mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock moea64_eviction_lock;

static volatile struct pate *moea64_part_table;

/*
 * Dump function.
 */
static void *moea64_dump_pmap_native(void *ctx, void *buf,
    u_long *nbytes);

/*
 * PTE calls.
 */
static int64_t  moea64_pte_insert_native(struct pvo_entry *);
static int64_t  moea64_pte_synch_native(struct pvo_entry *);
static int64_t  moea64_pte_clear_native(struct pvo_entry *, uint64_t);
static int64_t  moea64_pte_replace_native(struct pvo_entry *, int);
static int64_t  moea64_pte_unset_native(struct pvo_entry *);
static int64_t  moea64_pte_insert_sp_native(struct pvo_entry *);
static int64_t  moea64_pte_unset_sp_native(struct pvo_entry *);
static int64_t  moea64_pte_replace_sp_native(struct pvo_entry *);

/*
 * Utility routines.
 */
static void moea64_bootstrap_native(
    vm_offset_t kernelstart, vm_offset_t kernelend);
static void moea64_cpu_bootstrap_native(int ap);
static void tlbia(void);
static void moea64_install_native(void);

static struct pmap_funcs moea64_native_methods = {
    .install = moea64_install_native,

    /* Internal interfaces */
    .bootstrap = moea64_bootstrap_native,
    .cpu_bootstrap = moea64_cpu_bootstrap_native,
    .dumpsys_dump_pmap = moea64_dump_pmap_native,
};

static struct moea64_funcs moea64_native_funcs = {
    .pte_synch = moea64_pte_synch_native,
    .pte_clear = moea64_pte_clear_native,
    .pte_unset = moea64_pte_unset_native,
    .pte_replace = moea64_pte_replace_native,
    .pte_insert = moea64_pte_insert_native,
    .pte_insert_sp = moea64_pte_insert_sp_native,
    .pte_unset_sp = moea64_pte_unset_sp_native,
    .pte_replace_sp = moea64_pte_replace_sp_native,
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, oea64_mmu);

static void
moea64_install_native(void)
{

    /* Install the MOEA64 ops. */
    moea64_ops = &moea64_native_funcs;

    moea64_install();
}

static int64_t
moea64_pte_synch_native(struct pvo_entry *pvo)
{
    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
    uint64_t ptelo, pvo_ptevpn;

    PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

    pvo_ptevpn = moea64_pte_vpn_from_pvo_vpn(pvo);

    rw_rlock(&moea64_eviction_lock);
    if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != pvo_ptevpn) {
        /* Evicted */
        rw_runlock(&moea64_eviction_lock);
        return (-1);
    }

    PTESYNC();
    ptelo = be64toh(pt->pte_lo);

    rw_runlock(&moea64_eviction_lock);

    return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(struct pvo_entry *pvo, uint64_t ptebit)
{
    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
    struct lpte properpt;
    uint64_t ptelo;

    PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

    moea64_pte_from_pvo(pvo, &properpt);

    rw_rlock(&moea64_eviction_lock);
    if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
        (properpt.pte_hi & LPTE_AVPN_MASK)) {
        /* Evicted */
        rw_runlock(&moea64_eviction_lock);
        return (-1);
    }

    if (ptebit == LPTE_REF) {
        /* See "Resetting the Reference Bit" in arch manual */
        PTESYNC();
        /* 2-step here safe: precision is not guaranteed */
        ptelo = be64toh(pt->pte_lo);

        /* One-byte store to avoid touching the C bit */
        ((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
            ((uint8_t *)(&properpt.pte_lo))[6];
#else
            ((uint8_t *)(&properpt.pte_lo))[1];
#endif
        rw_runlock(&moea64_eviction_lock);

        critical_enter();
        TLBIE(pvo->pvo_vpn, properpt.pte_hi);
        critical_exit();
    } else {
        rw_runlock(&moea64_eviction_lock);
        ptelo = moea64_pte_unset_native(pvo);
        moea64_pte_insert_native(pvo);
    }

    return (ptelo & (LPTE_REF | LPTE_CHG));
}

static __always_inline int64_t
moea64_pte_unset_locked(volatile struct lpte *pt, uint64_t vpn)
{
    uint64_t ptelo, ptehi;

    /*
     * Invalidate the pte, briefly locking it to collect RC bits. No
     * atomics needed since this is protected against eviction by the lock.
     */
    isync();
    critical_enter();
    ptehi = (be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED;
    pt->pte_hi = htobe64(ptehi);
    PTESYNC();
    TLBIE(vpn, ptehi);
    ptelo = be64toh(pt->pte_lo);
    *((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
    critical_exit();

    /* Keep statistics */
    STAT_MOEA64(moea64_pte_valid--);

    return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_unset_native(struct pvo_entry *pvo)
{
    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
    int64_t ret;
    uint64_t pvo_ptevpn;

    pvo_ptevpn = moea64_pte_vpn_from_pvo_vpn(pvo);

    rw_rlock(&moea64_eviction_lock);

    if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != pvo_ptevpn) {
        /* Evicted */
        STAT_MOEA64(moea64_pte_overflow--);
        ret = -1;
    } else
        ret = moea64_pte_unset_locked(pt, pvo->pvo_vpn);

    rw_runlock(&moea64_eviction_lock);

    return (ret);
}

static int64_t
moea64_pte_replace_inval_native(struct pvo_entry *pvo,
    volatile struct lpte *pt)
{
    struct lpte properpt;
    uint64_t ptelo, ptehi;

    moea64_pte_from_pvo(pvo, &properpt);

    rw_rlock(&moea64_eviction_lock);
    if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
        (properpt.pte_hi & LPTE_AVPN_MASK)) {
        /* Evicted */
        STAT_MOEA64(moea64_pte_overflow--);
        rw_runlock(&moea64_eviction_lock);
        return (-1);
    }

    /*
     * Replace the pte, briefly locking it to collect RC bits. No
     * atomics needed since this is protected against eviction by the lock.
     */
    isync();
    critical_enter();
    ptehi = (be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED;
    pt->pte_hi = htobe64(ptehi);
    PTESYNC();
    TLBIE(pvo->pvo_vpn, ptehi);
    ptelo = be64toh(pt->pte_lo);
    EIEIO();
    pt->pte_lo = htobe64(properpt.pte_lo);
    EIEIO();
    pt->pte_hi = htobe64(properpt.pte_hi); /* Release lock */
    PTESYNC();
    critical_exit();
    rw_runlock(&moea64_eviction_lock);

    return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(struct pvo_entry *pvo, int flags)
{
    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
    struct lpte properpt;
    int64_t ptelo;

    if (flags == 0) {
        /* Just some software bits changing. */
        moea64_pte_from_pvo(pvo, &properpt);

        rw_rlock(&moea64_eviction_lock);
        if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
            (properpt.pte_hi & LPTE_AVPN_MASK)) {
            rw_runlock(&moea64_eviction_lock);
            return (-1);
        }
        pt->pte_hi = htobe64(properpt.pte_hi);
        ptelo = be64toh(pt->pte_lo);
        rw_runlock(&moea64_eviction_lock);
    } else {
        /* Otherwise, need reinsertion and deletion */
        ptelo = moea64_pte_replace_inval_native(pvo, pt);
    }

    return (ptelo);
}

static void
moea64_cpu_bootstrap_native(int ap)
{
    int i = 0;
#ifdef __powerpc64__
    struct slb *slb = PCPU_GET(aim.slb);
    register_t seg0;
#endif

    /*
     * Initialize segment registers and MMU
     */

    mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

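    /*
     * Make sure POWER9 runs in hash (HPT) translation mode by clearing
     * the radix bit in HID0.
     */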
    switch (mfpvr() >> 16) {
    case IBMPOWER9:
        mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_RADIX);
        break;
    }

    /*
     * Install kernel SLB entries
     */

#ifdef __powerpc64__
    __asm __volatile ("slbia");
    __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
        "r"(0));

    for (i = 0; i < n_slbs; i++) {
        if (!(slb[i].slbe & SLBE_VALID))
            continue;

        __asm __volatile ("slbmte %0, %1" ::
            "r"(slb[i].slbv), "r"(slb[i].slbe));
    }
#else
    for (i = 0; i < 16; i++)
        mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

    /*
     * Install page table
     */

    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
        mtspr(SPR_PTCR,
            ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
            flsl((PART_SIZE >> 12) - 1));
    else
        __asm __volatile ("ptesync; mtsdr1 %0; isync"
            :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
            | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
    tlbia();
}

static void
moea64_bootstrap_native(vm_offset_t kernelstart, vm_offset_t kernelend)
{
    vm_size_t size;
    vm_offset_t off;
    vm_paddr_t pa;
    register_t msr;

    moea64_early_bootstrap(kernelstart, kernelend);

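    /*
     * Work out which TLB invalidation quirks this CPU needs: whether the
     * tlbie operand must be cropped and whether tlbie must be issued
     * under a global lock.
     */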
    switch (mfpvr() >> 16) {
    case IBMPOWER9:
        moea64_need_lock = false;
        break;
    case IBMPOWER4:
    case IBMPOWER4PLUS:
    case IBM970:
    case IBM970FX:
    case IBM970GX:
    case IBM970MP:
        moea64_crop_tlbie = true;
    default:
        moea64_need_lock = true;
    }
    /*
     * Allocate PTEG table.
     */

    size = moea64_pteg_count * sizeof(struct lpteg);
    CTR2(KTR_PMAP, "moea64_bootstrap: %lu PTEGs, %lu bytes",
        moea64_pteg_count, size);
    rw_init(&moea64_eviction_lock, "pte eviction");

    /*
     * We now need to allocate memory, but that memory must itself be
     * covered by the page table we are about to allocate, and we have no
     * BAT to map it. Drop to data real mode for a minute as a measure of
     * last resort. We do this a couple of times.
     */
    /*
     * PTEG table must be aligned on a 256k boundary, but can be placed
     * anywhere with that alignment on POWER ISA 3+ systems. On earlier
     * systems, offset addition is done by the CPU with bitwise OR rather
     * than addition, so the table must also be aligned on a boundary of
     * its own size. Pick the larger of the two, which works on all
     * systems.
     */
    moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
        MAX(256*1024, size));
    if (hw_direct_map)
        moea64_pteg_table =
            (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
    /* Allocate partition table (ISA 3.0). */
    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
        moea64_part_table =
            (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
        moea64_part_table =
            (struct pate *)PHYS_TO_DMAP((vm_offset_t)moea64_part_table);
    }
    DISABLE_TRANS(msr);
    bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
        sizeof(struct lpteg));
    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
        bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
        moea64_part_table[0].pagetab = htobe64(
            (DMAP_TO_PHYS((vm_offset_t)moea64_pteg_table)) |
            (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11)));
    }
    ENABLE_TRANS(msr);

    CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

    moea64_mid_bootstrap(kernelstart, kernelend);

    /*
     * Add a mapping for the page table itself if there is no direct map.
     */
    if (!hw_direct_map) {
        size = moea64_pteg_count * sizeof(struct lpteg);
        off = (vm_offset_t)(moea64_pteg_table);
        DISABLE_TRANS(msr);
        for (pa = off; pa < off + size; pa += PAGE_SIZE)
            pmap_kenter(pa, pa);
        ENABLE_TRANS(msr);
    }

    /* Bring up virtual memory */
    moea64_late_bootstrap(kernelstart, kernelend);
}

static void
tlbia(void)
{
    vm_offset_t i;
#ifndef __powerpc64__
    register_t msr, scratch;
#endif

    i = 0xc00; /* IS = 11 */
    switch (mfpvr() >> 16) {
    case IBM970:
    case IBM970FX:
    case IBM970MP:
    case IBM970GX:
    case IBMPOWER4:
    case IBMPOWER4PLUS:
    case IBMPOWER5:
    case IBMPOWER5PLUS:
        i = 0; /* IS not supported */
        break;
    }

    TLBSYNC();

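    /* Invalidate the whole TLB, one tlbiel per congruence class. */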
    for (; i < 0x400000; i += 0x00001000) {
#ifdef __powerpc64__
        __asm __volatile("tlbiel %0" :: "r"(i));
#else
        __asm __volatile("\
            mfmsr %0; \
            mr %1, %0; \
            insrdi %1,%3,1,0; \
            mtmsrd %1; \
            isync; \
            \
            tlbiel %2; \
            \
            mtmsrd %0; \
            isync;"
        : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
    }

    EIEIO();
    TLBSYNC();
}

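/*
 * Try to lock a PTE by storing LPTE_LOCKED into its high word, but only if
 * none of the bits in 'bitmask' are currently set there.  Returns nonzero
 * on success and passes the previous value of pte_hi back through *oldhi.
 */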
static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
    int ret;
#ifdef __powerpc64__
    uint64_t temp;
#else
    uint32_t oldhihalf;
#endif

    /*
     * Note: in principle, if just the locked bit were set here, we
     * could avoid needing the eviction lock. However, eviction occurs
     * so rarely that it isn't worth bothering about in practice.
     */
#ifdef __powerpc64__
    /*
     * Note: Success of this sequence has the side effect of invalidating
     * the PTE, as we are setting it to LPTE_LOCKED and discarding the
     * other bits, including LPTE_V.
     */
    __asm __volatile (
        "1:\tldarx %1, 0, %3\n\t"       /* load old value */
        "and. %0,%1,%4\n\t"             /* check if any bits set */
        "bne 2f\n\t"                    /* exit if any set */
        "stdcx. %5, 0, %3\n\t"          /* attempt to store */
        "bne- 1b\n\t"                   /* spin if failed */
        "li %0, 1\n\t"                  /* success - retval = 1 */
        "b 3f\n\t"                      /* we've succeeded */
        "2:\n\t"
        "stdcx. %1, 0, %3\n\t"          /* clear reservation (74xx) */
        "li %0, 0\n\t"                  /* failure - retval = 0 */
        "3:\n\t"
        : "=&r" (ret), "=&r"(temp), "=m" (pte->pte_hi)
        : "r" ((volatile char *)&pte->pte_hi),
          "r" (htobe64(bitmask)), "r" (htobe64(LPTE_LOCKED)),
          "m" (pte->pte_hi)
        : "cr0", "cr1", "cr2", "memory");
    *oldhi = be64toh(temp);
#else
    /*
     * This code is used on bridge mode only.
     */
    __asm __volatile (
        "1:\tlwarx %1, 0, %3\n\t"       /* load old value */
        "and. %0,%1,%4\n\t"             /* check if any bits set */
        "bne 2f\n\t"                    /* exit if any set */
        "stwcx. %5, 0, %3\n\t"          /* attempt to store */
        "bne- 1b\n\t"                   /* spin if failed */
        "li %0, 1\n\t"                  /* success - retval = 1 */
        "b 3f\n\t"                      /* we've succeeded */
        "2:\n\t"
        "stwcx. %1, 0, %3\n\t"          /* clear reservation (74xx) */
        "li %0, 0\n\t"                  /* failure - retval = 0 */
        "3:\n\t"
        : "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
        : "r" ((volatile char *)&pte->pte_hi + 4),
          "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
          "m" (pte->pte_hi)
        : "cr0", "cr1", "cr2", "memory");

    *oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;
#endif

    return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
    volatile struct lpte *pt;
    uint64_t oldptehi, va;
    uintptr_t k;
    int i, j;

    /* Start at a random slot */
    i = mftb() % 8;
    for (j = 0; j < 8; j++) {
        k = slotbase + (i + j) % 8;
        pt = &moea64_pteg_table[k];
        /* Invalidate and seize lock only if no bits in mask set */
        if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
            break;
    }

    if (j == 8)
        return (-1);

    if (oldptehi & LPTE_VALID) {
        KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
        /*
         * Need to invalidate old entry completely: see
         * "Modifying a Page Table Entry". Need to reconstruct
         * the virtual address for the outgoing entry to do that.
         */
        va = oldptehi >> (ADDR_SR_SHFT - ADDR_API_SHFT64);
        if (oldptehi & LPTE_HID)
            va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
                (ADDR_PIDX >> ADDR_PIDX_SHFT);
        else
            va = ((k >> 3) ^ va) & (ADDR_PIDX >> ADDR_PIDX_SHFT);
        va |= (oldptehi & LPTE_AVPN_MASK) <<
            (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
        PTESYNC();
        TLBIE(va, oldptehi);
        STAT_MOEA64(moea64_pte_valid--);
        STAT_MOEA64(moea64_pte_overflow++);
    }

    /*
     * Update the PTE as per "Adding a Page Table Entry". Lock is released
     * by setting the high doubleword.
     */
    pt->pte_lo = htobe64(pvo_pt->pte_lo);
    EIEIO();
    pt->pte_hi = htobe64(pvo_pt->pte_hi);
    PTESYNC();

    /* Keep statistics */
    STAT_MOEA64(moea64_pte_valid++);

    return (k);
}

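/*
 * Try to place 'insertpt' in the mapping's primary PTEG, then in the
 * secondary PTEG.  A slot may only be claimed (and any previous occupant
 * evicted) if the occupant has none of the bits in 'mask' set; wired and
 * locked entries are never evicted.  Returns 0 on success or -1 if both
 * PTEGs are full of unevictable entries.
 */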
static __always_inline int64_t
moea64_pte_insert_locked(struct pvo_entry *pvo, struct lpte *insertpt,
    uint64_t mask)
{
    uintptr_t slot;

    /*
     * First try primary hash.
     */
    slot = moea64_insert_to_pteg_native(insertpt, pvo->pvo_pte.slot,
        mask | LPTE_WIRED | LPTE_LOCKED);
    if (slot != -1) {
        pvo->pvo_pte.slot = slot;
        return (0);
    }

    /*
     * Now try secondary hash.
     */
    pvo->pvo_vaddr ^= PVO_HID;
    insertpt->pte_hi ^= LPTE_HID;
    pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
    slot = moea64_insert_to_pteg_native(insertpt, pvo->pvo_pte.slot,
        mask | LPTE_WIRED | LPTE_LOCKED);
    if (slot != -1) {
        pvo->pvo_pte.slot = slot;
        return (0);
    }

    return (-1);
}

static int64_t
moea64_pte_insert_native(struct pvo_entry *pvo)
{
    struct lpte insertpt;
    int64_t ret;

    /* Initialize PTE */
    moea64_pte_from_pvo(pvo, &insertpt);

    /* Make sure further insertion is locked out during evictions */
    rw_rlock(&moea64_eviction_lock);

    pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
    ret = moea64_pte_insert_locked(pvo, &insertpt, LPTE_VALID);
    if (ret == -1) {
        /*
         * Out of luck. Find a PTE to sacrifice.
         */

        /* Lock out all insertions for a bit */
        if (!rw_try_upgrade(&moea64_eviction_lock)) {
            rw_runlock(&moea64_eviction_lock);
            rw_wlock(&moea64_eviction_lock);
        }
        /* Don't evict large pages */
        ret = moea64_pte_insert_locked(pvo, &insertpt, LPTE_BIG);
        rw_wunlock(&moea64_eviction_lock);
        /* No freeable slots in either PTEG? We're hosed. */
        if (ret == -1)
            panic("moea64_pte_insert: overflow");
    } else
        rw_runlock(&moea64_eviction_lock);

    return (0);
}

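/*
 * Minidump support: hand back successive chunks of the PTEG table so the
 * dump code can write the hashed page table out with the core.
 */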
static void *
moea64_dump_pmap_native(void *ctx, void *buf, u_long *nbytes)
{
    struct dump_context *dctx;
    u_long ptex, ptex_end;

    dctx = (struct dump_context *)ctx;
    ptex = dctx->ptex;
    ptex_end = ptex + dctx->blksz / sizeof(struct lpte);
    ptex_end = MIN(ptex_end, dctx->ptex_end);
    *nbytes = (ptex_end - ptex) * sizeof(struct lpte);

    if (*nbytes == 0)
        return (NULL);

    dctx->ptex = ptex_end;
    return (__DEVOLATILE(struct lpte *, moea64_pteg_table) + ptex);
}

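/*
 * Reconstruct the VPN of a mapping from its PTE high word and PTEG slot,
 * combining the AVA bits with the low page-number bits recovered by
 * running the PTEG hash backwards.
 */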
static __always_inline uint64_t
moea64_vpn_from_pte(uint64_t ptehi, uintptr_t slot)
{
    uint64_t pgn, pgnlo, vsid;

    vsid = (ptehi & LPTE_AVA_MASK) >> LPTE_VSID_SHIFT;
    if ((ptehi & LPTE_HID) != 0)
        slot ^= (moea64_pteg_mask << 3);
    pgnlo = ((vsid & VSID_HASH_MASK) ^ (slot >> 3)) & EA_PAGELO_MASK;
    pgn = ((ptehi & LPTE_AVA_PGNHI_MASK) << (EA_PAGELO_SHIFT -
        LPTE_AVA_PGNHI_SHIFT)) | pgnlo;
    return ((vsid << 16) | pgn);
}

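/*
 * Remove every constituent PTE of the superpage starting at PVO_VADDR(pvo),
 * accumulating the referenced/changed bits of the mappings as we go.  The
 * caller holds the eviction lock.
 */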
static __always_inline int64_t
moea64_pte_unset_sp_locked(struct pvo_entry *pvo)
{
    volatile struct lpte *pt;
    uint64_t ptehi, refchg, vpn;
    vm_offset_t eva;

    refchg = 0;
    eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

    for (; pvo != NULL && PVO_VADDR(pvo) < eva;
        pvo = RB_NEXT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo)) {
        pt = moea64_pteg_table + pvo->pvo_pte.slot;
        ptehi = be64toh(pt->pte_hi);
        if ((ptehi & LPTE_AVPN_MASK) !=
            moea64_pte_vpn_from_pvo_vpn(pvo)) {
            /* Evicted: invalidate new entry */
            STAT_MOEA64(moea64_pte_overflow--);
            vpn = moea64_vpn_from_pte(ptehi, pvo->pvo_pte.slot);
            CTR1(KTR_PMAP, "Evicted page in pte_unset_sp: vpn=%jx",
                (uintmax_t)vpn);
            /* Assume evicted page was modified */
            refchg |= LPTE_CHG;
        } else
            vpn = pvo->pvo_vpn;

        refchg |= moea64_pte_unset_locked(pt, vpn);
    }

    return (refchg);
}

static int64_t
moea64_pte_unset_sp_native(struct pvo_entry *pvo)
{
    uint64_t refchg;

    PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
    KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
        ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

    rw_rlock(&moea64_eviction_lock);
    refchg = moea64_pte_unset_sp_locked(pvo);
    rw_runlock(&moea64_eviction_lock);

    return (refchg);
}

static __always_inline int64_t
moea64_pte_insert_sp_locked(struct pvo_entry *pvo)
{
    struct lpte insertpt;
    int64_t ret;
    vm_offset_t eva;

    eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

    for (; pvo != NULL && PVO_VADDR(pvo) < eva;
        pvo = RB_NEXT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo)) {
        moea64_pte_from_pvo(pvo, &insertpt);
        pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */

        ret = moea64_pte_insert_locked(pvo, &insertpt, LPTE_VALID);
        if (ret == -1) {
            /* Lock out all insertions for a bit */
            if (!rw_try_upgrade(&moea64_eviction_lock)) {
                rw_runlock(&moea64_eviction_lock);
                rw_wlock(&moea64_eviction_lock);
            }
            /* Don't evict large pages */
            ret = moea64_pte_insert_locked(pvo, &insertpt,
                LPTE_BIG);
            rw_downgrade(&moea64_eviction_lock);
            /* No freeable slots in either PTEG? We're hosed. */
            if (ret == -1)
                panic("moea64_pte_insert_sp: overflow");
        }
    }

    return (0);
}

static int64_t
moea64_pte_insert_sp_native(struct pvo_entry *pvo)
{
    PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
    KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
        ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

    rw_rlock(&moea64_eviction_lock);
    moea64_pte_insert_sp_locked(pvo);
    rw_runlock(&moea64_eviction_lock);

    return (0);
}

static int64_t
moea64_pte_replace_sp_native(struct pvo_entry *pvo)
{
    uint64_t refchg;

    PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
    KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
        ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

    rw_rlock(&moea64_eviction_lock);
    refchg = moea64_pte_unset_sp_locked(pvo);
    moea64_pte_insert_sp_locked(pvo);
    rw_runlock(&moea64_eviction_lock);

    return (refchg);
}