/*-
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <powerpc/aim/mmu_oea64.h>

#include "mmu_if.h"
#include "moea64_if.h"
#include "ps3-hvcall.h"

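/*
 * Only the low 39 bits of the VSID participate in the PTEG hash; ptesync
 * orders prior stores to a PTE image before the hypercalls that read it.
 */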
#define VSID_HASH_MASK		0x0000007fffffffffUL
#define PTESYNC()		__asm __volatile("ptesync")

extern int ps3fb_remap(void);

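/*
 * ID of the lv1 virtual address space backing the kernel pmap, handed
 * back by lv1_construct_virtual_address_space() and passed to every
 * subsequent htab hypercall.
 */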
static uint64_t mps3_vas_id;

/*
 * Kernel MMU interface
 */

static void	mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
		    vm_offset_t kernelend);
static void	mps3_cpu_bootstrap(mmu_t mmup, int ap);
static void	mps3_pte_synch(mmu_t, uintptr_t pt, struct lpte *pvo_pt);
static void	mps3_pte_clear(mmu_t, uintptr_t pt, struct lpte *pvo_pt,
		    uint64_t vpn, uint64_t ptebit);
static void	mps3_pte_unset(mmu_t, uintptr_t pt, struct lpte *pvo_pt,
		    uint64_t vpn);
static void	mps3_pte_change(mmu_t, uintptr_t pt, struct lpte *pvo_pt,
		    uint64_t vpn);
static int	mps3_pte_insert(mmu_t, u_int ptegidx, struct lpte *pvo_pt);
static uintptr_t mps3_pvo_to_pte(mmu_t, const struct pvo_entry *pvo);

static mmu_method_t mps3_methods[] = {
	MMUMETHOD(mmu_bootstrap,	mps3_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,	mps3_cpu_bootstrap),

	MMUMETHOD(moea64_pte_synch,	mps3_pte_synch),
	MMUMETHOD(moea64_pte_clear,	mps3_pte_clear),
	MMUMETHOD(moea64_pte_unset,	mps3_pte_unset),
	MMUMETHOD(moea64_pte_change,	mps3_pte_change),
	MMUMETHOD(moea64_pte_insert,	mps3_pte_insert),
	MMUMETHOD(moea64_pvo_to_pte,	mps3_pvo_to_pte),

	{ 0, 0 }
};

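/*
 * Register as a child of the common 64-bit OEA pmap; every method not
 * overridden above is inherited from oea64_mmu.
 */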
MMU_DEF_INHERIT(ps3_mmu, "mmu_ps3", mps3_methods, 0, oea64_mmu);

static void
mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	uint64_t final_pteg_count;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

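	/*
	 * Have the hypervisor construct the hash table: request 2^20
	 * PTEGs and two page sizes, given as log2 values (24 = 16 MB,
	 * 16 = 64 KB) packed into the high bytes of the third argument.
	 */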
	lv1_construct_virtual_address_space(
	    20 /* log_2(moea64_pteg_count) */, 2 /* n page sizes */,
	    (24UL << 56) | (16UL << 48) /* page sizes 16 MB + 64 KB */,
	    &mps3_vas_id, &final_pteg_count
	);

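	/*
	 * Despite its name, final_pteg_count comes back as the size of
	 * the constructed hash table in bytes; dividing by the size of
	 * a PTEG (eight PTEs) yields the actual group count.
	 */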
	moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
mps3_cpu_bootstrap(mmu_t mmup, int ap)
{
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
	int i;

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Destroy the loader's address space if we are coming up for
	 * the first time, and redo the FB mapping so we can continue
	 * having a console.
	 */

	if (!ap)
		lv1_destruct_virtual_address_space(0);

	lv1_select_virtual_address_space(mps3_vas_id);

	if (!ap)
		ps3fb_remap();

	/*
	 * Install kernel SLB entries
	 */

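	/*
	 * slbia leaves SLB entry 0 intact, so read its ESID back with
	 * slbmfee and invalidate it explicitly with slbie before
	 * reloading the kernel entries.
	 */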
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
}

static void
mps3_pte_synch(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt)
{
	uint64_t halfbucket[4], rcbits;

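	/*
	 * The hypervisor hands back PTEs four at a time: fetch the
	 * pte_hi words of the aligned half-bucket containing this slot,
	 * plus the packed R/C bits of the entries' second halves.
	 */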
	PTESYNC();
	lv1_read_htab_entries(mps3_vas_id, slot & ~0x3UL, &halfbucket[0],
	    &halfbucket[1], &halfbucket[2], &halfbucket[3], &rcbits);

	/*
	 * rcbits contains the low 12 bits of each PTE's second
	 * doubleword, spaced at 16-bit intervals.
	 */

	KASSERT((halfbucket[slot & 0x3] & LPTE_AVPN_MASK) ==
	    (pvo_pt->pte_hi & LPTE_AVPN_MASK),
	    ("PTE upper word %#lx != %#lx\n",
	    halfbucket[slot & 0x3], pvo_pt->pte_hi));

	pvo_pt->pte_lo |= (rcbits >> ((3 - (slot & 0x3))*16)) &
	    (LPTE_CHG | LPTE_REF);
}

static void
mps3_pte_clear(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn,
    uint64_t ptebit)
{

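	/*
	 * Clear the requested R/C bit (LPTE_REF or LPTE_CHG) by
	 * rewriting the entire entry through the hypervisor.
	 */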
	lv1_write_htab_entry(mps3_vas_id, slot, pvo_pt->pte_hi,
	    pvo_pt->pte_lo & ~ptebit);
}

static void
mps3_pte_unset(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn)
{

	mps3_pte_synch(mmu, slot, pvo_pt);
	pvo_pt->pte_hi &= ~LPTE_VALID;
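	/*
	 * Zeroing the slot invalidates the mapping; lv1 owns the real
	 * hash table and takes care of the TLB invalidation a bare-metal
	 * pmap would do with tlbie.
	 */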
	lv1_write_htab_entry(mps3_vas_id, slot, 0, 0);
	moea64_pte_valid--;
}

static void
mps3_pte_change(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn)
{

	mps3_pte_synch(mmu, slot, pvo_pt);
	lv1_write_htab_entry(mps3_vas_id, slot, pvo_pt->pte_hi,
	    pvo_pt->pte_lo);
}

static int
mps3_pte_insert(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	int result;
	struct lpte evicted;
	struct pvo_entry *pvo;
	uint64_t index;

	pvo_pt->pte_hi |= LPTE_VALID;
	pvo_pt->pte_hi &= ~LPTE_HID;
	evicted.pte_hi = 0;
	PTESYNC();
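	/*
	 * The hypervisor picks the slot, evicting a victim if both PTEGs
	 * are full; entries carrying LPTE_LOCKED or LPTE_WIRED are exempt
	 * from eviction, and any displaced PTE is returned in 'evicted'.
	 */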
	result = lv1_insert_htab_entry(mps3_vas_id, ptegidx << 3,
	    pvo_pt->pte_hi, pvo_pt->pte_lo, LPTE_LOCKED | LPTE_WIRED, 0,
	    &index, &evicted.pte_hi, &evicted.pte_lo);

	if (result != 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("mps3_pte_insert: overflow (%d)", result);
		return (-1);
	}

	/*
	 * See where we ended up.
	 */
	if (index >> 3 != ptegidx)
		pvo_pt->pte_hi |= LPTE_HID;

	moea64_pte_valid++;

	if (!evicted.pte_hi)
		return (index & 0x7);

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */

	ptegidx = index >> 3; /* Where the sacrifice PTE was found */
	if (evicted.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask; /* PTEs indexed by primary */

	KASSERT((evicted.pte_hi & (LPTE_WIRED | LPTE_LOCKED)) == 0,
	    ("Evicted a wired PTE"));

	result = 0;
	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (!PVO_PTEGIDX_ISSET(pvo))
			continue;

		if (pvo->pvo_pte.lpte.pte_hi == (evicted.pte_hi | LPTE_VALID)) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_VALID;
			pvo->pvo_pte.lpte.pte_lo |=
			    evicted.pte_lo & (LPTE_REF | LPTE_CHG);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_valid--;
			moea64_pte_overflow++;
			result = 1;
			break;
		}
	}

	KASSERT(result == 1, ("PVO for sacrifice PTE not found"));

	return (index & 0x7);
}

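/*
 * Classic hashed page table function: the primary PTEG index is the XOR
 * of the low VSID bits with the virtual page number, masked to the table
 * size.
 */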
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static uintptr_t
mps3_pvo_to_pte(mmu_t mmu, const struct pvo_entry *pvo)
{
	uint64_t vsid;
	u_int ptegidx;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	return ((ptegidx << 3) | PVO_PTEGIDX_GET(pvo));
}