/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/tree.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);

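/*
 * Each user pmap keeps its segment (SLB) mappings in a per-pmap splay
 * tree of slbcontainer nodes, keyed on the entry's slbe (ESID) word.
 */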
struct slbcontainer {
	struct slb slb;
	SPLAY_ENTRY(slbcontainer) slb_node;
};

static int slb_compare(struct slbcontainer *a, struct slbcontainer *b);
static void slb_zone_init(void *);

SPLAY_PROTOTYPE(slb_tree, slbcontainer, slb_node, slb_compare);
SPLAY_GENERATE(slb_tree, slbcontainer, slb_node, slb_compare);

uma_zone_t slb_zone;
uma_zone_t slb_cache_zone;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

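/*
 * Find the SLB entry (ESID/VSID pair) covering 'va' and copy it into
 * '*slb'.  Kernel entries are computed directly; user entries are looked
 * up in the pmap's splay tree.  Returns 0 on success and -1 if the user
 * pmap has no mapping for this segment.
 */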
int
va_to_slb_entry(pmap_t pm, vm_offset_t va, struct slb *slb)
{
	struct slbcontainer cont, *found;
	uint64_t esid;

	esid = (uintptr_t)va >> ADDR_SR_SHFT;
	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	if (pm == kernel_pmap) {
		/* Set kernel VSID to deterministic value */
		slb->slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;

		/* Figure out if this is a large-page mapping */
		if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
			/*
			 * XXX: If we have set up a direct map, assume
			 * all physical memory is mapped with large pages.
			 */
			if (mem_valid(va, 0) == 0)
				slb->slbv |= SLBV_L;
		}

		return (0);
	}

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	cont.slb.slbe = slb->slbe;
	found = SPLAY_FIND(slb_tree, &pm->pm_slbtree, &cont);

	if (found == NULL)
		return (-1);

	slb->slbv = found->slb.slbv;
	return (0);
}

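/*
 * Return the VSID for 'va' in pmap 'pm'.  The kernel VSID is derived
 * deterministically from the ESID; for user pmaps, a new VSID is
 * allocated on demand if the segment has no entry yet.
 */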
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb entry;
	int large;

	/* Shortcut kernel case */
	if (pm == kernel_pmap) {
		large = 0;
		if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS &&
		    mem_valid(va, 0) == 0)
			large = 1;

		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT, large));
	}

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	if (va_to_slb_entry(pm, va, &entry) != 0)
		return (allocate_vsid(pm, (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry.slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

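/*
 * Allocate a VSID for the given ESID, build the corresponding SLB entry,
 * and record it (in the splay tree for user pmaps).  The entry is also
 * pre-spilled into the appropriate SLB cache, since it is likely to be
 * needed immediately.
 */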
uint64_t
allocate_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid;
	struct slbcontainer *slb_entry, kern_entry;
	struct slb *prespill;

	prespill = NULL;

	if (pm == kernel_pmap) {
		vsid = va_to_vsid(pm, esid << ADDR_SR_SHFT);
		slb_entry = &kern_entry;
		prespill = PCPU_GET(slb);
	} else {
		vsid = moea64_get_unique_vsid();
		slb_entry = uma_zalloc(slb_zone, M_NOWAIT);

		if (slb_entry == NULL)
			panic("Could not allocate SLB mapping!");

		prespill = pm->pm_slb;
	}

	slb_entry->slb.slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	slb_entry->slb.slbv = vsid << SLBV_VSID_SHIFT;

	if (large)
		slb_entry->slb.slbv |= SLBV_L;

	if (pm != kernel_pmap) {
		PMAP_LOCK_ASSERT(pm, MA_OWNED);
		SPLAY_INSERT(slb_tree, &pm->pm_slbtree, slb_entry);
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	if (prespill != NULL)
		slb_insert(pm, prespill, &slb_entry->slb);

	return (vsid);
}

/* Lock entries mapping kernel text and stacks */

#define SLB_SPILLABLE(slbe) \
	(((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
	    (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
	    (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
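
/*
 * Insert 'slb_entry' into the given SLB cache, evicting a victim slot if
 * necessary.  The search starts at a pseudo-random index taken from the
 * timebase, and only slots deemed SLB_SPILLABLE may be evicted from the
 * kernel cache, so that segments covering kernel text and stacks stay
 * resident.  For the kernel pmap, the new entry is also loaded into this
 * CPU's hardware SLB once the pmap is bootstrapped.
 */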
void
slb_insert(pmap_t pm, struct slb *slbcache, struct slb *slb_entry)
{
	uint64_t slbe, slbv;
	int i, j, to_spill;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	to_spill = -1;
	slbv = slb_entry->slbv;
	slbe = slb_entry->slbe;

	/* Hunt for a likely candidate */

	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
		if (pm == kernel_pmap && i == USER_SR)
			continue;

		if (!(slbcache[i].slbe & SLBE_VALID)) {
			to_spill = i;
			break;
		}

		if (to_spill < 0 && (pm != kernel_pmap ||
		    SLB_SPILLABLE(slbcache[i].slbe)))
			to_spill = i;
	}

	if (to_spill < 0)
		panic("SLB spill on ESID %#lx, but no available candidates!\n",
		   (slbe & SLBE_ESID_MASK) >> SLBE_ESID_SHIFT);

	if (slbcache[to_spill].slbe & SLBE_VALID) {
		/* Invalidate this first to avoid races */
		slbcache[to_spill].slbe = 0;
		mb();
	}
	slbcache[to_spill].slbv = slbv;
	slbcache[to_spill].slbe = slbe | (uint64_t)to_spill;

	/* If it is for this CPU, put it in the SLB right away */
	if (pm == kernel_pmap && pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[to_spill].slbv),
		    "r"(slbcache[to_spill].slbe));
	}

	critical_exit();
}

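/*
 * Reverse lookup: find the ESID currently mapped to 'vsid' in a user
 * pmap by walking its splay tree.  Returns 0 and stores the ESID in
 * '*esid' on success, or -1 if the VSID is not in use.
 */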
int
vsid_to_esid(pmap_t pm, uint64_t vsid, uint64_t *esid)
{
	uint64_t slbv;
	struct slbcontainer *entry;

#ifdef INVARIANTS
	if (pm == kernel_pmap)
		panic("vsid_to_esid only works on user pmaps");

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
#endif

	slbv = vsid << SLBV_VSID_SHIFT;

	SPLAY_FOREACH(entry, slb_tree, &pm->pm_slbtree) {
		if (slbv == entry->slb.slbv) {
			*esid = entry->slb.slbe >> SLBE_ESID_SHIFT;
			return (0);
		}
	}

	return (-1);
}

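/*
 * Release every VSID held by 'pm': empty its splay tree, return each
 * VSID to the allocator, and free the container nodes back to the SLB
 * zone.
 */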
void
free_vsids(pmap_t pm)
{
	struct slbcontainer *entry;

	while (!SPLAY_EMPTY(&pm->pm_slbtree)) {
		entry = SPLAY_MIN(slb_tree, &pm->pm_slbtree);

		SPLAY_REMOVE(slb_tree, &pm->pm_slbtree, entry);

		moea64_release_vsid(entry->slb.slbv >> SLBV_VSID_SHIFT);
		uma_zfree(slb_zone, entry);
	}
}

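/*
 * Splay tree comparison routine: order slbcontainer nodes by their
 * slbe (ESID) word.
 */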
static int
slb_compare(struct slbcontainer *a, struct slbcontainer *b)
{
	if (a->slb.slbe == b->slb.slbe)
		return (0);
	else if (a->slb.slbe < b->slb.slbe)
		return (-1);
	else
		return (1);
}

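/*
 * Create the UMA zones backing SLB bookkeeping: one for slbcontainer
 * nodes and one for the 64-entry per-pmap user SLB caches.
 */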
static void
slb_zone_init(void *dummy)
{

	slb_zone = uma_zcreate("SLB segment", sizeof(struct slbcontainer),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
}

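/*
 * Allocate (zeroed) and free the 64-entry SLB caches used for user
 * pmaps.
 */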
struct slb *
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb *slb)
{
	uma_zfree(slb_cache_zone, slb);
}
304