xref: /freebsd/sys/powerpc/aim/slb.c (revision 1de7b4b805ddbf2429da511c053686ac4591ed89)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so nine levels of 4-bit nibbles, with the leaf at level 0 and
 * the root at level 8.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field should have 0 bits in its low 4*(level+1)
 * positions; i.e. only the nibbles above the node's own level may be
 * nonzero.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}
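
/*
 * Worked example (illustrative values): for esid 0x123456789,
 * esid2idx() selects nibble 0x1 at level 8, 0x5 at level 4 and 0x9 at
 * level 0, while esid2base(esid, 4) clears the level-4 nibble and
 * everything below it, yielding 0x123400000.
 */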

/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_lwsync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode *
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
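	/*
	 * Illustrative example: a child with ua_base 0x123450000 at
	 * level 3 and a new esid 0x123460789 first share a base
	 * (0x123400000) at level 4, so that is where the intermediate
	 * node is created.
	 */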
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_lwsync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}

uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/* Figure out if this is a large-page mapping */
	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, assume that all
		 * physical memory is mapped with large pages.
		 */
		if (mem_valid(va, 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}

struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		/*
		 * The following accesses are implicitly ordered under the POWER
		 * ISA by load dependencies (the store ordering is provided by
		 * the powerpc_lwsync() calls elsewhere) and so are run without
		 * barriers.
		 */
		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	/* NOTREACHED */
	return (NULL);
}

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			/* Make the new slbv visible before the valid slbe */
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			/* Order the slbv clear before invalidating slbe */
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

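	/*
	 * No unused slot was found: evict a pseudo-random entry, using
	 * the low bits of the timebase as a cheap source of randomness.
	 */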
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

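/*
 * UMA back-end page allocator: pages are drawn from below the
 * platform's maximum real-mode address, so that SLB structures can
 * remain accessible even with address translation disabled.
 */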
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;
	m = vm_page_alloc_contig(NULL, 0,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
	    1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	va = (void *) VM_PAGE_TO_PHYS(m);

	if (!hw_direct_map)
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	return (va);
}

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}
537