xref: /freebsd/sys/powerpc/aim/slb.c (revision 85a6f8076fd9859cb8abd69ac4930acbf183a696)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>
#include <machine/trap.h>

#include "mmu_oea64.h"

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 8 levels, with the leaf being at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0

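/*
 * Extract the 4-bit index that 'esid' selects at the given tree level.
 * For example, esid 0x123456789 yields index 0x9 at level 0 (the leaf)
 * and 0x1 at level 8 (the root).
 */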
static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field must have its low 4*(level+1) bits clear; i.e. only
 * the bits that identify the node's position in the levels above its own
 * may be set.  uad_baseok() checks that invariant.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

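/*
 * Clear the low 4*(level+1) bits of 'esid', giving the base ESID of the
 * subtree containing it at that level; e.g. esid2base(0x123456789, 0)
 * is 0x123456780.
 */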
static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}

/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_lwsync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode *
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_lwsync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}

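/*
 * Compute the SLBV (VSID and flags) for a kernel virtual address.  Kernel
 * VSIDs are derived arithmetically from the ESID via KERNEL_VSID(), so no
 * lookup structure is needed; the large-page bit is set for direct-map
 * addresses that correspond to valid physical memory and for the vm_page
 * array when a large page size is configured.
 */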
uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/*
	 * Figure out if this is a large-page mapping.
	 */
	if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, assume all
		 * physical memory is mapped with large pages.
		 */

		if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
			slbv |= SLBV_L;
	} else if (moea64_large_page_size != 0 &&
	    va >= (vm_offset_t)vm_page_array &&
	    va <= (uintptr_t)(&vm_page_array[vm_page_array_size]))
		slbv |= SLBV_L;

	return (slbv);
}

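/*
 * Walk the pmap's SLB tree, locklessly, to find the cached SLB entry for
 * a user virtual address.  Returns NULL if no valid entry covers the
 * address's ESID.
 */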
struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		/*
		 * The following accesses are implicitly ordered under the POWER
		 * ISA by load dependencies (the store ordering is provided by
		 * the powerpc_lwsync() calls elsewhere) and so are run without
		 * barriers.
		 */
		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}

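/*
 * Return the VSID for a virtual address in the given pmap.  Kernel VSIDs
 * are computed directly; for user pmaps, a new VSID is allocated if the
 * address does not have one yet.
 */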
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

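/*
 * Allocate a fresh VSID for the given ESID in a user pmap, enter it into
 * the pmap's SLB tree (creating leaf and intermediate nodes as needed),
 * and pre-spill it into the pmap's SLB cache.  Returns the new VSID.
 */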
uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

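/*
 * Invalidate the SLB tree entry for the given ESID in a user pmap.  The
 * entry is cleared in place; tree nodes are only reclaimed when the whole
 * tree is freed.
 */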
void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

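/*
 * Recursively free a subtree of SLB tree nodes, releasing the VSID of
 * every leaf entry that is still live.
 */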
static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

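/* Allocate and initialize an empty SLB tree root for a new pmap. */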
struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(root != NULL, ("unhandled NULL case"));
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(aim.slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

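/*
 * Record an SLB entry in a user pmap's SLB cache so the trap handler can
 * load it on the way back to user space.  Once the cache is full, a
 * random existing entry is replaced.
 */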
void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

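/*
 * UMA back-end allocator that returns pages below the platform's maximum
 * real address, so the SLB structures remain addressable while running
 * in real mode (e.g. in the SLB fault handlers).
 */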
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;
	m = vm_page_alloc_contig_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
	    1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	if (hw_direct_map)
		va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	else {
		va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
	}

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	return (va);
}

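/*
 * Create the UMA zones for SLB tree nodes and per-pmap SLB caches, and
 * switch them to the real-mode-safe allocator when the platform limits
 * real-mode-accessible memory.
 */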
static void
slb_zone_init(void *dummy)
{
	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_CONTIG | UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG | UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

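/* Allocate and free the per-pmap array of cached SLB entry pointers. */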
struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}

/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(aim.slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

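/*
 * Handle a user SLB fault: look up (or allocate) the SLB entry for the
 * faulting address and make sure it is present in the pmap's SLB cache.
 * Returns 0 on success, or -1 if the pmap has no SLB cache.
 */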
int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	if (pm->pm_slb == NULL)
		return (-1);

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}