xref: /freebsd/sys/powerpc/aim/slb.c (revision 49b49cda41feabe3439f7318e8bf40e3896c7bf4)
1 /*-
2  * Copyright (c) 2010 Nathan Whitehorn
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/param.h>
30 #include <sys/kernel.h>
31 #include <sys/lock.h>
32 #include <sys/malloc.h>
33 #include <sys/mutex.h>
34 #include <sys/proc.h>
35 #include <sys/systm.h>
36 
37 #include <vm/vm.h>
38 #include <vm/pmap.h>
39 #include <vm/uma.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout.h>
44 
45 #include <machine/md_var.h>
46 #include <machine/platform.h>
47 #include <machine/pmap.h>
48 #include <machine/vmparam.h>
49 
50 uintptr_t moea64_get_unique_vsid(void);
51 void moea64_release_vsid(uint64_t vsid);
52 static void slb_zone_init(void *);
53 
54 static uma_zone_t slbt_zone;
55 static uma_zone_t slb_cache_zone;
56 int n_slbs = 64;
57 
58 SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
59 
60 struct slbtnode {
61 	uint16_t	ua_alloc;
62 	uint8_t		ua_level;
63 	/* Only 36 bits needed for full 64-bit address space. */
64 	uint64_t	ua_base;
65 	union {
66 		struct slbtnode	*ua_child[16];
67 		struct slb	slb_entries[16];
68 	} u;
69 };
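
/*
 * Each node indexes a 4-bit slice of the 36-bit ESID space: ua_level says
 * which slice, ua_base is the ESID prefix shared by everything under the
 * node, and ua_alloc is a bitmap of which of the 16 slots are in use.
 * Interior nodes hold child pointers; level-0 (leaf) nodes hold the SLB
 * entries themselves.
 */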
70 
71 /*
72  * For a full 64-bit address space, there are 36 bits in play in an
73  * esid, so nine levels (8 down to 0), with the leaf being at level 0.
74  *
75  * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
76  * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
77  * +----+----+----+----+----+----+----+----+----+--------
78  * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
79  */
80 #define UAD_ROOT_LEVEL  8
81 #define UAD_LEAF_LEVEL  0
82 
83 static inline int
84 esid2idx(uint64_t esid, int level)
85 {
86 	int shift;
87 
88 	shift = level * 4;
89 	return ((esid >> shift) & 0xF);
90 }
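
/*
 * For example, with esid 0x123456789, esid2idx() returns 0x9 at level 0,
 * 0x8 at level 1 and 0x1 at level 8: the nibble of the ESID that a node
 * at that level uses to pick one of its 16 slots.
 */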
91 
92 /*
93  * The low 4*(level+1) bits of ua_base should be 0, i.e. only the bits
94  * above the 4-bit slice indexed at this node's level are significant.
95  */
96 #define uad_baseok(ua)                          \
97 	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)
98 
99 
100 static inline uint64_t
101 esid2base(uint64_t esid, int level)
102 {
103 	uint64_t mask;
104 	int shift;
105 
106 	shift = (level + 1) * 4;
107 	mask = ~((1ULL << shift) - 1);
108 	return (esid & mask);
109 }
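
/*
 * Continuing the example above, esid2base(0x123456789, 0) == 0x123456780,
 * esid2base(0x123456789, 2) == 0x123456000, and esid2base(esid, 8) == 0
 * for any esid, which is why the root node's ua_base is always zero.
 */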
110 
111 /*
112  * Allocate a new leaf node for the specified esid/vmhandle from the
113  * parent node.
114  */
115 static struct slb *
116 make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
117 {
118 	struct slbtnode *child;
119 	struct slb *retval;
120 	int idx;
121 
122 	idx = esid2idx(esid, parent->ua_level);
123 	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));
124 
125 	/* XXX: unlock, allocate with M_WAITOK, and retry on failure? */
126 	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
127 	KASSERT(child != NULL, ("unhandled NULL case"));
128 
129 	child->ua_level = UAD_LEAF_LEVEL;
130 	child->ua_base = esid2base(esid, child->ua_level);
131 	idx = esid2idx(esid, child->ua_level);
132 	child->u.slb_entries[idx].slbv = slbv;
133 	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
134 	setbit(&child->ua_alloc, idx);
135 
136 	retval = &child->u.slb_entries[idx];
137 
138 	/*
139 	 * The above stores must be visible before the next one, so
140 	 * that a lockless searcher always sees a valid path through
141 	 * the tree.
142 	 */
143 	powerpc_lwsync();
144 
145 	idx = esid2idx(esid, parent->ua_level);
146 	parent->u.ua_child[idx] = child;
147 	setbit(&parent->ua_alloc, idx);
148 
149 	return (retval);
150 }
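
/*
 * Insertions like the one above are serialized by the pmap lock; only
 * lookups are lockless, which is why the leaf is fully written out and
 * ordered with lwsync before it is linked into the parent.
 */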
151 
152 /*
153  * Allocate a new intermediate node to fit between the parent and
154  * esid.
155  */
156 static struct slbtnode*
157 make_intermediate(uint64_t esid, struct slbtnode *parent)
158 {
159 	struct slbtnode *child, *inter;
160 	int idx, level;
161 
162 	idx = esid2idx(esid, parent->ua_level);
163 	child = parent->u.ua_child[idx];
164 	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
165 	    ("No need for an intermediate node?"));
166 
167 	/*
168 	 * Find the level where the existing child and our new esid
169 	 * meet.  It must be lower than parent->ua_level or we would
170 	 * have chosen a different index in parent.
171 	 */
172 	level = child->ua_level + 1;
173 	while (esid2base(esid, level) !=
174 	    esid2base(child->ua_base, level))
175 		level++;
176 	KASSERT(level < parent->ua_level,
177 	    ("Found splitting level %d for %09jx and %09jx, "
178 	    "but it's the same as %p's",
179 	    level, esid, child->ua_base, parent));
180 
181 	/* XXX: unlock, allocate with M_WAITOK, and retry on failure? */
182 	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
183 	KASSERT(inter != NULL, ("unhandled NULL case"));
184 
185 	/* Set up intermediate node to point to child ... */
186 	inter->ua_level = level;
187 	inter->ua_base = esid2base(esid, inter->ua_level);
188 	idx = esid2idx(child->ua_base, inter->ua_level);
189 	inter->u.ua_child[idx] = child;
190 	setbit(&inter->ua_alloc, idx);
191 	powerpc_lwsync();
192 
193 	/* Set up parent to point to intermediate node ... */
194 	idx = esid2idx(inter->ua_base, parent->ua_level);
195 	parent->u.ua_child[idx] = inter;
196 	setbit(&parent->ua_alloc, idx);
197 
198 	return (inter);
199 }
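
/*
 * For example, if a level-0 child with ua_base 0x123456780 hangs off the
 * root and 0x123459abc is inserted, the two bases first agree at level 3
 * (0x123450000), so a level-3 intermediate node is created there with the
 * old leaf in slot 6 and room for the new leaf in slot 9.
 */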
200 
201 uint64_t
202 kernel_va_to_slbv(vm_offset_t va)
203 {
204 	uint64_t slbv;
205 
206 	/* Set kernel VSID to deterministic value */
207 	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;
208 
209 	/* Figure out if this is a large-page mapping */
210 	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
211 		/*
212 		 * XXX: If a direct map has been set up, assume that all
213 		 * physical memory is mapped with large pages.
214 		 */
215 		if (mem_valid(va, 0) == 0)
216 			slbv |= SLBV_L;
217 	}
218 
219 	return (slbv);
220 }
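
/*
 * Kernel VSIDs are a deterministic function of the ESID (KERNEL_VSID), so
 * kernel mappings need no lookup structure; the only per-VA decision made
 * here is whether the segment is covered by large pages, which is assumed
 * for anything in the direct map.
 */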
221 
222 struct slb *
223 user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
224 {
225 	uint64_t esid = va >> ADDR_SR_SHFT;
226 	struct slbtnode *ua;
227 	int idx;
228 
229 	ua = pm->pm_slb_tree_root;
230 
231 	for (;;) {
232 		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
233 		    ua->ua_base, ua->ua_level));
234 		idx = esid2idx(esid, ua->ua_level);
235 
236 		/*
237 		 * This code is specific to ppc64, where a 64-bit load is
238 		 * atomic, so no atomic load macro is needed.
239 		 */
240 		if (ua->ua_level == UAD_LEAF_LEVEL)
241 			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
242 			    &ua->u.slb_entries[idx] : NULL);
243 
244 		/*
245 		 * The following accesses are implicitly ordered under the POWER
246 		 * ISA by load dependencies (the store ordering is provided by
247 		 * the powerpc_lwsync() calls elsewhere) and so are run without
248 		 * barriers.
249 		 */
250 		ua = ua->u.ua_child[idx];
251 		if (ua == NULL ||
252 		    esid2base(esid, ua->ua_level) != ua->ua_base)
253 			return (NULL);
254 	}
255 
256 	return (NULL);
257 }
258 
259 uint64_t
260 va_to_vsid(pmap_t pm, vm_offset_t va)
261 {
262 	struct slb *entry;
263 
264 	/* Shortcut kernel case */
265 	if (pm == kernel_pmap)
266 		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));
267 
268 	/*
269 	 * If there is no vsid for this VA, we need to add a new entry
270 	 * to the PMAP's segment table.
271 	 */
272 
273 	entry = user_va_to_slb_entry(pm, va);
274 
275 	if (entry == NULL)
276 		return (allocate_user_vsid(pm,
277 		    (uintptr_t)va >> ADDR_SR_SHFT, 0));
278 
279 	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
280 }
281 
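/*
 * Allocate a fresh VSID for (pm, esid) and enter it into the tree; the
 * pmap lock must be held.  Stores are ordered (eieio/lwsync) so that a
 * lockless walker never sees a valid slbe paired with a stale slbv, and
 * the new entry is pre-spilled into the process's SLB cache before return.
 */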
282 uint64_t
283 allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
284 {
285 	uint64_t vsid, slbv;
286 	struct slbtnode *ua, *next, *inter;
287 	struct slb *slb;
288 	int idx;
289 
290 	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));
291 
292 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
293 	vsid = moea64_get_unique_vsid();
294 
295 	slbv = vsid << SLBV_VSID_SHIFT;
296 	if (large)
297 		slbv |= SLBV_L;
298 
299 	ua = pm->pm_slb_tree_root;
300 
301 	/* Descend to the correct leaf or NULL pointer. */
302 	for (;;) {
303 		KASSERT(uad_baseok(ua),
304 		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
305 		idx = esid2idx(esid, ua->ua_level);
306 
307 		if (ua->ua_level == UAD_LEAF_LEVEL) {
308 			ua->u.slb_entries[idx].slbv = slbv;
309 			eieio();
310 			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
311 			    | SLBE_VALID;
312 			setbit(&ua->ua_alloc, idx);
313 			slb = &ua->u.slb_entries[idx];
314 			break;
315 		}
316 
317 		next = ua->u.ua_child[idx];
318 		if (next == NULL) {
319 			slb = make_new_leaf(esid, slbv, ua);
320 			break;
321 		}
322 
323 		/*
324 		 * Check if the next item down has an okay ua_base.
325 		 * If not, we need to allocate an intermediate node.
326 		 */
327 		if (esid2base(esid, next->ua_level) != next->ua_base) {
328 			inter = make_intermediate(esid, ua);
329 			slb = make_new_leaf(esid, slbv, inter);
330 			break;
331 		}
332 
333 		ua = next;
334 	}
335 
336 	/*
337 	 * Someone probably wants this soon, and it may be a wired
338 	 * SLB mapping, so pre-spill this entry.
339 	 */
340 	eieio();
341 	slb_insert_user(pm, slb);
342 
343 	return (vsid);
344 }
345 
346 void
347 free_vsid(pmap_t pm, uint64_t esid, int large)
348 {
349 	struct slbtnode *ua;
350 	int idx;
351 
352 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
353 
354 	ua = pm->pm_slb_tree_root;
355 	/* Descend to the correct leaf. */
356 	for (;;) {
357 		KASSERT(uad_baseok(ua),
358 		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
359 
360 		idx = esid2idx(esid, ua->ua_level);
361 		if (ua->ua_level == UAD_LEAF_LEVEL) {
362 			ua->u.slb_entries[idx].slbv = 0;
363 			eieio();
364 			ua->u.slb_entries[idx].slbe = 0;
365 			clrbit(&ua->ua_alloc, idx);
366 			return;
367 		}
368 
369 		ua = ua->u.ua_child[idx];
370 		if (ua == NULL ||
371 		    esid2base(esid, ua->ua_level) != ua->ua_base) {
372 			/* Perhaps just return instead of assert? */
373 			KASSERT(0,
374 			    ("Asked to remove an entry that was never inserted!"));
375 			return;
376 		}
377 	}
378 }
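
/*
 * free_vsid() only clears the leaf entry; tree nodes are never pruned
 * here.  They are reclaimed all at once by slb_free_tree() when the pmap
 * itself goes away.
 */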
379 
380 static void
381 free_slb_tree_node(struct slbtnode *ua)
382 {
383 	int idx;
384 
385 	for (idx = 0; idx < 16; idx++) {
386 		if (ua->ua_level != UAD_LEAF_LEVEL) {
387 			if (ua->u.ua_child[idx] != NULL)
388 				free_slb_tree_node(ua->u.ua_child[idx]);
389 		} else {
390 			if (ua->u.slb_entries[idx].slbv != 0)
391 				moea64_release_vsid(ua->u.slb_entries[idx].slbv
392 				    >> SLBV_VSID_SHIFT);
393 		}
394 	}
395 
396 	uma_zfree(slbt_zone, ua);
397 }
398 
399 void
400 slb_free_tree(pmap_t pm)
401 {
402 
403 	free_slb_tree_node(pm->pm_slb_tree_root);
404 }
405 
406 struct slbtnode *
407 slb_alloc_tree(void)
408 {
409 	struct slbtnode *root;
410 
411 	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
412 	root->ua_level = UAD_ROOT_LEVEL;
413 
414 	return (root);
415 }
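
/*
 * A freshly allocated root is zero-filled, so its ua_base is 0 and it
 * covers the entire user address space at UAD_ROOT_LEVEL.  XXX: the
 * M_NOWAIT allocation is not checked for NULL before use.
 */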
416 
417 /* Lock entries mapping kernel text and stacks */
418 
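/*
 * Each CPU keeps a copy of its kernel SLB entries in the PCPU slb cache;
 * slot USER_SLB_SLOT is reserved for the current user segment.  While free
 * slots remain, the first invalid one is used, with the user slot's slbe
 * doubling as a "cache full" flag; once full, a victim slot (never the
 * user slot) is chosen pseudo-randomly from the timebase.
 */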
419 void
420 slb_insert_kernel(uint64_t slbe, uint64_t slbv)
421 {
422 	struct slb *slbcache;
423 	int i;
424 
425 	/* We don't want to be preempted while modifying the kernel map */
426 	critical_enter();
427 
428 	slbcache = PCPU_GET(slb);
429 
430 	/* Check for an unused slot, abusing the user slot as a full flag */
431 	if (slbcache[USER_SLB_SLOT].slbe == 0) {
432 		for (i = 0; i < n_slbs; i++) {
433 			if (i == USER_SLB_SLOT)
434 				continue;
435 			if (!(slbcache[i].slbe & SLBE_VALID))
436 				goto fillkernslb;
437 		}
438 
439 		if (i == n_slbs)
440 			slbcache[USER_SLB_SLOT].slbe = 1;
441 	}
442 
443 	i = mftb() % n_slbs;
444 	if (i == USER_SLB_SLOT)
445 		i = (i+1) % n_slbs;
446 
447 fillkernslb:
448 	KASSERT(i != USER_SLB_SLOT,
449 	    ("Filling user SLB slot with a kernel mapping"));
450 	slbcache[i].slbv = slbv;
451 	slbcache[i].slbe = slbe | (uint64_t)i;
452 
453 	/* If the MMU is up, put it in this CPU's SLB right away */
454 	if (pmap_bootstrapped) {
455 		/* slbie not required */
456 		__asm __volatile ("slbmte %0, %1" ::
457 		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
458 	}
459 
460 	critical_exit();
461 }
462 
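/*
 * Add a pointer to a tree leaf to the process's SLB cache (pm_slb), which
 * the low-level SLB spill handler (trap_subr) consults for user segments.
 * Slots are filled in order until the cache is full, after which a
 * pseudo-random victim chosen from the timebase is replaced.
 */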
463 void
464 slb_insert_user(pmap_t pm, struct slb *slb)
465 {
466 	int i;
467 
468 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
469 
470 	if (pm->pm_slb_len < n_slbs) {
471 		i = pm->pm_slb_len;
472 		pm->pm_slb_len++;
473 	} else {
474 		i = mftb() % n_slbs;
475 	}
476 
477 	/* Note that this replacement is atomic with respect to trap_subr */
478 	pm->pm_slb[i] = slb;
479 }
480 
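/*
 * Page allocator for the SLB zones: pages are taken from below
 * platform_real_maxaddr() and mapped at their physical address, since
 * these structures may be walked by the SLB fault handlers with address
 * translation disabled.
 */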
481 static void *
482 slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
483 {
484 	static vm_offset_t realmax = 0;
485 	void *va;
486 	vm_page_t m;
487 	int pflags;
488 
489 	if (realmax == 0)
490 		realmax = platform_real_maxaddr();
491 
492 	*flags = UMA_SLAB_PRIV;
493 	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
494 
495 	for (;;) {
496 		m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax,
497 		    PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
498 		if (m == NULL) {
499 			if (wait & M_NOWAIT)
500 				return (NULL);
501 			VM_WAIT;
502 		} else
503 			break;
504 	}
505 
506 	va = (void *) VM_PAGE_TO_PHYS(m);
507 
508 	if (!hw_direct_map)
509 		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
510 
511 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
512 		bzero(va, PAGE_SIZE);
513 
514 	return (va);
515 }
516 
517 static void
518 slb_zone_init(void *dummy)
519 {
520 
521 	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
522 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
523 	slb_cache_zone = uma_zcreate("SLB cache",
524 	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
525 	    UMA_ALIGN_PTR, UMA_ZONE_VM);
526 
527 	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
528 		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
529 		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
530 	}
531 }
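
/*
 * The zones are flagged UMA_ZONE_VM because they back core MMU state, and
 * they switch to the real-mode-safe allocator above whenever the platform
 * cannot reach all of memory in real mode.
 */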
532 
533 struct slb **
534 slb_alloc_user_cache(void)
535 {
536 	return (uma_zalloc(slb_cache_zone, M_ZERO));
537 }
538 
539 void
540 slb_free_user_cache(struct slb **slb)
541 {
542 	uma_zfree(slb_cache_zone, slb);
543 }
544