xref: /freebsd/sys/powerpc/aim/slb.c (revision 95ee2897e98f5d444f26ed2334cc7c439f9c16c6)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>
#include <machine/trap.h>

#include "mmu_oea64.h"

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 9 levels, with the root at level 8 and the leaf at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field should have 0 bits after the first 4*(level+1)
 * bits; i.e. only the high-order bits that locate this node from the
 * root may be nonzero.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}

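/*
 * Illustrative example: for esid 0x123456789 (36 bits),
 * esid2idx(esid, 8) == 0x1 and esid2idx(esid, 0) == 0x9, while
 * esid2base(esid, 3) == 0x123450000 (the low four nibbles cleared).
 */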
/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_lwsync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_lwsync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}

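/*
 * Compute the SLBV (VSID and flags) half of an SLB entry for a kernel
 * virtual address.  Kernel VSIDs are derived deterministically from
 * the ESID, so only the large-page heuristics below require state.
 */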
uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/*
	 * Figure out if this is a large-page mapping.
	 */
	if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, this assumes
		 * all physical memory is mapped with large pages.
		 */

		if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
			slbv |= SLBV_L;
	} else if (moea64_large_page_size != 0 &&
	    va >= (vm_offset_t)vm_page_array &&
	    va <= (uintptr_t)(&vm_page_array[vm_page_array_size]))
		slbv |= SLBV_L;

	return (slbv);
}

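/*
 * Look up the cached SLB entry covering a user virtual address, or
 * return NULL if none has been allocated.  May run locklessly,
 * relying on the store ordering provided by powerpc_lwsync() above.
 */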
struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		/*
		 * The following accesses are implicitly ordered under the POWER
		 * ISA by load dependencies (the store ordering is provided by
		 * the powerpc_lwsync() calls elsewhere) and so are run without
		 * barriers.
		 */
		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}

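/*
 * Return the VSID for a virtual address, allocating a VSID and SLB
 * entry on demand for user pmaps that have none yet.
 */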
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

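/*
 * Allocate a fresh VSID for (pm, esid), install it in the pmap's SLB
 * tree (creating leaf and intermediate nodes as needed), and
 * pre-spill the new entry into the pmap's SLB cache.
 */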
uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

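/*
 * Invalidate the SLB entry for (pm, esid) in the pmap's tree.  The
 * slbv and slbe words are cleared in separate eieio()-ordered stores
 * for the benefit of lockless readers.
 */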
void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

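/*
 * Recursively free an SLB tree node, releasing the VSID of any leaf
 * entry still allocated beneath it.
 */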
static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

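/* Allocate an empty root node for a new pmap's SLB tree. */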
struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(root != NULL, ("unhandled NULL case"));
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

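/*
 * Insert an entry into this CPU's kernel SLB cache, evicting a
 * pseudo-random victim (never the reserved user slot) once the cache
 * is full, and load it into the hardware SLB once the MMU is up.
 */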
void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(aim.slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a pseudo-random entry that is not the user slot */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

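/*
 * Insert an entry into a pmap's user SLB cache, appending while free
 * slots remain and replacing a pseudo-random slot thereafter.
 */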
void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

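/*
 * UMA back-end allocator that returns wired pages below the
 * platform's maximum real address, so that SLB structures can be
 * reached with translation off (the kernel SLB fault handler below
 * runs in real mode).
 */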
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;
	m = vm_page_alloc_noobj_contig_domain(domain, malloc2vm_flags(wait) |
	    VM_ALLOC_WIRED, 1, 0, realmax, PAGE_SIZE, PAGE_SIZE,
	    VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	if (hw_direct_map)
		va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	else {
		va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
	}

	return (va);
}

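/*
 * Create the UMA zones for SLB tree nodes and per-pmap SLB caches,
 * steering them through the real-mode-safe allocator above when the
 * platform limits real-mode addressing.
 */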
static void
slb_zone_init(void *dummy)
{
	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_CONTIG | UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG | UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

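/* Allocate or free a zeroed per-pmap user SLB cache. */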
struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_WAITOK | M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}

/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(aim.slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

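/*
 * Handle a user SLB fault: allocate a VSID on first touch; otherwise
 * re-insert the already-allocated entry into the SLB cache.  Returns
 * 0 on success and -1 if the pmap has no SLB cache.
 */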
int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	if (pm->pm_slb == NULL)
		return (-1);

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}