/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general-purpose library
 * because it embeds too many parameters that should really be decided by
 * the library consumers.  At the same time, consumers of this code must
 * achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible but still big
 *   enough to avoid a large maximum depth for the trie.  This is a balance
 *   between the need to not wire too much physical memory for the nodes
 *   and the need to avoid too much cache pollution during trie operations.
 * - There is no huge bias toward the number of lookup operations over the
 *   number of insert and remove operations.  This implies that
 *   optimizations that supposedly help one operation but hurt the other
 *   must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */
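
/*
 * Illustrative note (a sketch, not part of the algorithm): with the widths
 * chosen below, a 64-bit vm_pindex_t is consumed VM_RADIX_WIDTH == 4 bits
 * per level on LP64, so a walk needs at most howmany(64, 4) == 16 levels;
 * path compression typically visits far fewer nodes because single-child
 * chains are skipped.
 */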

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/smr.h>
#include <sys/smr_types.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with the specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))
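
/*
 * Illustrative compile-time checks (added for exposition, not required for
 * correctness): with the LP64 widths above, VM_RADIX_WIDTH == 4 gives
 * 16-way nodes, a maximum level of 15 for a 64-bit vm_pindex_t, and
 * VM_RADIX_UNITLEVEL(2) == 1 << 8.
 */
#ifdef __LP64__
CTASSERT(VM_RADIX_COUNT == 16);
CTASSERT(VM_RADIX_LIMIT == 15);
CTASSERT(VM_RADIX_UNITLEVEL(2) == 256);
#endif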

enum vm_radix_access { SMR, LOCKED, UNSERIALIZED };

struct vm_radix_node;
typedef SMR_POINTER(struct vm_radix_node *) smrnode_t;

struct vm_radix_node {
	vm_pindex_t	rn_owner;			/* Owner of record. */
	uint16_t	rn_count;			/* Valid children. */
	uint8_t		rn_clev;			/* Current level. */
	int8_t		rn_last;			/* zero last ptr. */
	smrnode_t	rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};
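
/*
 * Layout note (illustrative, assuming a typical LP64 ABI): the header
 * fields take 12 bytes and are padded to 16 for pointer alignment, and the
 * 16 child pointers add 128 bytes, so a node occupies about 144 bytes.
 */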

static uma_zone_t vm_radix_node_zone;
static smr_t vm_radix_smr;

static void vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access);

/*
 * Allocate a radix node.
 */
static struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc_smr(vm_radix_node_zone, M_NOWAIT);
	if (rnode == NULL)
		return (NULL);

	/*
	 * We want to clear the last child pointer after the final SMR
	 * section has exited so lookup cannot return false negatives.  It
	 * is done here because it will be cache-cold in the dtor callback.
	 */
	if (rnode->rn_last != 0) {
		vm_radix_node_store(&rnode->rn_child[rnode->rn_last - 1],
		    NULL, UNSERIALIZED);
		rnode->rn_last = 0;
	}
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode, int8_t last)
{
#ifdef INVARIANTS
	int slot;

	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++) {
		if (slot == last)
			continue;
		KASSERT(smr_unserialized_load(&rnode->rn_child[slot], true) ==
		    NULL, ("vm_radix_node_put: rnode %p has a child", rnode));
	}
#endif
	/* Off by one so a freshly zero'd node is not assigned to. */
	rnode->rn_last = last + 1;
	uma_zfree_smr(vm_radix_node_zone, rnode);
}
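
/*
 * Illustrative note on the off-by-one trick above: rn_last stores
 * "last + 1", so a freshly zeroed node (rn_last == 0) is left untouched by
 * the deferred clearing in vm_radix_node_get(), while a node freed with
 * last == 3 records 4 and has rn_child[3] cleared on reuse.
 */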

/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}

/* Computes the key (index) with the low-order 'level' radix-digits zeroed. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	return (index & -VM_RADIX_UNITLEVEL(level));
}
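
/*
 * Worked example (illustrative, assuming VM_RADIX_WIDTH == 4): for index
 * 0x1234, vm_radix_slot() extracts one 4-bit digit per level:
 * level 0 -> 0x4, level 1 -> 0x3, level 2 -> 0x2, level 3 -> 0x1.
 * vm_radix_trimkey(0x1234, 2) clears the two low-order digits, yielding
 * 0x1200.
 */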

/*
 * Fetch a node pointer from a slot in another node.
 */
static __inline struct vm_radix_node *
vm_radix_node_load(smrnode_t *p, enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		return (smr_unserialized_load(p, true));
	case LOCKED:
		return (smr_serialized_load(p, true));
	case SMR:
		return (smr_entered_load(p, vm_radix_smr));
	}
	__assert_unreachable();
}

static __inline void
vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		smr_unserialized_store(p, v, true);
		break;
	case LOCKED:
		smr_serialized_store(p, v, true);
		break;
	case SMR:
		panic("vm_radix_node_store: Not supported in smr section.");
	}
}

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_root_load(struct vm_radix *rtree, enum vm_radix_access access)
{

	return (vm_radix_node_load((smrnode_t *)&rtree->rt_root, access));
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_root_store(struct vm_radix *rtree, struct vm_radix_node *rnode,
    enum vm_radix_access access)
{

	vm_radix_node_store((smrnode_t *)&rtree->rt_root, rnode, access);
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline bool
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page, enum vm_radix_access access)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	vm_radix_node_store(&rnode->rn_child[slot],
	    (struct vm_radix_node *)((uintptr_t)page | VM_RADIX_ISLEAF), access);
}

/*
 * Returns the level where two keys differ.
 * The two keys must not be equal.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));
	CTASSERT(sizeof(long long) >= sizeof(vm_pindex_t));

	/*
	 * From the highest-order bit where the indexes differ,
	 * compute the highest level in the trie where they differ.
	 */
	return ((flsll(index1 ^ index2) - 1) / VM_RADIX_WIDTH);
}
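
/*
 * Worked example (illustrative, assuming VM_RADIX_WIDTH == 4): for 0x1234
 * and 0x1434 the xor is 0x0600, whose highest set bit is bit 10, so
 * flsll() returns 11 and the keys first differ at level (11 - 1) / 4 == 2.
 */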

/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline bool
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (false);
}
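
/*
 * Worked example (illustrative, assuming VM_RADIX_WIDTH == 4): a node with
 * rn_clev == 1 and rn_owner == 0x1200 covers keys 0x1200-0x12ff.  For idx
 * 0x1234, trimming below level 2 yields 0x1200 == rn_owner, so the key may
 * be present and keybarr returns false; for idx 0x1334 the trimmed key is
 * 0x1300 != rn_owner, so the subtree can be skipped.
 */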

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	struct vm_radix_node *child;
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	for (slot = 0; rnode->rn_count != 0; slot++) {
		child = vm_radix_node_load(&rnode->rn_child[slot], UNSERIALIZED);
		if (child == NULL)
			continue;
		if (!vm_radix_isleaf(child))
			vm_radix_reclaim_allnodes_int(child);
		vm_radix_node_store(&rnode->rn_child[slot], NULL, UNSERIALIZED);
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode, -1);
}

#ifndef UMA_MD_SMALL_ALLOC
void vm_radix_reserve_kva(void);
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory on architectures without a direct mapping, which
 * would otherwise have to carve into the kernel maps for every node
 * allocation, resulting in deadlocks for consumers already working with
 * kernel maps.
 */
void
vm_radix_reserve_kva(void)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
#endif
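
/*
 * Illustrative arithmetic (a sketch, assuming 4 KiB pages and ~144-byte
 * nodes on LP64): the reservation above works out to about
 * 4096 / (4096 + 144) ~= 0.97 nodes per physical page, i.e. roughly one
 * node per page while accounting for the pages that back the nodes
 * themselves.
 */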

/*
 * Initialize the UMA slab zone.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL, NULL, NULL, NULL,
	    VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_SMR | UMA_ZONE_ZINIT);
	vm_radix_smr = uma_zone_get_smr(vm_radix_node_zone);
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	struct vm_radix_node *rnode, *tmp;
	smrnode_t *parentp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return (0);
	}
	parentp = (smrnode_t *)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev + 1), 2, clev);
			if (tmp == NULL)
				return (ENOMEM);
			/* These writes are not yet visible due to ordering. */
			vm_radix_addpage(tmp, index, clev, page, UNSERIALIZED);
			vm_radix_addpage(tmp, m->pindex, clev, m, UNSERIALIZED);
			/* Synchronize to make leaf visible. */
			vm_radix_node_store(parentp, tmp, LOCKED);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		parentp = &rnode->rn_child[slot];
		tmp = vm_radix_node_load(parentp, LOCKED);
		if (tmp == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page,
			    LOCKED);
			return (0);
		}
		rnode = tmp;
	}

	/*
	 * A new node is needed because the right insertion level has been
	 * reached.  Set up the new intermediate node and add the two
	 * children: the new leaf and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);
	slot = vm_radix_slot(newind, clev);
	/* These writes are not yet visible due to ordering. */
	vm_radix_addpage(tmp, index, clev, page, UNSERIALIZED);
	vm_radix_node_store(&tmp->rn_child[slot], rnode, UNSERIALIZED);
	/* Serializing write to make the above visible. */
	vm_radix_node_store(parentp, tmp, LOCKED);

	return (0);
}
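
#if 0
/*
 * Illustrative usage sketch (not compiled): a consumer such as vm_object
 * typically calls vm_radix_insert() with the object lock held and must
 * handle ENOMEM, since intermediate nodes are allocated with M_NOWAIT.
 * The names below ("object", "m", "error") are hypothetical placeholders.
 */
	VM_OBJECT_ASSERT_WLOCKED(object);
	error = vm_radix_insert(&object->rtree, m);
	if (error == ENOMEM) {
		VM_OBJECT_WUNLOCK(object);
		vm_radix_wait();	/* Wait for a node allocation. */
		VM_OBJECT_WLOCK(object);
		/* ... retry or back out ... */
	}
#endif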

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
static __always_inline vm_page_t
_vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index,
    enum vm_radix_access access)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_root_load(rtree, access);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			break;
		}
		if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = vm_radix_node_load(&rnode->rn_child[slot], access);
	}
	return (NULL);
}

/*
 * Returns the value stored at the index assuming there is an external lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{

	return (_vm_radix_lookup(rtree, index, LOCKED));
}

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_page_t m;

	smr_enter(vm_radix_smr);
	m = _vm_radix_lookup(rtree, index, SMR);
	smr_exit(vm_radix_smr);

	return (m);
}
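
#if 0
/*
 * Illustrative usage sketch (not compiled): an unlocked lookup runs inside
 * an SMR read section, so the returned page may be concurrently removed
 * from the trie.  Callers are expected to revalidate, e.g. by busying the
 * page and rechecking its identity.  "object" and "pindex" are
 * hypothetical placeholders.
 */
	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
	if (m != NULL && vm_page_tryxbusy(m) != 0) {
		if (m->object != object || m->pindex != pindex) {
			/* Raced with removal or rename; drop it. */
			vm_page_xunbusy(m);
			m = NULL;
		}
	}
#endif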

/*
 * Look up the nearest entry at a position greater than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is greater than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = vm_radix_node_load(&rnode->rn_child[slot],
				    LOCKED);
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge greater than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = vm_radix_node_load(&rnode->rn_child[slot],
				    LOCKED);
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent, *tmp;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_root_store(rtree, NULL, LOCKED);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex != index)
				return (NULL);
			vm_radix_node_store(&rnode->rn_child[slot], NULL, LOCKED);
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			for (i = 0; i < VM_RADIX_COUNT; i++) {
				tmp = vm_radix_node_load(&rnode->rn_child[i],
				    LOCKED);
				if (tmp != NULL)
					break;
			}
			KASSERT(tmp != NULL,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_root_store(rtree, tmp, LOCKED);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(vm_radix_node_load(
				    &parent->rn_child[slot], LOCKED) == rnode,
				    ("%s: invalid child value", __func__));
				vm_radix_node_store(&parent->rn_child[slot],
				    tmp, LOCKED);
			}
			/*
			 * The child is still valid and we cannot zero the
			 * pointer until all SMR references are gone.
			 */
			rnode->rn_count--;
			vm_radix_node_put(rnode, i);
			return (m);
		}
		parent = rnode;
		rnode = tmp;
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but the recursion is tightly bounded because
 * the maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_root_load(rtree, LOCKED);
	if (root == NULL)
		return;
	vm_radix_root_store(rtree, NULL, UNSERIALIZED);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	vm_pindex_t index;
	int slot;

	index = newpage->pindex;
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		panic("%s: replacing page on an empty trie", __func__);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: original replacing root key not found",
			    __func__);
		rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF;
		return (m);
	}
	for (;;) {
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex == index) {
				vm_radix_node_store(&rnode->rn_child[slot],
				    (struct vm_radix_node *)((uintptr_t)newpage |
				    VM_RADIX_ISLEAF), LOCKED);
				return (m);
			} else
				break;
		} else if (tmp == NULL || vm_radix_keybarr(tmp, index))
			break;
		rnode = tmp;
	}
	panic("%s: original replacing page not found", __func__);
}

void
vm_radix_wait(void)
{
	uma_zwait(vm_radix_node_zone);
}

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode, *tmp;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++) {
		tmp = vm_radix_node_load(&rnode->rn_child[i], UNSERIALIZED);
		if (tmp != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)tmp,
			    vm_radix_isleaf(tmp) ? vm_radix_topage(tmp) : NULL,
			    rnode->rn_clev);
	}
}
#endif /* DDB */