/*
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general purpose library
 * because there are way too many embedded parameters that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible but still big
 *   enough to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany((sizeof(vm_pindex_t) * NBBY), VM_RADIX_WIDTH) - 1)
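
/*
 * Illustrative values (assuming __LP64__, where vm_pindex_t is 64 bits
 * wide and VM_RADIX_WIDTH is 4):
 *
 *	VM_RADIX_COUNT == 16	(children per node)
 *	VM_RADIX_MASK  == 0xf
 *	VM_RADIX_LIMIT == howmany(64, 4) - 1 == 15
 *
 * so a key is consumed one nibble at a time, from level 0 (the most
 * significant nibble) down to level 15 (the least significant one).
 */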

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with the specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((VM_RADIX_LIMIT - (lev)) * VM_RADIX_WIDTH))
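
/*
 * Illustrative values (assuming __LP64__, so VM_RADIX_LIMIT == 15), i.e.
 * the key distance between two consecutive slots of a node at the given
 * level:
 *
 *	VM_RADIX_UNITLEVEL(15) == 1
 *	VM_RADIX_UNITLEVEL(14) == 0x10
 *	VM_RADIX_UNITLEVEL(0)  == (vm_pindex_t)1 << 60
 */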

struct vm_radix_node {
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.  Pre-allocation should ensure that the request
 * will always be satisfied.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT);

	/*
	 * The required number of nodes should already be pre-allocated
	 * by vm_radix_prealloc().  However, UMA can hold a few nodes
	 * in per-CPU buckets, which are not accessible to the current
	 * CPU.  Thus, the allocation could return NULL when the
	 * pre-allocated pool is close to exhaustion.  In practice this
	 * should never occur because a new node is not always required
	 * for an insert.  Thus, the pre-allocated pool should have some
	 * extra pages that prevent this from becoming a problem.
	 */
	if (rnode == NULL)
		panic("%s: uma_zalloc() returned NULL for a new node",
		    __func__);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

	uma_zfree(vm_radix_node_zone, rnode);
}

/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> ((VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH)) &
	    VM_RADIX_MASK);
}
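
/*
 * Illustrative examples (assuming __LP64__): the slot is simply the nibble
 * of the key selected by the level, counting nibbles from the most
 * significant end:
 *
 *	vm_radix_slot(0xabc, 15) == 0xc
 *	vm_radix_slot(0xabc, 14) == 0xb
 *	vm_radix_slot(0xabc, 13) == 0xa
 *	vm_radix_slot(0xabc, 12) == 0x0
 */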

/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level < VM_RADIX_LIMIT) {
		ret >>= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
		ret <<= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
	}
	return (ret);
}
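
/*
 * Illustrative examples (assuming __LP64__): trimming clears the key bits
 * below the given level:
 *
 *	vm_radix_trimkey(0xabc, 14) == 0xab0
 *	vm_radix_trimkey(0xabc, 13) == 0xa00
 *	vm_radix_trimkey(0xabc, 15) == 0xabc	(no-op at the deepest level)
 */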

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

	return ((struct vm_radix_node *)rtree->rt_root);
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

	rtree->rt_root = (uintptr_t)rnode;
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}

/*
 * Returns the level where two keys differ.
 * The two keys must not be equal.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = 0; clev <= VM_RADIX_LIMIT; clev++)
		if (vm_radix_slot(index1, clev))
			return (clev);
	panic("%s: cannot reach this point", __func__);
	return (0);
}
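
/*
 * Illustrative examples (assuming __LP64__): the returned level is that of
 * the most significant nibble in which the keys disagree:
 *
 *	vm_radix_keydiff(0x1a05, 0x1b05) == 13	(0x1a05 ^ 0x1b05 == 0x100)
 *	vm_radix_keydiff(0xa05, 0xa0b)   == 15	(0xa05 ^ 0xa0b == 0xe)
 */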

/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev > 0) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev - 1);
		return (idx != rnode->rn_owner);
	}
	return (FALSE);
}
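
/*
 * Illustrative example (assuming __LP64__): a node with rn_clev == 13 and
 * rn_owner == 0x1000 covers exactly the keys whose bits above level 13
 * match the owner, so:
 *
 *	vm_radix_keybarr(node, 0x1fff) == FALSE	(trimmed key is 0x1000)
 *	vm_radix_keybarr(node, 0x2a05) == TRUE	(trimmed key is 0x2000)
 */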

/*
 * Adjusts the idx key to the next bigger value at the first available
 * level, based on a valid initial level and a map of available levels.
 * Returns a value greater than 0 to signal that no valid levels are
 * available.
 */
static __inline int
vm_radix_addlev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
	vm_pindex_t wrapidx;

	for (; levels[ilev] == FALSE ||
	    vm_radix_slot(*idx, ilev) == (VM_RADIX_COUNT - 1); ilev--)
		if (ilev == 0)
			break;
	KASSERT(ilev > 0 || levels[0],
	    ("%s: levels back-scanning problem", __func__));
	if (ilev == 0 && vm_radix_slot(*idx, ilev) == (VM_RADIX_COUNT - 1))
		return (1);
	wrapidx = *idx;
	*idx = vm_radix_trimkey(*idx, ilev);
	*idx += VM_RADIX_UNITLEVEL(ilev);
	return (*idx < wrapidx);
}

/*
 * Adjusts the idx key to the next smaller value at the first available
 * level, based on a valid initial level and a map of available levels.
 * Returns a value greater than 0 to signal that no valid levels are
 * available.
 */
static __inline int
vm_radix_declev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
	vm_pindex_t wrapidx;

	for (; levels[ilev] == FALSE ||
	    vm_radix_slot(*idx, ilev) == 0; ilev--)
		if (ilev == 0)
			break;
	KASSERT(ilev > 0 || levels[0],
	    ("%s: levels back-scanning problem", __func__));
	if (ilev == 0 && vm_radix_slot(*idx, ilev) == 0)
		return (1);
	wrapidx = *idx;
	*idx = vm_radix_trimkey(*idx, ilev);
	*idx |= VM_RADIX_UNITLEVEL(ilev) - 1;
	*idx -= VM_RADIX_UNITLEVEL(ilev);
	return (*idx > wrapidx);
}
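
/*
 * Illustrative trace (assuming __LP64__): with levels[13] == TRUE,
 * levels[14] == FALSE and *idx == 0x1aff, vm_radix_addlev(&idx, levels, 15)
 * skips level 15 (slot 0xf is the last one) and level 14 (unavailable),
 * stops at level 13 and advances the key:
 *
 *	idx = vm_radix_trimkey(0x1aff, 13) + VM_RADIX_UNITLEVEL(13)
 *	    = 0x1a00 + 0x100 = 0x1b00
 *
 * vm_radix_declev() is symmetric: from 0x1b00 it would produce 0x1aff,
 * the biggest key in the preceding subtree at level 13.
 */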

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	for (slot = 0; rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		if (!vm_radix_isleaf(rnode->rn_child[slot]))
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_child[slot] = NULL;
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}

#ifdef INVARIANTS
/*
 * Radix node zone destructor.
 */
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
	struct vm_radix_node *rnode;
	int slot;

	rnode = mem;
	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++)
		KASSERT(rnode->rn_child[slot] == NULL,
		    ("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif

/*
 * Radix node zone initializer.
 */
static int
vm_radix_node_zone_init(void *mem, int size __unused, int flags __unused)
{
	struct vm_radix_node *rnode;

	rnode = mem;
	memset(rnode->rn_child, 0, sizeof(rnode->rn_child));
	return (0);
}

/*
 * Pre-allocate intermediate nodes from the UMA slab zone.
 */
static void
vm_radix_prealloc(void *arg __unused)
{

	if (!uma_zone_reserve_kva(vm_radix_node_zone, cnt.v_page_count))
		panic("%s: unable to create new zone", __func__);
	uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
}
SYSINIT(vm_radix_prealloc, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_prealloc,
    NULL);

/*
 * Initialize the UMA slab zone.
 * Until vm_radix_prealloc() is called, the zone will be served by the
 * UMA boot-time pre-allocated pool of pages.
 */
void
vm_radix_init(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
	    vm_radix_node_zone_dtor,
#else
	    NULL,
#endif
	    vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM |
	    UMA_ZONE_NOFREE);
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
void
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	struct vm_radix_node *parent, *rnode, *tmp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		rnode = vm_radix_node_get(0, 1, 0);
		vm_radix_setroot(rtree, rnode);
		vm_radix_addpage(rnode, index, 0, page);
		return;
	}
	do {
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev - 1), 2, clev);
			rnode->rn_child[slot] = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return;
		}
		if (rnode->rn_child[slot] == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return;
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	} while (!vm_radix_keybarr(rnode, index));

	/*
	 * A new node is needed because the right insertion level was
	 * reached.  Set up the new intermediate node and add the two
	 * children: the new page and the old edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev - 1), 2,
	    clev);
	parent->rn_child[slot] = tmp;
	vm_radix_addpage(tmp, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp->rn_child[slot] = rnode;
}
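
/*
 * Illustrative example (assuming __LP64__, with pindexes chosen for
 * clarity): inserting pages with pindexes 0x1a05 and 0x1b05 into an
 * empty trie produces
 *
 *	root (clev 0)
 *	    slot 0 -> node (owner 0x1000, clev 13)
 *	        slot 0xa -> leaf for 0x1a05
 *	        slot 0xb -> leaf for 0x1b05
 *
 * because the two keys first differ at level 13; all intermediate
 * levels are skipped by path compression.
 */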

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		if (vm_radix_keybarr(rnode, index))
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = rnode->rn_child[slot];
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			else
				return (NULL);
		}
	}
	return (NULL);
}
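
/*
 * Illustrative trace (using the two-page example trie sketched after
 * vm_radix_insert()): looking up 0x1b05 descends through slot 0 of the
 * root to the clev 13 node, whose owner 0x1000 matches the key trimmed
 * at level 12, then finds the matching leaf at slot 0xb.  Looking up
 * 0x2b05 instead fails at the clev 13 node, where vm_radix_keybarr()
 * reports that the trimmed key 0x2000 differs from the owner.
 */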

/*
 * Look up the nearest entry at a position bigger than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
	int slot;
	uint16_t difflev;
	boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
	int loops = 0;
#endif

restart:
	KASSERT(++loops < 1000, ("%s: too many loops", __func__));
	for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
		maplevels[difflev] = FALSE;
	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		maplevels[rnode->rn_clev] = TRUE;

		/*
		 * If the keys differ before the current bisection node,
		 * the search key might roll back to the earliest
		 * available bisection node, or to the smaller value
		 * in the current domain (if the owner is bigger than the
		 * search key).
		 * The maplevels array records whether any node has been
		 * seen at a given level.  This aids the search for a
		 * valid bisection node.
		 */
		if (vm_radix_keybarr(rnode, index)) {
			difflev = vm_radix_keydiff(index, rnode->rn_owner);
			if (index > rnode->rn_owner) {
				if (vm_radix_addlev(&index, maplevels,
				    difflev) > 0)
					break;
			} else
				index = vm_radix_trimkey(rnode->rn_owner,
				    difflev);
			goto restart;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If no valid page or edge bigger than the search slot was
		 * found in the traversal, skip to the next higher-level key.
		 */
		if (rnode->rn_clev == 0 || vm_radix_addlev(&index, maplevels,
		    rnode->rn_clev - 1) > 0)
			break;
		goto restart;
descend:
		rnode = child;
	}
	return (NULL);
}
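
/*
 * Illustrative trace (same example trie): vm_radix_lookup_ge(rtree, 0x1a80)
 * descends to the clev 13 node, where slot 0xa holds a leaf with pindex
 * 0x1a05 < 0x1a80.  The forward scan then advances index to 0x1b00 and
 * returns the leaf at slot 0xb, whose pindex 0x1b05 >= 0x1b00.
 */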

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
	int slot;
	uint16_t difflev;
	boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
	int loops = 0;
#endif

restart:
	KASSERT(++loops < 1000, ("%s: too many loops", __func__));
	for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
		maplevels[difflev] = FALSE;
	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		maplevels[rnode->rn_clev] = TRUE;

		/*
		 * If the keys differ before the current bisection node,
		 * the search key might roll back to the earliest
		 * available bisection node, or to the higher value
		 * in the current domain (if the owner is smaller than the
		 * search key).
		 * The maplevels array records whether any node has been
		 * seen at a given level.  This aids the search for a
		 * valid bisection node.
		 */
		if (vm_radix_keybarr(rnode, index)) {
			difflev = vm_radix_keydiff(index, rnode->rn_owner);
			if (index > rnode->rn_owner) {
				index = vm_radix_trimkey(rnode->rn_owner,
				    difflev);
				index |= VM_RADIX_UNITLEVEL(difflev) - 1;
			} else if (vm_radix_declev(&index, maplevels,
			    difflev) > 0)
				break;
			goto restart;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If no valid page or edge smaller than the search slot was
		 * found in the traversal, skip back to the next
		 * higher-level key.
		 */
		if (rnode->rn_clev == 0 || vm_radix_declev(&index, maplevels,
		    rnode->rn_clev - 1) > 0)
			break;
		goto restart;
descend:
		rnode = child;
	}
	return (NULL);
}
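
/*
 * Illustrative trace (same example trie): vm_radix_lookup_le(rtree, 0x1b00)
 * finds a leaf with pindex 0x1b05 > 0x1b00 at slot 0xb of the clev 13
 * node.  The backward scan then lowers index to 0x1aff and returns the
 * leaf at slot 0xa, whose pindex 0x1a05 <= 0x1aff.
 */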

/*
 * Remove the specified index from the tree.
 * Panics if the key is not present.
 */
void
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	parent = NULL;
	rnode = vm_radix_getroot(rtree);
	for (;;) {
		if (rnode == NULL)
			panic("vm_radix_remove: impossible to locate the key");
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				panic("%s: invalid key found", __func__);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				break;
			if (parent == NULL) {
				if (rnode->rn_count == 0) {
					vm_radix_node_put(rnode);
					vm_radix_setroot(rtree, NULL);
				}
				break;
			}
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			slot = vm_radix_slot(index, parent->rn_clev);
			KASSERT(parent->rn_child[slot] == rnode,
			    ("%s: invalid child value", __func__));
			parent->rn_child[slot] = rnode->rn_child[i];
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			break;
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}
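
/*
 * Illustrative example (same example trie): removing 0x1a05 leaves the
 * clev 13 node with a single child, so the node is freed and the root's
 * slot 0 is pointed directly at the remaining leaf for 0x1b05, restoring
 * path compression.
 */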

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but recursion is bounded because the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	vm_radix_reclaim_allnodes_int(root);
}

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++)
		if (rnode->rn_child[i] != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)rnode->rn_child[i],
			    vm_radix_isleaf(rnode->rn_child[i]) ?
			    vm_radix_topage(rnode->rn_child[i]) : NULL,
			    rnode->rn_clev);
}
#endif /* DDB */