xref: /freebsd/sys/kern/subr_pctrie.c (revision 2e3507c25e42292b45a5482e116d278f5515d04d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 *
 * The implementation takes into account the following rationale:
 * - The node size should be as small as possible while still being large
 *   enough to keep the maximum trie depth low.  This balances the need to
 *   avoid wiring too much physical memory for nodes against the need to
 *   limit cache pollution during trie operations.
 * - Lookups are not expected to vastly outnumber inserts and removes, so
 *   optimizations that help one kind of operation at the expense of another
 *   must be evaluated carefully.
 * - On average, few nodes are expected to be fully populated, so level
 *   compression would likely just complicate things.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/pctrie.h>
#include <sys/proc.h>	/* smr.h depends on struct thread. */
#include <sys/smr.h>
#include <sys/smr_types.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	PCTRIE_MASK	(PCTRIE_COUNT - 1)
#define	PCTRIE_LIMIT	(howmany(sizeof(uint64_t) * NBBY, PCTRIE_WIDTH) - 1)

#if PCTRIE_WIDTH == 3
typedef uint8_t pn_popmap_t;
#elif PCTRIE_WIDTH == 4
typedef uint16_t pn_popmap_t;
#elif PCTRIE_WIDTH == 5
typedef uint32_t pn_popmap_t;
#else
#error Unsupported width
#endif
_Static_assert(sizeof(pn_popmap_t) <= sizeof(int),
    "pn_popmap_t too wide");

struct pctrie_node;
typedef SMR_POINTER(struct pctrie_node *) smr_pctnode_t;

struct pctrie_node {
	uint64_t	pn_owner;			/* Owner of record. */
	pn_popmap_t	pn_popmap;			/* Valid children. */
	uint8_t		pn_clev;			/* Level * WIDTH. */
	smr_pctnode_t	pn_child[PCTRIE_COUNT];		/* Child nodes. */
};

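/*
 * Access modes for trie loads and stores.  Informally (a descriptive note,
 * not upstream documentation): PCTRIE_SMR is for lock-free readers running
 * inside an SMR read section, PCTRIE_LOCKED is for callers serialized by the
 * tree lock, and PCTRIE_UNSERIALIZED is for code with exclusive access to the
 * node or tree (e.g. zone initialization or reclamation).  Stores are never
 * legal under PCTRIE_SMR; pctrie_node_store() panics in that case.
 */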
enum pctrie_access { PCTRIE_SMR, PCTRIE_LOCKED, PCTRIE_UNSERIALIZED };

static __inline void pctrie_node_store(smr_pctnode_t *p, void *val,
    enum pctrie_access access);

/*
 * Map an index to an array position among the children of a node.
 */
static __inline int
pctrie_slot(struct pctrie_node *node, uint64_t index)
{
	return ((index >> node->pn_clev) & PCTRIE_MASK);
}
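/*
 * Worked example (assuming PCTRIE_WIDTH == 4, so PCTRIE_MASK == 0xf): for a
 * node with pn_clev == 8, index 0x123456 selects slot
 * (0x123456 >> 8) & 0xf == 0x4.
 */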

/*
 * Returns true if index does not belong to the specified node.  Otherwise,
 * sets *slot to the child slot for index and returns false.
 */
static __inline bool
pctrie_keybarr(struct pctrie_node *node, uint64_t index, int *slot)
{
	index = (index - node->pn_owner) >> node->pn_clev;
	if (index >= PCTRIE_COUNT)
		return (true);
	*slot = index;
	return (false);
}
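/*
 * Worked example (assuming PCTRIE_WIDTH == 4): a node with pn_owner == 0x1200
 * and pn_clev == 4 covers indices [0x1200, 0x12ff].  For index 0x1234,
 * (0x1234 - 0x1200) >> 4 == 0x3 < PCTRIE_COUNT, so *slot is set to 3 and
 * false is returned.  For index 0x1300, (0x1300 - 0x1200) >> 4 == 0x10 ==
 * PCTRIE_COUNT, so true is returned: the index lies outside this node.
 */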

/*
 * Check that a node about to be freed has at most one valid child and that
 * each of its empty slots points to PCTRIE_NULL (INVARIANTS only).
 */
static __inline void
pctrie_node_put(struct pctrie_node *node)
{
#ifdef INVARIANTS
	int slot;

	KASSERT(powerof2(node->pn_popmap),
	    ("pctrie_node_put: node %p has too many children %04x", node,
	    node->pn_popmap));
	for (slot = 0; slot < PCTRIE_COUNT; slot++) {
		if ((node->pn_popmap & (1 << slot)) != 0)
			continue;
		KASSERT(smr_unserialized_load(&node->pn_child[slot], true) ==
		    PCTRIE_NULL,
		    ("pctrie_node_put: node %p has a child", node));
	}
#endif
}

/*
 * Fetch a node pointer from a slot.
 */
static __inline struct pctrie_node *
pctrie_node_load(smr_pctnode_t *p, smr_t smr, enum pctrie_access access)
{
	switch (access) {
	case PCTRIE_UNSERIALIZED:
		return (smr_unserialized_load(p, true));
	case PCTRIE_LOCKED:
		return (smr_serialized_load(p, true));
	case PCTRIE_SMR:
		return (smr_entered_load(p, smr));
	}
	__assert_unreachable();
}

static __inline void
pctrie_node_store(smr_pctnode_t *p, void *v, enum pctrie_access access)
{
	switch (access) {
	case PCTRIE_UNSERIALIZED:
		smr_unserialized_store(p, v, true);
		break;
	case PCTRIE_LOCKED:
		smr_serialized_store(p, v, true);
		break;
	case PCTRIE_SMR:
		panic("%s: Not supported in SMR section.", __func__);
		break;
	default:
		__assert_unreachable();
		break;
	}
}

/*
 * Get the root node for a tree.
 */
static __inline struct pctrie_node *
pctrie_root_load(struct pctrie *ptree, smr_t smr, enum pctrie_access access)
{
	return (pctrie_node_load((smr_pctnode_t *)&ptree->pt_root, smr, access));
}

/*
 * Set the root node for a tree.
 */
static __inline void
pctrie_root_store(struct pctrie *ptree, struct pctrie_node *node,
    enum pctrie_access access)
{
	pctrie_node_store((smr_pctnode_t *)&ptree->pt_root, node, access);
}

/*
 * Returns TRUE if the specified node is a leaf and FALSE otherwise.
 */
static __inline bool
pctrie_isleaf(struct pctrie_node *node)
{

	return (((uintptr_t)node & PCTRIE_ISLEAF) != 0);
}

/*
 * Returns val with leaf bit set.
 */
static __inline void *
pctrie_toleaf(uint64_t *val)
{
	return ((void *)((uintptr_t)val | PCTRIE_ISLEAF));
}

/*
 * Returns the associated val extracted from node.
 */
static __inline uint64_t *
pctrie_toval(struct pctrie_node *node)
{

	return ((uint64_t *)((uintptr_t)node & ~PCTRIE_FLAGS));
}
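/*
 * Descriptive note: leaves are not separately allocated nodes.  A leaf is the
 * caller's uint64_t value pointer itself, tagged in its low bits with
 * PCTRIE_ISLEAF (the tag bits are defined in sys/pctrie.h); pctrie_toval()
 * strips PCTRIE_FLAGS to recover the original pointer.
 */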

/*
 * Make 'child' a child of 'node'.
 */
static __inline void
pctrie_addnode(struct pctrie_node *node, uint64_t index,
    struct pctrie_node *child, enum pctrie_access access)
{
	int slot;

	slot = pctrie_slot(node, index);
	pctrie_node_store(&node->pn_child[slot], child, access);
	node->pn_popmap ^= 1 << slot;
	KASSERT((node->pn_popmap & (1 << slot)) != 0,
	    ("%s: bad popmap slot %d in node %p", __func__, slot, node));
}
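/*
 * Descriptive note: pn_popmap has one bit per child slot.  The XOR above,
 * together with the KASSERT, doubles as a cheap sanity check: adding a child
 * to a slot whose popmap bit is already set would clear the bit and trip the
 * assertion.
 */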

/*
 * pctrie node zone initializer.
 */
int
pctrie_zone_init(void *mem, int size __unused, int flags __unused)
{
	struct pctrie_node *node;

	node = mem;
	node->pn_popmap = 0;
	for (int i = 0; i < nitems(node->pn_child); i++)
		pctrie_node_store(&node->pn_child[i], PCTRIE_NULL,
		    PCTRIE_UNSERIALIZED);
	return (0);
}

size_t
pctrie_node_size(void)
{

	return (sizeof(struct pctrie_node));
}
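/*
 * Minimal sketch (illustrative only, not part of this file's API): a consumer
 * is expected to back node allocations with its own UMA zone, sized by
 * pctrie_node_size() and initialized by pctrie_zone_init(), along the lines
 * of:
 *
 *	zone = uma_zcreate("example pctrie node", pctrie_node_size(),
 *	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
 *
 * The zone name, alignment, and flags here are placeholders; actual consumers
 * choose their own.
 */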

/*
 * Looks for where to insert the key-value pair into the trie.  Completes the
 * insertion if it replaces a null leaf; otherwise, returns the insertion
 * location to the caller.  Panics if the key already exists.
 */
void *
pctrie_insert_lookup(struct pctrie *ptree, uint64_t *val)
{
	uint64_t index;
	struct pctrie_node *node, *parent;
	int slot;

	index = *val;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	parent = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if (node == PCTRIE_NULL) {
				if (parent == NULL)
					ptree->pt_root = pctrie_toleaf(val);
				else
					pctrie_addnode(parent, index,
					    pctrie_toleaf(val), PCTRIE_LOCKED);
				return (NULL);
			}
			if (*pctrie_toval(node) == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			break;
		}
		if (pctrie_keybarr(node, index, &slot))
			break;
		parent = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * 'node' must be replaced in the tree with a new branch node, with
	 * children 'node' and 'val'.  Return the place that points to 'node'
	 * now, and will point to the new branching node later.
	 */
	return ((parent != NULL) ? &parent->pn_child[slot]:
	    (smr_pctnode_t *)&ptree->pt_root);
}
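/*
 * Sketch of the expected two-step caller protocol (illustrative; the node
 * zone is the caller's own and is only a placeholder here):
 *
 *	parentp = pctrie_insert_lookup(ptree, val);
 *	if (parentp != NULL) {
 *		parent = uma_zalloc_smr(caller_zone, M_WAITOK);
 *		pctrie_insert_node(parentp, parent, val);
 *	}
 */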

/*
 * Uses the new node to insert the key-value pair into the trie at the given
 * location.
 */
void
pctrie_insert_node(void *parentp, struct pctrie_node *parent, uint64_t *val)
{
	struct pctrie_node *node;
	uint64_t index, newind;

	/*
	 * Clear the last child pointer of the newly allocated parent.  We want
	 * to clear it after the final SMR read section has exited so lookup
	 * cannot return false negatives.  It is done here because it will be
	 * cache-cold in the dtor callback.
	 */
	if (parent->pn_popmap != 0) {
		pctrie_node_store(&parent->pn_child[ffs(parent->pn_popmap) - 1],
		    PCTRIE_NULL, PCTRIE_UNSERIALIZED);
		parent->pn_popmap = 0;
	}

	/*
	 * Recover the values of the two children of the new parent node.  If
	 * 'node' is not a leaf, this stores into 'newind' the 'owner' field,
	 * which must be first in the node.
	 */
	index = *val;
	node = pctrie_node_load(parentp, NULL, PCTRIE_UNSERIALIZED);
	newind = *pctrie_toval(node);

	/*
	 * From the highest-order bit where the indexes differ,
	 * compute the highest level in the trie where they differ.  Then,
	 * compute the least index of this subtrie.
	 */
	_Static_assert(sizeof(long long) >= sizeof(uint64_t),
	    "uint64 too wide");
	_Static_assert(sizeof(uint64_t) * NBBY <=
	    (1 << (sizeof(parent->pn_clev) * NBBY)), "pn_clev too narrow");
	parent->pn_clev = rounddown(flsll(index ^ newind) - 1, PCTRIE_WIDTH);
	parent->pn_owner = PCTRIE_COUNT;
	parent->pn_owner = index & -(parent->pn_owner << parent->pn_clev);
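	/*
	 * Worked example (assuming PCTRIE_WIDTH == 4, PCTRIE_COUNT == 16):
	 * for index == 0x1234 and newind == 0x1240, index ^ newind == 0x74,
	 * flsll(0x74) - 1 == 6, so pn_clev = rounddown(6, 4) == 4; then
	 * pn_owner = 0x1234 & -(16 << 4) == 0x1200, and the two children land
	 * in slots 3 and 4 of the new parent.
	 */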

	/* These writes are not yet visible due to ordering. */
	pctrie_addnode(parent, index, pctrie_toleaf(val), PCTRIE_UNSERIALIZED);
	pctrie_addnode(parent, newind, node, PCTRIE_UNSERIALIZED);
	/* Synchronize to make the above visible. */
	pctrie_node_store(parentp, parent, PCTRIE_LOCKED);
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
static __always_inline uint64_t *
_pctrie_lookup(struct pctrie *ptree, uint64_t index, smr_t smr,
    enum pctrie_access access)
{
	struct pctrie_node *node;
	uint64_t *m;
	int slot;

	node = pctrie_root_load(ptree, smr, access);
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m == index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot))
			break;
		node = pctrie_node_load(&node->pn_child[slot], smr, access);
	}
	return (NULL);
}
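/*
 * Descriptive note: _pctrie_lookup() is __always_inline and is called with a
 * constant 'access' argument, so each wrapper below compiles into a
 * specialized copy with the access-mode switches folded away.
 */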

/*
 * Returns the value stored at the index, assuming access is externally
 * synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
uint64_t *
pctrie_lookup(struct pctrie *ptree, uint64_t index)
{
	return (_pctrie_lookup(ptree, index, NULL, PCTRIE_LOCKED));
}

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
uint64_t *
pctrie_lookup_unlocked(struct pctrie *ptree, uint64_t index, smr_t smr)
{
	uint64_t *res;

	smr_enter(smr);
	res = _pctrie_lookup(ptree, index, smr, PCTRIE_SMR);
	smr_exit(smr);
	return (res);
}

/*
 * Returns the value with the least index that is greater than or equal to the
 * specified index, or NULL if there are no such values.
 *
 * Requires that access be externally synchronized by a lock.
 */
uint64_t *
pctrie_lookup_ge(struct pctrie *ptree, uint64_t index)
{
	struct pctrie_node *node, *succ;
	uint64_t *m;
	int slot;

	/*
	 * Descend the trie as if performing an ordinary lookup for the
	 * specified value.  However, unlike an ordinary lookup, as we descend
	 * the trie, we use "succ" to remember the last branching-off point,
	 * that is, the interior node under which the least value that is both
	 * outside our current path down the trie and greater than the specified
	 * index resides.  (The node's popmap makes it fast and easy to
	 * recognize a branching-off point.)  If our ordinary lookup fails to
	 * yield a value that is greater than or equal to the specified index,
	 * then we will exit this loop and perform a lookup starting from
	 * "succ".  If "succ" is not NULL, then that lookup is guaranteed to
	 * succeed.
	 */
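	/*
	 * Worked example (assuming PCTRIE_WIDTH == 4) with keys
	 * {0x10, 0x12, 0x30}: the root node has pn_owner == 0, pn_clev == 4,
	 * a subnode for {0x10, 0x12} in slot 1, and the leaf 0x30 in slot 3.
	 * For pctrie_lookup_ge(ptree, 0x13), the descent records the root as
	 * "succ" (a slot greater than 1 is populated), reaches PCTRIE_NULL in
	 * the subnode, then restarts at the root's slot 3 and returns the
	 * value 0x30.
	 */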
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	succ = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m >= index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot)) {
			/*
			 * If all values in this subtree are > index, then the
			 * least value in this subtree is the answer.
			 */
			if (node->pn_owner > index)
				succ = node;
			break;
		}

		/*
		 * Just in case the next search step leads to a subtree of all
		 * values < index, check popmap to see if a next bigger step,
		 * to a subtree of all values > index, is available.  If so,
		 * remember to restart the search here.
		 */
		if ((node->pn_popmap >> slot) > 1)
			succ = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * Restart the search from the last place visited in the subtree that
	 * included some values > index, if there was such a place.
	 */
	if (succ == NULL)
		return (NULL);
	if (succ != node) {
		/*
		 * Take a step to the next bigger sibling of the node chosen
		 * last time.  In that subtree, all values > index.
		 */
		slot = pctrie_slot(succ, index) + 1;
		KASSERT((succ->pn_popmap >> slot) != 0,
		    ("%s: no popmap siblings past slot %d in node %p",
		    __func__, slot, succ));
		slot += ffs(succ->pn_popmap >> slot) - 1;
		succ = pctrie_node_load(&succ->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * Find the value in the subtree rooted at "succ" with the least index.
	 */
	while (!pctrie_isleaf(succ)) {
		KASSERT(succ->pn_popmap != 0,
		    ("%s: no popmap children in node %p",  __func__, succ));
		slot = ffs(succ->pn_popmap) - 1;
		succ = pctrie_node_load(&succ->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	return (pctrie_toval(succ));
}

/*
 * Returns the value with the greatest index that is less than or equal to the
 * specified index, or NULL if there are no such values.
 *
 * Requires that access be externally synchronized by a lock.
 */
uint64_t *
pctrie_lookup_le(struct pctrie *ptree, uint64_t index)
{
	struct pctrie_node *node, *pred;
	uint64_t *m;
	int slot;

	/*
	 * Mirror the implementation of pctrie_lookup_ge, described above.
	 */
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	pred = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m <= index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot)) {
			if (node->pn_owner < index)
				pred = node;
			break;
		}
		if ((node->pn_popmap & ((1 << slot) - 1)) != 0)
			pred = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	if (pred == NULL)
		return (NULL);
	if (pred != node) {
		slot = pctrie_slot(pred, index);
		KASSERT((pred->pn_popmap & ((1 << slot) - 1)) != 0,
		    ("%s: no popmap siblings before slot %d in node %p",
		    __func__, slot, pred));
		slot = fls(pred->pn_popmap & ((1 << slot) - 1)) - 1;
		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	while (!pctrie_isleaf(pred)) {
		KASSERT(pred->pn_popmap != 0,
		    ("%s: no popmap children in node %p",  __func__, pred));
		slot = fls(pred->pn_popmap) - 1;
		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	return (pctrie_toval(pred));
}

/*
 * Remove the specified index from the tree, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
uint64_t *
pctrie_remove_lookup(struct pctrie *ptree, uint64_t index,
    struct pctrie_node **freenode)
{
	struct pctrie_node *child, *node, *parent;
	uint64_t *m;
	int slot;

	*freenode = node = NULL;
	child = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	for (;;) {
		if (pctrie_isleaf(child))
			break;
		parent = node;
		node = child;
		slot = pctrie_slot(node, index);
		child = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	if ((m = pctrie_toval(child)) == NULL || *m != index)
		return (NULL);
	if (node == NULL) {
		pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_LOCKED);
		return (m);
	}
	KASSERT((node->pn_popmap & (1 << slot)) != 0,
	    ("%s: bad popmap slot %d in node %p",
	    __func__, slot, node));
	node->pn_popmap ^= 1 << slot;
	pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL, PCTRIE_LOCKED);
	if (!powerof2(node->pn_popmap))
		return (m);
	KASSERT(node->pn_popmap != 0, ("%s: bad popmap all zeroes", __func__));
	slot = ffs(node->pn_popmap) - 1;
	child = pctrie_node_load(&node->pn_child[slot], NULL, PCTRIE_LOCKED);
	KASSERT(child != PCTRIE_NULL,
	    ("%s: bad popmap slot %d in node %p", __func__, slot, node));
	if (parent == NULL)
		pctrie_root_store(ptree, child, PCTRIE_LOCKED);
	else {
		slot = pctrie_slot(parent, index);
		KASSERT(node ==
		    pctrie_node_load(&parent->pn_child[slot], NULL,
		    PCTRIE_LOCKED), ("%s: invalid child value", __func__));
		pctrie_node_store(&parent->pn_child[slot], child,
		    PCTRIE_LOCKED);
	}
	/*
	 * The child is still valid and we can not zero the
	 * pointer until all SMR references are gone.
	 */
	pctrie_node_put(node);
	*freenode = node;
	return (m);
}
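/*
 * Descriptive note: when '*freenode' is set, the removed node may still be
 * visible to concurrent SMR readers, so the caller is expected to defer its
 * reuse, e.g. by freeing it through an SMR-enabled zone.  The exact release
 * mechanism belongs to the consumer, not to this file.
 */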

/*
 * Prune all the leaf children of 'node' up to its first non-leaf child, save
 * 'parent' in child slot zero of 'node' so that it can be recovered later,
 * then descend, making 'node' the new 'parent' and that non-leaf child the
 * new 'node'.  Repeat until some node has been stripped of all its children;
 * return that node for freeing, leaving its parent in '*pnode'.
 */
static struct pctrie_node *
pctrie_reclaim_prune(struct pctrie_node **pnode,
    struct pctrie_node *parent)
{
	struct pctrie_node *child, *node;
	int slot;

	node = *pnode;
	while (node->pn_popmap != 0) {
		slot = ffs(node->pn_popmap) - 1;
		node->pn_popmap ^= 1 << slot;
		child = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_UNSERIALIZED);
		pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL,
		    PCTRIE_UNSERIALIZED);
		if (pctrie_isleaf(child))
			continue;
		/* Save the parent in slot 0 and descend one level. */
		pctrie_node_store(&node->pn_child[0], parent,
		    PCTRIE_UNSERIALIZED);
		parent = node;
		node = child;
	}
	*pnode = parent;
	return (node);
}

/*
 * Recover the node's parent from its first child and continue pruning.
 */
struct pctrie_node *
pctrie_reclaim_resume(struct pctrie_node **pnode)
{
	struct pctrie_node *parent, *node;

	node = *pnode;
	if (node == NULL)
		return (NULL);
	/* Climb one level up the trie. */
	parent = pctrie_node_load(&node->pn_child[0], NULL,
	    PCTRIE_UNSERIALIZED);
	pctrie_node_store(&node->pn_child[0], PCTRIE_NULL, PCTRIE_UNSERIALIZED);
	return (pctrie_reclaim_prune(pnode, parent));
}

/*
 * Find the trie root, and start pruning with a NULL parent.
 */
struct pctrie_node *
pctrie_reclaim_begin(struct pctrie_node **pnode,
    struct pctrie *ptree)
{
	struct pctrie_node *node;

	node = pctrie_root_load(ptree, NULL, PCTRIE_UNSERIALIZED);
	pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_UNSERIALIZED);
	if (pctrie_isleaf(node))
		return (NULL);
	*pnode = node;
	return (pctrie_reclaim_prune(pnode, NULL));
}
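/*
 * Sketch of the expected reclaim loop (illustrative; "free_node" stands in
 * for however the caller returns nodes to its zone):
 *
 *	for (freed = pctrie_reclaim_begin(&node, ptree); freed != NULL;
 *	    freed = pctrie_reclaim_resume(&node))
 *		free_node(freed);
 */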

/*
 * Replace an existing value in the trie with another one.
 * Panics if there is not an old value in the trie at the new value's index.
 */
uint64_t *
pctrie_replace(struct pctrie *ptree, uint64_t *newval)
{
	struct pctrie_node *leaf, *parent, *node;
	uint64_t *m;
	uint64_t index;
	int slot;

	leaf = pctrie_toleaf(newval);
	index = *newval;
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	parent = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m == index) {
				if (parent == NULL)
					ptree->pt_root = leaf;
				else
					pctrie_node_store(
					    &parent->pn_child[slot], leaf,
					    PCTRIE_LOCKED);
				return (m);
			}
			break;
		}
		if (pctrie_keybarr(node, index, &slot))
			break;
		parent = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	panic("%s: original replacing value not found", __func__);
}

#ifdef DDB
/*
 * Show details about the given node.
 */
DB_SHOW_COMMAND(pctrienode, db_show_pctrienode)
{
	struct pctrie_node *node, *tmp;
	int slot;
	pn_popmap_t popmap;

	if (!have_addr)
		return;
	node = (struct pctrie_node *)addr;
	db_printf("node %p, owner %jx, children popmap %04x, level %u:\n",
	    (void *)node, (uintmax_t)node->pn_owner, node->pn_popmap,
	    node->pn_clev / PCTRIE_WIDTH);
	for (popmap = node->pn_popmap; popmap != 0; popmap ^= 1 << slot) {
		slot = ffs(popmap) - 1;
		tmp = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_UNSERIALIZED);
		db_printf("slot: %d, val: %p, value: %p, clev: %d\n",
		    slot, (void *)tmp,
		    pctrie_isleaf(tmp) ? pctrie_toval(tmp) : NULL,
		    node->pn_clev / PCTRIE_WIDTH);
	}
}
#endif /* DDB */