/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 *
 * The implementation takes into account the following rationale:
 * - Size of the nodes should be as small as possible but still big enough
 *   to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/pctrie.h>
#include <sys/proc.h>	/* smr.h depends on struct thread. */
#include <sys/smr.h>
#include <sys/smr_types.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	PCTRIE_MASK	(PCTRIE_COUNT - 1)
#define	PCTRIE_LIMIT	(howmany(sizeof(uint64_t) * NBBY, PCTRIE_WIDTH) - 1)

#if PCTRIE_WIDTH == 3
typedef uint8_t pn_popmap_t;
#elif PCTRIE_WIDTH == 4
typedef uint16_t pn_popmap_t;
#elif PCTRIE_WIDTH == 5
typedef uint32_t pn_popmap_t;
#else
#error Unsupported width
#endif
_Static_assert(sizeof(pn_popmap_t) <= sizeof(int),
    "pn_popmap_t too wide");

/* Set of all flag bits stored in node pointers. */
#define	PCTRIE_FLAGS	(PCTRIE_ISLEAF)
#define	PCTRIE_PAD	PCTRIE_FLAGS

struct pctrie_node;
typedef SMR_POINTER(struct pctrie_node *) smr_pctnode_t;

struct pctrie_node {
	uint64_t	pn_owner;			/* Owner of record. */
	pn_popmap_t	pn_popmap;			/* Valid children. */
	uint8_t		pn_clev;			/* Level * WIDTH. */
	smr_pctnode_t	pn_child[PCTRIE_COUNT];		/* Child nodes. */
};
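
/*
 * Editorial illustration (not part of the original source), assuming the
 * common PCTRIE_WIDTH == 4 configuration: a node with pn_clev == 4 and
 * pn_owner == 0x100 covers the keys 0x100 through 0x1ff.  For the key 0x1a7,
 *
 *	slot = (0x1a7 >> pn_clev) & PCTRIE_MASK = 0xa,
 *
 * so that key is reached through pn_child[10], and bit 10 of pn_popmap is
 * set if and only if the slot holds something other than PCTRIE_NULL.
 */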

enum pctrie_access { PCTRIE_SMR, PCTRIE_LOCKED, PCTRIE_UNSERIALIZED };

static __inline void pctrie_node_store(smr_pctnode_t *p, void *val,
    enum pctrie_access access);

/*
 * Map index to an array position for the children of node.
 */
static __inline int
pctrie_slot(struct pctrie_node *node, uint64_t index)
{
	return ((index >> node->pn_clev) & PCTRIE_MASK);
}

/*
 * Returns true if index does not belong to the specified node.  Otherwise,
 * sets slot value and returns false.
 */
static __inline bool
pctrie_keybarr(struct pctrie_node *node, uint64_t index, int *slot)
{
	index = (index - node->pn_owner) >> node->pn_clev;
	if (index >= PCTRIE_COUNT)
		return (true);
	*slot = index;
	return (false);
}

/*
 * Allocate a node.  Pre-allocation should ensure that the request
 * will always be satisfied.
 */
static struct pctrie_node *
pctrie_node_get(struct pctrie *ptree, pctrie_alloc_t allocfn, uint64_t index,
    uint64_t newind)
{
	struct pctrie_node *node;

	node = allocfn(ptree);
	if (node == NULL)
		return (NULL);

	/*
	 * We want to clear the last child pointer after the final section
	 * has exited so lookup cannot return false negatives.  It is done
	 * here because it will be cache-cold in the dtor callback.
	 */
	if (node->pn_popmap != 0) {
		pctrie_node_store(&node->pn_child[ffs(node->pn_popmap) - 1],
		    PCTRIE_NULL, PCTRIE_UNSERIALIZED);
		node->pn_popmap = 0;
	}

	/*
	 * From the highest-order bit where the indexes differ,
	 * compute the highest level in the trie where they differ.  Then,
	 * compute the least index of this subtrie.
	 */
	KASSERT(index != newind, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index));
	_Static_assert(sizeof(long long) >= sizeof(uint64_t),
	    "uint64 too wide");
	_Static_assert(sizeof(uint64_t) * NBBY <=
	    (1 << (sizeof(node->pn_clev) * NBBY)), "pn_clev too narrow");
	node->pn_clev = rounddown(flsll(index ^ newind) - 1, PCTRIE_WIDTH);
	node->pn_owner = PCTRIE_COUNT;
	node->pn_owner = index & -(node->pn_owner << node->pn_clev);
	return (node);
}
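
/*
 * Editorial illustration (not part of the original source), again assuming
 * PCTRIE_WIDTH == 4: for index == 0x1234 and newind == 0x1634 the keys first
 * differ at bit 10 (index ^ newind == 0x400, so flsll() returns 11), and
 * pctrie_node_get() therefore picks
 *
 *	pn_clev  = rounddown(11 - 1, PCTRIE_WIDTH) = 8
 *	pn_owner = 0x1234 & -(PCTRIE_COUNT << 8)   = 0x1000
 *
 * so the two keys land in the distinct slots 0x2 and 0x6 of the new node.
 */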

/*
 * Free radix node.
 */
static __inline void
pctrie_node_put(struct pctrie *ptree, struct pctrie_node *node,
    pctrie_free_t freefn)
{
#ifdef INVARIANTS
	int slot;

	KASSERT(powerof2(node->pn_popmap),
	    ("pctrie_node_put: node %p has too many children %04x", node,
	    node->pn_popmap));
	for (slot = 0; slot < PCTRIE_COUNT; slot++) {
		if ((node->pn_popmap & (1 << slot)) != 0)
			continue;
		KASSERT(smr_unserialized_load(&node->pn_child[slot], true) ==
		    PCTRIE_NULL,
		    ("pctrie_node_put: node %p has a child", node));
	}
#endif
	freefn(ptree, node);
}

/*
 * Fetch a node pointer from a slot.
 */
static __inline struct pctrie_node *
pctrie_node_load(smr_pctnode_t *p, smr_t smr, enum pctrie_access access)
{
	switch (access) {
	case PCTRIE_UNSERIALIZED:
		return (smr_unserialized_load(p, true));
	case PCTRIE_LOCKED:
		return (smr_serialized_load(p, true));
	case PCTRIE_SMR:
		return (smr_entered_load(p, smr));
	}
	__assert_unreachable();
}

static __inline void
pctrie_node_store(smr_pctnode_t *p, void *v, enum pctrie_access access)
{
	switch (access) {
	case PCTRIE_UNSERIALIZED:
		smr_unserialized_store(p, v, true);
		break;
	case PCTRIE_LOCKED:
		smr_serialized_store(p, v, true);
		break;
	case PCTRIE_SMR:
		panic("%s: Not supported in SMR section.", __func__);
		break;
	default:
		__assert_unreachable();
		break;
	}
}

/*
 * Get the root node for a tree.
 */
static __inline struct pctrie_node *
pctrie_root_load(struct pctrie *ptree, smr_t smr, enum pctrie_access access)
{
	return (pctrie_node_load((smr_pctnode_t *)&ptree->pt_root, smr,
	    access));
}

/*
 * Set the root node for a tree.
 */
static __inline void
pctrie_root_store(struct pctrie *ptree, struct pctrie_node *node,
    enum pctrie_access access)
{
	pctrie_node_store((smr_pctnode_t *)&ptree->pt_root, node, access);
}

/*
 * Returns TRUE if the specified node is a leaf and FALSE otherwise.
 */
static __inline bool
pctrie_isleaf(struct pctrie_node *node)
{

	return (((uintptr_t)node & PCTRIE_ISLEAF) != 0);
}

/*
 * Returns val with leaf bit set.
 */
static __inline void *
pctrie_toleaf(uint64_t *val)
{
	return ((void *)((uintptr_t)val | PCTRIE_ISLEAF));
}

/*
 * Returns the associated val extracted from node.
 */
static __inline uint64_t *
pctrie_toval(struct pctrie_node *node)
{

	return ((uint64_t *)((uintptr_t)node & ~PCTRIE_FLAGS));
}
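
/*
 * Editorial note (not part of the original source): the leaf helpers above
 * rely on the alignment of uint64_t leaving the low pointer bit clear, so
 * with PCTRIE_ISLEAF as the low tag bit an aligned value pointer p
 * round-trips exactly:
 *
 *	pctrie_toval(pctrie_toleaf(p)) == p
 *
 * and pctrie_isleaf() is what distinguishes such tagged leaves from
 * pointers to real interior nodes.
 */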

/*
 * Make 'child' a child of 'node'.
 */
static __inline void
pctrie_addnode(struct pctrie_node *node, uint64_t index,
    struct pctrie_node *child, enum pctrie_access access)
{
	int slot;

	slot = pctrie_slot(node, index);
	pctrie_node_store(&node->pn_child[slot], child, access);
	node->pn_popmap ^= 1 << slot;
	KASSERT((node->pn_popmap & (1 << slot)) != 0,
	    ("%s: bad popmap slot %d in node %p", __func__, slot, node));
}

/*
 * Internal helper for pctrie_reclaim_allnodes().
 * This function is recursive.
 */
static void
pctrie_reclaim_allnodes_int(struct pctrie *ptree, struct pctrie_node *node,
    pctrie_free_t freefn)
{
	struct pctrie_node *child;
	int slot;

	while (node->pn_popmap != 0) {
		slot = ffs(node->pn_popmap) - 1;
		child = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_UNSERIALIZED);
		KASSERT(child != PCTRIE_NULL,
		    ("%s: bad popmap slot %d in node %p",
		    __func__, slot, node));
		if (!pctrie_isleaf(child))
			pctrie_reclaim_allnodes_int(ptree, child, freefn);
		node->pn_popmap ^= 1 << slot;
		pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL,
		    PCTRIE_UNSERIALIZED);
	}
	pctrie_node_put(ptree, node, freefn);
}

/*
 * pctrie node zone initializer.
 */
int
pctrie_zone_init(void *mem, int size __unused, int flags __unused)
{
	struct pctrie_node *node;

	node = mem;
	node->pn_popmap = 0;
	for (int i = 0; i < nitems(node->pn_child); i++)
		pctrie_node_store(&node->pn_child[i], PCTRIE_NULL,
		    PCTRIE_UNSERIALIZED);
	return (0);
}

size_t
pctrie_node_size(void)
{

	return (sizeof(struct pctrie_node));
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
pctrie_insert(struct pctrie *ptree, uint64_t *val, pctrie_alloc_t allocfn)
{
	uint64_t index, newind;
	struct pctrie_node *leaf, *node, *parent;
	smr_pctnode_t *parentp;
	int slot;

	index = *val;
	leaf = pctrie_toleaf(val);

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	parent = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if (node == PCTRIE_NULL) {
				if (parent == NULL)
					ptree->pt_root = leaf;
				else
					pctrie_addnode(parent, index, leaf,
					    PCTRIE_LOCKED);
				return (0);
			}
			newind = *pctrie_toval(node);
			if (newind == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			break;
		}
		if (pctrie_keybarr(node, index, &slot)) {
			newind = node->pn_owner;
			break;
		}
		parent = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * A new node is needed because the right insertion level is reached.
	 * Set up the new intermediate node and add the two children: the
	 * new object and the older edge or object.
	 */
	parentp = (parent != NULL) ? &parent->pn_child[slot] :
	    (smr_pctnode_t *)&ptree->pt_root;
	parent = pctrie_node_get(ptree, allocfn, index, newind);
	if (parent == NULL)
		return (ENOMEM);
	/* These writes are not yet visible due to ordering. */
	pctrie_addnode(parent, index, leaf, PCTRIE_UNSERIALIZED);
	pctrie_addnode(parent, newind, node, PCTRIE_UNSERIALIZED);
	/* Synchronize to make the above visible. */
	pctrie_node_store(parentp, parent, PCTRIE_LOCKED);
	return (0);
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
static __always_inline uint64_t *
_pctrie_lookup(struct pctrie *ptree, uint64_t index, smr_t smr,
    enum pctrie_access access)
{
	struct pctrie_node *node;
	uint64_t *m;
	int slot;

	node = pctrie_root_load(ptree, smr, access);
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m == index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot))
			break;
		node = pctrie_node_load(&node->pn_child[slot], smr, access);
	}
	return (NULL);
}

/*
 * Returns the value stored at the index, assuming access is externally
 * synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
uint64_t *
pctrie_lookup(struct pctrie *ptree, uint64_t index)
{
	return (_pctrie_lookup(ptree, index, NULL, PCTRIE_LOCKED));
}
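
/*
 * Editorial usage sketch (not part of the original source; the names below
 * are hypothetical): consumers normally do not call pctrie_insert() and
 * pctrie_lookup() directly, but generate typed wrappers with PCTRIE_DEFINE()
 * from <sys/pctrie.h>, roughly as follows, with the uint64_t key embedded in
 * the stored record and every call made under the consumer's tree lock:
 *
 *	struct mydata {
 *		uint64_t	md_key;
 *	};
 *	PCTRIE_DEFINE(MYTREE, mydata, md_key, mydata_alloc, mydata_free);
 *
 *	error = MYTREE_PCTRIE_INSERT(&tree, d);
 *	d = MYTREE_PCTRIE_LOOKUP(&tree, key);
 */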

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
uint64_t *
pctrie_lookup_unlocked(struct pctrie *ptree, uint64_t index, smr_t smr)
{
	uint64_t *res;

	smr_enter(smr);
	res = _pctrie_lookup(ptree, index, smr, PCTRIE_SMR);
	smr_exit(smr);
	return (res);
}

/*
 * Returns the value with the least index that is greater than or equal to the
 * specified index, or NULL if there are no such values.
 *
 * Requires that access be externally synchronized by a lock.
 */
uint64_t *
pctrie_lookup_ge(struct pctrie *ptree, uint64_t index)
{
	struct pctrie_node *node, *succ;
	uint64_t *m;
	int slot;

	/*
	 * Descend the trie as if performing an ordinary lookup for the
	 * specified value.  However, unlike an ordinary lookup, as we descend
	 * the trie, we use "succ" to remember the last branching-off point,
	 * that is, the interior node under which the least value that is both
	 * outside our current path down the trie and greater than the
	 * specified index resides.  (The node's popmap makes it fast and easy
	 * to recognize a branching-off point.)  If our ordinary lookup fails
	 * to yield a value that is greater than or equal to the specified
	 * index, then we will exit this loop and perform a lookup starting
	 * from "succ".  If "succ" is not NULL, then that lookup is guaranteed
	 * to succeed.
	 */
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	succ = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m >= index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot)) {
			/*
			 * If all values in this subtree are > index, then the
			 * least value in this subtree is the answer.
			 */
			if (node->pn_owner > index)
				succ = node;
			break;
		}

		/*
		 * Just in case the next search step leads to a subtree of all
		 * values < index, check popmap to see if a next bigger step,
		 * to a subtree of all values > index, is available.  If so,
		 * remember to restart the search here.
		 */
		if ((node->pn_popmap >> slot) > 1)
			succ = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * Restart the search from the last place visited in the subtree that
	 * included some values > index, if there was such a place.
	 */
	if (succ == NULL)
		return (NULL);
	if (succ != node) {
		/*
		 * Take a step to the next bigger sibling of the node chosen
		 * last time.  In that subtree, all values > index.
		 */
		slot = pctrie_slot(succ, index) + 1;
		KASSERT((succ->pn_popmap >> slot) != 0,
		    ("%s: no popmap siblings past slot %d in node %p",
		    __func__, slot, succ));
		slot += ffs(succ->pn_popmap >> slot) - 1;
		succ = pctrie_node_load(&succ->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * Find the value in the subtree rooted at "succ" with the least index.
	 */
	while (!pctrie_isleaf(succ)) {
		KASSERT(succ->pn_popmap != 0,
		    ("%s: no popmap children in node %p", __func__, succ));
		slot = ffs(succ->pn_popmap) - 1;
		succ = pctrie_node_load(&succ->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	return (pctrie_toval(succ));
}
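
/*
 * Editorial illustration (not part of the original source): with only the
 * values 0x4 and 0xa stored under a single root node, pctrie_lookup_ge(t, 5)
 * descends toward slot 5, finds PCTRIE_NULL there, restarts from the
 * remembered branching-off node "succ", steps forward to the next populated
 * slot (10), and returns the value 0xa.
 */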

/*
 * Returns the value with the greatest index that is less than or equal to the
 * specified index, or NULL if there are no such values.
 *
 * Requires that access be externally synchronized by a lock.
 */
uint64_t *
pctrie_lookup_le(struct pctrie *ptree, uint64_t index)
{
	struct pctrie_node *node, *pred;
	uint64_t *m;
	int slot;

	/*
	 * Mirror the implementation of pctrie_lookup_ge, described above.
	 */
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	pred = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m <= index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot)) {
			if (node->pn_owner < index)
				pred = node;
			break;
		}
		if ((node->pn_popmap & ((1 << slot) - 1)) != 0)
			pred = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	if (pred == NULL)
		return (NULL);
	if (pred != node) {
		slot = pctrie_slot(pred, index);
		KASSERT((pred->pn_popmap & ((1 << slot) - 1)) != 0,
		    ("%s: no popmap siblings before slot %d in node %p",
		    __func__, slot, pred));
		slot = fls(pred->pn_popmap & ((1 << slot) - 1)) - 1;
		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	while (!pctrie_isleaf(pred)) {
		KASSERT(pred->pn_popmap != 0,
		    ("%s: no popmap children in node %p", __func__, pred));
		slot = fls(pred->pn_popmap) - 1;
		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	return (pctrie_toval(pred));
}

/*
 * Remove the specified index from the tree.
 * Panics if the key is not present.
 */
void
pctrie_remove(struct pctrie *ptree, uint64_t index, pctrie_free_t freefn)
{
	struct pctrie_node *child, *node, *parent;
	uint64_t *m;
	int slot;

	node = NULL;
	child = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	for (;;) {
		if (pctrie_isleaf(child))
			break;
		parent = node;
		node = child;
		slot = pctrie_slot(node, index);
		child = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	if ((m = pctrie_toval(child)) == NULL || *m != index)
		panic("%s: key not found", __func__);
	if (node == NULL) {
		pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_LOCKED);
		return;
	}
	KASSERT((node->pn_popmap & (1 << slot)) != 0,
	    ("%s: bad popmap slot %d in node %p",
	    __func__, slot, node));
	node->pn_popmap ^= 1 << slot;
	pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL, PCTRIE_LOCKED);
	if (!powerof2(node->pn_popmap))
		return;
	KASSERT(node->pn_popmap != 0, ("%s: bad popmap all zeroes", __func__));
	slot = ffs(node->pn_popmap) - 1;
	child = pctrie_node_load(&node->pn_child[slot], NULL, PCTRIE_LOCKED);
	KASSERT(child != PCTRIE_NULL,
	    ("%s: bad popmap slot %d in node %p", __func__, slot, node));
	if (parent == NULL)
		pctrie_root_store(ptree, child, PCTRIE_LOCKED);
	else {
		slot = pctrie_slot(parent, index);
		KASSERT(node ==
		    pctrie_node_load(&parent->pn_child[slot], NULL,
		    PCTRIE_LOCKED), ("%s: invalid child value", __func__));
		pctrie_node_store(&parent->pn_child[slot], child,
		    PCTRIE_LOCKED);
	}
	/*
	 * The child is still valid and we cannot zero the
	 * pointer until all SMR references are gone.
	 */
	pctrie_node_put(ptree, node, freefn);
}
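
/*
 * Editorial illustration (not part of the original source): continuing the
 * example above, removing 0x4 leaves the root node with the single child
 * 0xa; the powerof2() check detects this, the remaining leaf is promoted
 * into the root, and the interior node is freed, restoring path compression.
 */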

/*
 * Remove and free all the nodes from the tree.
 * This function is recursive, but the recursion is tightly bounded because
 * the maximum depth of the tree is fixed.
 */
void
pctrie_reclaim_allnodes(struct pctrie *ptree, pctrie_free_t freefn)
{
	struct pctrie_node *root;

	root = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	if (root == PCTRIE_NULL)
		return;
	pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_UNSERIALIZED);
	if (!pctrie_isleaf(root))
		pctrie_reclaim_allnodes_int(ptree, root, freefn);
}

#ifdef DDB
/*
 * Show details about the given node.
 */
DB_SHOW_COMMAND(pctrienode, db_show_pctrienode)
{
	struct pctrie_node *node, *tmp;
	int slot;
	pn_popmap_t popmap;

	if (!have_addr)
		return;
	node = (struct pctrie_node *)addr;
	db_printf("node %p, owner %jx, children popmap %04x, level %u:\n",
	    (void *)node, (uintmax_t)node->pn_owner, node->pn_popmap,
	    node->pn_clev / PCTRIE_WIDTH);
	for (popmap = node->pn_popmap; popmap != 0; popmap ^= 1 << slot) {
		slot = ffs(popmap) - 1;
		tmp = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_UNSERIALIZED);
		db_printf("slot: %d, val: %p, value: %p, clev: %d\n",
		    slot, (void *)tmp,
		    pctrie_isleaf(tmp) ? pctrie_toval(tmp) : NULL,
		    node->pn_clev / PCTRIE_WIDTH);
	}
}
#endif /* DDB */
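
/*
 * Editorial note (not part of the original source): with DDB compiled in,
 * the command above can be used to inspect a node from the debugger, e.g.
 *
 *	db> show pctrienode 0xfffff80003a6b000
 *
 * where the address (hypothetical here) is a struct pctrie_node pointer.
 */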