1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2013 EMC Corp. 5 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org> 6 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 */ 31 32 /* 33 * Path-compressed radix trie implementation. 34 * 35 * The implementation takes into account the following rationale: 36 * - Size of the nodes should be as small as possible but still big enough 37 * to avoid a large maximum depth for the trie. 
This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other might be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/pctrie.h>
#include <sys/proc.h>	/* smr.h depends on struct thread. */
#include <sys/smr.h>
#include <sys/smr_types.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A bitmap wide enough to hold one bit per possible child of a node
 * (PCTRIE_COUNT == 1 << PCTRIE_WIDTH bits).
 */
#if PCTRIE_WIDTH == 3
typedef uint8_t pn_popmap_t;
#elif PCTRIE_WIDTH == 4
typedef uint16_t pn_popmap_t;
#elif PCTRIE_WIDTH == 5
typedef uint32_t pn_popmap_t;
#else
#error Unsupported width
#endif
/* Popmaps are manipulated with int-width shift and ffs() operations. */
_Static_assert(sizeof(pn_popmap_t) <= sizeof(int),
    "pn_popmap_t too wide");

struct pctrie_node;
typedef SMR_POINTER(struct pctrie_node *) smr_pctnode_t;

/*
 * An interior node of the trie.  Child slots hold either another node or a
 * tagged leaf pointer; see pctrie_isleaf()/pctrie_toleaf() below.
 */
struct pctrie_node {
	uint64_t	pn_owner;		/* Owner of record. */
	pn_popmap_t	pn_popmap;		/* Valid children. */
	uint8_t		pn_clev;		/* Level * WIDTH. */
	smr_pctnode_t	pn_child[PCTRIE_COUNT];	/* Child nodes. */
};

/*
 * Map index to an array position for the children of node.
 */
static __inline int
pctrie_slot(struct pctrie_node *node, uint64_t index)
{
	return ((index >> node->pn_clev) & (PCTRIE_COUNT - 1));
}

/*
 * Returns true if index does not belong to the specified node.  Otherwise,
 * sets slot value, and returns false.
 */
static __inline bool
pctrie_keybarr(struct pctrie_node *node, uint64_t index, int *slot)
{
	/*
	 * After subtracting the owner prefix and shifting out the bits below
	 * this node's level, an in-range index is exactly a slot number.
	 */
	index = (index - node->pn_owner) >> node->pn_clev;
	if (index >= PCTRIE_COUNT)
		return (true);
	*slot = index;
	return (false);
}

/*
 * Check radix node: assert, under INVARIANTS, that a node being released has
 * at most one bit set in its popmap and that every slot outside the popmap
 * has been reset to PCTRIE_NULL.
 */
static __inline void
pctrie_node_put(struct pctrie_node *node)
{
#ifdef INVARIANTS
	int slot;

	KASSERT(powerof2(node->pn_popmap),
	    ("pctrie_node_put: node %p has too many children %04x", node,
	    node->pn_popmap));
	for (slot = 0; slot < PCTRIE_COUNT; slot++) {
		if ((node->pn_popmap & (1 << slot)) != 0)
			continue;
		KASSERT(smr_unserialized_load(&node->pn_child[slot], true) ==
		    PCTRIE_NULL,
		    ("pctrie_node_put: node %p has a child", node));
	}
#endif
}

/*
 * Access styles for child-pointer loads and stores:
 * PCTRIE_SMR - reader within an SMR section,
 * PCTRIE_LOCKED - serialized by an external lock,
 * PCTRIE_UNSERIALIZED - the node is not (yet) reachable by readers.
 */
enum pctrie_access { PCTRIE_SMR, PCTRIE_LOCKED, PCTRIE_UNSERIALIZED };

/*
 * Fetch a node pointer from a slot.
 */
static __inline struct pctrie_node *
pctrie_node_load(smr_pctnode_t *p, smr_t smr, enum pctrie_access access)
{
	switch (access) {
	case PCTRIE_UNSERIALIZED:
		return (smr_unserialized_load(p, true));
	case PCTRIE_LOCKED:
		return (smr_serialized_load(p, true));
	case PCTRIE_SMR:
		return (smr_entered_load(p, smr));
	}
	__assert_unreachable();
}

/*
 * Store a node pointer into a slot, using the store flavor that matches the
 * caller's synchronization.
 */
static __inline void
pctrie_node_store(smr_pctnode_t *p, void *v, enum pctrie_access access)
{
	switch (access) {
	case PCTRIE_UNSERIALIZED:
		smr_unserialized_store(p, v, true);
		break;
	case PCTRIE_LOCKED:
		smr_serialized_store(p, v, true);
		break;
	case PCTRIE_SMR:
		/* Writers must hold the lock; SMR protects only readers. */
		panic("%s: Not supported in SMR section.", __func__);
		break;
	default:
		__assert_unreachable();
		break;
	}
}

/*
 * Get the root node for a tree.
 */
static __inline struct pctrie_node *
pctrie_root_load(struct pctrie *ptree, smr_t smr, enum pctrie_access access)
{
	return (pctrie_node_load((smr_pctnode_t *)&ptree->pt_root, smr, access));
}

/*
 * Set the root node for a tree.
 */
static __inline void
pctrie_root_store(struct pctrie *ptree, struct pctrie_node *node,
    enum pctrie_access access)
{
	pctrie_node_store((smr_pctnode_t *)&ptree->pt_root, node, access);
}

/*
 * Returns TRUE if the specified node is a leaf and FALSE otherwise.
 */
static __inline bool
pctrie_isleaf(struct pctrie_node *node)
{
	return (((uintptr_t)node & PCTRIE_ISLEAF) != 0);
}

/*
 * Returns val with leaf bit set.
 */
static __inline void *
pctrie_toleaf(uint64_t *val)
{
	return ((void *)((uintptr_t)val | PCTRIE_ISLEAF));
}

/*
 * Returns the associated val extracted from node.
 */
static __inline uint64_t *
pctrie_toval(struct pctrie_node *node)
{
	return ((uint64_t *)((uintptr_t)node & ~PCTRIE_FLAGS));
}

/*
 * Returns the associated pointer extracted from node and field offset.
 */
static __inline void *
pctrie_toptr(struct pctrie_node *node, int keyoff)
{
	return ((void *)(((uintptr_t)node & ~PCTRIE_FLAGS) - keyoff));
}

/*
 * Make 'child' a child of 'node'.
 */
static __inline void
pctrie_addnode(struct pctrie_node *node, uint64_t index,
    struct pctrie_node *child, enum pctrie_access access)
{
	int slot;

	slot = pctrie_slot(node, index);
	pctrie_node_store(&node->pn_child[slot], child, access);
	/* The XOR plus KASSERT doubles as a check that the slot was empty. */
	node->pn_popmap ^= 1 << slot;
	KASSERT((node->pn_popmap & (1 << slot)) != 0,
	    ("%s: bad popmap slot %d in node %p", __func__, slot, node));
}

/*
 * pctrie node zone initializer.
 */
int
pctrie_zone_init(void *mem, int size __unused, int flags __unused)
{
	struct pctrie_node *node;

	node = mem;
	node->pn_popmap = 0;
	for (int i = 0; i < nitems(node->pn_child); i++)
		pctrie_node_store(&node->pn_child[i], PCTRIE_NULL,
		    PCTRIE_UNSERIALIZED);
	return (0);
}

/*
 * Returns the size of a pctrie node.
 */
size_t
pctrie_node_size(void)
{

	return (sizeof(struct pctrie_node));
}

enum pctrie_insert_neighbor_mode {
	PCTRIE_INSERT_NEIGHBOR_NONE,
	PCTRIE_INSERT_NEIGHBOR_LT,
	PCTRIE_INSERT_NEIGHBOR_GT,
};

/*
 * Look for where to insert the key-value pair into the trie.  Complete the
 * insertion if it replaces a null leaf.  Return the insertion location if the
 * insertion needs to be completed by the caller; otherwise return NULL.
 *
 * If the key is already present in the trie, populate *found_out as if by
 * pctrie_lookup().
 *
 * With mode PCTRIE_INSERT_NEIGHBOR_GT or PCTRIE_INSERT_NEIGHBOR_LT, set
 * *neighbor_out to the lowest level node we encounter during the insert lookup
 * that is a parent of the next greater or lesser entry.  The value is not
 * defined if the key was already present in the trie.
 *
 * Note that mode is expected to be a compile-time constant, and this procedure
 * is expected to be inlined into callers with extraneous code optimized out.
 */
static __always_inline void *
pctrie_insert_lookup_compound(struct pctrie *ptree, uint64_t *val,
    uint64_t **found_out, struct pctrie_node **neighbor_out,
    enum pctrie_insert_neighbor_mode mode)
{
	uint64_t index;
	struct pctrie_node *node, *parent;
	int slot;

	index = *val;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
	parent = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if (node == PCTRIE_NULL) {
				/* An empty slot; complete the insert here. */
				if (parent == NULL)
					ptree->pt_root = pctrie_toleaf(val);
				else
					pctrie_addnode(parent, index,
					    pctrie_toleaf(val), PCTRIE_LOCKED);
				return (NULL);
			}
			if (*pctrie_toval(node) == index) {
				/* Key already present; report it. */
				*found_out = pctrie_toval(node);
				return (NULL);
			}
			break;
		}
		if (pctrie_keybarr(node, index, &slot))
			break;
		/*
		 * Descend.  If we're tracking the next neighbor and this node
		 * contains a neighboring entry in the right direction, record
		 * it.
		 */
		if (mode == PCTRIE_INSERT_NEIGHBOR_LT) {
			if ((node->pn_popmap & ((1 << slot) - 1)) != 0)
				*neighbor_out = node;
		} else if (mode == PCTRIE_INSERT_NEIGHBOR_GT) {
			if ((node->pn_popmap >> slot) > 1)
				*neighbor_out = node;
		}
		parent = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * The caller will split this node.  If we're tracking the next
	 * neighbor, record the old node if the old entry is in the right
	 * direction.
	 */
	if (mode == PCTRIE_INSERT_NEIGHBOR_LT) {
		if (*pctrie_toval(node) < index)
			*neighbor_out = node;
	} else if (mode == PCTRIE_INSERT_NEIGHBOR_GT) {
		if (*pctrie_toval(node) > index)
			*neighbor_out = node;
	}

	/*
	 * 'node' must be replaced in the tree with a new branch node, with
	 * children 'node' and 'val'.  Return the place that points to 'node'
	 * now, and will point to the new branching node later.
	 */
	return ((parent != NULL) ? &parent->pn_child[slot] :
	    (smr_pctnode_t *)&ptree->pt_root);
}

/*
 * Wrap pctrie_insert_lookup_compound to implement a strict insertion.  Panic
 * if the key already exists, and do not look for neighboring entries.
363 */ 364 void * 365 pctrie_insert_lookup_strict(struct pctrie *ptree, uint64_t *val) 366 { 367 void *parentp; 368 uint64_t *found; 369 370 found = NULL; 371 parentp = pctrie_insert_lookup_compound(ptree, val, &found, NULL, 372 PCTRIE_INSERT_NEIGHBOR_NONE); 373 if (__predict_false(found != NULL)) 374 panic("%s: key %jx is already present", __func__, 375 (uintmax_t)*val); 376 return (parentp); 377 } 378 379 /* 380 * Wrap pctrie_insert_lookup_compound to implement find-or-insert. Do not look 381 * for neighboring entries. 382 */ 383 void * 384 pctrie_insert_lookup(struct pctrie *ptree, uint64_t *val, 385 uint64_t **found_out) 386 { 387 *found_out = NULL; 388 return (pctrie_insert_lookup_compound(ptree, val, found_out, NULL, 389 PCTRIE_INSERT_NEIGHBOR_NONE)); 390 } 391 392 /* 393 * Wrap pctrie_insert_lookup_compound to implement find or insert and find next 394 * greater entry. Find a subtree that contains the next entry greater than the 395 * newly-inserted or to-be-inserted entry. 396 */ 397 void * 398 pctrie_insert_lookup_gt(struct pctrie *ptree, uint64_t *val, 399 uint64_t **found_out, struct pctrie_node **neighbor_out) 400 { 401 *found_out = NULL; 402 *neighbor_out = NULL; 403 return (pctrie_insert_lookup_compound(ptree, val, found_out, 404 neighbor_out, PCTRIE_INSERT_NEIGHBOR_GT)); 405 } 406 407 /* 408 * Wrap pctrie_insert_lookup_compound to implement find or insert and find next 409 * lesser entry. Find a subtree that contains the next entry less than the 410 * newly-inserted or to-be-inserted entry. 411 */ 412 void * 413 pctrie_insert_lookup_lt(struct pctrie *ptree, uint64_t *val, 414 uint64_t **found_out, struct pctrie_node **neighbor_out) 415 { 416 *found_out = NULL; 417 *neighbor_out = NULL; 418 return (pctrie_insert_lookup_compound(ptree, val, found_out, 419 neighbor_out, PCTRIE_INSERT_NEIGHBOR_LT)); 420 } 421 422 /* 423 * Uses new node to insert key-value pair into the trie at given location. 
 */
void
pctrie_insert_node(void *parentp, struct pctrie_node *parent, uint64_t *val)
{
	struct pctrie_node *node;
	uint64_t index, newind;

	/*
	 * Clear the last child pointer of the newly allocated parent.  We want
	 * to clear it after the final section has exited so lookup can not
	 * return false negatives.  It is done here because it will be
	 * cache-cold in the dtor callback.
	 */
	if (parent->pn_popmap != 0) {
		pctrie_node_store(&parent->pn_child[ffs(parent->pn_popmap) - 1],
		    PCTRIE_NULL, PCTRIE_UNSERIALIZED);
		parent->pn_popmap = 0;
	}

	/*
	 * Recover the values of the two children of the new parent node.  If
	 * 'node' is not a leaf, this stores into 'newind' the 'owner' field,
	 * which must be first in the node.
	 */
	index = *val;
	node = pctrie_node_load(parentp, NULL, PCTRIE_UNSERIALIZED);
	newind = *pctrie_toval(node);

	/*
	 * From the highest-order bit where the indexes differ,
	 * compute the highest level in the trie where they differ.  Then,
	 * compute the least index of this subtrie.
	 */
	_Static_assert(sizeof(long long) >= sizeof(uint64_t),
	    "uint64 too wide");
	_Static_assert(sizeof(uint64_t) * NBBY <=
	    (1 << (sizeof(parent->pn_clev) * NBBY)), "pn_clev too narrow");
	parent->pn_clev = rounddown(ilog2(index ^ newind), PCTRIE_WIDTH);
	/*
	 * -(PCTRIE_COUNT << pn_clev) masks off the index bits covered by the
	 * new node and its children; going through the uint64_t pn_owner
	 * keeps the shift in 64 bits.
	 */
	parent->pn_owner = PCTRIE_COUNT;
	parent->pn_owner = index & -(parent->pn_owner << parent->pn_clev);


	/* These writes are not yet visible due to ordering. */
	pctrie_addnode(parent, index, pctrie_toleaf(val), PCTRIE_UNSERIALIZED);
	pctrie_addnode(parent, newind, node, PCTRIE_UNSERIALIZED);
	/* Synchronize to make the above visible. */
	pctrie_node_store(parentp, parent, PCTRIE_LOCKED);
}

/*
 * Return the value associated with the node, if the node is a leaf that matches
 * the index; otherwise NULL.
476 */ 477 static __always_inline uint64_t * 478 pctrie_match_value(struct pctrie_node *node, uint64_t index) 479 { 480 uint64_t *m; 481 482 if (!pctrie_isleaf(node) || (m = pctrie_toval(node)) == NULL || 483 *m != index) 484 m = NULL; 485 return (m); 486 } 487 488 /* 489 * Returns the value stored at the index. If the index is not present, 490 * NULL is returned. 491 */ 492 static __always_inline uint64_t * 493 _pctrie_lookup(struct pctrie *ptree, uint64_t index, smr_t smr, 494 enum pctrie_access access) 495 { 496 struct pctrie_node *node; 497 int slot; 498 499 node = pctrie_root_load(ptree, smr, access); 500 /* Seek a node that matches index. */ 501 while (!pctrie_isleaf(node) && !pctrie_keybarr(node, index, &slot)) 502 node = pctrie_node_load(&node->pn_child[slot], smr, access); 503 return (pctrie_match_value(node, index)); 504 } 505 506 /* 507 * Returns the value stored at the index, assuming access is externally 508 * synchronized by a lock. 509 * 510 * If the index is not present, NULL is returned. 511 */ 512 uint64_t * 513 pctrie_lookup(struct pctrie *ptree, uint64_t index) 514 { 515 return (_pctrie_lookup(ptree, index, NULL, PCTRIE_LOCKED)); 516 } 517 518 /* 519 * Returns the value stored at the index without requiring an external lock. 520 * 521 * If the index is not present, NULL is returned. 522 */ 523 uint64_t * 524 pctrie_lookup_unlocked(struct pctrie *ptree, uint64_t index, smr_t smr) 525 { 526 uint64_t *res; 527 528 smr_enter(smr); 529 res = _pctrie_lookup(ptree, index, smr, PCTRIE_SMR); 530 smr_exit(smr); 531 return (res); 532 } 533 534 /* 535 * Returns the last node examined in the search for the index, and updates the 536 * search path to that node. 
 */
static __always_inline struct pctrie_node *
_pctrie_iter_lookup_node(struct pctrie_iter *it, uint64_t index, smr_t smr,
    enum pctrie_access access)
{
	struct pctrie_node *node;
	int slot;

	/*
	 * Climb the search path to find the lowest node from which to start the
	 * search for a value matching 'index'.
	 */
	while (it->top != 0) {
		node = it->path[it->top - 1];
		/*
		 * A live interior node has at least two children, so a
		 * power-of-2 popmap indicates a node already collapsed by
		 * removal.
		 */
		KASSERT(!powerof2(node->pn_popmap),
		    ("%s: freed node in iter path", __func__));
		if (!pctrie_keybarr(node, index, &slot)) {
			node = pctrie_node_load(
			    &node->pn_child[slot], smr, access);
			break;
		}
		--it->top;
	}
	if (it->top == 0)
		node = pctrie_root_load(it->ptree, smr, access);

	/* Seek a node that matches index. */
	while (!pctrie_isleaf(node) && !pctrie_keybarr(node, index, &slot)) {
		KASSERT(it->top < nitems(it->path),
		    ("%s: path overflow in trie %p", __func__, it->ptree));
		it->path[it->top++] = node;
		node = pctrie_node_load(&node->pn_child[slot], smr, access);
	}
	return (node);
}

/*
 * Returns the value stored at a given index value, possibly NULL.
 */
static __always_inline uint64_t *
_pctrie_iter_lookup(struct pctrie_iter *it, uint64_t index, smr_t smr,
    enum pctrie_access access)
{
	struct pctrie_node *node;

	it->index = index;
	node = _pctrie_iter_lookup_node(it, index, smr, access);
	return (pctrie_match_value(node, index));
}

/*
 * Returns the value stored at a given index value, possibly NULL.
 */
uint64_t *
pctrie_iter_lookup(struct pctrie_iter *it, uint64_t index)
{
	return (_pctrie_iter_lookup(it, index, NULL, PCTRIE_LOCKED));
}

/*
 * Returns the value stored at a fixed offset from the current index value,
 * possibly NULL.
 */
static __always_inline uint64_t *
_pctrie_iter_stride(struct pctrie_iter *it, int stride, smr_t smr,
    enum pctrie_access access)
{
	uint64_t index = it->index + stride;

	/* Detect stride overflow. */
	if ((stride > 0) != (index > it->index))
		return (NULL);
	/* Detect crossing limit. */
	if ((index < it->limit) != (it->index < it->limit))
		return (NULL);

	return (_pctrie_iter_lookup(it, index, smr, access));
}

/*
 * Returns the value stored at a fixed offset from the current index value,
 * possibly NULL.
 */
uint64_t *
pctrie_iter_stride(struct pctrie_iter *it, int stride)
{
	return (_pctrie_iter_stride(it, stride, NULL, PCTRIE_LOCKED));
}

/*
 * Returns the value stored at one more than the current index value, possibly
 * NULL, assuming access is externally synchronized by a lock.
 */
uint64_t *
pctrie_iter_next(struct pctrie_iter *it)
{
	return (_pctrie_iter_stride(it, 1, NULL, PCTRIE_LOCKED));
}

/*
 * Returns the value stored at one less than the current index value, possibly
 * NULL, assuming access is externally synchronized by a lock.
 */
uint64_t *
pctrie_iter_prev(struct pctrie_iter *it)
{
	return (_pctrie_iter_stride(it, -1, NULL, PCTRIE_LOCKED));
}

/*
 * Returns the value with the least index that is greater than or equal to the
 * specified index, or NULL if there are no such values.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline uint64_t *
pctrie_lookup_ge_node(struct pctrie_node *node, uint64_t index)
{
	struct pctrie_node *succ;
	uint64_t *m;
	int slot;

	/*
	 * Descend the trie as if performing an ordinary lookup for the
	 * specified value.  However, unlike an ordinary lookup, as we descend
	 * the trie, we use "succ" to remember the last branching-off point,
	 * that is, the interior node under which the least value that is both
	 * outside our current path down the trie and greater than the specified
	 * index resides.  (The node's popmap makes it fast and easy to
	 * recognize a branching-off point.)  If our ordinary lookup fails to
	 * yield a value that is greater than or equal to the specified index,
	 * then we will exit this loop and perform a lookup starting from
	 * "succ".  If "succ" is not NULL, then that lookup is guaranteed to
	 * succeed.
	 */
	succ = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m >= index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot)) {
			/*
			 * If all values in this subtree are > index, then the
			 * least value in this subtree is the answer.
			 */
			if (node->pn_owner > index)
				succ = node;
			break;
		}

		/*
		 * Just in case the next search step leads to a subtree of all
		 * values < index, check popmap to see if a next bigger step, to
		 * a subtree of all pages with values > index, is available.  If
		 * so, remember to restart the search here.
		 */
		if ((node->pn_popmap >> slot) > 1)
			succ = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * Restart the search from the last place visited in the subtree that
	 * included some values > index, if there was such a place.
	 */
	if (succ == NULL)
		return (NULL);
	if (succ != node) {
		/*
		 * Take a step to the next bigger sibling of the node chosen
		 * last time.  In that subtree, all values > index.
		 */
		slot = pctrie_slot(succ, index) + 1;
		KASSERT((succ->pn_popmap >> slot) != 0,
		    ("%s: no popmap siblings past slot %d in node %p",
		    __func__, slot, succ));
		slot += ffs(succ->pn_popmap >> slot) - 1;
		succ = pctrie_node_load(&succ->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}

	/*
	 * Find the value in the subtree rooted at "succ" with the least index.
	 */
	while (!pctrie_isleaf(succ)) {
		KASSERT(succ->pn_popmap != 0,
		    ("%s: no popmap children in node %p", __func__, succ));
		slot = ffs(succ->pn_popmap) - 1;
		succ = pctrie_node_load(&succ->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	return (pctrie_toval(succ));
}

/*
 * Returns the least value in the trie that is >= 'index', or NULL, under an
 * external lock.
 */
uint64_t *
pctrie_lookup_ge(struct pctrie *ptree, uint64_t index)
{
	return (pctrie_lookup_ge_node(
	    pctrie_root_load(ptree, NULL, PCTRIE_LOCKED), index));
}

/*
 * Returns the least value strictly greater than 'index' in the subtree rooted
 * at 'node', or NULL if there is none or index + 1 would overflow.
 */
uint64_t *
pctrie_subtree_lookup_gt(struct pctrie_node *node, uint64_t index)
{
	if (node == NULL || index + 1 == 0)
		return (NULL);
	return (pctrie_lookup_ge_node(node, index + 1));
}

/*
 * Find first leaf >= index, and fill iter with the path to the parent of that
 * leaf.  Return NULL if there is no such leaf less than limit.
 */
uint64_t *
pctrie_iter_lookup_ge(struct pctrie_iter *it, uint64_t index)
{
	struct pctrie_node *node;
	uint64_t *m;
	int slot;

	/* Seek a node that matches index. */
	node = _pctrie_iter_lookup_node(it, index, NULL, PCTRIE_LOCKED);

	/*
	 * If no such node was found, and instead this path leads only to nodes
	 * < index, back up to find a subtrie with the least value > index.
	 */
	if (pctrie_isleaf(node) ?
	    (m = pctrie_toval(node)) == NULL || *m < index :
	    node->pn_owner < index) {
		/* Climb the path to find a node with a descendant > index.
 */
		while (it->top != 0) {
			node = it->path[it->top - 1];
			slot = pctrie_slot(node, index) + 1;
			if ((node->pn_popmap >> slot) != 0)
				break;
			--it->top;
		}
		if (it->top == 0)
			return (NULL);

		/* Step to the least child with a descendant > index. */
		slot += ffs(node->pn_popmap >> slot) - 1;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	/* Descend to the least leaf of the subtrie. */
	while (!pctrie_isleaf(node)) {
		/* Every index in this subtree is >= pn_owner. */
		if (it->limit != 0 && node->pn_owner >= it->limit)
			return (NULL);
		slot = ffs(node->pn_popmap) - 1;
		KASSERT(it->top < nitems(it->path),
		    ("%s: path overflow in trie %p", __func__, it->ptree));
		it->path[it->top++] = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	m = pctrie_toval(node);
	if (it->limit != 0 && *m >= it->limit)
		return (NULL);
	it->index = *m;
	return (m);
}

/*
 * Find the first leaf with value at least 'jump' greater than the previous
 * leaf.  Return NULL if that value is >= limit.
 */
uint64_t *
pctrie_iter_jump_ge(struct pctrie_iter *it, int64_t jump)
{
	uint64_t index = it->index + jump;

	/* Detect jump overflow.
 */
	if ((jump > 0) != (index > it->index))
		return (NULL);
	if (it->limit != 0 && index >= it->limit)
		return (NULL);
	return (pctrie_iter_lookup_ge(it, index));
}

#ifdef INVARIANTS
/*
 * Cross-check a pctrie_subtree_lookup_gt() result against a fresh lookup from
 * the root of the trie.
 */
void
pctrie_subtree_lookup_gt_assert(struct pctrie_node *node, uint64_t index,
    struct pctrie *ptree, uint64_t *res)
{
	uint64_t *expected;

	if (index + 1 == 0)
		expected = NULL;
	else
		expected = pctrie_lookup_ge(ptree, index + 1);
	KASSERT(res == expected,
	    ("pctrie subtree lookup gt result different from root lookup: "
	    "ptree %p, index %ju, subtree %p, found %p, expected %p", ptree,
	    (uintmax_t)index, node, res, expected));
}
#endif

/*
 * Returns the value with the greatest index that is less than or equal to the
 * specified index, or NULL if there are no such values.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline uint64_t *
pctrie_lookup_le_node(struct pctrie_node *node, uint64_t index)
{
	struct pctrie_node *pred;
	uint64_t *m;
	int slot;

	/*
	 * Mirror the implementation of pctrie_lookup_ge_node, described above.
 */
	pred = NULL;
	for (;;) {
		if (pctrie_isleaf(node)) {
			if ((m = pctrie_toval(node)) != NULL && *m <= index)
				return (m);
			break;
		}
		if (pctrie_keybarr(node, index, &slot)) {
			/*
			 * If all values in this subtree are < index, then the
			 * greatest value in this subtree is the answer.
			 */
			if (node->pn_owner < index)
				pred = node;
			break;
		}
		/* Remember the last branching-off point toward lesser values. */
		if ((node->pn_popmap & ((1 << slot) - 1)) != 0)
			pred = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	if (pred == NULL)
		return (NULL);
	if (pred != node) {
		/*
		 * Take a step to the next smaller sibling of the node chosen
		 * last time.  In that subtree, all values < index.
		 */
		slot = pctrie_slot(pred, index);
		KASSERT((pred->pn_popmap & ((1 << slot) - 1)) != 0,
		    ("%s: no popmap siblings before slot %d in node %p",
		    __func__, slot, pred));
		slot = ilog2(pred->pn_popmap & ((1 << slot) - 1));
		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	/* Find the value in the subtree rooted at "pred" with the greatest index. */
	while (!pctrie_isleaf(pred)) {
		KASSERT(pred->pn_popmap != 0,
		    ("%s: no popmap children in node %p", __func__, pred));
		slot = ilog2(pred->pn_popmap);
		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	return (pctrie_toval(pred));
}

/*
 * Returns the greatest value in the trie that is <= 'index', or NULL, under
 * an external lock.
 */
uint64_t *
pctrie_lookup_le(struct pctrie *ptree, uint64_t index)
{
	return (pctrie_lookup_le_node(
	    pctrie_root_load(ptree, NULL, PCTRIE_LOCKED), index));
}

/*
 * Returns the greatest value strictly less than 'index' in the subtree rooted
 * at 'node', or NULL if there is none or index is zero.
 */
uint64_t *
pctrie_subtree_lookup_lt(struct pctrie_node *node, uint64_t index)
{
	if (node == NULL || index == 0)
		return (NULL);
	return (pctrie_lookup_le_node(node, index - 1));
}

/*
 * Find first leaf <= index, and fill iter with the path to the parent of that
 * leaf.  Return NULL if there is no such leaf greater than limit.
 */
uint64_t *
pctrie_iter_lookup_le(struct pctrie_iter *it, uint64_t index)
{
	struct pctrie_node *node;
	uint64_t *m;
	int slot;

	/* Seek a node that matches index.
 */
	node = _pctrie_iter_lookup_node(it, index, NULL, PCTRIE_LOCKED);

	/*
	 * If no such node was found, and instead this path leads only to nodes
	 * > index, back up to find a subtrie with the greatest value < index.
	 */
	if (pctrie_isleaf(node) ?
	    (m = pctrie_toval(node)) == NULL || *m > index :
	    node->pn_owner > index) {
		/* Climb the path to find a node with a descendant < index. */
		while (it->top != 0) {
			node = it->path[it->top - 1];
			slot = pctrie_slot(node, index);
			if ((node->pn_popmap & ((1 << slot) - 1)) != 0)
				break;
			--it->top;
		}
		if (it->top == 0)
			return (NULL);

		/* Step to the greatest child with a descendant < index. */
		slot = ilog2(node->pn_popmap & ((1 << slot) - 1));
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	/* Descend to the greatest leaf of the subtrie. */
	while (!pctrie_isleaf(node)) {
		/*
		 * pn_owner + (PCTRIE_COUNT << pn_clev) - 1 is the greatest
		 * index this subtree can contain; stop if all of it is within
		 * the limit.
		 */
		if (it->limit != 0 && it->limit >=
		    node->pn_owner + (PCTRIE_COUNT << node->pn_clev) - 1)
			return (NULL);
		slot = ilog2(node->pn_popmap);
		KASSERT(it->top < nitems(it->path),
		    ("%s: path overflow in trie %p", __func__, it->ptree));
		it->path[it->top++] = node;
		node = pctrie_node_load(&node->pn_child[slot], NULL,
		    PCTRIE_LOCKED);
	}
	m = pctrie_toval(node);
	if (it->limit != 0 && *m <= it->limit)
		return (NULL);
	it->index = *m;
	return (m);
}

/*
 * Find the first leaf with value at least 'jump' less than the previous
 * leaf.  Return NULL if that value is <= limit.
 */
uint64_t *
pctrie_iter_jump_le(struct pctrie_iter *it, int64_t jump)
{
	uint64_t index = it->index - jump;

	/* Detect jump overflow.
*/ 974 if ((jump > 0) != (index < it->index)) 975 return (NULL); 976 if (it->limit != 0 && index <= it->limit) 977 return (NULL); 978 return (pctrie_iter_lookup_le(it, index)); 979 } 980 981 #ifdef INVARIANTS 982 void 983 pctrie_subtree_lookup_lt_assert(struct pctrie_node *node, uint64_t index, 984 struct pctrie *ptree, uint64_t *res) 985 { 986 uint64_t *expected; 987 988 if (index == 0) 989 expected = NULL; 990 else 991 expected = pctrie_lookup_le(ptree, index - 1); 992 KASSERT(res == expected, 993 ("pctrie subtree lookup lt result different from root lookup: " 994 "ptree %p, index %ju, subtree %p, found %p, expected %p", ptree, 995 (uintmax_t)index, node, res, expected)); 996 } 997 #endif 998 999 static void 1000 pctrie_remove(struct pctrie *ptree, uint64_t index, struct pctrie_node *parent, 1001 struct pctrie_node *node, struct pctrie_node **freenode) 1002 { 1003 struct pctrie_node *child; 1004 int slot; 1005 1006 if (node == NULL) { 1007 pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_LOCKED); 1008 return; 1009 } 1010 slot = pctrie_slot(node, index); 1011 KASSERT((node->pn_popmap & (1 << slot)) != 0, 1012 ("%s: bad popmap slot %d in node %p", 1013 __func__, slot, node)); 1014 node->pn_popmap ^= 1 << slot; 1015 pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL, PCTRIE_LOCKED); 1016 if (!powerof2(node->pn_popmap)) 1017 return; 1018 KASSERT(node->pn_popmap != 0, ("%s: bad popmap all zeroes", __func__)); 1019 slot = ffs(node->pn_popmap) - 1; 1020 child = pctrie_node_load(&node->pn_child[slot], NULL, PCTRIE_LOCKED); 1021 KASSERT(child != PCTRIE_NULL, 1022 ("%s: bad popmap slot %d in node %p", __func__, slot, node)); 1023 if (parent == NULL) 1024 pctrie_root_store(ptree, child, PCTRIE_LOCKED); 1025 else { 1026 slot = pctrie_slot(parent, index); 1027 KASSERT(node == 1028 pctrie_node_load(&parent->pn_child[slot], NULL, 1029 PCTRIE_LOCKED), ("%s: invalid child value", __func__)); 1030 pctrie_node_store(&parent->pn_child[slot], child, 1031 PCTRIE_LOCKED); 1032 } 1033 /* 
1034 * The child is still valid and we can not zero the 1035 * pointer until all SMR references are gone. 1036 */ 1037 pctrie_node_put(node); 1038 *freenode = node; 1039 } 1040 1041 /* 1042 * Remove the specified index from the tree, and return the value stored at 1043 * that index. If the index is not present, return NULL. 1044 */ 1045 uint64_t * 1046 pctrie_remove_lookup(struct pctrie *ptree, uint64_t index, 1047 struct pctrie_node **freenode) 1048 { 1049 struct pctrie_node *child, *node, *parent; 1050 uint64_t *m; 1051 int slot; 1052 1053 DEBUG_POISON_POINTER(parent); 1054 *freenode = node = NULL; 1055 child = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED); 1056 while (!pctrie_isleaf(child)) { 1057 parent = node; 1058 node = child; 1059 slot = pctrie_slot(node, index); 1060 child = pctrie_node_load(&node->pn_child[slot], NULL, 1061 PCTRIE_LOCKED); 1062 } 1063 m = pctrie_match_value(child, index); 1064 if (m != NULL) 1065 pctrie_remove(ptree, index, parent, node, freenode); 1066 return (m); 1067 } 1068 1069 /* 1070 * Remove from the trie the leaf last chosen by the iterator, and 1071 * adjust the path if it's last member is to be freed. 1072 */ 1073 uint64_t * 1074 pctrie_iter_remove(struct pctrie_iter *it, struct pctrie_node **freenode) 1075 { 1076 struct pctrie_node *child, *node, *parent; 1077 uint64_t *m; 1078 int slot; 1079 1080 DEBUG_POISON_POINTER(parent); 1081 *freenode = NULL; 1082 if (it->top >= 1) { 1083 parent = (it->top >= 2) ? 
it->path[it->top - 2] : NULL; 1084 node = it->path[it->top - 1]; 1085 slot = pctrie_slot(node, it->index); 1086 child = pctrie_node_load(&node->pn_child[slot], NULL, 1087 PCTRIE_LOCKED); 1088 } else { 1089 node = NULL; 1090 child = pctrie_root_load(it->ptree, NULL, PCTRIE_LOCKED); 1091 } 1092 m = pctrie_match_value(child, it->index); 1093 if (m != NULL) 1094 pctrie_remove(it->ptree, it->index, parent, node, freenode); 1095 if (*freenode != NULL) 1096 --it->top; 1097 return (m); 1098 } 1099 1100 /* 1101 * Return the current leaf, assuming access is externally synchronized by a 1102 * lock. 1103 */ 1104 uint64_t * 1105 pctrie_iter_value(struct pctrie_iter *it) 1106 { 1107 struct pctrie_node *node; 1108 int slot; 1109 1110 if (it->top == 0) 1111 node = pctrie_root_load(it->ptree, NULL, 1112 PCTRIE_LOCKED); 1113 else { 1114 node = it->path[it->top - 1]; 1115 slot = pctrie_slot(node, it->index); 1116 node = pctrie_node_load(&node->pn_child[slot], NULL, 1117 PCTRIE_LOCKED); 1118 } 1119 return (pctrie_toval(node)); 1120 } 1121 1122 /* 1123 * Walk the subtrie rooted at *pnode in order, invoking callback on leaves and 1124 * using the leftmost child pointer for path reversal, until an interior node 1125 * is stripped of all children, and returned for deallocation, with *pnode left 1126 * pointing to the parent of that node. 
1127 */ 1128 static __always_inline struct pctrie_node * 1129 pctrie_reclaim_prune(struct pctrie_node **pnode, struct pctrie_node *parent, 1130 pctrie_cb_t callback, int keyoff, void *arg) 1131 { 1132 struct pctrie_node *child, *node; 1133 int slot; 1134 1135 node = *pnode; 1136 while (node->pn_popmap != 0) { 1137 slot = ffs(node->pn_popmap) - 1; 1138 node->pn_popmap ^= 1 << slot; 1139 child = pctrie_node_load(&node->pn_child[slot], NULL, 1140 PCTRIE_UNSERIALIZED); 1141 pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL, 1142 PCTRIE_UNSERIALIZED); 1143 if (pctrie_isleaf(child)) { 1144 if (callback != NULL) 1145 callback(pctrie_toptr(child, keyoff), arg); 1146 continue; 1147 } 1148 /* Climb one level down the trie. */ 1149 pctrie_node_store(&node->pn_child[0], parent, 1150 PCTRIE_UNSERIALIZED); 1151 parent = node; 1152 node = child; 1153 } 1154 *pnode = parent; 1155 return (node); 1156 } 1157 1158 /* 1159 * Recover the node parent from its first child and continue pruning. 1160 */ 1161 static __always_inline struct pctrie_node * 1162 pctrie_reclaim_resume_compound(struct pctrie_node **pnode, 1163 pctrie_cb_t callback, int keyoff, void *arg) 1164 { 1165 struct pctrie_node *parent, *node; 1166 1167 node = *pnode; 1168 if (node == NULL) 1169 return (NULL); 1170 /* Climb one level up the trie. */ 1171 parent = pctrie_node_load(&node->pn_child[0], NULL, 1172 PCTRIE_UNSERIALIZED); 1173 pctrie_node_store(&node->pn_child[0], PCTRIE_NULL, PCTRIE_UNSERIALIZED); 1174 return (pctrie_reclaim_prune(pnode, parent, callback, keyoff, arg)); 1175 } 1176 1177 /* 1178 * Find the trie root, and start pruning with a NULL parent. 
1179 */ 1180 static __always_inline struct pctrie_node * 1181 pctrie_reclaim_begin_compound(struct pctrie_node **pnode, 1182 struct pctrie *ptree, 1183 pctrie_cb_t callback, int keyoff, void *arg) 1184 { 1185 struct pctrie_node *node; 1186 1187 node = pctrie_root_load(ptree, NULL, PCTRIE_UNSERIALIZED); 1188 pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_UNSERIALIZED); 1189 if (pctrie_isleaf(node)) { 1190 if (callback != NULL && node != PCTRIE_NULL) 1191 callback(pctrie_toptr(node, keyoff), arg); 1192 return (NULL); 1193 } 1194 *pnode = node; 1195 return (pctrie_reclaim_prune(pnode, NULL, callback, keyoff, arg)); 1196 } 1197 1198 struct pctrie_node * 1199 pctrie_reclaim_resume(struct pctrie_node **pnode) 1200 { 1201 return (pctrie_reclaim_resume_compound(pnode, NULL, 0, NULL)); 1202 } 1203 1204 struct pctrie_node * 1205 pctrie_reclaim_begin(struct pctrie_node **pnode, struct pctrie *ptree) 1206 { 1207 return (pctrie_reclaim_begin_compound(pnode, ptree, NULL, 0, NULL)); 1208 } 1209 1210 struct pctrie_node * 1211 pctrie_reclaim_resume_cb(struct pctrie_node **pnode, 1212 pctrie_cb_t callback, int keyoff, void *arg) 1213 { 1214 return (pctrie_reclaim_resume_compound(pnode, callback, keyoff, arg)); 1215 } 1216 1217 struct pctrie_node * 1218 pctrie_reclaim_begin_cb(struct pctrie_node **pnode, struct pctrie *ptree, 1219 pctrie_cb_t callback, int keyoff, void *arg) 1220 { 1221 return (pctrie_reclaim_begin_compound(pnode, ptree, 1222 callback, keyoff, arg)); 1223 } 1224 1225 /* 1226 * Replace an existing value in the trie with another one. 1227 * Panics if there is not an old value in the trie at the new value's index. 
1228 */ 1229 uint64_t * 1230 pctrie_replace(struct pctrie *ptree, uint64_t *newval) 1231 { 1232 struct pctrie_node *leaf, *parent, *node; 1233 uint64_t *m; 1234 uint64_t index; 1235 int slot; 1236 1237 leaf = pctrie_toleaf(newval); 1238 index = *newval; 1239 node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED); 1240 parent = NULL; 1241 for (;;) { 1242 if (pctrie_isleaf(node)) { 1243 if ((m = pctrie_toval(node)) != NULL && *m == index) { 1244 if (parent == NULL) 1245 ptree->pt_root = leaf; 1246 else 1247 pctrie_node_store( 1248 &parent->pn_child[slot], leaf, 1249 PCTRIE_LOCKED); 1250 return (m); 1251 } 1252 break; 1253 } 1254 if (pctrie_keybarr(node, index, &slot)) 1255 break; 1256 parent = node; 1257 node = pctrie_node_load(&node->pn_child[slot], NULL, 1258 PCTRIE_LOCKED); 1259 } 1260 panic("%s: original replacing value not found", __func__); 1261 } 1262 1263 #ifdef DDB 1264 /* 1265 * Show details about the given node. 1266 */ 1267 DB_SHOW_COMMAND(pctrienode, db_show_pctrienode) 1268 { 1269 struct pctrie_node *node, *tmp; 1270 int slot; 1271 pn_popmap_t popmap; 1272 1273 if (!have_addr) 1274 return; 1275 node = (struct pctrie_node *)addr; 1276 db_printf("node %p, owner %jx, children popmap %04x, level %u:\n", 1277 (void *)node, (uintmax_t)node->pn_owner, node->pn_popmap, 1278 node->pn_clev / PCTRIE_WIDTH); 1279 for (popmap = node->pn_popmap; popmap != 0; popmap ^= 1 << slot) { 1280 slot = ffs(popmap) - 1; 1281 tmp = pctrie_node_load(&node->pn_child[slot], NULL, 1282 PCTRIE_UNSERIALIZED); 1283 db_printf("slot: %d, val: %p, value: %p, clev: %d\n", 1284 slot, (void *)tmp, 1285 pctrie_isleaf(tmp) ? pctrie_toval(tmp) : NULL, 1286 node->pn_clev / PCTRIE_WIDTH); 1287 } 1288 } 1289 #endif /* DDB */ 1290