/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general-purpose library
 * because there are way too many embedded parameters that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible but still big
 *   enough to avoid a large maximum depth for the trie.  This is a balance
 *   between the need not to wire too much physical memory for the nodes
 *   and the need to avoid too much cache pollution during trie operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/smr.h>
#include <sys/smr_types.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)

#if VM_RADIX_WIDTH == 3
typedef uint8_t rn_popmap_t;
#elif VM_RADIX_WIDTH == 4
typedef uint16_t rn_popmap_t;
#elif VM_RADIX_WIDTH == 5
typedef uint32_t rn_popmap_t;
#else
#error Unsupported width
#endif
_Static_assert(sizeof(rn_popmap_t) <= sizeof(int),
    "rn_popmap_t too wide");
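
/*
 * Worked example of the definitions above (illustrative only, assuming the
 * LP64 case with a 64-bit vm_pindex_t):
 *
 *	VM_RADIX_WIDTH == 4, so each node consumes 4 bits of the key;
 *	VM_RADIX_COUNT == 16 and VM_RADIX_MASK == 0xf;
 *	VM_RADIX_LIMIT == howmany(64, 4) - 1 == 15, the level of a node
 *	whose subtree spans the entire key space.
 *
 * The child array is then 16 pointers * 8 bytes == 128 bytes, which is what
 * keeps a node's children within the cache-line budget described above on
 * common 64-bit platforms.
 */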

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with the specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))

enum vm_radix_access { SMR, LOCKED, UNSERIALIZED };

struct vm_radix_node;
typedef SMR_POINTER(struct vm_radix_node *) smrnode_t;

struct vm_radix_node {
	vm_pindex_t	rn_owner;			/* Owner of record. */
	rn_popmap_t	rn_popmap;			/* Valid children. */
	uint8_t		rn_clev;			/* Current level. */
	smrnode_t	rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;
static smr_t vm_radix_smr;

static void vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access);

/*
 * Returns the index's slot in the child array at the given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{
	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}

/* Computes the key (index) with the low-order 'level' radix-digits zeroed. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	return (index & -VM_RADIX_UNITLEVEL(level));
}
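
/*
 * Illustrative example of the two helpers above (not compiled; assumes
 * VM_RADIX_WIDTH == 4):
 *
 *	vm_radix_slot(0x1234, 1) == (0x1234 >> 4) & 0xf == 0x3
 *	vm_radix_slot(0x1234, 2) == (0x1234 >> 8) & 0xf == 0x2
 *	vm_radix_trimkey(0x1234, 1) == 0x1234 & ~0xf == 0x1230
 *	vm_radix_trimkey(0x1234, 2) == 0x1234 & ~0xff == 0x1200
 *
 * That is, 'level' selects one radix digit of the key, and trimkey()
 * clears all of the digits below it.
 */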

/*
 * Allocate a radix node.
 */
static struct vm_radix_node *
vm_radix_node_get(vm_pindex_t index, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc_smr(vm_radix_node_zone, M_NOWAIT);
	if (rnode == NULL)
		return (NULL);

	/*
	 * We want to clear the last child pointer after the final SMR read
	 * section has exited so that lookup can not return false negatives.
	 * It is done here because the pointer would be cache-cold in the
	 * dtor callback.
	 */
	if (rnode->rn_popmap != 0) {
		vm_radix_node_store(&rnode->rn_child[ffs(rnode->rn_popmap) - 1],
		    NULL, UNSERIALIZED);
		rnode->rn_popmap = 0;
	}
	rnode->rn_owner = vm_radix_trimkey(index, clevel + 1);
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{
#ifdef INVARIANTS
	int slot;

	KASSERT(powerof2(rnode->rn_popmap),
	    ("vm_radix_node_put: rnode %p has too many children %04x", rnode,
	    rnode->rn_popmap));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++) {
		if ((rnode->rn_popmap & (1 << slot)) != 0)
			continue;
		KASSERT(smr_unserialized_load(&rnode->rn_child[slot], true) ==
		    NULL, ("vm_radix_node_put: rnode %p has a child", rnode));
	}
#endif
	uma_zfree_smr(vm_radix_node_zone, rnode);
}

/*
 * Fetch a node pointer from a slot in another node.
 */
static __inline struct vm_radix_node *
vm_radix_node_load(smrnode_t *p, enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		return (smr_unserialized_load(p, true));
	case LOCKED:
		return (smr_serialized_load(p, true));
	case SMR:
		return (smr_entered_load(p, vm_radix_smr));
	}
	__assert_unreachable();
}

static __inline void
vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		smr_unserialized_store(p, v, true);
		break;
	case LOCKED:
		smr_serialized_store(p, v, true);
		break;
	case SMR:
		panic("vm_radix_node_store: Not supported in smr section.");
	}
}

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_root_load(struct vm_radix *rtree, enum vm_radix_access access)
{

	return (vm_radix_node_load((smrnode_t *)&rtree->rt_root, access));
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_root_store(struct vm_radix *rtree, struct vm_radix_node *rnode,
    enum vm_radix_access access)
{

	vm_radix_node_store((smrnode_t *)&rtree->rt_root, rnode, access);
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline bool
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the page cast to a radix node with the leaf bit set.
 */
static __inline struct vm_radix_node *
vm_radix_toleaf(vm_page_t page)
{
	return ((struct vm_radix_node *)((uintptr_t)page | VM_RADIX_ISLEAF));
}

/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page, enum vm_radix_access access)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	vm_radix_node_store(&rnode->rn_child[slot],
	    vm_radix_toleaf(page), access);
	rnode->rn_popmap ^= 1 << slot;
	KASSERT((rnode->rn_popmap & (1 << slot)) != 0,
	    ("%s: bad popmap slot %d in rnode %p", __func__, slot, rnode));
}

/*
 * Returns the level where two keys differ.
 * It cannot accept two equal keys.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));
	CTASSERT(sizeof(long long) >= sizeof(vm_pindex_t));

	/*
	 * From the highest-order bit where the indexes differ,
	 * compute the highest level in the trie where they differ.
	 */
	return ((flsll(index1 ^ index2) - 1) / VM_RADIX_WIDTH);
}
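
/*
 * Illustrative example (not compiled; assumes VM_RADIX_WIDTH == 4): for the
 * keys 0x1234 and 0x1264,
 *
 *	0x1234 ^ 0x1264 == 0x50, flsll(0x50) == 7, (7 - 1) / 4 == 1,
 *
 * so the keys first differ in their level-1 digit.  A node allocated with
 * vm_radix_node_get(0x1234, 1) to hold both would get rn_clev == 1 and
 * rn_owner == vm_radix_trimkey(0x1234, 2) == 0x1200, with the two leaves
 * stored at slots 3 and 6 respectively.
 */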

/*
 * Returns TRUE if it can be determined that the key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline bool
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (false);
}

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	struct vm_radix_node *child;
	int slot;

	while (rnode->rn_popmap != 0) {
		slot = ffs(rnode->rn_popmap) - 1;
		child = vm_radix_node_load(&rnode->rn_child[slot],
		    UNSERIALIZED);
		KASSERT(child != NULL, ("%s: bad popmap slot %d in rnode %p",
		    __func__, slot, rnode));
		if (!vm_radix_isleaf(child))
			vm_radix_reclaim_allnodes_int(child);
		rnode->rn_popmap ^= 1 << slot;
		vm_radix_node_store(&rnode->rn_child[slot], NULL,
		    UNSERIALIZED);
	}
	vm_radix_node_put(rnode);
}

#ifndef UMA_MD_SMALL_ALLOC
void vm_radix_reserve_kva(void);
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory on architectures without a direct map, as they would
 * otherwise have to carve into the kernel maps for every node allocation,
 * resulting in deadlocks for consumers already working with kernel maps.
 */
void
vm_radix_reserve_kva(void)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
#endif
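
/*
 * Rationale for the formula above: in the worst case every resident page
 * needs its own trie node, so each page effectively costs PAGE_SIZE bytes
 * for the page itself plus sizeof(struct vm_radix_node) bytes for its node.
 * Dividing the total amount of physical memory by that per-page cost bounds
 * the number of nodes that can ever be needed.  For illustration (numbers
 * are approximate and platform-dependent): with 4 KB pages and a node of
 * roughly 144 bytes, about 4096 / (4096 + 144), i.e. ~97%, of the page
 * count is reserved.
 */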

/*
 * Initialize the UMA slab zone.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL, NULL, NULL, NULL,
	    VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_SMR | UMA_ZONE_ZINIT);
	vm_radix_smr = uma_zone_get_smr(vm_radix_node_zone);
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	struct vm_radix_node *rnode, *tmp;
	smrnode_t *parentp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for the root is not really important because
	 * it will never be used.
	 */
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)vm_radix_toleaf(page);
		return (0);
	}
	parentp = (smrnode_t *)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(index, clev);
			if (tmp == NULL)
				return (ENOMEM);
			/* These writes are not yet visible due to ordering. */
			vm_radix_addpage(tmp, index, clev, page, UNSERIALIZED);
			vm_radix_addpage(tmp, m->pindex, clev, m, UNSERIALIZED);
			/* Synchronize to make the leaf visible. */
			vm_radix_node_store(parentp, tmp, LOCKED);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		parentp = &rnode->rn_child[slot];
		tmp = vm_radix_node_load(parentp, LOCKED);
		if (tmp == NULL) {
			vm_radix_addpage(rnode, index, rnode->rn_clev, page,
			    LOCKED);
			return (0);
		}
		rnode = tmp;
	}

	/*
	 * A new node is needed because the right insertion level is reached.
	 * Set up the new intermediate node and add the two children: the
	 * new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(index, clev);
	if (tmp == NULL)
		return (ENOMEM);
	slot = vm_radix_slot(newind, clev);
	/* These writes are not yet visible due to ordering. */
	vm_radix_addpage(tmp, index, clev, page, UNSERIALIZED);
	vm_radix_node_store(&tmp->rn_child[slot], rnode, UNSERIALIZED);
	tmp->rn_popmap ^= 1 << slot;
	/* Serializing write to make the above visible. */
	vm_radix_node_store(parentp, tmp, LOCKED);

	return (0);
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
static __always_inline vm_page_t
_vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index,
    enum vm_radix_access access)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_root_load(rtree, access);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			break;
		}
		if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = vm_radix_node_load(&rnode->rn_child[slot], access);
	}
	return (NULL);
}

/*
 * Returns the value stored at the index assuming there is an external lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{

	return (_vm_radix_lookup(rtree, index, LOCKED));
}

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_page_t m;

	smr_enter(vm_radix_smr);
	m = _vm_radix_lookup(rtree, index, SMR);
	smr_exit(vm_radix_smr);

	return (m);
}
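
/*
 * Illustrative lookup walkthrough (not compiled), continuing the example
 * from vm_radix_keydiff() above: suppose the root is a node with
 * rn_clev == 1 and rn_owner == 0x1200, holding leaves for 0x1234 (slot 3)
 * and 0x1264 (slot 6).
 *
 *	_vm_radix_lookup(rtree, 0x1234, ...): keybarr() is false since
 *	    trimkey(0x1234, 2) == 0x1200; slot 3 holds the matching leaf.
 *	_vm_radix_lookup(rtree, 0x1284, ...): keybarr() is false, but
 *	    slot 8 is empty (NULL child), so NULL is returned.
 *	_vm_radix_lookup(rtree, 0x2234, ...): trimkey(0x2234, 2) == 0x2200
 *	    differs from rn_owner, so keybarr() is true and NULL is returned.
 */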

/*
 * Look up the nearest entry at a position greater than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is greater than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/* Find the first set bit beyond the first slot+1 bits. */
		slot = ffs(rnode->rn_popmap & (-2 << slot)) - 1;
		if (slot < 0) {
			/*
			 * A page or edge greater than the search slot is not
			 * found in the current node; ascend to the next
			 * higher-level node.
			 */
			goto ascend;
		}
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		KASSERT(child != NULL, ("%s: bad popmap slot %d in rnode %p",
		    __func__, slot, rnode));
		if (vm_radix_isleaf(child))
			return (vm_radix_topage(child));
		index = vm_radix_trimkey(index, rnode->rn_clev + 1) +
		    slot * VM_RADIX_UNITLEVEL(rnode->rn_clev);
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
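
/*
 * Illustrative example of the popmap scan in vm_radix_lookup_ge() (not
 * compiled): with rn_popmap == 0x0148 (slots 3, 6 and 8 populated) and
 * slot == 3,
 *
 *	-2 << 3 == ~0xf, masking off bits 0..3, so
 *	ffs(0x0148 & ~0xf) - 1 == ffs(0x0140) - 1 == 6,
 *
 * i.e. slot 6 is the first populated slot strictly greater than 3.
 */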

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/* Find the last set bit among the first slot bits. */
		slot = fls(rnode->rn_popmap & ((1 << slot) - 1)) - 1;
		if (slot < 0) {
			/*
			 * A page or edge smaller than the search slot is not
			 * found in the current node; ascend to the next
			 * higher-level node.
			 */
			goto ascend;
		}
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		KASSERT(child != NULL, ("%s: bad popmap slot %d in rnode %p",
		    __func__, slot, rnode));
		if (vm_radix_isleaf(child))
			return (vm_radix_topage(child));
		index = vm_radix_trimkey(index, rnode->rn_clev + 1) +
		    (slot + 1) * VM_RADIX_UNITLEVEL(rnode->rn_clev) - 1;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
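
/*
 * Illustrative example of the popmap scan in vm_radix_lookup_le() (not
 * compiled): with rn_popmap == 0x0148 (slots 3, 6 and 8 populated) and
 * slot == 8,
 *
 *	(1 << 8) - 1 == 0xff, so
 *	fls(0x0148 & 0xff) - 1 == fls(0x0048) - 1 == 6,
 *
 * i.e. slot 6 is the last populated slot strictly smaller than 8.
 */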

/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent, *tmp;
	vm_page_t m;
	int slot;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_root_store(rtree, NULL, LOCKED);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex != index)
				return (NULL);
			KASSERT((rnode->rn_popmap & (1 << slot)) != 0,
			    ("%s: bad popmap slot %d in rnode %p",
			    __func__, slot, rnode));
			rnode->rn_popmap ^= 1 << slot;
			vm_radix_node_store(
			    &rnode->rn_child[slot], NULL, LOCKED);
			if (!powerof2(rnode->rn_popmap))
				return (m);
			KASSERT(rnode->rn_popmap != 0,
			    ("%s: bad popmap all zeroes", __func__));
			slot = ffs(rnode->rn_popmap) - 1;
			tmp = vm_radix_node_load(&rnode->rn_child[slot],
			    LOCKED);
			KASSERT(tmp != NULL,
			    ("%s: bad popmap slot %d in rnode %p",
			    __func__, slot, rnode));
			if (parent == NULL)
				vm_radix_root_store(rtree, tmp, LOCKED);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(vm_radix_node_load(
				    &parent->rn_child[slot], LOCKED) == rnode,
				    ("%s: invalid child value", __func__));
				vm_radix_node_store(&parent->rn_child[slot],
				    tmp, LOCKED);
			}
			/*
			 * The child is still valid and we cannot zero the
			 * pointer until all SMR references are gone.
			 */
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = tmp;
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but recursion is bounded because the maximum
 * depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_root_load(rtree, LOCKED);
	if (root == NULL)
		return;
	vm_radix_root_store(rtree, NULL, UNSERIALIZED);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	vm_pindex_t index;
	int slot;

	index = newpage->pindex;
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		panic("%s: replacing page on an empty trie", __func__);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: original replacing root key not found",
			    __func__);
		rtree->rt_root = (uintptr_t)vm_radix_toleaf(newpage);
		return (m);
	}
	for (;;) {
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex != index)
				break;
			vm_radix_node_store(&rnode->rn_child[slot],
			    vm_radix_toleaf(newpage), LOCKED);
			return (m);
		} else if (tmp == NULL || vm_radix_keybarr(tmp, index))
			break;
		rnode = tmp;
	}
	panic("%s: original replacing page not found", __func__);
}

void
vm_radix_wait(void)
{
	uma_zwait(vm_radix_node_zone);
}

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode, *tmp;
	int slot;
	rn_popmap_t popmap;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children popmap %04x, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_popmap,
	    rnode->rn_clev);
	for (popmap = rnode->rn_popmap; popmap != 0; popmap ^= 1 << slot) {
		slot = ffs(popmap) - 1;
		tmp = vm_radix_node_load(&rnode->rn_child[slot], UNSERIALIZED);
		db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
		    slot, (void *)tmp,
		    vm_radix_isleaf(tmp) ? vm_radix_topage(tmp) : NULL,
		    rnode->rn_clev);
	}
}
#endif /* DDB */
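
/*
 * Typical usage sketch (illustrative only; the trie, page and index below
 * are hypothetical, and real consumers such as the VM object code have
 * their own layout and locking rules):
 *
 *	struct vm_radix trie;	// rt_root zeroed before first use
 *	vm_page_t m;
 *
 *	// Insertions, removals and the locked lookups must be serialized
 *	// by the caller, e.g. with an exclusive lock.
 *	if (vm_radix_insert(&trie, m) != 0) {
 *		// ENOMEM: no node could be allocated; vm_radix_wait() can
 *		// be used to wait for the node zone before retrying.
 *	}
 *	m = vm_radix_lookup(&trie, idx);	// locked lookup
 *	m = vm_radix_remove(&trie, idx);	// returns removed page or NULL
 *
 *	// vm_radix_lookup_unlocked() may run concurrently with writers; it
 *	// is protected only by SMR, so the caller must re-validate the
 *	// returned page before relying on it.
 *	m = vm_radix_lookup_unlocked(&trie, idx);
 */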