/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Path-compressed radix trie implementation.
 *
 * This code is not generalized into a general-purpose library because there
 * are too many embedded parameters that should really be decided by the
 * library consumers.  At the same time, consumers of this code must achieve
 * the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible but still big
 *   enough to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity not to wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during trie
 *   operations.
 * - There is no strong bias toward the number of lookup operations over the
 *   number of insert and remove operations.  This implies that optimizations
 *   supposedly helping one operation but hurting the other must be carefully
 *   evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/smr.h>
#include <sys/smr_types.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)
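
/*
 * For illustration only (nothing below relies on these numbers): on an LP64
 * platform, VM_RADIX_WIDTH == 4 gives VM_RADIX_COUNT == 16 children per
 * node, so the rn_child array occupies 16 * 8 == 128 bytes, and a 64-bit
 * vm_pindex_t yields VM_RADIX_LIMIT == howmany(64, 4) - 1 == 15, i.e. levels
 * 0 through 15.  On a 32-bit platform, VM_RADIX_WIDTH == 3 gives 8 children
 * (32 bytes of pointers) and VM_RADIX_LIMIT == howmany(32, 3) - 1 == 10.
 */
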
#if VM_RADIX_WIDTH == 3
typedef uint8_t rn_popmap_t;
#elif VM_RADIX_WIDTH == 4
typedef uint16_t rn_popmap_t;
#elif VM_RADIX_WIDTH == 5
typedef uint32_t rn_popmap_t;
#else
#error Unsupported width
#endif
_Static_assert(sizeof(rn_popmap_t) <= sizeof(int),
    "rn_popmap_t too wide");

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))

enum vm_radix_access { SMR, LOCKED, UNSERIALIZED };

struct vm_radix_node;
typedef SMR_POINTER(struct vm_radix_node *) smrnode_t;

struct vm_radix_node {
	vm_pindex_t	rn_owner;			/* Owner of record. */
	rn_popmap_t	rn_popmap;			/* Valid children. */
	uint8_t		rn_clev;			/* Current level. */
	smrnode_t	rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;
static smr_t vm_radix_smr;

static void vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access);

/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{
	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}

/* Computes the key (index) with the low-order 'level' radix-digits zeroed. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	return (index & -VM_RADIX_UNITLEVEL(level));
}

/*
 * Allocate a radix node.
 */
static struct vm_radix_node *
vm_radix_node_get(vm_pindex_t index, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc_smr(vm_radix_node_zone, M_NOWAIT);
	if (rnode == NULL)
		return (NULL);

	/*
	 * We want to clear the last child pointer after the final section
	 * has exited so lookup can not return false negatives.  It is done
	 * here because it will be cache-cold in the dtor callback.
	 */
	if (rnode->rn_popmap != 0) {
		vm_radix_node_store(&rnode->rn_child[ffs(rnode->rn_popmap) -
		    1], NULL, UNSERIALIZED);
		rnode->rn_popmap = 0;
	}
	rnode->rn_owner = vm_radix_trimkey(index, clevel + 1);
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{
#ifdef INVARIANTS
	int slot;

	KASSERT(powerof2(rnode->rn_popmap),
	    ("vm_radix_node_put: rnode %p has too many children %04x", rnode,
	    rnode->rn_popmap));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++) {
		if ((rnode->rn_popmap & (1 << slot)) != 0)
			continue;
		KASSERT(smr_unserialized_load(&rnode->rn_child[slot], true) ==
		    NULL, ("vm_radix_node_put: rnode %p has a child", rnode));
	}
#endif
	uma_zfree_smr(vm_radix_node_zone, rnode);
}
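
/*
 * Illustrative example (not relied upon by the code): with
 * VM_RADIX_WIDTH == 4, a node whose rn_popmap is 0x0014 (binary 10100) has
 * valid children in slots 2 and 4 only.  ffs(rn_popmap) - 1 == 2 selects the
 * lowest occupied slot, and powerof2(rn_popmap) is true when no more than
 * one bit is set, which is what vm_radix_node_put() asserts above.
 */
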
/*
 * Fetch a node pointer from a slot in another node.
 */
static __inline struct vm_radix_node *
vm_radix_node_load(smrnode_t *p, enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		return (smr_unserialized_load(p, true));
	case LOCKED:
		return (smr_serialized_load(p, true));
	case SMR:
		return (smr_entered_load(p, vm_radix_smr));
	}
	__assert_unreachable();
}

static __inline void
vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		smr_unserialized_store(p, v, true);
		break;
	case LOCKED:
		smr_serialized_store(p, v, true);
		break;
	case SMR:
		panic("vm_radix_node_store: Not supported in smr section.");
	}
}

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_root_load(struct vm_radix *rtree, enum vm_radix_access access)
{

	return (vm_radix_node_load((smrnode_t *)&rtree->rt_root, access));
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_root_store(struct vm_radix *rtree, struct vm_radix_node *rnode,
    enum vm_radix_access access)
{

	vm_radix_node_store((smrnode_t *)&rtree->rt_root, rnode, access);
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline bool
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the page cast to a radix node with the leaf bit set.
 */
static __inline struct vm_radix_node *
vm_radix_toleaf(vm_page_t page)
{
	return ((struct vm_radix_node *)((uintptr_t)page | VM_RADIX_ISLEAF));
}

/*
 * Returns the page associated with the leaf node rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Make 'child' a child of 'rnode'.
 */
static __inline void
vm_radix_addnode(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    struct vm_radix_node *child, enum vm_radix_access access)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	vm_radix_node_store(&rnode->rn_child[slot], child, access);
	rnode->rn_popmap ^= 1 << slot;
	KASSERT((rnode->rn_popmap & (1 << slot)) != 0,
	    ("%s: bad popmap slot %d in rnode %p", __func__, slot, rnode));
}

/*
 * Returns the level at which two keys differ.
 * It cannot accept two equal keys.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));
	CTASSERT(sizeof(long long) >= sizeof(vm_pindex_t));

	/*
	 * From the highest-order bit where the indexes differ,
	 * compute the highest level in the trie at which they differ.
	 */
	return ((flsll(index1 ^ index2) - 1) / VM_RADIX_WIDTH);
}
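
/*
 * A worked example (illustrative only): with VM_RADIX_WIDTH == 4, keys
 * 0x1234 and 0x1634 differ only in their third radix digit.  Their XOR is
 * 0x0400, flsll(0x0400) == 11, and (11 - 1) / 4 == 2, so vm_radix_keydiff()
 * reports level 2, the level of the node that must separate the two keys.
 */
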
/*
 * Returns TRUE if it can be determined that the key does not belong to the
 * subtree rooted at the specified rnode.  Otherwise, returns FALSE.
 */
static __inline bool
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (false);
}

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	struct vm_radix_node *child;
	int slot;

	while (rnode->rn_popmap != 0) {
		slot = ffs(rnode->rn_popmap) - 1;
		child = vm_radix_node_load(&rnode->rn_child[slot],
		    UNSERIALIZED);
		KASSERT(child != NULL, ("%s: bad popmap slot %d in rnode %p",
		    __func__, slot, rnode));
		if (!vm_radix_isleaf(child))
			vm_radix_reclaim_allnodes_int(child);
		rnode->rn_popmap ^= 1 << slot;
		vm_radix_node_store(&rnode->rn_child[slot], NULL,
		    UNSERIALIZED);
	}
	vm_radix_node_put(rnode);
}

#ifndef UMA_MD_SMALL_ALLOC
void vm_radix_reserve_kva(void);
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory on architectures without a direct mapping: otherwise
 * every node allocation would have to carve into the kernel maps, resulting
 * in deadlocks for consumers already working with kernel maps.
 */
void
vm_radix_reserve_kva(void)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
#endif

/*
 * Initialize the UMA slab zone.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL, NULL, NULL, NULL,
	    VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_SMR | UMA_ZONE_ZINIT);
	vm_radix_smr = uma_zone_get_smr(vm_radix_node_zone);
}
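
/*
 * Usage sketch (illustrative only; the object and field names below are
 * assumptions made for the example): callers normally modify the trie that
 * is embedded in a VM object with the object lock held, and may use
 * vm_radix_wait() to sleep until a node allocation can succeed, e.g.
 *
 *	VM_OBJECT_WLOCK(object);
 *	error = vm_radix_insert(&object->rtree, m);
 *	VM_OBJECT_WUNLOCK(object);
 *	if (error == ENOMEM)
 *		vm_radix_wait();
 */
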
/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	struct vm_radix_node *leaf, *rnode, *tmp;
	smrnode_t *parentp;
	int slot;
	uint16_t clev;

	index = page->pindex;
	leaf = vm_radix_toleaf(page);

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)leaf;
		return (0);
	}
	for (parentp = (smrnode_t *)&rtree->rt_root;; rnode = tmp) {
		if (vm_radix_isleaf(rnode)) {
			newind = vm_radix_topage(rnode)->pindex;
			if (newind == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			break;
		} else if (vm_radix_keybarr(rnode, index)) {
			newind = rnode->rn_owner;
			break;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		parentp = &rnode->rn_child[slot];
		tmp = vm_radix_node_load(parentp, LOCKED);
		if (tmp == NULL) {
			vm_radix_addnode(rnode, index, rnode->rn_clev, leaf,
			    LOCKED);
			return (0);
		}
	}

	/*
	 * A new node is needed because the right insertion level was reached.
	 * Set up the new intermediate node and add the two children: the
	 * new object and the older edge or object.
	 */
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(index, clev);
	if (tmp == NULL)
		return (ENOMEM);
	/* These writes are not yet visible due to ordering. */
	vm_radix_addnode(tmp, index, clev, leaf, UNSERIALIZED);
	vm_radix_addnode(tmp, newind, clev, rnode, UNSERIALIZED);
	/* Serializing write to make the above visible. */
	vm_radix_node_store(parentp, tmp, LOCKED);
	return (0);
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
static __always_inline vm_page_t
_vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index,
    enum vm_radix_access access)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_root_load(rtree, access);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			break;
		}
		if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = vm_radix_node_load(&rnode->rn_child[slot], access);
	}
	return (NULL);
}

/*
 * Returns the value stored at the index, assuming access is externally
 * synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{

	return (_vm_radix_lookup(rtree, index, LOCKED));
}

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_page_t m;

	smr_enter(vm_radix_smr);
	m = _vm_radix_lookup(rtree, index, SMR);
	smr_exit(vm_radix_smr);

	return (m);
}
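
/*
 * Note on unlocked lookups (illustrative guidance, not enforced here): a
 * page returned by vm_radix_lookup_unlocked() was present at some point
 * during the SMR read section, but the trie may change as soon as smr_exit()
 * is called.  Callers are expected to revalidate the result, for example by
 * busying the page and rechecking that it still has the expected object and
 * pindex.
 */
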
/*
 * Returns the page with the least pindex that is greater than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *succ;
	vm_page_t m;
	int slot;

	/*
	 * Descend the trie as if performing an ordinary lookup for the page
	 * with the specified pindex.  However, unlike an ordinary lookup, as
	 * we descend the trie, we use "succ" to remember the last
	 * branching-off point, that is, the interior node under which the
	 * page with the least pindex that is both outside our current path
	 * down the trie and more than the specified pindex resides.  (The
	 * node's popmap makes it fast and easy to recognize a branching-off
	 * point.)  If our ordinary lookup fails to yield a page with a pindex
	 * that is greater than or equal to the specified pindex, then we will
	 * exit this loop and perform a lookup starting from "succ".  If
	 * "succ" is not NULL, then that lookup is guaranteed to succeed.
	 */
	rnode = vm_radix_root_load(rtree, LOCKED);
	succ = NULL;
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex >= index)
				return (m);
			break;
		}
		if (vm_radix_keybarr(rnode, index)) {
			/*
			 * If all pages in this subtree have pindex > index,
			 * then the page in this subtree with the least pindex
			 * is the answer.
			 */
			if (rnode->rn_owner > index)
				succ = rnode;
			break;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);

		/*
		 * Just in case the next search step leads to a subtree of all
		 * pages with pindex < index, check popmap to see if a next
		 * bigger step, to a subtree of all pages with pindex > index,
		 * is available.  If so, remember to restart the search here.
		 */
		if ((rnode->rn_popmap >> slot) > 1)
			succ = rnode;
		rnode = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
	}

	/*
	 * Restart the search from the last place visited in the subtree that
	 * included some pages with pindex > index, if there was such a place.
	 */
	if (succ == NULL)
		return (NULL);
	if (succ != rnode) {
		/*
		 * Take a step to the next bigger sibling of the node chosen
		 * last time.  In that subtree, all pages have pindex > index.
		 */
		slot = vm_radix_slot(index, succ->rn_clev) + 1;
		KASSERT((succ->rn_popmap >> slot) != 0,
		    ("%s: no popmap siblings past slot %d in node %p",
		    __func__, slot, succ));
		slot += ffs(succ->rn_popmap >> slot) - 1;
		succ = vm_radix_node_load(&succ->rn_child[slot], LOCKED);
	}

	/*
	 * Find the page in the subtree rooted at "succ" with the least
	 * pindex.
	 */
	while (!vm_radix_isleaf(succ)) {
		KASSERT(succ->rn_popmap != 0,
		    ("%s: no popmap children in node %p", __func__, succ));
		slot = ffs(succ->rn_popmap) - 1;
		succ = vm_radix_node_load(&succ->rn_child[slot], LOCKED);
	}
	return (vm_radix_topage(succ));
}

/*
 * Returns the page with the greatest pindex that is less than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *pred, *rnode;
	vm_page_t m;
	int slot;

	/*
	 * Mirror the implementation of vm_radix_lookup_ge, described above.
	 */
	rnode = vm_radix_root_load(rtree, LOCKED);
	pred = NULL;
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex <= index)
				return (m);
			break;
		}
		if (vm_radix_keybarr(rnode, index)) {
			if (rnode->rn_owner < index)
				pred = rnode;
			break;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		if ((rnode->rn_popmap & ((1 << slot) - 1)) != 0)
			pred = rnode;
		rnode = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
	}
	if (pred == NULL)
		return (NULL);
	if (pred != rnode) {
		slot = vm_radix_slot(index, pred->rn_clev);
		KASSERT((pred->rn_popmap & ((1 << slot) - 1)) != 0,
		    ("%s: no popmap siblings before slot %d in node %p",
		    __func__, slot, pred));
		slot = fls(pred->rn_popmap & ((1 << slot) - 1)) - 1;
		pred = vm_radix_node_load(&pred->rn_child[slot], LOCKED);
	}
	while (!vm_radix_isleaf(pred)) {
		KASSERT(pred->rn_popmap != 0,
		    ("%s: no popmap children in node %p", __func__, pred));
		slot = fls(pred->rn_popmap) - 1;
		pred = vm_radix_node_load(&pred->rn_child[slot], LOCKED);
	}
	return (vm_radix_topage(pred));
}
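
/*
 * Illustrative note on the branching-off test used by the two lookups above
 * (example values only): if a node's rn_popmap is 0x0016 (binary 10110) and
 * the descent takes slot 2, then rn_popmap >> 2 == 0b101 > 1, so a sibling
 * with a larger slot (slot 4) exists and the node is a valid "succ" for
 * vm_radix_lookup_ge(); likewise rn_popmap & ((1 << 2) - 1) == 0b10 != 0, so
 * slot 1 makes it a valid "pred" for vm_radix_lookup_le().
 */
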
/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent, *tmp;
	vm_page_t m;
	int slot;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_root_store(rtree, NULL, LOCKED);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex != index)
				return (NULL);
			KASSERT((rnode->rn_popmap & (1 << slot)) != 0,
			    ("%s: bad popmap slot %d in rnode %p",
			    __func__, slot, rnode));
			rnode->rn_popmap ^= 1 << slot;
			vm_radix_node_store(
			    &rnode->rn_child[slot], NULL, LOCKED);
			if (!powerof2(rnode->rn_popmap))
				return (m);
			KASSERT(rnode->rn_popmap != 0,
			    ("%s: bad popmap all zeroes", __func__));
			slot = ffs(rnode->rn_popmap) - 1;
			tmp = vm_radix_node_load(&rnode->rn_child[slot],
			    LOCKED);
			KASSERT(tmp != NULL,
			    ("%s: bad popmap slot %d in rnode %p",
			    __func__, slot, rnode));
			if (parent == NULL)
				vm_radix_root_store(rtree, tmp, LOCKED);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(vm_radix_node_load(
				    &parent->rn_child[slot], LOCKED) == rnode,
				    ("%s: invalid child value", __func__));
				vm_radix_node_store(&parent->rn_child[slot],
				    tmp, LOCKED);
			}
			/*
			 * The child is still valid and we can not zero the
			 * pointer until all SMR references are gone.
			 */
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = tmp;
	}
}
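
/*
 * Illustrative example of the collapse step above (values are hypothetical):
 * if rn_popmap was 0b0110 and the leaf in slot 2 is removed, the remaining
 * popmap 0b0010 is a power of two, so the node has a single child left.
 * That child is hoisted into the parent's (or the root's) slot and the node
 * itself is freed through the SMR-aware zone, which defers reuse until
 * concurrent unlocked lookups can no longer observe it.
 */
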
/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but there is a tight control on it as the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_root_load(rtree, LOCKED);
	if (root == NULL)
		return;
	vm_radix_root_store(rtree, NULL, UNSERIALIZED);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	vm_pindex_t index;
	int slot;

	index = newpage->pindex;
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		panic("%s: replacing page on an empty trie", __func__);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: original replacing root key not found",
			    __func__);
		rtree->rt_root = (uintptr_t)vm_radix_toleaf(newpage);
		return (m);
	}
	for (;;) {
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex != index)
				break;
			vm_radix_node_store(&rnode->rn_child[slot],
			    vm_radix_toleaf(newpage), LOCKED);
			return (m);
		} else if (tmp == NULL || vm_radix_keybarr(tmp, index))
			break;
		rnode = tmp;
	}
	panic("%s: original replacing page not found", __func__);
}

void
vm_radix_wait(void)
{
	uma_zwait(vm_radix_node_zone);
}

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode, *tmp;
	int slot;
	rn_popmap_t popmap;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children popmap %04x, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_popmap,
	    rnode->rn_clev);
	for (popmap = rnode->rn_popmap; popmap != 0; popmap ^= 1 << slot) {
		slot = ffs(popmap) - 1;
		tmp = vm_radix_node_load(&rnode->rn_child[slot], UNSERIALIZED);
		db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
		    slot, (void *)tmp,
		    vm_radix_isleaf(tmp) ? vm_radix_topage(tmp) : NULL,
		    rnode->rn_clev);
	}
}
#endif /* DDB */