/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general-purpose library
 * because there are way too many parameters embedded that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - Size of the nodes should be as small as possible but still big enough
 *   to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))

struct vm_radix_node {
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO);
	if (rnode == NULL)
		return (NULL);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

	uma_zfree(vm_radix_node_zone, rnode);
}

/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}

/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level > 0) {
		ret >>= level * VM_RADIX_WIDTH;
		ret <<= level * VM_RADIX_WIDTH;
	}
	return (ret);
}

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

	return ((struct vm_radix_node *)rtree->rt_root);
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

	rtree->rt_root = (uintptr_t)rnode;
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}

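/*
 * For illustration, assuming the LP64 value VM_RADIX_WIDTH == 4, the helpers
 * above decompose a key into one 4-bit digit per level:
 *
 *	vm_radix_slot(0x1234, 0) == 0x4		(bits  3:0)
 *	vm_radix_slot(0x1234, 1) == 0x3		(bits  7:4)
 *	vm_radix_slot(0x1234, 2) == 0x2		(bits 11:8)
 *	vm_radix_trimkey(0x1234, 2) == 0x1200	(digits below level 2 cleared)
 *
 * An internal node's rn_owner holds such a trimmed key, covering all digits
 * above the node's own level.
 */
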
/*
 * Returns the slot where two keys differ.
 * It cannot accept 2 equal keys.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = VM_RADIX_LIMIT;; clev--)
		if (vm_radix_slot(index1, clev) != 0)
			return (clev);
}

/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (FALSE);
}

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	for (slot = 0; rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		if (!vm_radix_isleaf(rnode->rn_child[slot]))
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_child[slot] = NULL;
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}

#ifdef INVARIANTS
/*
 * Radix node zone destructor.
 */
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
	struct vm_radix_node *rnode;
	int slot;

	rnode = mem;
	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++)
		KASSERT(rnode->rn_child[slot] == NULL,
		    ("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif

#ifndef UMA_MD_SMALL_ALLOC
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory on architectures without a direct mapping, which would
 * otherwise have to carve into the kernel maps for every node allocation,
 * resulting in deadlocks for consumers already working with kernel maps.
 */
static void
vm_radix_reserve_kva(void *arg __unused)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_THIRD,
    vm_radix_reserve_kva, NULL);
#endif

/*
 * Initialize the UMA slab zone.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
	    vm_radix_node_zone_dtor,
#else
	    NULL,
#endif
	    NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM);
}

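/*
 * For illustration, with VM_RADIX_WIDTH == 4, vm_radix_keydiff() returns the
 * highest level at which two keys select different slots:
 *
 *	0x1234 ^ 0x1264 == 0x0050, so vm_radix_keydiff(0x1234, 0x1264) == 1
 *
 * vm_radix_insert() below uses this level to decide where the intermediate
 * node separating the two keys must be placed.
 */
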
/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	void **parentp;
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return (0);
	}
	parentp = (void **)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev + 1), 2, clev);
			if (tmp == NULL)
				return (ENOMEM);
			*parentp = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (rnode->rn_child[slot] == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return (0);
		}
		parentp = &rnode->rn_child[slot];
		rnode = rnode->rn_child[slot];
	}

	/*
	 * A new node is needed because the right insertion level is reached.
	 * Set up the new intermediate node and add the two children: the
	 * new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);
	*parentp = tmp;
	vm_radix_addpage(tmp, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp->rn_child[slot] = rnode;
	return (0);
}

/*
 * Returns TRUE if the specified radix tree contains a single leaf and FALSE
 * otherwise.
 */
boolean_t
vm_radix_is_singleton(struct vm_radix *rtree)
{
	struct vm_radix_node *rnode;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (FALSE);
	return (vm_radix_isleaf(rnode));
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			else
				break;
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = rnode->rn_child[slot];
	}
	return (NULL);
}

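/*
 * A small example of the semantics of the ranged lookups that follow: if the
 * trie contains pages only at indices 4 and 9, then vm_radix_lookup_ge(rtree,
 * 5) returns the page at index 9, vm_radix_lookup_le(rtree, 5) returns the
 * page at index 4, and vm_radix_lookup_ge(rtree, 10) returns NULL.
 */
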
/*
 * Look up the nearest entry at a position greater than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is bigger than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge bigger than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 * If the removal leaves the leaf's parent with only one child, the parent is
 * freed and its remaining child is promoted into its place.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_getroot(rtree);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_setroot(rtree, NULL);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				return (NULL);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_setroot(rtree, rnode->rn_child[i]);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(parent->rn_child[slot] == rnode,
				    ("%s: invalid child value", __func__));
				parent->rn_child[slot] = rnode->rn_child[i];
			}
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but the recursion is tightly bounded because
 * the maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	vm_pindex_t index;
	int slot;

	index = newpage->pindex;
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		panic("%s: replacing page on an empty trie", __func__);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: original replacing root key not found",
			    __func__);
		rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF;
		return (m);
	}
	for (;;) {
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex == index) {
				rnode->rn_child[slot] =
				    (void *)((uintptr_t)newpage |
				    VM_RADIX_ISLEAF);
				return (m);
			} else
				break;
		} else if (rnode->rn_child[slot] == NULL ||
		    vm_radix_keybarr(rnode->rn_child[slot], index))
			break;
		rnode = rnode->rn_child[slot];
	}
	panic("%s: original replacing page not found", __func__);
}

void
vm_radix_wait(void)
{
	uma_zwait(vm_radix_node_zone);
}

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++)
		if (rnode->rn_child[i] != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)rnode->rn_child[i],
			    vm_radix_isleaf(rnode->rn_child[i]) ?
			    vm_radix_topage(rnode->rn_child[i]) : NULL,
			    rnode->rn_clev);
}
#endif /* DDB */