/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 *
 * The following code is not generalized into a general purpose library
 * because there are too many parameters embedded that should really be
 * decided by the library consumers.  At the same time, consumers of this
 * code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible but still big
 *   enough to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the
 *   nodes and the necessity to avoid too much cache pollution during the
 *   trie operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif
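
/*
 * Illustrative arithmetic for the definitions below (not compiled): on an
 * LP64 platform, VM_RADIX_COUNT = 1 << 4 = 16 children per node and, with
 * a 64-bit vm_pindex_t, VM_RADIX_LIMIT = 64 / 4 - 1 = 15, the index of the
 * trie's topmost level.  Sixteen 8-byte child pointers occupy 128 bytes,
 * which matches the CACHE_LINE_SIZE used on amd64.
 */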
#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with the specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))

struct vm_radix_node {
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT);
	if (rnode == NULL)
		return (NULL);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free a radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

	uma_zfree(vm_radix_node_zone, rnode);
}

/*
 * Return the position in the node's child array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}

/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level > 0) {
		ret >>= level * VM_RADIX_WIDTH;
		ret <<= level * VM_RADIX_WIDTH;
	}
	return (ret);
}

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

	return ((struct vm_radix_node *)rtree->rt_root);
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

	rtree->rt_root = (uintptr_t)rnode;
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Adds the page as a child of the provided node.  The caller is
 * responsible for updating rn_count.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}
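
/*
 * Worked example of the two key helpers above (LP64, VM_RADIX_WIDTH 4;
 * the key values are illustrative only):
 *
 *	vm_radix_slot(0x1234, 1)    == (0x1234 >> 4) & 0xf == 0x3
 *	vm_radix_trimkey(0x1234, 1) == 0x1230
 *
 * That is, vm_radix_slot() extracts the 4-bit digit of the key at the
 * given level, while vm_radix_trimkey() clears every digit below it.
 */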
/*
 * Returns the level at which two keys differ.
 * The two keys must not be equal.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = VM_RADIX_LIMIT;; clev--)
		if (vm_radix_slot(index1, clev) != 0)
			return (clev);
}

/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (FALSE);
}

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	for (slot = 0; rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		if (!vm_radix_isleaf(rnode->rn_child[slot]))
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_child[slot] = NULL;
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}

#ifdef INVARIANTS
/*
 * Radix node zone destructor.
 */
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
	struct vm_radix_node *rnode;
	int slot;

	rnode = mem;
	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++)
		KASSERT(rnode->rn_child[slot] == NULL,
		    ("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif

static int
vm_radix_node_zone_init(void *mem, int size __unused, int flags __unused)
{
	struct vm_radix_node *rnode;

	rnode = mem;
	bzero(rnode, sizeof(*rnode));
	return (0);
}

#ifndef UMA_MD_SMALL_ALLOC
void vm_radix_reserve_kva(void);
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory on architectures that do not support a direct
 * mapping, as they would otherwise need to carve into the kernel maps
 * for every node allocation, resulting in deadlocks for consumers
 * already working with kernel maps.
 */
void
vm_radix_reserve_kva(void)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
#endif

/*
 * Initialize the UMA slab zone.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
	    vm_radix_node_zone_dtor,
#else
	    NULL,
#endif
	    vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM);
}
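
/*
 * Sketch of typical consumer usage, assuming a trie embedded in some
 * caller-owned structure and a resident page "m" (hypothetical names,
 * for illustration only; callers must provide their own synchronization):
 *
 *	struct vm_radix rtree = { 0 };		// zeroed trie is empty
 *
 *	if (vm_radix_insert(&rtree, m) != 0)
 *		return (ENOMEM);		// node zone allocation failed
 *	KASSERT(vm_radix_lookup(&rtree, m->pindex) == m, ("lost page"));
 *	(void)vm_radix_remove(&rtree, m->pindex);
 */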
/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	void **parentp;
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return (0);
	}
	parentp = (void **)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev + 1), 2, clev);
			if (tmp == NULL)
				return (ENOMEM);
			*parentp = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (rnode->rn_child[slot] == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return (0);
		}
		parentp = &rnode->rn_child[slot];
		rnode = rnode->rn_child[slot];
	}

	/*
	 * A new node is needed because the right insertion level was
	 * reached.  Set up the new intermediate node and add the two
	 * children: the new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);
	*parentp = tmp;
	vm_radix_addpage(tmp, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp->rn_child[slot] = rnode;
	return (0);
}

/*
 * Returns TRUE if the specified radix tree contains a single leaf and FALSE
 * otherwise.
 */
boolean_t
vm_radix_is_singleton(struct vm_radix *rtree)
{
	struct vm_radix_node *rnode;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (FALSE);
	return (vm_radix_isleaf(rnode));
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			else
				break;
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = rnode->rn_child[slot];
	}
	return (NULL);
}
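
/*
 * Worked example of the structure built by vm_radix_insert() and walked
 * by vm_radix_lookup() (LP64, illustrative key values only).  Inserting
 * pages with pindex 0x10 and 0x13 into an empty trie:
 *
 * - The first insert stores the leaf 0x10 directly in rt_root.
 * - The second insert finds a leaf with a different key; keydiff(0x10,
 *   0x13) == 0 (the keys differ in the lowest 4-bit digit), so a node
 *   with rn_clev 0 and rn_owner == trimkey(0x13, 1) == 0x10 becomes the
 *   root, holding leaf 0x10 in slot 0 and leaf 0x13 in slot 3.
 *
 * vm_radix_lookup(rtree, 0x13) then sees that the root is neither a leaf
 * nor a key barrier (trimkey(0x13, 1) == rn_owner), descends to slot 3,
 * and returns the matching leaf.
 */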
/*
 * Look up the nearest entry at a position greater than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is bigger than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge bigger than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
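
/*
 * Continuing the two-page example above (leaves 0x10 and 0x13 under a
 * root node with rn_clev 0; values illustrative only),
 * vm_radix_lookup_ge(rtree, 0x11) proceeds as follows: slot 1 is empty,
 * so the in-node scan advances index by VM_RADIX_UNITLEVEL(0) == 1 per
 * slot; slot 2 is also empty, and at slot 3 the leaf's pindex 0x13 >=
 * 0x13 satisfies the search, so the page at 0x13 is returned.
 */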
/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
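
/*
 * With the same two-page example (leaves 0x10 and 0x13; illustrative
 * values), vm_radix_lookup_le(rtree, 0x12) starts at slot 2, which is
 * empty, then scans downward one slot at a time: slot 1 is empty as
 * well, and at slot 0 the leaf's pindex 0x10 <= 0x10 satisfies the
 * search, so the page at 0x10 is returned.
 */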
/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_getroot(rtree);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_setroot(rtree, NULL);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				return (NULL);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_setroot(rtree, rnode->rn_child[i]);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(parent->rn_child[slot] == rnode,
				    ("%s: invalid child value", __func__));
				parent->rn_child[slot] = rnode->rn_child[i];
			}
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but the recursion is bounded because the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	vm_pindex_t index;
	int slot;

	index = newpage->pindex;
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		panic("%s: replacing page on an empty trie", __func__);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: original replacing root key not found",
			    __func__);
		rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF;
		return (m);
	}
	for (;;) {
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex == index) {
				rnode->rn_child[slot] =
				    (void *)((uintptr_t)newpage |
				    VM_RADIX_ISLEAF);
				return (m);
			} else
				break;
		} else if (rnode->rn_child[slot] == NULL ||
		    vm_radix_keybarr(rnode->rn_child[slot], index))
			break;
		rnode = rnode->rn_child[slot];
	}
	panic("%s: original replacing page not found", __func__);
}

void
vm_radix_wait(void)
{
	uma_zwait(vm_radix_node_zone);
}
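
/*
 * Minimal recovery sketch for an insertion failure, assuming a consumer
 * that holds a mutex named "lock" (hypothetical name, for illustration
 * only): vm_radix_wait() sleeps until the node zone can satisfy an
 * allocation, so the caller drops its lock, waits, and retries.
 *
 *	while (vm_radix_insert(&rtree, m) != 0) {
 *		mtx_unlock(&lock);	// cannot sleep with the lock held
 *		vm_radix_wait();	// sleep until a node is available
 *		mtx_lock(&lock);
 *	}
 */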
#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++)
		if (rnode->rn_child[i] != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)rnode->rn_child[i],
			    vm_radix_isleaf(rnode->rn_child[i]) ?
			    vm_radix_topage(rnode->rn_child[i]) : NULL,
			    rnode->rn_clev);
}
#endif /* DDB */