/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general purpose library
 * because there are way too many parameters embedded that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - Size of the nodes should be as small as possible but still big enough
 *   to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other might be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

/* Children per node, slot mask, and the deepest level index of the trie. */
#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)

/*
 * Flag bits stored in node pointers.
 * A child pointer with VM_RADIX_ISLEAF set is a tagged vm_page_t, not a
 * node; VM_RADIX_PAD is handed to UMA as the zone alignment so the low
 * bit of every node address is guaranteed free for tagging.
 */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))

struct vm_radix_node {
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.
 * Returns NULL on allocation failure; M_NOWAIT is used because callers
 * may hold locks that forbid sleeping.  M_ZERO leaves all child slots NULL.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO);
	if (rnode == NULL)
		return (NULL);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

	uma_zfree(vm_radix_node_zone, rnode);
}

/*
 * Return the position in the array for a given level.
 * Extracts the VM_RADIX_WIDTH-bit digit of index selected by level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}

/*
 * Trims the key after the specified level.
 * Zeroes every digit of index below the given level.
 */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level > 0) {
		ret >>= level * VM_RADIX_WIDTH;
		ret <<= level * VM_RADIX_WIDTH;
	}
	return (ret);
}

/*
 * Get the root node for a radix tree.
 * The result may itself be a tagged leaf; callers test with
 * vm_radix_isleaf() before dereferencing.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

	return ((struct vm_radix_node *)rtree->rt_root);
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

	rtree->rt_root = (uintptr_t)rnode;
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 * A leaf is recognized purely by the VM_RADIX_ISLEAF tag in the low bit.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the associated page extracted from rnode.
 * Strips the flag bits to recover the original vm_page_t pointer.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Adds the page as a child of the provided node.
 * The child slot is chosen from index at level clev; the stored pointer
 * is tagged with VM_RADIX_ISLEAF.  The caller maintains rn_count.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}

/*
 * Returns the slot where two keys differ.
 * It cannot accept 2 equal keys.
 * Scans digits from the most significant level down; the loop must
 * terminate because index1 ^ index2 is non-zero by the KASSERT.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = VM_RADIX_LIMIT;; clev--)
		if (vm_radix_slot(index1, clev) != 0)
			return (clev);
}

/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 * This is the path-compression check: the digits of idx above rn_clev
 * must match the node's owner prefix for the key to lie in its subtree.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (FALSE);
}

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	/* Walk slots until every valid child has been cleared and freed. */
	for (slot = 0; rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		/* Leaves are tagged pages owned elsewhere; free only nodes. */
		if (!vm_radix_isleaf(rnode->rn_child[slot]))
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_child[slot] = NULL;
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}

#ifdef INVARIANTS
/*
 * Radix node zone destructor.
 * Verifies that a node returned to the zone has no remaining children.
 */
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
	struct vm_radix_node *rnode;
	int slot;

	rnode = mem;
	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++)
		KASSERT(rnode->rn_child[slot] == NULL,
		    ("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif

#ifndef UMA_MD_SMALL_ALLOC
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory in architectures not supporting direct
 * mapping as they will need otherwise to carve into the kernel maps for
 * every node allocation, resulting into deadlocks for consumers already
 * working with kernel maps.
 */
static void
vm_radix_reserve_kva(void *arg __unused)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_THIRD,
    vm_radix_reserve_kva, NULL);
#endif

/*
 * Initialize the UMA slab zone.
 * VM_RADIX_PAD as the alignment argument keeps the low pointer bit free
 * for the leaf tag; the destructor is installed only under INVARIANTS.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
	    vm_radix_node_zone_dtor,
#else
	    NULL,
#endif
	    NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM);
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 * Returns 0 on success or ENOMEM if a node allocation fails; on failure
 * the trie is left unmodified.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	void **parentp;
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		/* Empty trie: store the page directly as a tagged root. */
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return (0);
	}
	parentp = (void **)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			/*
			 * Collided with an existing leaf: split by creating
			 * an intermediate node at the first differing level
			 * holding both pages.  The new node is fully
			 * populated before *parentp is updated.
			 */
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev + 1), 2, clev);
			if (tmp == NULL)
				return (ENOMEM);
			*parentp = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			/* Compressed path diverges; split below the loop. */
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (rnode->rn_child[slot] == NULL) {
			/* Free slot at the right level: attach the page. */
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return (0);
		}
		parentp = &rnode->rn_child[slot];
		rnode = rnode->rn_child[slot];
	}

	/*
	 * A new node is needed because the right insertion level is reached.
	 * Setup the new intermediate node and add the 2 children: the
	 * new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);
	*parentp = tmp;
	vm_radix_addpage(tmp, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp->rn_child[slot] = rnode;
	return (0);
}

/*
 * Returns TRUE if the specified radix tree contains a single leaf and FALSE
 * otherwise.
 */
boolean_t
vm_radix_is_singleton(struct vm_radix *rtree)
{
	struct vm_radix_node *rnode;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (FALSE);
	return (vm_radix_isleaf(rnode));
}

/*
 * Returns the value stored at the index.
If the index is not present, 415774d251dSAttilio Rao * NULL is returned. 416774d251dSAttilio Rao */ 417774d251dSAttilio Rao vm_page_t 418774d251dSAttilio Rao vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index) 419774d251dSAttilio Rao { 420774d251dSAttilio Rao struct vm_radix_node *rnode; 421774d251dSAttilio Rao vm_page_t m; 422774d251dSAttilio Rao int slot; 423774d251dSAttilio Rao 424774d251dSAttilio Rao rnode = vm_radix_getroot(rtree); 425774d251dSAttilio Rao while (rnode != NULL) { 42696f1a842SAlan Cox if (vm_radix_isleaf(rnode)) { 42796f1a842SAlan Cox m = vm_radix_topage(rnode); 428774d251dSAttilio Rao if (m->pindex == index) 429774d251dSAttilio Rao return (m); 430774d251dSAttilio Rao else 4316f9c0b15SAlan Cox break; 4326f9c0b15SAlan Cox } else if (vm_radix_keybarr(rnode, index)) 4336f9c0b15SAlan Cox break; 4346f9c0b15SAlan Cox slot = vm_radix_slot(index, rnode->rn_clev); 4356f9c0b15SAlan Cox rnode = rnode->rn_child[slot]; 436774d251dSAttilio Rao } 437774d251dSAttilio Rao return (NULL); 438774d251dSAttilio Rao } 439774d251dSAttilio Rao 440774d251dSAttilio Rao /* 441774d251dSAttilio Rao * Look up the nearest entry at a position bigger than or equal to index. 
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	/* Ancestor stack; trie depth is bounded by VM_RADIX_LIMIT. */
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		/* Single-leaf trie: the answer is direct. */
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might rollback to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is bigger than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge bigger than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		/* Push the current node and continue in the subtree. */
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	/* Ancestor stack; trie depth is bounded by VM_RADIX_LIMIT. */
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		/* Single-leaf trie: the answer is direct. */
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might rollback to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				/* Clamp to just past this subtree's range. */
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		/* Push the current node and continue in the subtree. */
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_getroot(rtree);
	if (vm_radix_isleaf(rnode)) {
		/* Single-leaf trie: clear the root if it matches. */
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_setroot(rtree, NULL);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				return (NULL);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			/*
			 * Only one child is left: collapse this node by
			 * splicing the remaining child into the parent
			 * (or the root) and freeing the node.
			 */
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_setroot(rtree, rnode->rn_child[i]);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(parent->rn_child[slot] == rnode,
				    ("%s: invalid child value", __func__));
				parent->rn_child[slot] = rnode->rn_child[i];
			}
			/* Empty the node before freeing for the zone dtor. */
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive but there is a tight control on it as the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	/* A leaf root is a page owned elsewhere; only free real nodes. */
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
739703b304fSAlan Cox * Panics if there is not an old page in the trie at the new page's index. 740e946b949SAttilio Rao */ 741e946b949SAttilio Rao vm_page_t 742703b304fSAlan Cox vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage) 743e946b949SAttilio Rao { 744e946b949SAttilio Rao struct vm_radix_node *rnode; 745e946b949SAttilio Rao vm_page_t m; 746703b304fSAlan Cox vm_pindex_t index; 747e946b949SAttilio Rao int slot; 748e946b949SAttilio Rao 749703b304fSAlan Cox index = newpage->pindex; 750e946b949SAttilio Rao rnode = vm_radix_getroot(rtree); 751e946b949SAttilio Rao if (rnode == NULL) 752e946b949SAttilio Rao panic("%s: replacing page on an empty trie", __func__); 753e946b949SAttilio Rao if (vm_radix_isleaf(rnode)) { 754e946b949SAttilio Rao m = vm_radix_topage(rnode); 755e946b949SAttilio Rao if (m->pindex != index) 756e946b949SAttilio Rao panic("%s: original replacing root key not found", 757e946b949SAttilio Rao __func__); 758e946b949SAttilio Rao rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF; 759e946b949SAttilio Rao return (m); 760e946b949SAttilio Rao } 761e946b949SAttilio Rao for (;;) { 762e946b949SAttilio Rao slot = vm_radix_slot(index, rnode->rn_clev); 763e946b949SAttilio Rao if (vm_radix_isleaf(rnode->rn_child[slot])) { 764e946b949SAttilio Rao m = vm_radix_topage(rnode->rn_child[slot]); 765e946b949SAttilio Rao if (m->pindex == index) { 766e946b949SAttilio Rao rnode->rn_child[slot] = 767e946b949SAttilio Rao (void *)((uintptr_t)newpage | 768e946b949SAttilio Rao VM_RADIX_ISLEAF); 769e946b949SAttilio Rao return (m); 770e946b949SAttilio Rao } else 771e946b949SAttilio Rao break; 772e946b949SAttilio Rao } else if (rnode->rn_child[slot] == NULL || 773e946b949SAttilio Rao vm_radix_keybarr(rnode->rn_child[slot], index)) 774e946b949SAttilio Rao break; 775e946b949SAttilio Rao rnode = rnode->rn_child[slot]; 776e946b949SAttilio Rao } 777e946b949SAttilio Rao panic("%s: original replacing page not found", __func__); 778e946b949SAttilio Rao } 779e946b949SAttilio Rao 
7808d6fbbb8SJeff Roberson void 7818d6fbbb8SJeff Roberson vm_radix_wait(void) 7828d6fbbb8SJeff Roberson { 7838d6fbbb8SJeff Roberson uma_zwait(vm_radix_node_zone); 7848d6fbbb8SJeff Roberson } 7858d6fbbb8SJeff Roberson 786774d251dSAttilio Rao #ifdef DDB 787774d251dSAttilio Rao /* 788774d251dSAttilio Rao * Show details about the given radix node. 789774d251dSAttilio Rao */ 790774d251dSAttilio Rao DB_SHOW_COMMAND(radixnode, db_show_radixnode) 791774d251dSAttilio Rao { 792774d251dSAttilio Rao struct vm_radix_node *rnode; 793774d251dSAttilio Rao int i; 794774d251dSAttilio Rao 795774d251dSAttilio Rao if (!have_addr) 796774d251dSAttilio Rao return; 797774d251dSAttilio Rao rnode = (struct vm_radix_node *)addr; 798774d251dSAttilio Rao db_printf("radixnode %p, owner %jx, children count %u, level %u:\n", 799774d251dSAttilio Rao (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count, 800774d251dSAttilio Rao rnode->rn_clev); 801774d251dSAttilio Rao for (i = 0; i < VM_RADIX_COUNT; i++) 802774d251dSAttilio Rao if (rnode->rn_child[i] != NULL) 803774d251dSAttilio Rao db_printf("slot: %d, val: %p, page: %p, clev: %d\n", 804774d251dSAttilio Rao i, (void *)rnode->rn_child[i], 80596f1a842SAlan Cox vm_radix_isleaf(rnode->rn_child[i]) ? 80696f1a842SAlan Cox vm_radix_topage(rnode->rn_child[i]) : NULL, 807774d251dSAttilio Rao rnode->rn_clev); 808774d251dSAttilio Rao } 809774d251dSAttilio Rao #endif /* DDB */ 810