1fe267a55SPedro F. Giffuni /*- 2fe267a55SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3fe267a55SPedro F. Giffuni * 4774d251dSAttilio Rao * Copyright (c) 2013 EMC Corp. 5774d251dSAttilio Rao * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org> 6774d251dSAttilio Rao * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com> 7774d251dSAttilio Rao * All rights reserved. 8774d251dSAttilio Rao * 9774d251dSAttilio Rao * Redistribution and use in source and binary forms, with or without 10774d251dSAttilio Rao * modification, are permitted provided that the following conditions 11774d251dSAttilio Rao * are met: 12774d251dSAttilio Rao * 1. Redistributions of source code must retain the above copyright 13774d251dSAttilio Rao * notice, this list of conditions and the following disclaimer. 14774d251dSAttilio Rao * 2. Redistributions in binary form must reproduce the above copyright 15774d251dSAttilio Rao * notice, this list of conditions and the following disclaimer in the 16774d251dSAttilio Rao * documentation and/or other materials provided with the distribution. 17774d251dSAttilio Rao * 18774d251dSAttilio Rao * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19774d251dSAttilio Rao * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20774d251dSAttilio Rao * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21774d251dSAttilio Rao * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22774d251dSAttilio Rao * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23774d251dSAttilio Rao * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24774d251dSAttilio Rao * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25774d251dSAttilio Rao * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26774d251dSAttilio Rao * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27774d251dSAttilio Rao * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28774d251dSAttilio Rao * SUCH DAMAGE. 29774d251dSAttilio Rao * 30774d251dSAttilio Rao */ 31774d251dSAttilio Rao 32774d251dSAttilio Rao /* 33774d251dSAttilio Rao * Path-compressed radix trie implementation. 34774d251dSAttilio Rao * The following code is not generalized into a general purpose library 35774d251dSAttilio Rao * because there are way too many parameters embedded that should really 36774d251dSAttilio Rao * be decided by the library consumers. At the same time, consumers 37774d251dSAttilio Rao * of this code must achieve highest possible performance. 38774d251dSAttilio Rao * 39774d251dSAttilio Rao * The implementation takes into account the following rationale: 40774d251dSAttilio Rao * - Size of the nodes should be as small as possible but still big enough 41774d251dSAttilio Rao * to avoid a large maximum depth for the trie. This is a balance 42774d251dSAttilio Rao * between the necessity to not wire too much physical memory for the nodes 43774d251dSAttilio Rao * and the necessity to avoid too much cache pollution during the trie 44774d251dSAttilio Rao * operations. 45774d251dSAttilio Rao * - There is not a huge bias toward the number of lookup operations over 46774d251dSAttilio Rao * the number of insert and remove operations. 
This basically implies
 *    that optimizations supposedly helping one operation but hurting the
 *    other might be carefully evaluated.
 *  - On average not many nodes are expected to be fully populated, hence
 *    level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

/* Number of children per node and the mask to extract one slot index. */
#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)

/* Index of the highest level (the root of a maximally tall trie). */
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)

/*
 * Flag bits stored in node pointers.  A child pointer with VM_RADIX_ISLEAF
 * set is actually a tagged vm_page_t, not a node pointer.
 */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))

struct vm_radix_node {
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.
 * The allocation is made with M_NOWAIT, so NULL is returned on failure;
 * M_ZERO guarantees all child pointers start out NULL.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO);
	if (rnode == NULL)
		return (NULL);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
126774d251dSAttilio Rao */ 127774d251dSAttilio Rao static __inline void 128774d251dSAttilio Rao vm_radix_node_put(struct vm_radix_node *rnode) 129774d251dSAttilio Rao { 130774d251dSAttilio Rao 131774d251dSAttilio Rao uma_zfree(vm_radix_node_zone, rnode); 132774d251dSAttilio Rao } 133774d251dSAttilio Rao 134774d251dSAttilio Rao /* 135774d251dSAttilio Rao * Return the position in the array for a given level. 136774d251dSAttilio Rao */ 137774d251dSAttilio Rao static __inline int 138774d251dSAttilio Rao vm_radix_slot(vm_pindex_t index, uint16_t level) 139774d251dSAttilio Rao { 140774d251dSAttilio Rao 1419f2e6008SAlan Cox return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK); 142774d251dSAttilio Rao } 143774d251dSAttilio Rao 144774d251dSAttilio Rao /* Trims the key after the specified level. */ 145774d251dSAttilio Rao static __inline vm_pindex_t 146774d251dSAttilio Rao vm_radix_trimkey(vm_pindex_t index, uint16_t level) 147774d251dSAttilio Rao { 148774d251dSAttilio Rao vm_pindex_t ret; 149774d251dSAttilio Rao 150774d251dSAttilio Rao ret = index; 1519f2e6008SAlan Cox if (level > 0) { 1529f2e6008SAlan Cox ret >>= level * VM_RADIX_WIDTH; 1539f2e6008SAlan Cox ret <<= level * VM_RADIX_WIDTH; 154774d251dSAttilio Rao } 155774d251dSAttilio Rao return (ret); 156774d251dSAttilio Rao } 157774d251dSAttilio Rao 158774d251dSAttilio Rao /* 159774d251dSAttilio Rao * Get the root node for a radix tree. 160774d251dSAttilio Rao */ 161774d251dSAttilio Rao static __inline struct vm_radix_node * 162774d251dSAttilio Rao vm_radix_getroot(struct vm_radix *rtree) 163774d251dSAttilio Rao { 164774d251dSAttilio Rao 1652c899fedSAlan Cox return ((struct vm_radix_node *)rtree->rt_root); 166774d251dSAttilio Rao } 167774d251dSAttilio Rao 168774d251dSAttilio Rao /* 169774d251dSAttilio Rao * Set the root node for a radix tree. 
170774d251dSAttilio Rao */ 171774d251dSAttilio Rao static __inline void 172774d251dSAttilio Rao vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode) 173774d251dSAttilio Rao { 174774d251dSAttilio Rao 175774d251dSAttilio Rao rtree->rt_root = (uintptr_t)rnode; 176774d251dSAttilio Rao } 177774d251dSAttilio Rao 178774d251dSAttilio Rao /* 1793fc10b73SAlan Cox * Returns TRUE if the specified radix node is a leaf and FALSE otherwise. 1803fc10b73SAlan Cox */ 1813fc10b73SAlan Cox static __inline boolean_t 1823fc10b73SAlan Cox vm_radix_isleaf(struct vm_radix_node *rnode) 1833fc10b73SAlan Cox { 1843fc10b73SAlan Cox 1853fc10b73SAlan Cox return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0); 1863fc10b73SAlan Cox } 1873fc10b73SAlan Cox 1883fc10b73SAlan Cox /* 18996f1a842SAlan Cox * Returns the associated page extracted from rnode. 190774d251dSAttilio Rao */ 191774d251dSAttilio Rao static __inline vm_page_t 19296f1a842SAlan Cox vm_radix_topage(struct vm_radix_node *rnode) 193774d251dSAttilio Rao { 194774d251dSAttilio Rao 19596f1a842SAlan Cox return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS)); 196774d251dSAttilio Rao } 197774d251dSAttilio Rao 198774d251dSAttilio Rao /* 199774d251dSAttilio Rao * Adds the page as a child of the provided node. 200774d251dSAttilio Rao */ 201774d251dSAttilio Rao static __inline void 202774d251dSAttilio Rao vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev, 203774d251dSAttilio Rao vm_page_t page) 204774d251dSAttilio Rao { 205774d251dSAttilio Rao int slot; 206774d251dSAttilio Rao 207774d251dSAttilio Rao slot = vm_radix_slot(index, clev); 208774d251dSAttilio Rao rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF); 209774d251dSAttilio Rao } 210774d251dSAttilio Rao 211774d251dSAttilio Rao /* 212774d251dSAttilio Rao * Returns the slot where two keys differ. 213774d251dSAttilio Rao * It cannot accept 2 equal keys. 
214774d251dSAttilio Rao */ 215774d251dSAttilio Rao static __inline uint16_t 216774d251dSAttilio Rao vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2) 217774d251dSAttilio Rao { 218774d251dSAttilio Rao uint16_t clev; 219774d251dSAttilio Rao 220774d251dSAttilio Rao KASSERT(index1 != index2, ("%s: passing the same key value %jx", 221774d251dSAttilio Rao __func__, (uintmax_t)index1)); 222774d251dSAttilio Rao 223774d251dSAttilio Rao index1 ^= index2; 2249f2e6008SAlan Cox for (clev = VM_RADIX_LIMIT;; clev--) 225bb0e1de4SAlan Cox if (vm_radix_slot(index1, clev) != 0) 226774d251dSAttilio Rao return (clev); 227774d251dSAttilio Rao } 228774d251dSAttilio Rao 229774d251dSAttilio Rao /* 230774d251dSAttilio Rao * Returns TRUE if it can be determined that key does not belong to the 231774d251dSAttilio Rao * specified rnode. Otherwise, returns FALSE. 232774d251dSAttilio Rao */ 233774d251dSAttilio Rao static __inline boolean_t 234774d251dSAttilio Rao vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx) 235774d251dSAttilio Rao { 236774d251dSAttilio Rao 2379f2e6008SAlan Cox if (rnode->rn_clev < VM_RADIX_LIMIT) { 2389f2e6008SAlan Cox idx = vm_radix_trimkey(idx, rnode->rn_clev + 1); 239c1c82b36SAlan Cox return (idx != rnode->rn_owner); 240774d251dSAttilio Rao } 241774d251dSAttilio Rao return (FALSE); 242774d251dSAttilio Rao } 243774d251dSAttilio Rao 244774d251dSAttilio Rao /* 245774d251dSAttilio Rao * Internal helper for vm_radix_reclaim_allnodes(). 246774d251dSAttilio Rao * This function is recursive. 
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	/*
	 * The loop is bounded by rn_count, not the array size: it stops as
	 * soon as all live children have been detached and freed.
	 */
	for (slot = 0; rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		/* Leaves are pages owned by the caller; free only nodes. */
		if (!vm_radix_isleaf(rnode->rn_child[slot]))
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_child[slot] = NULL;
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}

#ifdef INVARIANTS
/*
 * Radix node zone destructor.
269774d251dSAttilio Rao */ 270774d251dSAttilio Rao static void 271774d251dSAttilio Rao vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused) 272774d251dSAttilio Rao { 273774d251dSAttilio Rao struct vm_radix_node *rnode; 274774d251dSAttilio Rao int slot; 275774d251dSAttilio Rao 276774d251dSAttilio Rao rnode = mem; 277774d251dSAttilio Rao KASSERT(rnode->rn_count == 0, 278774d251dSAttilio Rao ("vm_radix_node_put: rnode %p has %d children", rnode, 279774d251dSAttilio Rao rnode->rn_count)); 280774d251dSAttilio Rao for (slot = 0; slot < VM_RADIX_COUNT; slot++) 281774d251dSAttilio Rao KASSERT(rnode->rn_child[slot] == NULL, 282774d251dSAttilio Rao ("vm_radix_node_put: rnode %p has a child", rnode)); 283774d251dSAttilio Rao } 284774d251dSAttilio Rao #endif 285774d251dSAttilio Rao 286e946b949SAttilio Rao #ifndef UMA_MD_SMALL_ALLOC 287*ae941b1bSGleb Smirnoff void vm_radix_reserve_kva(void); 288774d251dSAttilio Rao /* 289e946b949SAttilio Rao * Reserve the KVA necessary to satisfy the node allocation. 290e946b949SAttilio Rao * This is mandatory in architectures not supporting direct 291e946b949SAttilio Rao * mapping as they will need otherwise to carve into the kernel maps for 292e946b949SAttilio Rao * every node allocation, resulting into deadlocks for consumers already 293e946b949SAttilio Rao * working with kernel maps. 294774d251dSAttilio Rao */ 295*ae941b1bSGleb Smirnoff void 296*ae941b1bSGleb Smirnoff vm_radix_reserve_kva(void) 297774d251dSAttilio Rao { 298774d251dSAttilio Rao 299880659feSAlan Cox /* 300880659feSAlan Cox * Calculate the number of reserved nodes, discounting the pages that 301880659feSAlan Cox * are needed to store them. 
302880659feSAlan Cox */ 303e946b949SAttilio Rao if (!uma_zone_reserve_kva(vm_radix_node_zone, 30444f1c916SBryan Drewery ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE + 305e946b949SAttilio Rao sizeof(struct vm_radix_node)))) 306e946b949SAttilio Rao panic("%s: unable to reserve KVA", __func__); 307774d251dSAttilio Rao } 308e946b949SAttilio Rao #endif 309774d251dSAttilio Rao 310774d251dSAttilio Rao /* 311774d251dSAttilio Rao * Initialize the UMA slab zone. 312774d251dSAttilio Rao */ 313774d251dSAttilio Rao void 314cd1241fbSKonstantin Belousov vm_radix_zinit(void) 315774d251dSAttilio Rao { 316774d251dSAttilio Rao 317774d251dSAttilio Rao vm_radix_node_zone = uma_zcreate("RADIX NODE", 318774d251dSAttilio Rao sizeof(struct vm_radix_node), NULL, 319774d251dSAttilio Rao #ifdef INVARIANTS 320774d251dSAttilio Rao vm_radix_node_zone_dtor, 321774d251dSAttilio Rao #else 322774d251dSAttilio Rao NULL, 323774d251dSAttilio Rao #endif 324e946b949SAttilio Rao NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM); 325774d251dSAttilio Rao } 326774d251dSAttilio Rao 327774d251dSAttilio Rao /* 328774d251dSAttilio Rao * Inserts the key-value pair into the trie. 329774d251dSAttilio Rao * Panics if the key already exists. 330774d251dSAttilio Rao */ 331e946b949SAttilio Rao int 332774d251dSAttilio Rao vm_radix_insert(struct vm_radix *rtree, vm_page_t page) 333774d251dSAttilio Rao { 334774d251dSAttilio Rao vm_pindex_t index, newind; 335a08f2cf6SAlan Cox void **parentp; 336a08f2cf6SAlan Cox struct vm_radix_node *rnode, *tmp; 337774d251dSAttilio Rao vm_page_t m; 338774d251dSAttilio Rao int slot; 339774d251dSAttilio Rao uint16_t clev; 340774d251dSAttilio Rao 341774d251dSAttilio Rao index = page->pindex; 342774d251dSAttilio Rao 343774d251dSAttilio Rao /* 344774d251dSAttilio Rao * The owner of record for root is not really important because it 345774d251dSAttilio Rao * will never be used. 
346774d251dSAttilio Rao */ 347774d251dSAttilio Rao rnode = vm_radix_getroot(rtree); 348774d251dSAttilio Rao if (rnode == NULL) { 349a08f2cf6SAlan Cox rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF; 350e946b949SAttilio Rao return (0); 351774d251dSAttilio Rao } 352a08f2cf6SAlan Cox parentp = (void **)&rtree->rt_root; 353a08f2cf6SAlan Cox for (;;) { 354a08f2cf6SAlan Cox if (vm_radix_isleaf(rnode)) { 355a08f2cf6SAlan Cox m = vm_radix_topage(rnode); 356774d251dSAttilio Rao if (m->pindex == index) 357774d251dSAttilio Rao panic("%s: key %jx is already present", 358774d251dSAttilio Rao __func__, (uintmax_t)index); 359774d251dSAttilio Rao clev = vm_radix_keydiff(m->pindex, index); 360774d251dSAttilio Rao tmp = vm_radix_node_get(vm_radix_trimkey(index, 3619f2e6008SAlan Cox clev + 1), 2, clev); 362563a19d5SAlan Cox if (tmp == NULL) 363e946b949SAttilio Rao return (ENOMEM); 364a08f2cf6SAlan Cox *parentp = tmp; 365774d251dSAttilio Rao vm_radix_addpage(tmp, index, clev, page); 366774d251dSAttilio Rao vm_radix_addpage(tmp, m->pindex, clev, m); 367e946b949SAttilio Rao return (0); 368a08f2cf6SAlan Cox } else if (vm_radix_keybarr(rnode, index)) 369a08f2cf6SAlan Cox break; 370a08f2cf6SAlan Cox slot = vm_radix_slot(index, rnode->rn_clev); 371774d251dSAttilio Rao if (rnode->rn_child[slot] == NULL) { 372774d251dSAttilio Rao rnode->rn_count++; 373774d251dSAttilio Rao vm_radix_addpage(rnode, index, rnode->rn_clev, page); 374e946b949SAttilio Rao return (0); 375774d251dSAttilio Rao } 376a08f2cf6SAlan Cox parentp = &rnode->rn_child[slot]; 377774d251dSAttilio Rao rnode = rnode->rn_child[slot]; 378a08f2cf6SAlan Cox } 379774d251dSAttilio Rao 380774d251dSAttilio Rao /* 381774d251dSAttilio Rao * A new node is needed because the right insertion level is reached. 382774d251dSAttilio Rao * Setup the new intermediate node and add the 2 children: the 383774d251dSAttilio Rao * new object and the older edge. 
384774d251dSAttilio Rao */ 38572abda64SAlan Cox newind = rnode->rn_owner; 38672abda64SAlan Cox clev = vm_radix_keydiff(newind, index); 387e946b949SAttilio Rao tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev); 388563a19d5SAlan Cox if (tmp == NULL) 389e946b949SAttilio Rao return (ENOMEM); 390a08f2cf6SAlan Cox *parentp = tmp; 39172abda64SAlan Cox vm_radix_addpage(tmp, index, clev, page); 392774d251dSAttilio Rao slot = vm_radix_slot(newind, clev); 39372abda64SAlan Cox tmp->rn_child[slot] = rnode; 394e946b949SAttilio Rao return (0); 395774d251dSAttilio Rao } 396774d251dSAttilio Rao 397774d251dSAttilio Rao /* 398776cad90SAlan Cox * Returns TRUE if the specified radix tree contains a single leaf and FALSE 399776cad90SAlan Cox * otherwise. 400776cad90SAlan Cox */ 401776cad90SAlan Cox boolean_t 402776cad90SAlan Cox vm_radix_is_singleton(struct vm_radix *rtree) 403776cad90SAlan Cox { 404776cad90SAlan Cox struct vm_radix_node *rnode; 405776cad90SAlan Cox 406776cad90SAlan Cox rnode = vm_radix_getroot(rtree); 407776cad90SAlan Cox if (rnode == NULL) 408776cad90SAlan Cox return (FALSE); 409776cad90SAlan Cox return (vm_radix_isleaf(rnode)); 410776cad90SAlan Cox } 411776cad90SAlan Cox 412776cad90SAlan Cox /* 413774d251dSAttilio Rao * Returns the value stored at the index. If the index is not present, 414774d251dSAttilio Rao * NULL is returned. 
415774d251dSAttilio Rao */ 416774d251dSAttilio Rao vm_page_t 417774d251dSAttilio Rao vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index) 418774d251dSAttilio Rao { 419774d251dSAttilio Rao struct vm_radix_node *rnode; 420774d251dSAttilio Rao vm_page_t m; 421774d251dSAttilio Rao int slot; 422774d251dSAttilio Rao 423774d251dSAttilio Rao rnode = vm_radix_getroot(rtree); 424774d251dSAttilio Rao while (rnode != NULL) { 42596f1a842SAlan Cox if (vm_radix_isleaf(rnode)) { 42696f1a842SAlan Cox m = vm_radix_topage(rnode); 427774d251dSAttilio Rao if (m->pindex == index) 428774d251dSAttilio Rao return (m); 429774d251dSAttilio Rao else 4306f9c0b15SAlan Cox break; 4316f9c0b15SAlan Cox } else if (vm_radix_keybarr(rnode, index)) 4326f9c0b15SAlan Cox break; 4336f9c0b15SAlan Cox slot = vm_radix_slot(index, rnode->rn_clev); 4346f9c0b15SAlan Cox rnode = rnode->rn_child[slot]; 435774d251dSAttilio Rao } 436774d251dSAttilio Rao return (NULL); 437774d251dSAttilio Rao } 438774d251dSAttilio Rao 439774d251dSAttilio Rao /* 440774d251dSAttilio Rao * Look up the nearest entry at a position bigger than or equal to index. 
 * Returns NULL if no such entry exists.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	/* Ancestors of the current node, for backtracking on ascent. */
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		/* Single-leaf trie: answer directly. */
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might rollback to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is bigger than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge bigger than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		/* Push the current node and follow the non-leaf child. */
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Look up the nearest entry at a position less than or equal to index.
 * Returns NULL if no such entry exists.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	/* Ancestors of the current node, for backtracking on ascent. */
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		/* Single-leaf trie: answer directly. */
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might rollback to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			/* Step just below the subtree that was too big. */
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			/* Saturate the low bits before stepping down. */
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		/* Push the current node and follow the non-leaf child. */
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_getroot(rtree);
	if (vm_radix_isleaf(rnode)) {
		/* The trie holds a single leaf at the root. */
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_setroot(rtree, NULL);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				return (NULL);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			/*
			 * Only one child remains: find it and splice it into
			 * the parent's slot, freeing this node (path
			 * compression).
			 */
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_setroot(rtree, rnode->rn_child[i]);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(parent->rn_child[slot] == rnode,
				    ("%s: invalid child value", __func__));
				parent->rn_child[slot] = rnode->rn_child[i];
			}
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive but there is a tight control on it as the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	/* A lone root leaf is a page, not a node: nothing to free. */
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
738703b304fSAlan Cox * Panics if there is not an old page in the trie at the new page's index. 739e946b949SAttilio Rao */ 740e946b949SAttilio Rao vm_page_t 741703b304fSAlan Cox vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage) 742e946b949SAttilio Rao { 743e946b949SAttilio Rao struct vm_radix_node *rnode; 744e946b949SAttilio Rao vm_page_t m; 745703b304fSAlan Cox vm_pindex_t index; 746e946b949SAttilio Rao int slot; 747e946b949SAttilio Rao 748703b304fSAlan Cox index = newpage->pindex; 749e946b949SAttilio Rao rnode = vm_radix_getroot(rtree); 750e946b949SAttilio Rao if (rnode == NULL) 751e946b949SAttilio Rao panic("%s: replacing page on an empty trie", __func__); 752e946b949SAttilio Rao if (vm_radix_isleaf(rnode)) { 753e946b949SAttilio Rao m = vm_radix_topage(rnode); 754e946b949SAttilio Rao if (m->pindex != index) 755e946b949SAttilio Rao panic("%s: original replacing root key not found", 756e946b949SAttilio Rao __func__); 757e946b949SAttilio Rao rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF; 758e946b949SAttilio Rao return (m); 759e946b949SAttilio Rao } 760e946b949SAttilio Rao for (;;) { 761e946b949SAttilio Rao slot = vm_radix_slot(index, rnode->rn_clev); 762e946b949SAttilio Rao if (vm_radix_isleaf(rnode->rn_child[slot])) { 763e946b949SAttilio Rao m = vm_radix_topage(rnode->rn_child[slot]); 764e946b949SAttilio Rao if (m->pindex == index) { 765e946b949SAttilio Rao rnode->rn_child[slot] = 766e946b949SAttilio Rao (void *)((uintptr_t)newpage | 767e946b949SAttilio Rao VM_RADIX_ISLEAF); 768e946b949SAttilio Rao return (m); 769e946b949SAttilio Rao } else 770e946b949SAttilio Rao break; 771e946b949SAttilio Rao } else if (rnode->rn_child[slot] == NULL || 772e946b949SAttilio Rao vm_radix_keybarr(rnode->rn_child[slot], index)) 773e946b949SAttilio Rao break; 774e946b949SAttilio Rao rnode = rnode->rn_child[slot]; 775e946b949SAttilio Rao } 776e946b949SAttilio Rao panic("%s: original replacing page not found", __func__); 777e946b949SAttilio Rao } 778e946b949SAttilio Rao 
7798d6fbbb8SJeff Roberson void 7808d6fbbb8SJeff Roberson vm_radix_wait(void) 7818d6fbbb8SJeff Roberson { 7828d6fbbb8SJeff Roberson uma_zwait(vm_radix_node_zone); 7838d6fbbb8SJeff Roberson } 7848d6fbbb8SJeff Roberson 785774d251dSAttilio Rao #ifdef DDB 786774d251dSAttilio Rao /* 787774d251dSAttilio Rao * Show details about the given radix node. 788774d251dSAttilio Rao */ 789774d251dSAttilio Rao DB_SHOW_COMMAND(radixnode, db_show_radixnode) 790774d251dSAttilio Rao { 791774d251dSAttilio Rao struct vm_radix_node *rnode; 792774d251dSAttilio Rao int i; 793774d251dSAttilio Rao 794774d251dSAttilio Rao if (!have_addr) 795774d251dSAttilio Rao return; 796774d251dSAttilio Rao rnode = (struct vm_radix_node *)addr; 797774d251dSAttilio Rao db_printf("radixnode %p, owner %jx, children count %u, level %u:\n", 798774d251dSAttilio Rao (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count, 799774d251dSAttilio Rao rnode->rn_clev); 800774d251dSAttilio Rao for (i = 0; i < VM_RADIX_COUNT; i++) 801774d251dSAttilio Rao if (rnode->rn_child[i] != NULL) 802774d251dSAttilio Rao db_printf("slot: %d, val: %p, page: %p, clev: %d\n", 803774d251dSAttilio Rao i, (void *)rnode->rn_child[i], 80496f1a842SAlan Cox vm_radix_isleaf(rnode->rn_child[i]) ? 80596f1a842SAlan Cox vm_radix_topage(rnode->rn_child[i]) : NULL, 806774d251dSAttilio Rao rnode->rn_clev); 807774d251dSAttilio Rao } 808774d251dSAttilio Rao #endif /* DDB */ 809