1774d251dSAttilio Rao /* 2774d251dSAttilio Rao * Copyright (c) 2013 EMC Corp. 3774d251dSAttilio Rao * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org> 4774d251dSAttilio Rao * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com> 5774d251dSAttilio Rao * All rights reserved. 6774d251dSAttilio Rao * 7774d251dSAttilio Rao * Redistribution and use in source and binary forms, with or without 8774d251dSAttilio Rao * modification, are permitted provided that the following conditions 9774d251dSAttilio Rao * are met: 10774d251dSAttilio Rao * 1. Redistributions of source code must retain the above copyright 11774d251dSAttilio Rao * notice, this list of conditions and the following disclaimer. 12774d251dSAttilio Rao * 2. Redistributions in binary form must reproduce the above copyright 13774d251dSAttilio Rao * notice, this list of conditions and the following disclaimer in the 14774d251dSAttilio Rao * documentation and/or other materials provided with the distribution. 15774d251dSAttilio Rao * 16774d251dSAttilio Rao * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17774d251dSAttilio Rao * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18774d251dSAttilio Rao * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19774d251dSAttilio Rao * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20774d251dSAttilio Rao * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21774d251dSAttilio Rao * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22774d251dSAttilio Rao * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23774d251dSAttilio Rao * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24774d251dSAttilio Rao * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25774d251dSAttilio Rao * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26774d251dSAttilio Rao * SUCH DAMAGE. 27774d251dSAttilio Rao * 28774d251dSAttilio Rao */ 29774d251dSAttilio Rao 30774d251dSAttilio Rao /* 31774d251dSAttilio Rao * Path-compressed radix trie implementation. 32774d251dSAttilio Rao * The following code is not generalized into a general purpose library 33774d251dSAttilio Rao * because there are way too many parameters embedded that should really 34774d251dSAttilio Rao * be decided by the library consumers. At the same time, consumers 35774d251dSAttilio Rao * of this code must achieve highest possible performance. 36774d251dSAttilio Rao * 37774d251dSAttilio Rao * The implementation takes into account the following rationale: 38774d251dSAttilio Rao * - Size of the nodes should be as small as possible but still big enough 39774d251dSAttilio Rao * to avoid a large maximum depth for the trie. This is a balance 40774d251dSAttilio Rao * between the necessity to not wire too much physical memory for the nodes 41774d251dSAttilio Rao * and the necessity to avoid too much cache pollution during the trie 42774d251dSAttilio Rao * operations. 43774d251dSAttilio Rao * - There is not a huge bias toward the number of lookup operations over 44774d251dSAttilio Rao * the number of insert and remove operations. 
This basically implies 45774d251dSAttilio Rao * that optimizations supposedly helping one operation but hurting the 46774d251dSAttilio Rao * other might be carefully evaluated. 47774d251dSAttilio Rao * - On average not many nodes are expected to be fully populated, hence 48774d251dSAttilio Rao * level compression may just complicate things. 49774d251dSAttilio Rao */ 50774d251dSAttilio Rao 51774d251dSAttilio Rao #include <sys/cdefs.h> 52774d251dSAttilio Rao __FBSDID("$FreeBSD$"); 53774d251dSAttilio Rao 54774d251dSAttilio Rao #include "opt_ddb.h" 55774d251dSAttilio Rao 56774d251dSAttilio Rao #include <sys/param.h> 57774d251dSAttilio Rao #include <sys/systm.h> 58774d251dSAttilio Rao #include <sys/kernel.h> 59774d251dSAttilio Rao #include <sys/vmmeter.h> 60774d251dSAttilio Rao 61774d251dSAttilio Rao #include <vm/uma.h> 62774d251dSAttilio Rao #include <vm/vm.h> 63774d251dSAttilio Rao #include <vm/vm_param.h> 64774d251dSAttilio Rao #include <vm/vm_page.h> 65774d251dSAttilio Rao #include <vm/vm_radix.h> 66774d251dSAttilio Rao 67774d251dSAttilio Rao #ifdef DDB 68774d251dSAttilio Rao #include <ddb/ddb.h> 69774d251dSAttilio Rao #endif 70774d251dSAttilio Rao 71774d251dSAttilio Rao /* 72774d251dSAttilio Rao * These widths should allow the pointers to a node's children to fit within 73774d251dSAttilio Rao * a single cache line. The extra levels from a narrow width should not be 74774d251dSAttilio Rao * a problem thanks to path compression. 75774d251dSAttilio Rao */ 76774d251dSAttilio Rao #ifdef __LP64__ 77774d251dSAttilio Rao #define VM_RADIX_WIDTH 4 78774d251dSAttilio Rao #else 79774d251dSAttilio Rao #define VM_RADIX_WIDTH 3 80774d251dSAttilio Rao #endif 81774d251dSAttilio Rao 82774d251dSAttilio Rao #define VM_RADIX_COUNT (1 << VM_RADIX_WIDTH) 83774d251dSAttilio Rao #define VM_RADIX_MASK (VM_RADIX_COUNT - 1) 84774d251dSAttilio Rao #define VM_RADIX_LIMIT \ 85b66bb393SPedro F. 
Giffuni (howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1) 86774d251dSAttilio Rao 87774d251dSAttilio Rao /* Flag bits stored in node pointers. */ 88774d251dSAttilio Rao #define VM_RADIX_ISLEAF 0x1 89774d251dSAttilio Rao #define VM_RADIX_FLAGS 0x1 90774d251dSAttilio Rao #define VM_RADIX_PAD VM_RADIX_FLAGS 91774d251dSAttilio Rao 92774d251dSAttilio Rao /* Returns one unit associated with specified level. */ 93774d251dSAttilio Rao #define VM_RADIX_UNITLEVEL(lev) \ 949f2e6008SAlan Cox ((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH)) 95774d251dSAttilio Rao 96774d251dSAttilio Rao struct vm_radix_node { 97774d251dSAttilio Rao vm_pindex_t rn_owner; /* Owner of record. */ 98774d251dSAttilio Rao uint16_t rn_count; /* Valid children. */ 99774d251dSAttilio Rao uint16_t rn_clev; /* Current level. */ 1002c899fedSAlan Cox void *rn_child[VM_RADIX_COUNT]; /* Child nodes. */ 101774d251dSAttilio Rao }; 102774d251dSAttilio Rao 103774d251dSAttilio Rao static uma_zone_t vm_radix_node_zone; 104774d251dSAttilio Rao 105774d251dSAttilio Rao /* 106e946b949SAttilio Rao * Allocate a radix node. 107774d251dSAttilio Rao */ 108774d251dSAttilio Rao static __inline struct vm_radix_node * 109774d251dSAttilio Rao vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel) 110774d251dSAttilio Rao { 111774d251dSAttilio Rao struct vm_radix_node *rnode; 112774d251dSAttilio Rao 113e946b949SAttilio Rao rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO); 114774d251dSAttilio Rao if (rnode == NULL) 115e946b949SAttilio Rao return (NULL); 116774d251dSAttilio Rao rnode->rn_owner = owner; 117774d251dSAttilio Rao rnode->rn_count = count; 118774d251dSAttilio Rao rnode->rn_clev = clevel; 119774d251dSAttilio Rao return (rnode); 120774d251dSAttilio Rao } 121774d251dSAttilio Rao 122774d251dSAttilio Rao /* 123774d251dSAttilio Rao * Free radix node. 
124774d251dSAttilio Rao */ 125774d251dSAttilio Rao static __inline void 126774d251dSAttilio Rao vm_radix_node_put(struct vm_radix_node *rnode) 127774d251dSAttilio Rao { 128774d251dSAttilio Rao 129774d251dSAttilio Rao uma_zfree(vm_radix_node_zone, rnode); 130774d251dSAttilio Rao } 131774d251dSAttilio Rao 132774d251dSAttilio Rao /* 133774d251dSAttilio Rao * Return the position in the array for a given level. 134774d251dSAttilio Rao */ 135774d251dSAttilio Rao static __inline int 136774d251dSAttilio Rao vm_radix_slot(vm_pindex_t index, uint16_t level) 137774d251dSAttilio Rao { 138774d251dSAttilio Rao 1399f2e6008SAlan Cox return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK); 140774d251dSAttilio Rao } 141774d251dSAttilio Rao 142774d251dSAttilio Rao /* Trims the key after the specified level. */ 143774d251dSAttilio Rao static __inline vm_pindex_t 144774d251dSAttilio Rao vm_radix_trimkey(vm_pindex_t index, uint16_t level) 145774d251dSAttilio Rao { 146774d251dSAttilio Rao vm_pindex_t ret; 147774d251dSAttilio Rao 148774d251dSAttilio Rao ret = index; 1499f2e6008SAlan Cox if (level > 0) { 1509f2e6008SAlan Cox ret >>= level * VM_RADIX_WIDTH; 1519f2e6008SAlan Cox ret <<= level * VM_RADIX_WIDTH; 152774d251dSAttilio Rao } 153774d251dSAttilio Rao return (ret); 154774d251dSAttilio Rao } 155774d251dSAttilio Rao 156774d251dSAttilio Rao /* 157774d251dSAttilio Rao * Get the root node for a radix tree. 158774d251dSAttilio Rao */ 159774d251dSAttilio Rao static __inline struct vm_radix_node * 160774d251dSAttilio Rao vm_radix_getroot(struct vm_radix *rtree) 161774d251dSAttilio Rao { 162774d251dSAttilio Rao 1632c899fedSAlan Cox return ((struct vm_radix_node *)rtree->rt_root); 164774d251dSAttilio Rao } 165774d251dSAttilio Rao 166774d251dSAttilio Rao /* 167774d251dSAttilio Rao * Set the root node for a radix tree. 
168774d251dSAttilio Rao */ 169774d251dSAttilio Rao static __inline void 170774d251dSAttilio Rao vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode) 171774d251dSAttilio Rao { 172774d251dSAttilio Rao 173774d251dSAttilio Rao rtree->rt_root = (uintptr_t)rnode; 174774d251dSAttilio Rao } 175774d251dSAttilio Rao 176774d251dSAttilio Rao /* 1773fc10b73SAlan Cox * Returns TRUE if the specified radix node is a leaf and FALSE otherwise. 1783fc10b73SAlan Cox */ 1793fc10b73SAlan Cox static __inline boolean_t 1803fc10b73SAlan Cox vm_radix_isleaf(struct vm_radix_node *rnode) 1813fc10b73SAlan Cox { 1823fc10b73SAlan Cox 1833fc10b73SAlan Cox return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0); 1843fc10b73SAlan Cox } 1853fc10b73SAlan Cox 1863fc10b73SAlan Cox /* 18796f1a842SAlan Cox * Returns the associated page extracted from rnode. 188774d251dSAttilio Rao */ 189774d251dSAttilio Rao static __inline vm_page_t 19096f1a842SAlan Cox vm_radix_topage(struct vm_radix_node *rnode) 191774d251dSAttilio Rao { 192774d251dSAttilio Rao 19396f1a842SAlan Cox return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS)); 194774d251dSAttilio Rao } 195774d251dSAttilio Rao 196774d251dSAttilio Rao /* 197774d251dSAttilio Rao * Adds the page as a child of the provided node. 198774d251dSAttilio Rao */ 199774d251dSAttilio Rao static __inline void 200774d251dSAttilio Rao vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev, 201774d251dSAttilio Rao vm_page_t page) 202774d251dSAttilio Rao { 203774d251dSAttilio Rao int slot; 204774d251dSAttilio Rao 205774d251dSAttilio Rao slot = vm_radix_slot(index, clev); 206774d251dSAttilio Rao rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF); 207774d251dSAttilio Rao } 208774d251dSAttilio Rao 209774d251dSAttilio Rao /* 210774d251dSAttilio Rao * Returns the slot where two keys differ. 211774d251dSAttilio Rao * It cannot accept 2 equal keys. 
212774d251dSAttilio Rao */ 213774d251dSAttilio Rao static __inline uint16_t 214774d251dSAttilio Rao vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2) 215774d251dSAttilio Rao { 216774d251dSAttilio Rao uint16_t clev; 217774d251dSAttilio Rao 218774d251dSAttilio Rao KASSERT(index1 != index2, ("%s: passing the same key value %jx", 219774d251dSAttilio Rao __func__, (uintmax_t)index1)); 220774d251dSAttilio Rao 221774d251dSAttilio Rao index1 ^= index2; 2229f2e6008SAlan Cox for (clev = VM_RADIX_LIMIT;; clev--) 223bb0e1de4SAlan Cox if (vm_radix_slot(index1, clev) != 0) 224774d251dSAttilio Rao return (clev); 225774d251dSAttilio Rao } 226774d251dSAttilio Rao 227774d251dSAttilio Rao /* 228774d251dSAttilio Rao * Returns TRUE if it can be determined that key does not belong to the 229774d251dSAttilio Rao * specified rnode. Otherwise, returns FALSE. 230774d251dSAttilio Rao */ 231774d251dSAttilio Rao static __inline boolean_t 232774d251dSAttilio Rao vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx) 233774d251dSAttilio Rao { 234774d251dSAttilio Rao 2359f2e6008SAlan Cox if (rnode->rn_clev < VM_RADIX_LIMIT) { 2369f2e6008SAlan Cox idx = vm_radix_trimkey(idx, rnode->rn_clev + 1); 237c1c82b36SAlan Cox return (idx != rnode->rn_owner); 238774d251dSAttilio Rao } 239774d251dSAttilio Rao return (FALSE); 240774d251dSAttilio Rao } 241774d251dSAttilio Rao 242774d251dSAttilio Rao /* 243774d251dSAttilio Rao * Internal helper for vm_radix_reclaim_allnodes(). 244774d251dSAttilio Rao * This function is recursive. 
245774d251dSAttilio Rao */ 246774d251dSAttilio Rao static void 247774d251dSAttilio Rao vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode) 248774d251dSAttilio Rao { 249774d251dSAttilio Rao int slot; 250774d251dSAttilio Rao 251652615dcSAlan Cox KASSERT(rnode->rn_count <= VM_RADIX_COUNT, 252652615dcSAlan Cox ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode)); 253652615dcSAlan Cox for (slot = 0; rnode->rn_count != 0; slot++) { 254774d251dSAttilio Rao if (rnode->rn_child[slot] == NULL) 255774d251dSAttilio Rao continue; 2563fc10b73SAlan Cox if (!vm_radix_isleaf(rnode->rn_child[slot])) 257774d251dSAttilio Rao vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]); 258774d251dSAttilio Rao rnode->rn_child[slot] = NULL; 259774d251dSAttilio Rao rnode->rn_count--; 260774d251dSAttilio Rao } 261774d251dSAttilio Rao vm_radix_node_put(rnode); 262774d251dSAttilio Rao } 263774d251dSAttilio Rao 264774d251dSAttilio Rao #ifdef INVARIANTS 265774d251dSAttilio Rao /* 266774d251dSAttilio Rao * Radix node zone destructor. 267774d251dSAttilio Rao */ 268774d251dSAttilio Rao static void 269774d251dSAttilio Rao vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused) 270774d251dSAttilio Rao { 271774d251dSAttilio Rao struct vm_radix_node *rnode; 272774d251dSAttilio Rao int slot; 273774d251dSAttilio Rao 274774d251dSAttilio Rao rnode = mem; 275774d251dSAttilio Rao KASSERT(rnode->rn_count == 0, 276774d251dSAttilio Rao ("vm_radix_node_put: rnode %p has %d children", rnode, 277774d251dSAttilio Rao rnode->rn_count)); 278774d251dSAttilio Rao for (slot = 0; slot < VM_RADIX_COUNT; slot++) 279774d251dSAttilio Rao KASSERT(rnode->rn_child[slot] == NULL, 280774d251dSAttilio Rao ("vm_radix_node_put: rnode %p has a child", rnode)); 281774d251dSAttilio Rao } 282774d251dSAttilio Rao #endif 283774d251dSAttilio Rao 284e946b949SAttilio Rao #ifndef UMA_MD_SMALL_ALLOC 285774d251dSAttilio Rao /* 286e946b949SAttilio Rao * Reserve the KVA necessary to satisfy the node allocation. 
287e946b949SAttilio Rao * This is mandatory in architectures not supporting direct 288e946b949SAttilio Rao * mapping as they will need otherwise to carve into the kernel maps for 289e946b949SAttilio Rao * every node allocation, resulting into deadlocks for consumers already 290e946b949SAttilio Rao * working with kernel maps. 291774d251dSAttilio Rao */ 292774d251dSAttilio Rao static void 293e946b949SAttilio Rao vm_radix_reserve_kva(void *arg __unused) 294774d251dSAttilio Rao { 295774d251dSAttilio Rao 296880659feSAlan Cox /* 297880659feSAlan Cox * Calculate the number of reserved nodes, discounting the pages that 298880659feSAlan Cox * are needed to store them. 299880659feSAlan Cox */ 300e946b949SAttilio Rao if (!uma_zone_reserve_kva(vm_radix_node_zone, 30144f1c916SBryan Drewery ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE + 302e946b949SAttilio Rao sizeof(struct vm_radix_node)))) 303e946b949SAttilio Rao panic("%s: unable to reserve KVA", __func__); 304774d251dSAttilio Rao } 305af3b2549SHans Petter Selasky SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_THIRD, 306e946b949SAttilio Rao vm_radix_reserve_kva, NULL); 307e946b949SAttilio Rao #endif 308774d251dSAttilio Rao 309774d251dSAttilio Rao /* 310774d251dSAttilio Rao * Initialize the UMA slab zone. 311774d251dSAttilio Rao */ 312774d251dSAttilio Rao void 313cd1241fbSKonstantin Belousov vm_radix_zinit(void) 314774d251dSAttilio Rao { 315774d251dSAttilio Rao 316774d251dSAttilio Rao vm_radix_node_zone = uma_zcreate("RADIX NODE", 317774d251dSAttilio Rao sizeof(struct vm_radix_node), NULL, 318774d251dSAttilio Rao #ifdef INVARIANTS 319774d251dSAttilio Rao vm_radix_node_zone_dtor, 320774d251dSAttilio Rao #else 321774d251dSAttilio Rao NULL, 322774d251dSAttilio Rao #endif 323e946b949SAttilio Rao NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM); 324774d251dSAttilio Rao } 325774d251dSAttilio Rao 326774d251dSAttilio Rao /* 327774d251dSAttilio Rao * Inserts the key-value pair into the trie. 
 * Panics if the key already exists.
 * Returns 0 on success, or ENOMEM if a needed intermediate node cannot
 * be allocated; on ENOMEM the trie is left unchanged.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	void **parentp;
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		/* Empty trie: the root slot itself holds the tagged leaf. */
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return (0);
	}
	/*
	 * parentp always points at the slot through which rnode was
	 * reached, so a replacement node can be spliced in without
	 * re-walking from the root.
	 */
	parentp = (void **)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			/*
			 * Collided with an existing leaf: split it by
			 * interposing a node at the highest level where
			 * the two keys differ, holding both leaves.
			 */
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev + 1), 2, clev);
			if (tmp == NULL)
				return (ENOMEM);
			*parentp = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			/* Path compression skipped over our key: split. */
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (rnode->rn_child[slot] == NULL) {
			/* Free slot at the right level: attach the leaf. */
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return (0);
		}
		parentp = &rnode->rn_child[slot];
		rnode = rnode->rn_child[slot];
	}

	/*
	 * A new node is needed because the right insertion level is reached.
	 * Setup the new intermediate node and add the 2 children: the
	 * new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);
	*parentp = tmp;
	vm_radix_addpage(tmp, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp->rn_child[slot] = rnode;
	return (0);
}

/*
 * Returns TRUE if the specified radix tree contains a single leaf and FALSE
 * otherwise.
 */
boolean_t
vm_radix_is_singleton(struct vm_radix *rtree)
{
	struct vm_radix_node *rnode;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (FALSE);
	/* A leaf-tagged root means exactly one page is stored. */
	return (vm_radix_isleaf(rnode));
}

/*
 * Returns the value stored at the index.  If the index is not present,
If the index is not present, 413774d251dSAttilio Rao * NULL is returned. 414774d251dSAttilio Rao */ 415774d251dSAttilio Rao vm_page_t 416774d251dSAttilio Rao vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index) 417774d251dSAttilio Rao { 418774d251dSAttilio Rao struct vm_radix_node *rnode; 419774d251dSAttilio Rao vm_page_t m; 420774d251dSAttilio Rao int slot; 421774d251dSAttilio Rao 422774d251dSAttilio Rao rnode = vm_radix_getroot(rtree); 423774d251dSAttilio Rao while (rnode != NULL) { 42496f1a842SAlan Cox if (vm_radix_isleaf(rnode)) { 42596f1a842SAlan Cox m = vm_radix_topage(rnode); 426774d251dSAttilio Rao if (m->pindex == index) 427774d251dSAttilio Rao return (m); 428774d251dSAttilio Rao else 4296f9c0b15SAlan Cox break; 4306f9c0b15SAlan Cox } else if (vm_radix_keybarr(rnode, index)) 4316f9c0b15SAlan Cox break; 4326f9c0b15SAlan Cox slot = vm_radix_slot(index, rnode->rn_clev); 4336f9c0b15SAlan Cox rnode = rnode->rn_child[slot]; 434774d251dSAttilio Rao } 435774d251dSAttilio Rao return (NULL); 436774d251dSAttilio Rao } 437774d251dSAttilio Rao 438774d251dSAttilio Rao /* 439774d251dSAttilio Rao * Look up the nearest entry at a position bigger than or equal to index. 
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	/* Stack of ancestors, used to ascend after exhausting a subtree. */
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		/* Singleton trie: test the lone page directly. */
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might rollback to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is bigger than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				/* Every key under rnode is >= index. */
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			/*
			 * Advance index in lockstep with slot so leaf
			 * comparisons below stay meaningful.
			 */
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge bigger than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		/* Push the current node and continue in the child subtree. */
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	/* Stack of ancestors, used to ascend after exhausting a subtree. */
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		/* Singleton trie: test the lone page directly. */
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might rollback to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				/* Clamp to one past rnode's largest key. */
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			/* Step below the clamped/trimmed boundary. */
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			/*
			 * Saturate the low bits of index, then walk the
			 * slots downward in lockstep.
			 */
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		/* Push the current node and continue in the child subtree. */
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}

/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_getroot(rtree);
	if (vm_radix_isleaf(rnode)) {
		/* Singleton trie: clear the root slot on a match. */
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_setroot(rtree, NULL);
		return (m);
	}
	parent = NULL;
	for (;;) {
		/* NOTE: a NULL root also lands here and returns NULL. */
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				return (NULL);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			/*
			 * Only one child is left: find it and collapse the
			 * node by relinking that child to the parent's slot
			 * (path compression).
			 */
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_setroot(rtree, rnode->rn_child[i]);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(parent->rn_child[slot] == rnode,
				    ("%s: invalid child value", __func__));
				parent->rn_child[slot] = rnode->rn_child[i];
			}
			/* Detach the last child so the zone dtor is happy. */
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive but there is a tight control on it as the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	/* A leaf root carries no nodes, so there is nothing to free. */
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

/*
 * Replace an existing page in the trie with another one.
737703b304fSAlan Cox * Panics if there is not an old page in the trie at the new page's index. 738e946b949SAttilio Rao */ 739e946b949SAttilio Rao vm_page_t 740703b304fSAlan Cox vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage) 741e946b949SAttilio Rao { 742e946b949SAttilio Rao struct vm_radix_node *rnode; 743e946b949SAttilio Rao vm_page_t m; 744703b304fSAlan Cox vm_pindex_t index; 745e946b949SAttilio Rao int slot; 746e946b949SAttilio Rao 747703b304fSAlan Cox index = newpage->pindex; 748e946b949SAttilio Rao rnode = vm_radix_getroot(rtree); 749e946b949SAttilio Rao if (rnode == NULL) 750e946b949SAttilio Rao panic("%s: replacing page on an empty trie", __func__); 751e946b949SAttilio Rao if (vm_radix_isleaf(rnode)) { 752e946b949SAttilio Rao m = vm_radix_topage(rnode); 753e946b949SAttilio Rao if (m->pindex != index) 754e946b949SAttilio Rao panic("%s: original replacing root key not found", 755e946b949SAttilio Rao __func__); 756e946b949SAttilio Rao rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF; 757e946b949SAttilio Rao return (m); 758e946b949SAttilio Rao } 759e946b949SAttilio Rao for (;;) { 760e946b949SAttilio Rao slot = vm_radix_slot(index, rnode->rn_clev); 761e946b949SAttilio Rao if (vm_radix_isleaf(rnode->rn_child[slot])) { 762e946b949SAttilio Rao m = vm_radix_topage(rnode->rn_child[slot]); 763e946b949SAttilio Rao if (m->pindex == index) { 764e946b949SAttilio Rao rnode->rn_child[slot] = 765e946b949SAttilio Rao (void *)((uintptr_t)newpage | 766e946b949SAttilio Rao VM_RADIX_ISLEAF); 767e946b949SAttilio Rao return (m); 768e946b949SAttilio Rao } else 769e946b949SAttilio Rao break; 770e946b949SAttilio Rao } else if (rnode->rn_child[slot] == NULL || 771e946b949SAttilio Rao vm_radix_keybarr(rnode->rn_child[slot], index)) 772e946b949SAttilio Rao break; 773e946b949SAttilio Rao rnode = rnode->rn_child[slot]; 774e946b949SAttilio Rao } 775e946b949SAttilio Rao panic("%s: original replacing page not found", __func__); 776e946b949SAttilio Rao } 777e946b949SAttilio Rao 
778*8d6fbbb8SJeff Roberson void 779*8d6fbbb8SJeff Roberson vm_radix_wait(void) 780*8d6fbbb8SJeff Roberson { 781*8d6fbbb8SJeff Roberson uma_zwait(vm_radix_node_zone); 782*8d6fbbb8SJeff Roberson } 783*8d6fbbb8SJeff Roberson 784774d251dSAttilio Rao #ifdef DDB 785774d251dSAttilio Rao /* 786774d251dSAttilio Rao * Show details about the given radix node. 787774d251dSAttilio Rao */ 788774d251dSAttilio Rao DB_SHOW_COMMAND(radixnode, db_show_radixnode) 789774d251dSAttilio Rao { 790774d251dSAttilio Rao struct vm_radix_node *rnode; 791774d251dSAttilio Rao int i; 792774d251dSAttilio Rao 793774d251dSAttilio Rao if (!have_addr) 794774d251dSAttilio Rao return; 795774d251dSAttilio Rao rnode = (struct vm_radix_node *)addr; 796774d251dSAttilio Rao db_printf("radixnode %p, owner %jx, children count %u, level %u:\n", 797774d251dSAttilio Rao (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count, 798774d251dSAttilio Rao rnode->rn_clev); 799774d251dSAttilio Rao for (i = 0; i < VM_RADIX_COUNT; i++) 800774d251dSAttilio Rao if (rnode->rn_child[i] != NULL) 801774d251dSAttilio Rao db_printf("slot: %d, val: %p, page: %p, clev: %d\n", 802774d251dSAttilio Rao i, (void *)rnode->rn_child[i], 80396f1a842SAlan Cox vm_radix_isleaf(rnode->rn_child[i]) ? 80496f1a842SAlan Cox vm_radix_topage(rnode->rn_child[i]) : NULL, 805774d251dSAttilio Rao rnode->rn_clev); 806774d251dSAttilio Rao } 807774d251dSAttilio Rao #endif /* DDB */ 808