/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define VM_LEVEL_0_ORDER_MAX    VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)
#define VM_LEVEL_0_NPAGES_MAX   (1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
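/*
 * For a concrete sense of scale (the numbers below assume that
 * VM_LEVEL_0_ORDER is 9, as it is for amd64's 2MB superpages, and are
 * illustrative only): VM_LEVEL_0_NPAGES is 512, VM_LEVEL_0_SHIFT is 21, and
 * VM_LEVEL_0_SIZE is 2MB.  An object with pg_color 5 then maps pindex 700 to
 *
 *      VM_RESERV_INDEX(object, 700) == (5 + 700) & 511 == 193
 *
 * and the index wraps around every VM_LEVEL_0_NPAGES offsets.
 */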
/*
 * The size of a population map entry
 */
typedef u_long  popmap_t;

/*
 * The number of bits in a population map entry
 */
#define NBPOPMAP        (NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define NPOPMAP         howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define NPOPMAP_MAX     howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define PARTPOPSLOP     1

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}
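/*
 * For example, on an LP64 platform, where NBPOPMAP is 64, page index 70
 * falls in popmap word 70 / 64 == 1 at bit 70 % 64 == 6.  Thus,
 * popmap_set(popmap, 70) performs popmap[1] |= 1UL << 6, and
 * popmap_is_set(popmap, 70) tests that same bit.
 */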
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * c - constant after boot
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * r - vm_reserv_lock
 * s - vm_reserv_domain_scan_lock
 */
struct vm_reserv {
        struct mtx      lock;                   /* reservation lock. */
        TAILQ_ENTRY(vm_reserv) partpopq;        /* (d, r) per-domain queue. */
        LIST_ENTRY(vm_reserv) objq;             /* (o, r) object queue */
        vm_object_t     object;                 /* (o, r) containing object */
        vm_pindex_t     pindex;                 /* (o, r) offset in object */
        vm_page_t       pages;                  /* (c) first page */
        uint16_t        popcnt;                 /* (r) # of pages in use */
        uint8_t         domain;                 /* (c) NUMA domain. */
        char            inpartpopq;             /* (d, r) */
        int             lasttick;               /* (r) last pop update tick. */
        popmap_t        popmap[NPOPMAP_MAX];    /* (r) bit vector, used pages */
};

TAILQ_HEAD(vm_reserv_queue, vm_reserv);

#define vm_reserv_lockptr(rv)           (&(rv)->lock)
#define vm_reserv_assert_locked(rv)                                     \
        mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define vm_reserv_lock(rv)              mtx_lock(vm_reserv_lockptr(rv))
#define vm_reserv_trylock(rv)           mtx_trylock(vm_reserv_lockptr(rv))
#define vm_reserv_unlock(rv)            mtx_unlock(vm_reserv_lockptr(rv))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
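/*
 * For example, assuming VM_LEVEL_0_SHIFT == 21 (2MB reservations), a small
 * page at the hypothetical physical address 0x40321000 has the physical
 * reservation number 0x40321000 >> 21 == 0x201, so vm_reserv_from_page()
 * resolves it to &vm_reserv_array[0x201].
 */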
/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of a queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
struct vm_reserv_domain {
        struct mtx      lock;
        struct vm_reserv_queue partpop; /* (d) */
        struct vm_reserv marker;        /* (d, s) scan marker/lock */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];

#define vm_reserv_domain_lockptr(d)     (&vm_rvd[(d)].lock)
#define vm_reserv_domain_assert_locked(d)       \
        mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
#define vm_reserv_domain_lock(d)        mtx_lock(vm_reserv_domain_lockptr(d))
#define vm_reserv_domain_unlock(d)      mtx_unlock(vm_reserv_domain_lockptr(d))

#define vm_reserv_domain_scan_lock(d)   mtx_lock(&vm_rvd[(d)].marker.lock)
#define vm_reserv_domain_scan_unlock(d) mtx_unlock(&vm_rvd[(d)].marker.lock)

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE |
    CTLFLAG_RD, NULL, 0, sysctl_vm_reserv_fullpop, "I",
    "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define VM_RESERV_OBJ_LOCK_COUNT        MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define vm_reserv_object_lock_idx(object)                       \
            (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define vm_reserv_object_lock_ptr(object)                       \
            &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define vm_reserv_object_lock(object)                           \
            mtx_lock(vm_reserv_object_lock_ptr((object)))
#define vm_reserv_object_unlock(object)                         \
            mtx_unlock(vm_reserv_object_lock_ptr((object)))
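/*
 * A sketch of the faster "hash" suggested above, assuming that
 * VM_RESERV_OBJ_LOCK_COUNT were made a power of two (MAXCPU need not be):
 * a shift-and-mask avoids both the divide and the modulo.  The shift count
 * is hypothetical; only the technique is the point.
 *
 *      #define vm_reserv_object_lock_idx(object)               \
 *          ((((uintptr_t)(object)) >> 8) & (VM_RESERV_OBJ_LOCK_COUNT - 1))
 */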
static void             vm_reserv_break(vm_reserv_t rv);
static void             vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
                            vm_pindex_t pindex);
static void             vm_reserv_populate(vm_reserv_t rv, int index);
static void             vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring any
 * locks, the returned value is inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        vm_reserv_t rv;
        int fullpop, segind;

        fullpop = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        return (sysctl_handle_int(oidp, &fullpop, 0, req));
}
/*
 * Describes the current state of the partially populated reservation queues.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        vm_reserv_t rv;
        int counter, error, domain, level, unused_pages;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
        for (domain = 0; domain < vm_ndomains; domain++) {
                for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                        counter = 0;
                        unused_pages = 0;
                        vm_reserv_domain_lock(domain);
                        TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
                                if (rv == &vm_rvd[domain].marker)
                                        continue;
                                counter++;
                                unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                        }
                        vm_reserv_domain_unlock(domain);
                        sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
                            domain, level,
                            unused_pages * ((int)PAGE_SIZE / 1024), counter);
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
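/*
 * Both handlers above back user-visible sysctls.  For example (the values
 * shown are illustrative only):
 *
 *      $ sysctl vm.reserv.fullpop
 *      vm.reserv.fullpop: 472
 *      $ sysctl vm.reserv.partpopq
 *      vm.reserv.partpopq:
 *      DOMAIN    LEVEL     SIZE  NUMBER
 *
 *           0,      -1,   7704K,     27
 */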
/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
        vm_object_t object;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_remove: reserv %p is free", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
        object = rv->object;
        vm_reserv_object_lock(object);
        LIST_REMOVE(rv, objq);
        rv->object = NULL;
        vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
        int i;

        vm_reserv_assert_locked(rv);
        CTR6(KTR_VM,
            "%s: rv %p(%p) object %p new %p popcnt %d",
            __FUNCTION__, rv, rv->pages, rv->object, object,
            rv->popcnt);
        KASSERT(rv->object == NULL,
            ("vm_reserv_insert: reserv %p isn't free", rv));
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
        for (i = 0; i < NPOPMAP; i++)
                KASSERT(rv->popmap[i] == 0,
                    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
        vm_reserv_object_lock(object);
        rv->pindex = pindex;
        rv->object = object;
        rv->lasttick = ticks;
        LIST_INSERT_HEAD(&object->rvq, rv, objq);
        vm_reserv_object_unlock(object);
}
/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
        struct vm_domain *vmd;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_depopulate: reserv %p is free", rv));
        KASSERT(popmap_is_set(rv->popmap, index),
            ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
            index));
        KASSERT(rv->popcnt > 0,
            ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        if (rv->popcnt == VM_LEVEL_0_NPAGES) {
                KASSERT(rv->pages->psind == 1,
                    ("vm_reserv_depopulate: reserv %p is already demoted",
                    rv));
                rv->pages->psind = 0;
        }
        popmap_clear(rv->popmap, index);
        rv->popcnt--;
        if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
            rv->popcnt == 0) {
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                if (rv->popcnt != 0) {
                        rv->inpartpopq = TRUE;
                        TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
                            partpopq);
                }
                vm_reserv_domain_unlock(rv->domain);
                rv->lasttick = ticks;
        }
        vmd = VM_DOMAIN(rv->domain);
        if (rv->popcnt == 0) {
                vm_reserv_remove(rv);
                vm_domain_free_lock(vmd);
                vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                counter_u64_add(vm_reserv_freed, 1);
        }
        vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

        return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
/*
 * Returns an existing reservation or NULL and initializes the successor
 * pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
        vm_reserv_t rv;
        vm_page_t msucc;

        msucc = NULL;
        if (mpred != NULL) {
                KASSERT(mpred->object == object,
                    ("vm_reserv_from_object: object doesn't contain mpred"));
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_from_object: mpred doesn't precede pindex"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }
        rv = NULL;

found:
        *msuccp = msucc;

        return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
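/*
 * A worked example of the containment test above, assuming 512-page
 * reservations: for a reservation with rv->pindex == 512, pindex 700 gives
 * (700 - 512) & ~511 == 0, so the reservation contains it, while pindex
 * 1030 gives (1030 - 512) & ~511 == 512 != 0, so it does not.
 */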
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_populate: reserv %p is free", rv));
        KASSERT(popmap_is_clear(rv->popmap, index),
            ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
            index));
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
            ("vm_reserv_populate: reserv %p is already full", rv));
        KASSERT(rv->pages->psind == 0,
            ("vm_reserv_populate: reserv %p is already promoted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_populate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        popmap_set(rv->popmap, index);
        rv->popcnt++;
        if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
            rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
                return;
        rv->lasttick = ticks;
        vm_reserv_domain_lock(rv->domain);
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
        } else {
                KASSERT(rv->pages->psind == 0,
                    ("vm_reserv_populate: reserv %p is already promoted",
                    rv));
                rv->pages->psind = 1;
        }
        vm_reserv_domain_unlock(rv->domain);
}
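/*
 * Note that the PARTPOPSLOP test above is what bounds queue churn: a
 * reservation absorbing many populate and depopulate calls is requeued at
 * most once per tick (roughly hz times per second) instead of once per
 * call, unless it becomes empty or full.
 */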
/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        struct vm_domain *vmd;
        vm_paddr_t pa, size;
        vm_page_t m, m_ret, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        u_long allocpages, maxpages, minpages;
        int i, index, n;

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex + npages > object->size)
                return (NULL);

        /*
         * All reservations of a particular size have the same alignment.
         * Assuming that the first page is allocated from a reservation, the
         * least significant bits of its physical address can be determined
         * from its offset from the beginning of the reservation and the size
         * of the reservation.
         *
         * Could the specified index within a reservation of the smallest
         * possible size satisfy the alignment and boundary requirements?
         */
        pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
        if ((pa & (alignment - 1)) != 0)
                return (NULL);
        size = npages << PAGE_SHIFT;
        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);
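        /*
         * A concrete instance of the alignment check above, assuming 2MB
         * reservations of 4KB pages: a page at reservation index "i" has a
         * physical address congruent to i << PAGE_SHIFT modulo
         * VM_LEVEL_0_SIZE, so a request with 64KB alignment can only be
         * satisfied at an index that is a multiple of 16.
         */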
        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_contig: domain mismatch"));
                index = VM_RESERV_INDEX(object, pindex);
                /* Does the allocation fit within the reservation? */
                if (index + npages > VM_LEVEL_0_NPAGES)
                        return (NULL);
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object)
                        goto out;
                m = &rv->pages[index];
                pa = VM_PAGE_TO_PHYS(m);
                if (pa < low || pa + size > high ||
                    (pa & (alignment - 1)) != 0 ||
                    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                        goto out;
                /* Handle vm_page_rename(m, new_object, ...). */
                for (i = 0; i < npages; i++)
                        if (popmap_is_set(rv->popmap, index + i))
                                goto out;
                if (!vm_domain_allocate(vmd, req, npages))
                        goto out;
                for (i = 0; i < npages; i++)
                        vm_reserv_populate(rv, index + i);
                vm_reserv_unlock(rv);
                return (m);
out:
                vm_reserv_unlock(rv);
                return (NULL);
        }

        /*
         * Could at least one reservation fit between the first index to the
         * left that can be used ("leftcap") and the first index to the right
         * that cannot be used ("rightcap")?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        minpages = VM_RESERV_INDEX(object, pindex) + npages;
        maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
        allocpages = maxpages;
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + maxpages > rightcap) {
                        if (maxpages == VM_LEVEL_0_NPAGES) {
                                vm_reserv_object_unlock(object);
                                return (NULL);
                        }

                        /*
                         * At least one reservation will fit between "leftcap"
                         * and "rightcap".  However, a reservation for the
                         * last of the requested pages will not fit.  Reduce
                         * the size of the upcoming allocation accordingly.
                         */
                        allocpages = minpages;
                }
        }
        vm_reserv_object_unlock(object);
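        /*
         * An assumed-numbers illustration of the capping logic above, with
         * 512-page reservations: if first == 1024 but mpred sits at pindex
         * 1100 and its physical page is not part of one of this object's
         * reservations, then leftcap == 1101 > first, so no reservation can
         * cover the block beginning at 1024.  When only "rightcap" cuts off
         * the final reservation of a multi-reservation request, the request
         * is instead trimmed to "minpages".
         */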
        /*
         * Would the last new reservation extend past the end of the object?
         *
         * If the object is unlikely to grow don't allocate a reservation for
         * the tail.
         */
        if ((object->flags & OBJ_ANON) == 0 &&
            first + maxpages > object->size) {
                if (maxpages == VM_LEVEL_0_NPAGES)
                        return (NULL);
                allocpages = minpages;
        }

        /*
         * Allocate the physical pages.  The alignment and boundary specified
         * for this allocation may be different from the alignment and
         * boundary specified for the requested pages.  For instance, the
         * specified index may not be the first page within the first new
         * reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, npages)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_contig(domain, allocpages, low, high,
                    ulmax(alignment, VM_LEVEL_0_SIZE),
                    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, npages);
                        return (NULL);
                }
        } else
                return (NULL);
        KASSERT(vm_phys_domain(m) == domain,
            ("vm_reserv_alloc_contig: Page domain does not match requested."));

        /*
         * The allocated physical pages always begin at a reservation
         * boundary, but they do not always end at a reservation boundary.
         * Initialize every reservation that is completely covered by the
         * allocated physical pages.
         */
        m_ret = NULL;
        index = VM_RESERV_INDEX(object, pindex);
        do {
                rv = vm_reserv_from_page(m);
                KASSERT(rv->pages == m,
                    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
                    rv));
                vm_reserv_lock(rv);
                vm_reserv_insert(rv, object, first);
                n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
                for (i = 0; i < n; i++)
                        vm_reserv_populate(rv, index + i);
                npages -= n;
                if (m_ret == NULL) {
                        m_ret = &rv->pages[index];
                        index = 0;
                }
                vm_reserv_unlock(rv);
                m += VM_LEVEL_0_NPAGES;
                first += VM_LEVEL_0_NPAGES;
                allocpages -= VM_LEVEL_0_NPAGES;
        } while (allocpages >= VM_LEVEL_0_NPAGES);
        return (m_ret);
}
/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
        struct vm_domain *vmd;
        vm_page_t m, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        int index;

        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_page: domain mismatch"));
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                index = VM_RESERV_INDEX(object, pindex);
                m = &rv->pages[index];
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object ||
                    /* Handle vm_page_rename(m, new_object, ...). */
                    popmap_is_set(rv->popmap, index)) {
                        m = NULL;
                        goto out;
                }
                if (vm_domain_allocate(vmd, req, 1) == 0)
                        m = NULL;
                else
                        vm_reserv_populate(rv, index);
out:
                vm_reserv_unlock(rv);
                return (m);
        }
        /*
         * Could a reservation fit between the first index to the left that
         * can be used and the first index to the right that cannot be used?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + VM_LEVEL_0_NPAGES > rightcap) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        vm_reserv_object_unlock(object);

        /*
         * Would the last new reservation extend past the end of the object?
         *
         * If the object is unlikely to grow don't allocate a reservation for
         * the tail.
         */
        if ((object->flags & OBJ_ANON) == 0 &&
            first + VM_LEVEL_0_NPAGES > object->size)
                return (NULL);

        /*
         * Allocate and populate the new reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, 1)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
                    VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, 1);
                        return (NULL);
                }
        } else
                return (NULL);
        rv = vm_reserv_from_page(m);
        vm_reserv_lock(rv);
        KASSERT(rv->pages == m,
            ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
        vm_reserv_insert(rv, object, first);
        index = VM_RESERV_INDEX(object, pindex);
        vm_reserv_populate(rv, index);
        vm_reserv_unlock(rv);

        return (&rv->pages[index]);
}
/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
        u_long changes;
        int bitpos, hi, i, lo;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        vm_reserv_remove(rv);
        rv->pages->psind = 0;
        hi = lo = -1;
        for (i = 0; i <= NPOPMAP; i++) {
                /*
                 * "changes" is a bitmask that marks where a new sequence of
                 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
                 * considered to be 1 if and only if lo == hi.  The bits of
                 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
                 */
                if (i == NPOPMAP)
                        changes = lo != hi;
                else {
                        changes = rv->popmap[i];
                        changes ^= (changes << 1) | (lo == hi);
                        rv->popmap[i] = 0;
                }
                while (changes != 0) {
                        /*
                         * If the next change marked begins a run of 0s, set
                         * lo to mark that position.  Otherwise set hi and
                         * free pages from lo up to hi.
                         */
                        bitpos = ffsl(changes) - 1;
                        changes ^= 1UL << bitpos;
                        if (lo == hi)
                                lo = NBPOPMAP * i + bitpos;
                        else {
                                hi = NBPOPMAP * i + bitpos;
                                vm_domain_free_lock(VM_DOMAIN(rv->domain));
                                vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
                                vm_domain_free_unlock(VM_DOMAIN(rv->domain));
                                lo = hi;
                        }
                }
        }
        rv->popcnt = 0;
        counter_u64_add(vm_reserv_broken, 1);
}
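/*
 * A worked instance of the run detection in vm_reserv_break() above, using a
 * hypothetical 8-bit popmap word for brevity: for popmap[0] == 0b00111100
 * with lo == hi initially,
 *
 *      changes = 0b00111100 ^ ((0b00111100 << 1) | 1) == 0b01000101
 *
 * Bits 0, 2, and 6 mark the transitions: the scan sets lo = 0, then hi = 2
 * and frees pages [0, 2), then lo = 6, and the final iteration (i == NPOPMAP)
 * frees the trailing run of free pages beginning at page 6.
 */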
/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
        vm_reserv_t rv;

        /*
         * This access of object->rvq is unsynchronized so that the
         * object rvq lock can nest after the domain_free lock.  We
         * must check for races in the results.  However, the object
         * lock prevents new additions, so we are guaranteed that when
         * it returns NULL the object is properly empty.
         */
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                vm_reserv_lock(rv);
                /* Reclaim race. */
                if (rv->object != object) {
                        vm_reserv_unlock(rv);
                        continue;
                }
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                vm_reserv_domain_unlock(rv->domain);
                vm_reserv_break(rv);
                vm_reserv_unlock(rv);
        }
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
        vm_reserv_t rv;
        boolean_t ret;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        vm_reserv_lock(rv);
        /* Re-validate after lock. */
        if (rv->object != NULL) {
                vm_reserv_depopulate(rv, m - rv->pages);
                ret = TRUE;
        } else
                ret = FALSE;
        vm_reserv_unlock(rv);

        return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        struct vm_reserv *rv;
        struct vm_reserv_domain *rvd;
        int i, j, segind;

        /*
         * Initialize the reservation array.  Specifically, initialize the
         * "pages" field for every element that has an underlying superpage.
         */
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        rv->pages = PHYS_TO_VM_PAGE(paddr);
                        rv->domain = seg->domain;
                        mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        for (i = 0; i < MAXMEMDOM; i++) {
                rvd = &vm_rvd[i];
                mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
                TAILQ_INIT(&rvd->partpop);
                mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);

                /*
                 * Fully populated reservations should never be present in the
                 * partially populated reservation queues.
                 */
                rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
                for (j = 0; j < NBPOPMAP; j++)
                        popmap_set(rvd->marker.popmap, j);
        }

        for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
                mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
                    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (false);
        return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Remove a partially populated reservation from the queue.
 */
static void
vm_reserv_dequeue(vm_reserv_t rv)
{

	vm_reserv_domain_assert_locked(rv->domain);
	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_dequeue: reserv %p's inpartpopq is FALSE", rv));

	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	if (rv->inpartpopq) {
		vm_reserv_domain_lock(rv->domain);
		vm_reserv_dequeue(rv);
		vm_reserv_domain_unlock(rv->domain);
	}
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks a reservation near the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * true if a reservation is broken and false otherwise.
 */
bool
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
		/*
		 * A locked reservation is likely being updated or reclaimed,
		 * so just skip ahead.
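		 *
		 * The trylock also preserves the lock order: the domain lock
		 * is held across this scan, whereas vm_reserv_reclaim()
		 * above acquires the reservation lock before the domain
		 * lock, so sleeping on a reservation lock here could
		 * deadlock.  The trylock fails instead of sleeping.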
		 */
		if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
			vm_reserv_dequeue(rv);
			break;
		}
	}
	vm_reserv_domain_unlock(domain);
	if (rv != NULL) {
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (true);
	}
	return (false);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by "low".
 */
static bool
vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	u_long changes;
	int bitpos, bits_left, i, hi, lo, n;

	vm_reserv_assert_locked(rv);
	size = npages << PAGE_SHIFT;
	pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
	lo = (pa < low) ?
	    ((low + PAGE_MASK - pa) >> PAGE_SHIFT) : 0;
	i = lo / NBPOPMAP;
	changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1);
	hi = (pa + VM_LEVEL_0_SIZE > high) ?
	    ((high + PAGE_MASK - pa) >> PAGE_SHIFT) : VM_LEVEL_0_NPAGES;
	n = hi / NBPOPMAP;
	bits_left = hi % NBPOPMAP;
	hi = lo = -1;
	for (;;) {
		/*
		 * "changes" is a bitmask that marks where a new sequence of
		 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
		 * considered to be 1 if and only if lo == hi.  The bits of
		 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
		 */
		changes ^= (changes << 1) | (lo == hi);
		while (changes != 0) {
			/*
			 * If the next change marked begins a run of 0s, set
			 * lo to mark that position.  Otherwise set hi and
			 * look for a satisfactory first page from lo up to hi.
			 */
			bitpos = ffsl(changes) - 1;
			changes ^= 1UL << bitpos;
			if (lo == hi) {
				lo = NBPOPMAP * i + bitpos;
				continue;
			}
			hi = NBPOPMAP * i + bitpos;
			pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
			if ((pa & (alignment - 1)) != 0) {
				/*
				 * Skip to next aligned page.
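				 *
				 * For a power-of-two alignment,
				 * ((pa - 1) | (alignment - 1)) + 1 is the
				 * usual branch-free idiom for rounding pa up
				 * to the next multiple of alignment, leaving
				 * pa unchanged when it is already aligned;
				 * e.g., for pa 0x5000 and alignment 0x4000
				 * it yields 0x8000.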
				 */
				lo += (((pa - 1) | (alignment - 1)) + 1) >>
				    PAGE_SHIFT;
				if (lo >= VM_LEVEL_0_NPAGES)
					return (false);
				pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
			}
			if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/* Skip to next boundary-matching page. */
				lo += (((pa - 1) | (boundary - 1)) + 1) >>
				    PAGE_SHIFT;
				if (lo >= VM_LEVEL_0_NPAGES)
					return (false);
				pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
			}
			if (lo * PAGE_SIZE + size <= hi * PAGE_SIZE)
				return (true);
			lo = hi;
		}
		if (++i < n)
			changes = rv->popmap[i];
		else if (i == n)
			changes = bits_left == 0 ? -1UL :
			    (rv->popmap[n] | (-1UL << bits_left));
		else
			return (false);
	}
}

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns true if a reservation is broken and false otherwise.
 */
bool
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_reserv_queue *queue;
	vm_paddr_t pa, size;
	vm_reserv_t marker, rv, rvn;

	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (false);
	marker = &vm_rvd[domain].marker;
	queue = &vm_rvd[domain].partpop;
	size = npages << PAGE_SHIFT;

	vm_reserv_domain_scan_lock(domain);
	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + VM_LEVEL_0_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		if (pa + size > high) {
			/*
			 * This entire reservation is too high; go to next.
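			 *
			 * Together with the "too low" test above, this
			 * screens reservations by address alone, before any
			 * reservation lock is taken: a run of npages must
			 * begin at or above "low" and end at or below
			 * "high", so the reservation is viable only if
			 * pa + VM_LEVEL_0_SIZE - size >= low and
			 * pa + size <= high both hold.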
			 */
			continue;
		}

		if (vm_reserv_trylock(rv) == 0) {
			TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (!rv->inpartpopq ||
			    TAILQ_NEXT(rv, partpopq) != marker) {
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				rvn = TAILQ_NEXT(marker, partpopq);
				TAILQ_REMOVE(queue, marker, partpopq);
				continue;
			}
			vm_reserv_domain_lock(domain);
			TAILQ_REMOVE(queue, marker, partpopq);
		}
		vm_reserv_domain_unlock(domain);
		if (vm_reserv_test_contig(rv, npages, low, high,
		    alignment, boundary)) {
			vm_reserv_domain_scan_unlock(domain);
			vm_reserv_reclaim(rv);
			vm_reserv_unlock(rv);
			return (true);
		}
		vm_reserv_unlock(rv);
		vm_reserv_domain_lock(domain);
	}
	vm_reserv_domain_unlock(domain);
	vm_reserv_domain_scan_unlock(domain);
	return (false);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The new object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
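 *
 * On amd64, for example, where VM_NRESERVLEVEL is 1, level 0 is the only
 * reservation level and vm_reserv_size(0) is 2MB.  Level -1 denotes an
 * ordinary page, matching the value returned by vm_reserv_level() for a
 * page that belongs to no reservation, so vm_reserv_size(-1) is PAGE_SIZE.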
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
{
	vm_paddr_t new_end, high_water;
	size_t size;
	int i;

	high_water = phys_avail[1];
	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].end > high_water)
			high_water = vm_phys_segs[i].end;
	}

	/* Skip the first chunk.  It is already accounted for. */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
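 *
 * The counters are counter(9) counters and thus require per-CPU storage,
 * which is not yet set up when vm_reserv_startup() runs; hence their
 * allocation is deferred to the SYSINIT below, at SI_SUB_CPU.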
 */
static void
vm_reserv_counter_init(void *unused)
{

	vm_reserv_freed = counter_u64_alloc(M_WAITOK);
	vm_reserv_broken = counter_u64_alloc(M_WAITOK);
	vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
		m = rv->pages;
	else
		m = NULL;

	return (m);
}

#endif	/* VM_NRESERVLEVEL > 0 */
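
/*
 * A sketch (not code from this module) of how a caller in the physical
 * memory allocator is expected to combine a contiguous allocation with
 * reclamation; the authoritative fallback logic lives in vm_page.c:
 *
 *	m = vm_page_alloc_contig(object, pindex, req, npages, low, high,
 *	    alignment, boundary, memattr);
 *	if (m == NULL && vm_reserv_reclaim_contig(domain, npages, low,
 *	    high, alignment, boundary)) {
 *		retry the allocation: breaking a partially populated
 *		reservation has just released free, contiguous pages
 *	}
 */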