1f8a47341SAlan Cox /*- 2fe267a55SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3fe267a55SPedro F. Giffuni * 4f8a47341SAlan Cox * Copyright (c) 2002-2006 Rice University 5ec179322SAlan Cox * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu> 6f8a47341SAlan Cox * All rights reserved. 7f8a47341SAlan Cox * 8f8a47341SAlan Cox * This software was developed for the FreeBSD Project by Alan L. Cox, 9f8a47341SAlan Cox * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro. 10f8a47341SAlan Cox * 11f8a47341SAlan Cox * Redistribution and use in source and binary forms, with or without 12f8a47341SAlan Cox * modification, are permitted provided that the following conditions 13f8a47341SAlan Cox * are met: 14f8a47341SAlan Cox * 1. Redistributions of source code must retain the above copyright 15f8a47341SAlan Cox * notice, this list of conditions and the following disclaimer. 16f8a47341SAlan Cox * 2. Redistributions in binary form must reproduce the above copyright 17f8a47341SAlan Cox * notice, this list of conditions and the following disclaimer in the 18f8a47341SAlan Cox * documentation and/or other materials provided with the distribution. 19f8a47341SAlan Cox * 20f8a47341SAlan Cox * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21f8a47341SAlan Cox * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22f8a47341SAlan Cox * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23f8a47341SAlan Cox * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24f8a47341SAlan Cox * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25f8a47341SAlan Cox * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26f8a47341SAlan Cox * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 27f8a47341SAlan Cox * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28f8a47341SAlan Cox * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29f8a47341SAlan Cox * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 30f8a47341SAlan Cox * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31f8a47341SAlan Cox * POSSIBILITY OF SUCH DAMAGE. 32f8a47341SAlan Cox */ 33f8a47341SAlan Cox 34f8a47341SAlan Cox /* 35f8a47341SAlan Cox * Superpage reservation management module 36c68c3537SAlan Cox * 37c68c3537SAlan Cox * Any external functions defined by this module are only to be used by the 38c68c3537SAlan Cox * virtual memory system. 
39f8a47341SAlan Cox */ 40f8a47341SAlan Cox 41f8a47341SAlan Cox #include <sys/cdefs.h> 42f8a47341SAlan Cox __FBSDID("$FreeBSD$"); 43f8a47341SAlan Cox 44f8a47341SAlan Cox #include "opt_vm.h" 45f8a47341SAlan Cox 46f8a47341SAlan Cox #include <sys/param.h> 47f8a47341SAlan Cox #include <sys/kernel.h> 48f8a47341SAlan Cox #include <sys/lock.h> 49f8a47341SAlan Cox #include <sys/malloc.h> 50f8a47341SAlan Cox #include <sys/mutex.h> 51f8a47341SAlan Cox #include <sys/queue.h> 5289f6b863SAttilio Rao #include <sys/rwlock.h> 53f8a47341SAlan Cox #include <sys/sbuf.h> 54f8a47341SAlan Cox #include <sys/sysctl.h> 55f8a47341SAlan Cox #include <sys/systm.h> 5672346b22SCy Schubert #include <sys/counter.h> 5772346b22SCy Schubert #include <sys/ktr.h> 589ed01c32SGleb Smirnoff #include <sys/vmmeter.h> 595c930c89SJeff Roberson #include <sys/smp.h> 60f8a47341SAlan Cox 61f8a47341SAlan Cox #include <vm/vm.h> 62f8a47341SAlan Cox #include <vm/vm_param.h> 63f8a47341SAlan Cox #include <vm/vm_object.h> 64f8a47341SAlan Cox #include <vm/vm_page.h> 65e2068d0bSJeff Roberson #include <vm/vm_pageout.h> 66e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h> 67*431fb8abSMark Johnston #include <vm/vm_phys.h> 68774d251dSAttilio Rao #include <vm/vm_radix.h> 69f8a47341SAlan Cox #include <vm/vm_reserv.h> 70f8a47341SAlan Cox 71f8a47341SAlan Cox /* 72f8a47341SAlan Cox * The reservation system supports the speculative allocation of large physical 733453bca8SAlan Cox * pages ("superpages"). Speculative allocation enables the fully automatic 74f8a47341SAlan Cox * utilization of superpages by the virtual memory system. In other words, no 75f8a47341SAlan Cox * programmatic directives are required to use superpages. 76f8a47341SAlan Cox */ 77f8a47341SAlan Cox 78f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 79f8a47341SAlan Cox 80f2a496d6SKonstantin Belousov #ifndef VM_LEVEL_0_ORDER_MAX 81f2a496d6SKonstantin Belousov #define VM_LEVEL_0_ORDER_MAX VM_LEVEL_0_ORDER 82f2a496d6SKonstantin Belousov #endif 83f2a496d6SKonstantin Belousov 84f8a47341SAlan Cox /* 85f8a47341SAlan Cox * The number of small pages that are contained in a level 0 reservation 86f8a47341SAlan Cox */ 87f8a47341SAlan Cox #define VM_LEVEL_0_NPAGES (1 << VM_LEVEL_0_ORDER) 88f2a496d6SKonstantin Belousov #define VM_LEVEL_0_NPAGES_MAX (1 << VM_LEVEL_0_ORDER_MAX) 89f8a47341SAlan Cox 90f8a47341SAlan Cox /* 91f8a47341SAlan Cox * The number of bits by which a physical address is shifted to obtain the 92f8a47341SAlan Cox * reservation number 93f8a47341SAlan Cox */ 94f8a47341SAlan Cox #define VM_LEVEL_0_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT) 95f8a47341SAlan Cox 96f8a47341SAlan Cox /* 97f8a47341SAlan Cox * The size of a level 0 reservation in bytes 98f8a47341SAlan Cox */ 99f8a47341SAlan Cox #define VM_LEVEL_0_SIZE (1 << VM_LEVEL_0_SHIFT) 100f8a47341SAlan Cox 101f8a47341SAlan Cox /* 102f8a47341SAlan Cox * Computes the index of the small page underlying the given (object, pindex) 103f8a47341SAlan Cox * within the reservation's array of small pages. 
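 *
 * For example, on a configuration where VM_LEVEL_0_ORDER is 9 (512 small
 * pages per reservation), an object with a pg_color of 5 and a pindex of
 * 1000 yields index (5 + 1000) & 511 == 493.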
104f8a47341SAlan Cox */ 105f8a47341SAlan Cox #define VM_RESERV_INDEX(object, pindex) \ 106f8a47341SAlan Cox (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1)) 107f8a47341SAlan Cox 108f8a47341SAlan Cox /* 109ec179322SAlan Cox * The size of a population map entry 110ec179322SAlan Cox */ 111ec179322SAlan Cox typedef u_long popmap_t; 112ec179322SAlan Cox 113ec179322SAlan Cox /* 114ec179322SAlan Cox * The number of bits in a population map entry 115ec179322SAlan Cox */ 116ec179322SAlan Cox #define NBPOPMAP (NBBY * sizeof(popmap_t)) 117ec179322SAlan Cox 118ec179322SAlan Cox /* 119ec179322SAlan Cox * The number of population map entries in a reservation 120ec179322SAlan Cox */ 121ec179322SAlan Cox #define NPOPMAP howmany(VM_LEVEL_0_NPAGES, NBPOPMAP) 122f2a496d6SKonstantin Belousov #define NPOPMAP_MAX howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP) 123ec179322SAlan Cox 124ec179322SAlan Cox /* 1252ef6727eSJeff Roberson * Number of elapsed ticks before we update the LRU queue position. Used 1262ef6727eSJeff Roberson * to reduce contention and churn on the list. 1272ef6727eSJeff Roberson */ 1282ef6727eSJeff Roberson #define PARTPOPSLOP 1 1292ef6727eSJeff Roberson 1302ef6727eSJeff Roberson /* 1313180f757SAlan Cox * Clear a bit in the population map. 1323180f757SAlan Cox */ 1333180f757SAlan Cox static __inline void 1343180f757SAlan Cox popmap_clear(popmap_t popmap[], int i) 1353180f757SAlan Cox { 1363180f757SAlan Cox 1373180f757SAlan Cox popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP)); 1383180f757SAlan Cox } 1393180f757SAlan Cox 1403180f757SAlan Cox /* 1413180f757SAlan Cox * Set a bit in the population map. 1423180f757SAlan Cox */ 1433180f757SAlan Cox static __inline void 1443180f757SAlan Cox popmap_set(popmap_t popmap[], int i) 1453180f757SAlan Cox { 1463180f757SAlan Cox 1473180f757SAlan Cox popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP); 1483180f757SAlan Cox } 1493180f757SAlan Cox 1503180f757SAlan Cox /* 1513180f757SAlan Cox * Is a bit in the population map clear? 1523180f757SAlan Cox */ 1533180f757SAlan Cox static __inline boolean_t 1543180f757SAlan Cox popmap_is_clear(popmap_t popmap[], int i) 1553180f757SAlan Cox { 1563180f757SAlan Cox 1573180f757SAlan Cox return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0); 1583180f757SAlan Cox } 1593180f757SAlan Cox 1603180f757SAlan Cox /* 1613180f757SAlan Cox * Is a bit in the population map set? 1623180f757SAlan Cox */ 1633180f757SAlan Cox static __inline boolean_t 1643180f757SAlan Cox popmap_is_set(popmap_t popmap[], int i) 1653180f757SAlan Cox { 1663180f757SAlan Cox 1673180f757SAlan Cox return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0); 1683180f757SAlan Cox } 1693180f757SAlan Cox 1703180f757SAlan Cox /* 171f8a47341SAlan Cox * The reservation structure 172f8a47341SAlan Cox * 173f8a47341SAlan Cox * A reservation structure is constructed whenever a large physical page is 174f8a47341SAlan Cox * speculatively allocated to an object. The reservation provides the small 175f8a47341SAlan Cox * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets 176f8a47341SAlan Cox * within that object. The reservation's "popcnt" tracks the number of these 177f8a47341SAlan Cox * small physical pages that are in use at any given time. When and if the 1783453bca8SAlan Cox * reservation is not fully utilized, it appears in the queue of partially 179f8a47341SAlan Cox * populated reservations. The reservation always appears on the containing 180f8a47341SAlan Cox * object's list of reservations. 
181f8a47341SAlan Cox  *
1823453bca8SAlan Cox  * A partially populated reservation can be broken and reclaimed at any time.
183e2068d0bSJeff Roberson  *
184b378d296SMark Johnston  * c - constant after boot
1855c930c89SJeff Roberson  * d - vm_reserv_domain_lock
186e2068d0bSJeff Roberson  * o - vm_reserv_object_lock
187b378d296SMark Johnston  * r - vm_reserv_lock
188b378d296SMark Johnston  * s - vm_reserv_domain_scan_lock
189f8a47341SAlan Cox  */
190f8a47341SAlan Cox struct vm_reserv {
1915c930c89SJeff Roberson 	struct mtx lock;			/* reservation lock. */
192fe6d5344SMark Johnston 	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
1935c930c89SJeff Roberson 	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
1945c930c89SJeff Roberson 	vm_object_t object;			/* (o, r) containing object */
1955c930c89SJeff Roberson 	vm_pindex_t pindex;			/* (o, r) offset in object */
196e2068d0bSJeff Roberson 	vm_page_t pages;			/* (c) first page */
1975c930c89SJeff Roberson 	uint16_t popcnt;			/* (r) # of pages in use */
198fe6d5344SMark Johnston 	uint8_t domain;				/* (c) NUMA domain. */
199fe6d5344SMark Johnston 	char inpartpopq;			/* (d, r) */
2002ef6727eSJeff Roberson 	int lasttick;				/* (r) last pop update tick. */
201f2a496d6SKonstantin Belousov 	popmap_t popmap[NPOPMAP_MAX];		/* (r) bit vector, used pages */
202f8a47341SAlan Cox };
203f8a47341SAlan Cox 
204b378d296SMark Johnston TAILQ_HEAD(vm_reserv_queue, vm_reserv);
205b378d296SMark Johnston 
2065c930c89SJeff Roberson #define	vm_reserv_lockptr(rv)		(&(rv)->lock)
2075c930c89SJeff Roberson #define	vm_reserv_assert_locked(rv)	\
2085c930c89SJeff Roberson 	mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
2095c930c89SJeff Roberson #define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
2105c930c89SJeff Roberson #define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
2115c930c89SJeff Roberson #define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))
2125c930c89SJeff Roberson 
213f8a47341SAlan Cox /*
214f8a47341SAlan Cox  * The reservation array
215f8a47341SAlan Cox  *
216f8a47341SAlan Cox  * This array is analogous in function to vm_page_array.  It differs in the
217f8a47341SAlan Cox  * respect that it may contain a greater number of reservation structures
218f8a47341SAlan Cox  * than there are (physical) superpages.  These "invalid" reservation
219f8a47341SAlan Cox  * structures exist to trade off space for time in the implementation of
220f8a47341SAlan Cox  * vm_reserv_from_page().  Invalid reservation structures are
221f8a47341SAlan Cox  * distinguishable from "valid" reservation structures by inspecting the
222f8a47341SAlan Cox  * reservation's "pages" field.  Invalid reservation structures have a NULL
223f8a47341SAlan Cox  * "pages" field.
224f8a47341SAlan Cox  *
225f8a47341SAlan Cox  * vm_reserv_from_page() maps a small (physical) page to an element of this
226f8a47341SAlan Cox  * array by computing a physical reservation number from the page's physical
227f8a47341SAlan Cox  * address.  The physical reservation number is used as the array index.
228f8a47341SAlan Cox  *
229f8a47341SAlan Cox  * An "active" reservation is a valid reservation structure that has a non-NULL
230f8a47341SAlan Cox  * "object" field and a non-zero "popcnt" field.  In other words, every active
231f8a47341SAlan Cox  * reservation belongs to a particular object.  Moreover, every active
232f8a47341SAlan Cox  * reservation has an entry in the containing object's list of reservations.
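 *
 * For example, with 4KB small pages and a VM_LEVEL_0_ORDER of 9,
 * VM_LEVEL_0_SHIFT is 21, so a page at physical address 0x40200000 has
 * physical reservation number 0x40200000 >> 21 == 0x201.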
233f8a47341SAlan Cox */ 234f8a47341SAlan Cox static vm_reserv_t vm_reserv_array; 235f8a47341SAlan Cox 236f8a47341SAlan Cox /* 237fe6d5344SMark Johnston * The per-domain partially populated reservation queues 238f8a47341SAlan Cox * 239fe6d5344SMark Johnston * These queues enable the fast recovery of an unused free small page from a 240fe6d5344SMark Johnston * partially populated reservation. The reservation at the head of a queue 2413453bca8SAlan Cox * is the least recently changed, partially populated reservation. 242f8a47341SAlan Cox * 243fe6d5344SMark Johnston * Access to this queue is synchronized by the per-domain reservation lock. 244b378d296SMark Johnston * Threads reclaiming free pages from the queue must hold the per-domain scan 245b378d296SMark Johnston * lock. 246f8a47341SAlan Cox */ 247fe6d5344SMark Johnston struct vm_reserv_domain { 248fe6d5344SMark Johnston struct mtx lock; 249b378d296SMark Johnston struct vm_reserv_queue partpop; /* (d) */ 250b378d296SMark Johnston struct vm_reserv marker; /* (d, s) scan marker/lock */ 251fe6d5344SMark Johnston } __aligned(CACHE_LINE_SIZE); 252fe6d5344SMark Johnston 253fe6d5344SMark Johnston static struct vm_reserv_domain vm_rvd[MAXMEMDOM]; 254fe6d5344SMark Johnston 255fe6d5344SMark Johnston #define vm_reserv_domain_lockptr(d) (&vm_rvd[(d)].lock) 256b378d296SMark Johnston #define vm_reserv_domain_assert_locked(d) \ 257b378d296SMark Johnston mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED) 258fe6d5344SMark Johnston #define vm_reserv_domain_lock(d) mtx_lock(vm_reserv_domain_lockptr(d)) 259fe6d5344SMark Johnston #define vm_reserv_domain_unlock(d) mtx_unlock(vm_reserv_domain_lockptr(d)) 260f8a47341SAlan Cox 261b378d296SMark Johnston #define vm_reserv_domain_scan_lock(d) mtx_lock(&vm_rvd[(d)].marker.lock) 262b378d296SMark Johnston #define vm_reserv_domain_scan_unlock(d) mtx_unlock(&vm_rvd[(d)].marker.lock) 263b378d296SMark Johnston 2647029da5cSPawel Biernacki static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 2657029da5cSPawel Biernacki "Reservation Info"); 266f8a47341SAlan Cox 267d869a17eSMark Johnston static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken); 2685c930c89SJeff Roberson SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD, 2695c930c89SJeff Roberson &vm_reserv_broken, "Cumulative number of broken reservations"); 270f8a47341SAlan Cox 271d869a17eSMark Johnston static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed); 2725c930c89SJeff Roberson SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD, 2735c930c89SJeff Roberson &vm_reserv_freed, "Cumulative number of freed reservations"); 274f8a47341SAlan Cox 275e0a63baaSAlan Cox static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS); 276e0a63baaSAlan Cox 277a314aba8SMateusz Guzik SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD, 278a314aba8SMateusz Guzik NULL, 0, sysctl_vm_reserv_fullpop, "I", "Current number of full reservations"); 279e0a63baaSAlan Cox 280f8a47341SAlan Cox static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS); 281f8a47341SAlan Cox 2827029da5cSPawel Biernacki SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, 283114484b7SMark Johnston CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 2847029da5cSPawel Biernacki sysctl_vm_reserv_partpopq, "A", 2857029da5cSPawel Biernacki "Partially populated reservation queues"); 286f8a47341SAlan Cox 287d869a17eSMark Johnston static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed); 2885c930c89SJeff Roberson SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD, 
2895c930c89SJeff Roberson     &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");
290f8a47341SAlan Cox 
291e2068d0bSJeff Roberson /*
292e2068d0bSJeff Roberson  * The object lock pool is used to synchronize the rvq.  We cannot use a
293e2068d0bSJeff Roberson  * pool mutex because these locks are needed before malloc(9) works.
294e2068d0bSJeff Roberson  *
295e2068d0bSJeff Roberson  * The "hash" function could be made faster without divide and modulo.
296e2068d0bSJeff Roberson  */
297e2068d0bSJeff Roberson #define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU
298e2068d0bSJeff Roberson 
299e2068d0bSJeff Roberson struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];
300e2068d0bSJeff Roberson 
301e2068d0bSJeff Roberson #define	vm_reserv_object_lock_idx(object) \
302e2068d0bSJeff Roberson     (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
303e2068d0bSJeff Roberson #define	vm_reserv_object_lock_ptr(object) \
304e2068d0bSJeff Roberson     &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
305e2068d0bSJeff Roberson #define	vm_reserv_object_lock(object) \
306e2068d0bSJeff Roberson     mtx_lock(vm_reserv_object_lock_ptr((object)))
307e2068d0bSJeff Roberson #define	vm_reserv_object_unlock(object) \
308e2068d0bSJeff Roberson     mtx_unlock(vm_reserv_object_lock_ptr((object)))
309e2068d0bSJeff Roberson 
310ada27a3bSKonstantin Belousov static void		vm_reserv_break(vm_reserv_t rv);
311ec179322SAlan Cox static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
312f8a47341SAlan Cox static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
313f8a47341SAlan Cox static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
314f8a47341SAlan Cox 			    vm_pindex_t pindex);
315ec179322SAlan Cox static void		vm_reserv_populate(vm_reserv_t rv, int index);
31644aab2c3SAlan Cox static void		vm_reserv_reclaim(vm_reserv_t rv);
317f8a47341SAlan Cox 
318f8a47341SAlan Cox /*
319e0a63baaSAlan Cox  * Returns the current number of full reservations.
320e0a63baaSAlan Cox  *
321fe6d5344SMark Johnston  * Since the number of full reservations is computed without acquiring any
322fe6d5344SMark Johnston  * locks, the returned value is inexact.
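 *
 * The count is exported read-only as the sysctl "vm.reserv.fullpop" and is a
 * snapshot intended for monitoring only.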
323e0a63baaSAlan Cox */ 324e0a63baaSAlan Cox static int 325e0a63baaSAlan Cox sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS) 326e0a63baaSAlan Cox { 327e0a63baaSAlan Cox vm_paddr_t paddr; 328e0a63baaSAlan Cox struct vm_phys_seg *seg; 329e0a63baaSAlan Cox vm_reserv_t rv; 330e0a63baaSAlan Cox int fullpop, segind; 331e0a63baaSAlan Cox 332e0a63baaSAlan Cox fullpop = 0; 333e0a63baaSAlan Cox for (segind = 0; segind < vm_phys_nsegs; segind++) { 334e0a63baaSAlan Cox seg = &vm_phys_segs[segind]; 335e0a63baaSAlan Cox paddr = roundup2(seg->start, VM_LEVEL_0_SIZE); 3367988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE 3377988971aSD Scott Phillips rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) - 3387988971aSD Scott Phillips (seg->start >> VM_LEVEL_0_SHIFT); 3397988971aSD Scott Phillips #else 3407988971aSD Scott Phillips rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT]; 3417988971aSD Scott Phillips #endif 3426b821a74SAleksandr Rybalko while (paddr + VM_LEVEL_0_SIZE > paddr && paddr + 3436b821a74SAleksandr Rybalko VM_LEVEL_0_SIZE <= seg->end) { 344e0a63baaSAlan Cox fullpop += rv->popcnt == VM_LEVEL_0_NPAGES; 345e0a63baaSAlan Cox paddr += VM_LEVEL_0_SIZE; 3467988971aSD Scott Phillips rv++; 347e0a63baaSAlan Cox } 348e0a63baaSAlan Cox } 349e0a63baaSAlan Cox return (sysctl_handle_int(oidp, &fullpop, 0, req)); 350e0a63baaSAlan Cox } 351e0a63baaSAlan Cox 352e0a63baaSAlan Cox /* 3533453bca8SAlan Cox * Describes the current state of the partially populated reservation queue. 354f8a47341SAlan Cox */ 355f8a47341SAlan Cox static int 356f8a47341SAlan Cox sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS) 357f8a47341SAlan Cox { 358f8a47341SAlan Cox struct sbuf sbuf; 359f8a47341SAlan Cox vm_reserv_t rv; 360ef435ae7SJeff Roberson int counter, error, domain, level, unused_pages; 361f8a47341SAlan Cox 36200f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0); 36300f0e671SMatthew D Fleming if (error != 0) 36400f0e671SMatthew D Fleming return (error); 3654e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 366ef435ae7SJeff Roberson sbuf_printf(&sbuf, "\nDOMAIN LEVEL SIZE NUMBER\n\n"); 367ef435ae7SJeff Roberson for (domain = 0; domain < vm_ndomains; domain++) { 368f8a47341SAlan Cox for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) { 369f8a47341SAlan Cox counter = 0; 370f8a47341SAlan Cox unused_pages = 0; 3715c930c89SJeff Roberson vm_reserv_domain_lock(domain); 372fe6d5344SMark Johnston TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { 373b378d296SMark Johnston if (rv == &vm_rvd[domain].marker) 374b378d296SMark Johnston continue; 375f8a47341SAlan Cox counter++; 376f8a47341SAlan Cox unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt; 377f8a47341SAlan Cox } 3785c930c89SJeff Roberson vm_reserv_domain_unlock(domain); 379ef435ae7SJeff Roberson sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n", 380ef435ae7SJeff Roberson domain, level, 3812cf36c8fSAlan Cox unused_pages * ((int)PAGE_SIZE / 1024), counter); 382f8a47341SAlan Cox } 383ef435ae7SJeff Roberson } 3844e657159SMatthew D Fleming error = sbuf_finish(&sbuf); 385f8a47341SAlan Cox sbuf_delete(&sbuf); 386f8a47341SAlan Cox return (error); 387f8a47341SAlan Cox } 388f8a47341SAlan Cox 389f8a47341SAlan Cox /* 390e2068d0bSJeff Roberson * Remove a reservation from the object's objq. 
391e2068d0bSJeff Roberson */ 392e2068d0bSJeff Roberson static void 393e2068d0bSJeff Roberson vm_reserv_remove(vm_reserv_t rv) 394e2068d0bSJeff Roberson { 395e2068d0bSJeff Roberson vm_object_t object; 396e2068d0bSJeff Roberson 3975c930c89SJeff Roberson vm_reserv_assert_locked(rv); 3985c930c89SJeff Roberson CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 3995c930c89SJeff Roberson __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 400e2068d0bSJeff Roberson KASSERT(rv->object != NULL, 401e2068d0bSJeff Roberson ("vm_reserv_remove: reserv %p is free", rv)); 402e2068d0bSJeff Roberson KASSERT(!rv->inpartpopq, 403e2068d0bSJeff Roberson ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv)); 404e2068d0bSJeff Roberson object = rv->object; 405e2068d0bSJeff Roberson vm_reserv_object_lock(object); 406e2068d0bSJeff Roberson LIST_REMOVE(rv, objq); 407e2068d0bSJeff Roberson rv->object = NULL; 408e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 409e2068d0bSJeff Roberson } 410e2068d0bSJeff Roberson 411e2068d0bSJeff Roberson /* 412e2068d0bSJeff Roberson * Insert a new reservation into the object's objq. 413e2068d0bSJeff Roberson */ 414e2068d0bSJeff Roberson static void 415e2068d0bSJeff Roberson vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex) 416e2068d0bSJeff Roberson { 417e2068d0bSJeff Roberson int i; 418e2068d0bSJeff Roberson 4195c930c89SJeff Roberson vm_reserv_assert_locked(rv); 4205c930c89SJeff Roberson CTR6(KTR_VM, 4215c930c89SJeff Roberson "%s: rv %p(%p) object %p new %p popcnt %d", 4225c930c89SJeff Roberson __FUNCTION__, rv, rv->pages, rv->object, object, 4235c930c89SJeff Roberson rv->popcnt); 424e2068d0bSJeff Roberson KASSERT(rv->object == NULL, 425e2068d0bSJeff Roberson ("vm_reserv_insert: reserv %p isn't free", rv)); 426e2068d0bSJeff Roberson KASSERT(rv->popcnt == 0, 427e2068d0bSJeff Roberson ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv)); 428e2068d0bSJeff Roberson KASSERT(!rv->inpartpopq, 429e2068d0bSJeff Roberson ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv)); 430e2068d0bSJeff Roberson for (i = 0; i < NPOPMAP; i++) 431e2068d0bSJeff Roberson KASSERT(rv->popmap[i] == 0, 432e2068d0bSJeff Roberson ("vm_reserv_insert: reserv %p's popmap is corrupted", rv)); 433e2068d0bSJeff Roberson vm_reserv_object_lock(object); 434e2068d0bSJeff Roberson rv->pindex = pindex; 435e2068d0bSJeff Roberson rv->object = object; 4362ef6727eSJeff Roberson rv->lasttick = ticks; 437e2068d0bSJeff Roberson LIST_INSERT_HEAD(&object->rvq, rv, objq); 438e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 439e2068d0bSJeff Roberson } 440e2068d0bSJeff Roberson 441e2068d0bSJeff Roberson /* 442f8a47341SAlan Cox * Reduces the given reservation's population count. If the population count 443f8a47341SAlan Cox * becomes zero, the reservation is destroyed. Additionally, moves the 4443453bca8SAlan Cox * reservation to the tail of the partially populated reservation queue if the 445f8a47341SAlan Cox * population count is non-zero. 
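 *
 * The queue update may be deferred for up to PARTPOPSLOP ticks to reduce
 * queue churn.  When the population count reaches zero, the underlying
 * superpage is returned to the physical memory allocator.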
446f8a47341SAlan Cox */ 447f8a47341SAlan Cox static void 448ec179322SAlan Cox vm_reserv_depopulate(vm_reserv_t rv, int index) 449f8a47341SAlan Cox { 4505c930c89SJeff Roberson struct vm_domain *vmd; 451f8a47341SAlan Cox 4525c930c89SJeff Roberson vm_reserv_assert_locked(rv); 4535c930c89SJeff Roberson CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 4545c930c89SJeff Roberson __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 455f8a47341SAlan Cox KASSERT(rv->object != NULL, 456f8a47341SAlan Cox ("vm_reserv_depopulate: reserv %p is free", rv)); 4573180f757SAlan Cox KASSERT(popmap_is_set(rv->popmap, index), 458a08c1515SAlan Cox ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv, 459a08c1515SAlan Cox index)); 460f8a47341SAlan Cox KASSERT(rv->popcnt > 0, 461f8a47341SAlan Cox ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv)); 4622d3f4181SJeff Roberson KASSERT(rv->domain < vm_ndomains, 463ef435ae7SJeff Roberson ("vm_reserv_depopulate: reserv %p's domain is corrupted %d", 464ef435ae7SJeff Roberson rv, rv->domain)); 4655c930c89SJeff Roberson if (rv->popcnt == VM_LEVEL_0_NPAGES) { 466dd05fa19SAlan Cox KASSERT(rv->pages->psind == 1, 467dd05fa19SAlan Cox ("vm_reserv_depopulate: reserv %p is already demoted", 468dd05fa19SAlan Cox rv)); 469dd05fa19SAlan Cox rv->pages->psind = 0; 470f8a47341SAlan Cox } 4713180f757SAlan Cox popmap_clear(rv->popmap, index); 472f8a47341SAlan Cox rv->popcnt--; 4732ef6727eSJeff Roberson if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP || 4742ef6727eSJeff Roberson rv->popcnt == 0) { 4755c930c89SJeff Roberson vm_reserv_domain_lock(rv->domain); 4765c930c89SJeff Roberson if (rv->inpartpopq) { 477fe6d5344SMark Johnston TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); 4785c930c89SJeff Roberson rv->inpartpopq = FALSE; 4795c930c89SJeff Roberson } 4805c930c89SJeff Roberson if (rv->popcnt != 0) { 481f8a47341SAlan Cox rv->inpartpopq = TRUE; 482fe6d5344SMark Johnston TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, 483fe6d5344SMark Johnston partpopq); 484f8a47341SAlan Cox } 4855c930c89SJeff Roberson vm_reserv_domain_unlock(rv->domain); 4862ef6727eSJeff Roberson rv->lasttick = ticks; 4872ef6727eSJeff Roberson } 4885c930c89SJeff Roberson vmd = VM_DOMAIN(rv->domain); 4895c930c89SJeff Roberson if (rv->popcnt == 0) { 4905c930c89SJeff Roberson vm_reserv_remove(rv); 4915c930c89SJeff Roberson vm_domain_free_lock(vmd); 4925c930c89SJeff Roberson vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER); 4935c930c89SJeff Roberson vm_domain_free_unlock(vmd); 4945c930c89SJeff Roberson counter_u64_add(vm_reserv_freed, 1); 4955c930c89SJeff Roberson } 4965c930c89SJeff Roberson vm_domain_freecnt_inc(vmd, 1); 497f8a47341SAlan Cox } 498f8a47341SAlan Cox 499f8a47341SAlan Cox /* 500f8a47341SAlan Cox * Returns the reservation to which the given page might belong. 
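 *
 * "Might" because the page may not belong to any active reservation; the
 * returned structure is only meaningful after checking its "object" field,
 * as described above for the reservation array.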
501f8a47341SAlan Cox */ 502f8a47341SAlan Cox static __inline vm_reserv_t 503f8a47341SAlan Cox vm_reserv_from_page(vm_page_t m) 504f8a47341SAlan Cox { 5057988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE 5067988971aSD Scott Phillips struct vm_phys_seg *seg; 507f8a47341SAlan Cox 5087988971aSD Scott Phillips seg = &vm_phys_segs[m->segind]; 5097988971aSD Scott Phillips return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) - 5107988971aSD Scott Phillips (seg->start >> VM_LEVEL_0_SHIFT)); 5117988971aSD Scott Phillips #else 512f8a47341SAlan Cox return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]); 5137988971aSD Scott Phillips #endif 514f8a47341SAlan Cox } 515f8a47341SAlan Cox 516f8a47341SAlan Cox /* 517e2068d0bSJeff Roberson * Returns an existing reservation or NULL and initialized successor pointer. 518e2068d0bSJeff Roberson */ 519e2068d0bSJeff Roberson static vm_reserv_t 520e2068d0bSJeff Roberson vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex, 521e2068d0bSJeff Roberson vm_page_t mpred, vm_page_t *msuccp) 522e2068d0bSJeff Roberson { 523e2068d0bSJeff Roberson vm_reserv_t rv; 524e2068d0bSJeff Roberson vm_page_t msucc; 525e2068d0bSJeff Roberson 526e2068d0bSJeff Roberson msucc = NULL; 527e2068d0bSJeff Roberson if (mpred != NULL) { 528e2068d0bSJeff Roberson KASSERT(mpred->object == object, 529e2068d0bSJeff Roberson ("vm_reserv_from_object: object doesn't contain mpred")); 530e2068d0bSJeff Roberson KASSERT(mpred->pindex < pindex, 531e2068d0bSJeff Roberson ("vm_reserv_from_object: mpred doesn't precede pindex")); 532e2068d0bSJeff Roberson rv = vm_reserv_from_page(mpred); 533e2068d0bSJeff Roberson if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) 534e2068d0bSJeff Roberson goto found; 535e2068d0bSJeff Roberson msucc = TAILQ_NEXT(mpred, listq); 536e2068d0bSJeff Roberson } else 537e2068d0bSJeff Roberson msucc = TAILQ_FIRST(&object->memq); 538e2068d0bSJeff Roberson if (msucc != NULL) { 539e2068d0bSJeff Roberson KASSERT(msucc->pindex > pindex, 540e2068d0bSJeff Roberson ("vm_reserv_from_object: msucc doesn't succeed pindex")); 541e2068d0bSJeff Roberson rv = vm_reserv_from_page(msucc); 542e2068d0bSJeff Roberson if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) 543e2068d0bSJeff Roberson goto found; 544e2068d0bSJeff Roberson } 545e2068d0bSJeff Roberson rv = NULL; 546e2068d0bSJeff Roberson 547e2068d0bSJeff Roberson found: 548e2068d0bSJeff Roberson *msuccp = msucc; 549e2068d0bSJeff Roberson 550e2068d0bSJeff Roberson return (rv); 551e2068d0bSJeff Roberson } 552e2068d0bSJeff Roberson 553e2068d0bSJeff Roberson /* 554f8a47341SAlan Cox * Returns TRUE if the given reservation contains the given page index and 555f8a47341SAlan Cox * FALSE otherwise. 556f8a47341SAlan Cox */ 557f8a47341SAlan Cox static __inline boolean_t 558f8a47341SAlan Cox vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex) 559f8a47341SAlan Cox { 560f8a47341SAlan Cox 561f8a47341SAlan Cox return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0); 562f8a47341SAlan Cox } 563f8a47341SAlan Cox 564f8a47341SAlan Cox /* 565f8a47341SAlan Cox * Increases the given reservation's population count. Moves the reservation 5663453bca8SAlan Cox * to the tail of the partially populated reservation queue. 
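 *
 * As with vm_reserv_depopulate(), the queue update may be deferred for up to
 * PARTPOPSLOP ticks while the reservation remains partially populated.  A
 * reservation that becomes fully populated is instead removed from the queue
 * and promoted by setting its first page's "psind".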
567f8a47341SAlan Cox */ 568f8a47341SAlan Cox static void 569ec179322SAlan Cox vm_reserv_populate(vm_reserv_t rv, int index) 570f8a47341SAlan Cox { 571f8a47341SAlan Cox 5725c930c89SJeff Roberson vm_reserv_assert_locked(rv); 5735c930c89SJeff Roberson CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 5745c930c89SJeff Roberson __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 575f8a47341SAlan Cox KASSERT(rv->object != NULL, 576f8a47341SAlan Cox ("vm_reserv_populate: reserv %p is free", rv)); 5773180f757SAlan Cox KASSERT(popmap_is_clear(rv->popmap, index), 578a08c1515SAlan Cox ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv, 579a08c1515SAlan Cox index)); 580f8a47341SAlan Cox KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES, 581f8a47341SAlan Cox ("vm_reserv_populate: reserv %p is already full", rv)); 582dd05fa19SAlan Cox KASSERT(rv->pages->psind == 0, 583dd05fa19SAlan Cox ("vm_reserv_populate: reserv %p is already promoted", rv)); 5842d3f4181SJeff Roberson KASSERT(rv->domain < vm_ndomains, 585ef435ae7SJeff Roberson ("vm_reserv_populate: reserv %p's domain is corrupted %d", 586ef435ae7SJeff Roberson rv, rv->domain)); 5875c930c89SJeff Roberson popmap_set(rv->popmap, index); 5885c930c89SJeff Roberson rv->popcnt++; 5892ef6727eSJeff Roberson if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP && 5902ef6727eSJeff Roberson rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES) 5912ef6727eSJeff Roberson return; 5922ef6727eSJeff Roberson rv->lasttick = ticks; 5935c930c89SJeff Roberson vm_reserv_domain_lock(rv->domain); 594f8a47341SAlan Cox if (rv->inpartpopq) { 595fe6d5344SMark Johnston TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); 596f8a47341SAlan Cox rv->inpartpopq = FALSE; 597f8a47341SAlan Cox } 598f8a47341SAlan Cox if (rv->popcnt < VM_LEVEL_0_NPAGES) { 599f8a47341SAlan Cox rv->inpartpopq = TRUE; 600fe6d5344SMark Johnston TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq); 6015c930c89SJeff Roberson } else { 6025c930c89SJeff Roberson KASSERT(rv->pages->psind == 0, 6035c930c89SJeff Roberson ("vm_reserv_populate: reserv %p is already promoted", 6045c930c89SJeff Roberson rv)); 605dd05fa19SAlan Cox rv->pages->psind = 1; 606f8a47341SAlan Cox } 6075c930c89SJeff Roberson vm_reserv_domain_unlock(rv->domain); 6085c930c89SJeff Roberson } 609f8a47341SAlan Cox 610f8a47341SAlan Cox /* 611e2068d0bSJeff Roberson * Allocates a contiguous set of physical pages of the given size "npages" 6122d5039dbSAlan Cox * from existing or newly created reservations. All of the physical pages 613e2068d0bSJeff Roberson * must be at or above the given physical address "low" and below the given 614e2068d0bSJeff Roberson * physical address "high". The given value "alignment" determines the 615e2068d0bSJeff Roberson * alignment of the first physical page in the set. If the given value 616e2068d0bSJeff Roberson * "boundary" is non-zero, then the set of physical pages cannot cross any 617e2068d0bSJeff Roberson * physical address boundary that is a multiple of that value. Both 618e2068d0bSJeff Roberson * "alignment" and "boundary" must be a power of two. 619e2068d0bSJeff Roberson * 620e2068d0bSJeff Roberson * The page "mpred" must immediately precede the offset "pindex" within the 621e2068d0bSJeff Roberson * specified object. 622e2068d0bSJeff Roberson * 6232d5039dbSAlan Cox * The object must be locked. 
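 *
 * For example (illustrative only), with VM_LEVEL_0_NPAGES == 512 and a
 * "pindex" whose VM_RESERV_INDEX() is zero, a request for 700 pages has
 * minpages == 700 and maxpages == 1024, so up to two reservations' worth of
 * pages are requested; the request is reduced to 700 pages when a second
 * full reservation cannot fit below "rightcap" or, for an object that is
 * unlikely to grow, past the end of the object.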
624e2068d0bSJeff Roberson */ 625e2068d0bSJeff Roberson vm_page_t 6262d5039dbSAlan Cox vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain, 6272d5039dbSAlan Cox int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high, 6282d5039dbSAlan Cox u_long alignment, vm_paddr_t boundary) 629c68c3537SAlan Cox { 6305c930c89SJeff Roberson struct vm_domain *vmd; 631c68c3537SAlan Cox vm_paddr_t pa, size; 632920da7e4SAlan Cox vm_page_t m, m_ret, msucc; 633c68c3537SAlan Cox vm_pindex_t first, leftcap, rightcap; 634c68c3537SAlan Cox vm_reserv_t rv; 635c68c3537SAlan Cox u_long allocpages, maxpages, minpages; 636c68c3537SAlan Cox int i, index, n; 637c68c3537SAlan Cox 63889f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 639c68c3537SAlan Cox KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0")); 640c68c3537SAlan Cox 641c68c3537SAlan Cox /* 642c68c3537SAlan Cox * Is a reservation fundamentally impossible? 643c68c3537SAlan Cox */ 644c68c3537SAlan Cox if (pindex < VM_RESERV_INDEX(object, pindex) || 645c68c3537SAlan Cox pindex + npages > object->size) 646c68c3537SAlan Cox return (NULL); 647c68c3537SAlan Cox 648c68c3537SAlan Cox /* 649c68c3537SAlan Cox * All reservations of a particular size have the same alignment. 650c68c3537SAlan Cox * Assuming that the first page is allocated from a reservation, the 651c68c3537SAlan Cox * least significant bits of its physical address can be determined 652c68c3537SAlan Cox * from its offset from the beginning of the reservation and the size 653c68c3537SAlan Cox * of the reservation. 654c68c3537SAlan Cox * 655c68c3537SAlan Cox * Could the specified index within a reservation of the smallest 656c68c3537SAlan Cox * possible size satisfy the alignment and boundary requirements? 657c68c3537SAlan Cox */ 658c68c3537SAlan Cox pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT; 659c68c3537SAlan Cox if ((pa & (alignment - 1)) != 0) 660c68c3537SAlan Cox return (NULL); 661c68c3537SAlan Cox size = npages << PAGE_SHIFT; 662c68c3537SAlan Cox if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) 663c68c3537SAlan Cox return (NULL); 664c68c3537SAlan Cox 665c68c3537SAlan Cox /* 6662d5039dbSAlan Cox * Look for an existing reservation. 667c68c3537SAlan Cox */ 668e2068d0bSJeff Roberson rv = vm_reserv_from_object(object, pindex, mpred, &msucc); 6692d5039dbSAlan Cox if (rv != NULL) { 6702d5039dbSAlan Cox KASSERT(object != kernel_object || rv->domain == domain, 6712d5039dbSAlan Cox ("vm_reserv_alloc_contig: domain mismatch")); 6722d5039dbSAlan Cox index = VM_RESERV_INDEX(object, pindex); 6732d5039dbSAlan Cox /* Does the allocation fit within the reservation? */ 6742d5039dbSAlan Cox if (index + npages > VM_LEVEL_0_NPAGES) 675e2068d0bSJeff Roberson return (NULL); 6762d5039dbSAlan Cox domain = rv->domain; 6772d5039dbSAlan Cox vmd = VM_DOMAIN(domain); 6782d5039dbSAlan Cox vm_reserv_lock(rv); 6792d5039dbSAlan Cox /* Handle reclaim race. */ 6802d5039dbSAlan Cox if (rv->object != object) 6812d5039dbSAlan Cox goto out; 6822d5039dbSAlan Cox m = &rv->pages[index]; 6832d5039dbSAlan Cox pa = VM_PAGE_TO_PHYS(m); 6842d5039dbSAlan Cox if (pa < low || pa + size > high || 6852d5039dbSAlan Cox (pa & (alignment - 1)) != 0 || 6862d5039dbSAlan Cox ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) 6872d5039dbSAlan Cox goto out; 6882d5039dbSAlan Cox /* Handle vm_page_rename(m, new_object, ...). 
*/ 6892d5039dbSAlan Cox for (i = 0; i < npages; i++) 6902d5039dbSAlan Cox if (popmap_is_set(rv->popmap, index + i)) 6912d5039dbSAlan Cox goto out; 6922d5039dbSAlan Cox if (!vm_domain_allocate(vmd, req, npages)) 6932d5039dbSAlan Cox goto out; 6942d5039dbSAlan Cox for (i = 0; i < npages; i++) 6952d5039dbSAlan Cox vm_reserv_populate(rv, index + i); 6962d5039dbSAlan Cox vm_reserv_unlock(rv); 6972d5039dbSAlan Cox return (m); 6982d5039dbSAlan Cox out: 6992d5039dbSAlan Cox vm_reserv_unlock(rv); 7002d5039dbSAlan Cox return (NULL); 7012d5039dbSAlan Cox } 702c68c3537SAlan Cox 703c68c3537SAlan Cox /* 704c68c3537SAlan Cox * Could at least one reservation fit between the first index to the 70564f096eeSAlan Cox * left that can be used ("leftcap") and the first index to the right 70664f096eeSAlan Cox * that cannot be used ("rightcap")? 707e2068d0bSJeff Roberson * 708e2068d0bSJeff Roberson * We must synchronize with the reserv object lock to protect the 709e2068d0bSJeff Roberson * pindex/object of the resulting reservations against rename while 710e2068d0bSJeff Roberson * we are inspecting. 711c68c3537SAlan Cox */ 712c68c3537SAlan Cox first = pindex - VM_RESERV_INDEX(object, pindex); 713e2068d0bSJeff Roberson minpages = VM_RESERV_INDEX(object, pindex) + npages; 714e2068d0bSJeff Roberson maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES); 715e2068d0bSJeff Roberson allocpages = maxpages; 716e2068d0bSJeff Roberson vm_reserv_object_lock(object); 717c68c3537SAlan Cox if (mpred != NULL) { 718c68c3537SAlan Cox if ((rv = vm_reserv_from_page(mpred))->object != object) 719c68c3537SAlan Cox leftcap = mpred->pindex + 1; 720c68c3537SAlan Cox else 721c68c3537SAlan Cox leftcap = rv->pindex + VM_LEVEL_0_NPAGES; 722e2068d0bSJeff Roberson if (leftcap > first) { 723e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 724c68c3537SAlan Cox return (NULL); 725c68c3537SAlan Cox } 726e2068d0bSJeff Roberson } 727c68c3537SAlan Cox if (msucc != NULL) { 728c68c3537SAlan Cox if ((rv = vm_reserv_from_page(msucc))->object != object) 729c68c3537SAlan Cox rightcap = msucc->pindex; 730c68c3537SAlan Cox else 731c68c3537SAlan Cox rightcap = rv->pindex; 732c68c3537SAlan Cox if (first + maxpages > rightcap) { 733e2068d0bSJeff Roberson if (maxpages == VM_LEVEL_0_NPAGES) { 734e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 735c68c3537SAlan Cox return (NULL); 736e2068d0bSJeff Roberson } 73764f096eeSAlan Cox 73864f096eeSAlan Cox /* 73964f096eeSAlan Cox * At least one reservation will fit between "leftcap" 74064f096eeSAlan Cox * and "rightcap". However, a reservation for the 74164f096eeSAlan Cox * last of the requested pages will not fit. Reduce 74264f096eeSAlan Cox * the size of the upcoming allocation accordingly. 74364f096eeSAlan Cox */ 744c68c3537SAlan Cox allocpages = minpages; 745c68c3537SAlan Cox } 746c68c3537SAlan Cox } 747e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 748c68c3537SAlan Cox 749c68c3537SAlan Cox /* 750c68c3537SAlan Cox * Would the last new reservation extend past the end of the object? 75163967687SJeff Roberson * 75263967687SJeff Roberson * If the object is unlikely to grow don't allocate a reservation for 75363967687SJeff Roberson * the tail. 
754c68c3537SAlan Cox */ 75563967687SJeff Roberson if ((object->flags & OBJ_ANON) == 0 && 75663967687SJeff Roberson first + maxpages > object->size) { 757c68c3537SAlan Cox if (maxpages == VM_LEVEL_0_NPAGES) 758c68c3537SAlan Cox return (NULL); 759c68c3537SAlan Cox allocpages = minpages; 760c68c3537SAlan Cox } 761c68c3537SAlan Cox 762c68c3537SAlan Cox /* 76364f096eeSAlan Cox * Allocate the physical pages. The alignment and boundary specified 76464f096eeSAlan Cox * for this allocation may be different from the alignment and 76564f096eeSAlan Cox * boundary specified for the requested pages. For instance, the 76664f096eeSAlan Cox * specified index may not be the first page within the first new 76764f096eeSAlan Cox * reservation. 768c68c3537SAlan Cox */ 7695c930c89SJeff Roberson m = NULL; 7705c930c89SJeff Roberson vmd = VM_DOMAIN(domain); 7715c930c89SJeff Roberson if (vm_domain_allocate(vmd, req, npages)) { 7725c930c89SJeff Roberson vm_domain_free_lock(vmd); 7735c930c89SJeff Roberson m = vm_phys_alloc_contig(domain, allocpages, low, high, 7745c930c89SJeff Roberson ulmax(alignment, VM_LEVEL_0_SIZE), 7755c930c89SJeff Roberson boundary > VM_LEVEL_0_SIZE ? boundary : 0); 7765c930c89SJeff Roberson vm_domain_free_unlock(vmd); 7775c930c89SJeff Roberson if (m == NULL) { 7785c930c89SJeff Roberson vm_domain_freecnt_inc(vmd, npages); 7795c930c89SJeff Roberson return (NULL); 7805c930c89SJeff Roberson } 7815c930c89SJeff Roberson } else 782c68c3537SAlan Cox return (NULL); 783*431fb8abSMark Johnston KASSERT(vm_page_domain(m) == domain, 7847a469c8eSJeff Roberson ("vm_reserv_alloc_contig: Page domain does not match requested.")); 78564f096eeSAlan Cox 78664f096eeSAlan Cox /* 78764f096eeSAlan Cox * The allocated physical pages always begin at a reservation 78864f096eeSAlan Cox * boundary, but they do not always end at a reservation boundary. 78964f096eeSAlan Cox * Initialize every reservation that is completely covered by the 79064f096eeSAlan Cox * allocated physical pages. 79164f096eeSAlan Cox */ 792c68c3537SAlan Cox m_ret = NULL; 793c68c3537SAlan Cox index = VM_RESERV_INDEX(object, pindex); 794c68c3537SAlan Cox do { 795c68c3537SAlan Cox rv = vm_reserv_from_page(m); 796c68c3537SAlan Cox KASSERT(rv->pages == m, 797c68c3537SAlan Cox ("vm_reserv_alloc_contig: reserv %p's pages is corrupted", 798c68c3537SAlan Cox rv)); 7995c930c89SJeff Roberson vm_reserv_lock(rv); 800e2068d0bSJeff Roberson vm_reserv_insert(rv, object, first); 801c68c3537SAlan Cox n = ulmin(VM_LEVEL_0_NPAGES - index, npages); 802c68c3537SAlan Cox for (i = 0; i < n; i++) 803ec179322SAlan Cox vm_reserv_populate(rv, index + i); 804c68c3537SAlan Cox npages -= n; 805c68c3537SAlan Cox if (m_ret == NULL) { 806c68c3537SAlan Cox m_ret = &rv->pages[index]; 807c68c3537SAlan Cox index = 0; 808c68c3537SAlan Cox } 8095c930c89SJeff Roberson vm_reserv_unlock(rv); 810c68c3537SAlan Cox m += VM_LEVEL_0_NPAGES; 811c68c3537SAlan Cox first += VM_LEVEL_0_NPAGES; 812c68c3537SAlan Cox allocpages -= VM_LEVEL_0_NPAGES; 81364f096eeSAlan Cox } while (allocpages >= VM_LEVEL_0_NPAGES); 814c68c3537SAlan Cox return (m_ret); 815e2068d0bSJeff Roberson } 816c68c3537SAlan Cox 817c68c3537SAlan Cox /* 8182d5039dbSAlan Cox * Allocate a physical page from an existing or newly created reservation. 819e2068d0bSJeff Roberson * 820e2068d0bSJeff Roberson * The page "mpred" must immediately precede the offset "pindex" within the 821e2068d0bSJeff Roberson * specified object. 822e2068d0bSJeff Roberson * 823e2068d0bSJeff Roberson * The object must be locked. 
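 *
 * Callers are expected to fall back to the physical memory allocator when
 * NULL is returned; for example, the vm_page_alloc*() path in vm_page.c
 * retries with vm_phys_alloc_pages() in that case.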
824c68c3537SAlan Cox */ 825e2068d0bSJeff Roberson vm_page_t 8262d5039dbSAlan Cox vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain, 8272d5039dbSAlan Cox int req, vm_page_t mpred) 828e2068d0bSJeff Roberson { 829e2068d0bSJeff Roberson struct vm_domain *vmd; 830e2068d0bSJeff Roberson vm_page_t m, msucc; 8312d5039dbSAlan Cox vm_pindex_t first, leftcap, rightcap; 832e2068d0bSJeff Roberson vm_reserv_t rv; 83330fbfddaSJeff Roberson int index; 834e2068d0bSJeff Roberson 835e2068d0bSJeff Roberson VM_OBJECT_ASSERT_WLOCKED(object); 836e2068d0bSJeff Roberson 837e2068d0bSJeff Roberson /* 8382d5039dbSAlan Cox * Is a reservation fundamentally impossible? 839e2068d0bSJeff Roberson */ 840e2068d0bSJeff Roberson if (pindex < VM_RESERV_INDEX(object, pindex) || 8412d5039dbSAlan Cox pindex >= object->size) 842e2068d0bSJeff Roberson return (NULL); 843e2068d0bSJeff Roberson 844e2068d0bSJeff Roberson /* 845e2068d0bSJeff Roberson * Look for an existing reservation. 846e2068d0bSJeff Roberson */ 847e2068d0bSJeff Roberson rv = vm_reserv_from_object(object, pindex, mpred, &msucc); 8482d5039dbSAlan Cox if (rv != NULL) { 849e2068d0bSJeff Roberson KASSERT(object != kernel_object || rv->domain == domain, 8502d5039dbSAlan Cox ("vm_reserv_alloc_page: domain mismatch")); 851e2068d0bSJeff Roberson domain = rv->domain; 852e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 853c68c3537SAlan Cox index = VM_RESERV_INDEX(object, pindex); 854c68c3537SAlan Cox m = &rv->pages[index]; 8555c930c89SJeff Roberson vm_reserv_lock(rv); 856e2068d0bSJeff Roberson /* Handle reclaim race. */ 8575c930c89SJeff Roberson if (rv->object != object || 858c68c3537SAlan Cox /* Handle vm_page_rename(m, new_object, ...). */ 8595c930c89SJeff Roberson popmap_is_set(rv->popmap, index)) { 860e2068d0bSJeff Roberson m = NULL; 8615c930c89SJeff Roberson goto out; 86230fbfddaSJeff Roberson } 8635c930c89SJeff Roberson if (vm_domain_allocate(vmd, req, 1) == 0) 8645c930c89SJeff Roberson m = NULL; 8655c930c89SJeff Roberson else 8665c930c89SJeff Roberson vm_reserv_populate(rv, index); 8675c930c89SJeff Roberson out: 8685c930c89SJeff Roberson vm_reserv_unlock(rv); 869c68c3537SAlan Cox return (m); 870c68c3537SAlan Cox } 871c68c3537SAlan Cox 872c68c3537SAlan Cox /* 873c68c3537SAlan Cox * Could a reservation fit between the first index to the left that 874c68c3537SAlan Cox * can be used and the first index to the right that cannot be used? 875e2068d0bSJeff Roberson * 876e2068d0bSJeff Roberson * We must synchronize with the reserv object lock to protect the 877e2068d0bSJeff Roberson * pindex/object of the resulting reservations against rename while 878e2068d0bSJeff Roberson * we are inspecting. 
879f8a47341SAlan Cox */ 880c68c3537SAlan Cox first = pindex - VM_RESERV_INDEX(object, pindex); 881e2068d0bSJeff Roberson vm_reserv_object_lock(object); 882c68c3537SAlan Cox if (mpred != NULL) { 883c68c3537SAlan Cox if ((rv = vm_reserv_from_page(mpred))->object != object) 884f8a47341SAlan Cox leftcap = mpred->pindex + 1; 885f8a47341SAlan Cox else 886f8a47341SAlan Cox leftcap = rv->pindex + VM_LEVEL_0_NPAGES; 887e2068d0bSJeff Roberson if (leftcap > first) { 888e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 889c68c3537SAlan Cox return (NULL); 890c68c3537SAlan Cox } 891e2068d0bSJeff Roberson } 892c68c3537SAlan Cox if (msucc != NULL) { 893c68c3537SAlan Cox if ((rv = vm_reserv_from_page(msucc))->object != object) 894f8a47341SAlan Cox rightcap = msucc->pindex; 895f8a47341SAlan Cox else 896f8a47341SAlan Cox rightcap = rv->pindex; 897e2068d0bSJeff Roberson if (first + VM_LEVEL_0_NPAGES > rightcap) { 898e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 899f8a47341SAlan Cox return (NULL); 900c68c3537SAlan Cox } 901e2068d0bSJeff Roberson } 902e2068d0bSJeff Roberson vm_reserv_object_unlock(object); 903f8a47341SAlan Cox 904f8a47341SAlan Cox /* 90563967687SJeff Roberson * Would the last new reservation extend past the end of the object? 90663967687SJeff Roberson * 90763967687SJeff Roberson * If the object is unlikely to grow don't allocate a reservation for 90863967687SJeff Roberson * the tail. 909f8a47341SAlan Cox */ 91063967687SJeff Roberson if ((object->flags & OBJ_ANON) == 0 && 91163967687SJeff Roberson first + VM_LEVEL_0_NPAGES > object->size) 912f8a47341SAlan Cox return (NULL); 913f8a47341SAlan Cox 914f8a47341SAlan Cox /* 915c68c3537SAlan Cox * Allocate and populate the new reservation. 916f8a47341SAlan Cox */ 9175c930c89SJeff Roberson m = NULL; 9185c930c89SJeff Roberson vmd = VM_DOMAIN(domain); 9195c930c89SJeff Roberson if (vm_domain_allocate(vmd, req, 1)) { 9205c930c89SJeff Roberson vm_domain_free_lock(vmd); 9215c930c89SJeff Roberson m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 9225c930c89SJeff Roberson VM_LEVEL_0_ORDER); 9235c930c89SJeff Roberson vm_domain_free_unlock(vmd); 9245c930c89SJeff Roberson if (m == NULL) { 9255c930c89SJeff Roberson vm_domain_freecnt_inc(vmd, 1); 9265c930c89SJeff Roberson return (NULL); 9275c930c89SJeff Roberson } 9285c930c89SJeff Roberson } else 929c68c3537SAlan Cox return (NULL); 930f8a47341SAlan Cox rv = vm_reserv_from_page(m); 9315c930c89SJeff Roberson vm_reserv_lock(rv); 932f8a47341SAlan Cox KASSERT(rv->pages == m, 933c68c3537SAlan Cox ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv)); 934e2068d0bSJeff Roberson vm_reserv_insert(rv, object, first); 935ec179322SAlan Cox index = VM_RESERV_INDEX(object, pindex); 936ec179322SAlan Cox vm_reserv_populate(rv, index); 9375c930c89SJeff Roberson vm_reserv_unlock(rv); 9385c930c89SJeff Roberson 939ec179322SAlan Cox return (&rv->pages[index]); 940f8a47341SAlan Cox } 941f8a47341SAlan Cox 942f8a47341SAlan Cox /* 943ada27a3bSKonstantin Belousov * Breaks the given reservation. All free pages in the reservation 944ada27a3bSKonstantin Belousov * are returned to the physical memory allocator. The reservation's 945ada27a3bSKonstantin Belousov * population count and map are reset to their initial state. 946ec179322SAlan Cox * 9473453bca8SAlan Cox * The given reservation must not be in the partially populated reservation 948fe6d5344SMark Johnston * queue. 
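 *
 * For example, if only pages 4 through 7 of a reservation are in use when it
 * is broken, the scan below frees pages [0, 4) as one contiguous run and
 * pages [8, VM_LEVEL_0_NPAGES) as another.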
949ec179322SAlan Cox */ 950ec179322SAlan Cox static void 951ada27a3bSKonstantin Belousov vm_reserv_break(vm_reserv_t rv) 952ec179322SAlan Cox { 953e67a5068SDoug Moore u_long changes; 954e67a5068SDoug Moore int bitpos, hi, i, lo; 955ec179322SAlan Cox 9565c930c89SJeff Roberson vm_reserv_assert_locked(rv); 9575c930c89SJeff Roberson CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 9585c930c89SJeff Roberson __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 959e2068d0bSJeff Roberson vm_reserv_remove(rv); 960c4be9169SKonstantin Belousov rv->pages->psind = 0; 961e67a5068SDoug Moore hi = lo = -1; 962e67a5068SDoug Moore for (i = 0; i <= NPOPMAP; i++) { 963e67a5068SDoug Moore /* 964e67a5068SDoug Moore * "changes" is a bitmask that marks where a new sequence of 965e67a5068SDoug Moore * 0s or 1s begins in popmap[i], with last bit in popmap[i-1] 966e67a5068SDoug Moore * considered to be 1 if and only if lo == hi. The bits of 967e67a5068SDoug Moore * popmap[-1] and popmap[NPOPMAP] are considered all 1s. 968e67a5068SDoug Moore */ 969ec179322SAlan Cox if (i == NPOPMAP) 970e67a5068SDoug Moore changes = lo != hi; 971e67a5068SDoug Moore else { 972e67a5068SDoug Moore changes = rv->popmap[i]; 973e67a5068SDoug Moore changes ^= (changes << 1) | (lo == hi); 974e67a5068SDoug Moore rv->popmap[i] = 0; 975ec179322SAlan Cox } 976e67a5068SDoug Moore while (changes != 0) { 977e67a5068SDoug Moore /* 978e67a5068SDoug Moore * If the next change marked begins a run of 0s, set 979e67a5068SDoug Moore * lo to mark that position. Otherwise set hi and 980e67a5068SDoug Moore * free pages from lo up to hi. 981e67a5068SDoug Moore */ 982e67a5068SDoug Moore bitpos = ffsl(changes) - 1; 983e67a5068SDoug Moore changes ^= 1UL << bitpos; 984e67a5068SDoug Moore if (lo == hi) 985e67a5068SDoug Moore lo = NBPOPMAP * i + bitpos; 986e67a5068SDoug Moore else { 987e67a5068SDoug Moore hi = NBPOPMAP * i + bitpos; 9885c930c89SJeff Roberson vm_domain_free_lock(VM_DOMAIN(rv->domain)); 989b8590daeSDoug Moore vm_phys_enqueue_contig(&rv->pages[lo], hi - lo); 9905c930c89SJeff Roberson vm_domain_free_unlock(VM_DOMAIN(rv->domain)); 991e67a5068SDoug Moore lo = hi; 992e67a5068SDoug Moore } 993e67a5068SDoug Moore } 994e67a5068SDoug Moore } 995e67a5068SDoug Moore rv->popcnt = 0; 9965c930c89SJeff Roberson counter_u64_add(vm_reserv_broken, 1); 997ec179322SAlan Cox } 998ec179322SAlan Cox 999ec179322SAlan Cox /* 1000f8a47341SAlan Cox * Breaks all reservations belonging to the given object. 1001f8a47341SAlan Cox */ 1002f8a47341SAlan Cox void 1003f8a47341SAlan Cox vm_reserv_break_all(vm_object_t object) 1004f8a47341SAlan Cox { 1005f8a47341SAlan Cox vm_reserv_t rv; 1006f8a47341SAlan Cox 1007e2068d0bSJeff Roberson /* 1008e2068d0bSJeff Roberson * This access of object->rvq is unsynchronized so that the 1009e2068d0bSJeff Roberson * object rvq lock can nest after the domain_free lock. We 1010e2068d0bSJeff Roberson * must check for races in the results. However, the object 1011e2068d0bSJeff Roberson * lock prevents new additions, so we are guaranteed that when 1012e2068d0bSJeff Roberson * it returns NULL the object is properly empty. 1013e2068d0bSJeff Roberson */ 1014f8a47341SAlan Cox while ((rv = LIST_FIRST(&object->rvq)) != NULL) { 10155c930c89SJeff Roberson vm_reserv_lock(rv); 1016e2068d0bSJeff Roberson /* Reclaim race. 
*/ 10175c930c89SJeff Roberson if (rv->object != object) { 10185c930c89SJeff Roberson vm_reserv_unlock(rv); 1019e2068d0bSJeff Roberson continue; 10205c930c89SJeff Roberson } 10215c930c89SJeff Roberson vm_reserv_domain_lock(rv->domain); 1022f8a47341SAlan Cox if (rv->inpartpopq) { 1023fe6d5344SMark Johnston TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); 1024f8a47341SAlan Cox rv->inpartpopq = FALSE; 1025f8a47341SAlan Cox } 10265c930c89SJeff Roberson vm_reserv_domain_unlock(rv->domain); 1027ada27a3bSKonstantin Belousov vm_reserv_break(rv); 10285c930c89SJeff Roberson vm_reserv_unlock(rv); 1029f8a47341SAlan Cox } 1030f8a47341SAlan Cox } 1031f8a47341SAlan Cox 1032f8a47341SAlan Cox /* 1033f8a47341SAlan Cox * Frees the given page if it belongs to a reservation. Returns TRUE if the 1034f8a47341SAlan Cox * page is freed and FALSE otherwise. 1035f8a47341SAlan Cox */ 1036f8a47341SAlan Cox boolean_t 1037f8a47341SAlan Cox vm_reserv_free_page(vm_page_t m) 1038f8a47341SAlan Cox { 1039f8a47341SAlan Cox vm_reserv_t rv; 10405c930c89SJeff Roberson boolean_t ret; 1041f8a47341SAlan Cox 1042f8a47341SAlan Cox rv = vm_reserv_from_page(m); 1043908e3da1SAlan Cox if (rv->object == NULL) 1044908e3da1SAlan Cox return (FALSE); 10455c930c89SJeff Roberson vm_reserv_lock(rv); 10465c930c89SJeff Roberson /* Re-validate after lock. */ 10475c930c89SJeff Roberson if (rv->object != NULL) { 1048ec179322SAlan Cox vm_reserv_depopulate(rv, m - rv->pages); 10495c930c89SJeff Roberson ret = TRUE; 10505c930c89SJeff Roberson } else 10515c930c89SJeff Roberson ret = FALSE; 10525c930c89SJeff Roberson vm_reserv_unlock(rv); 10535c930c89SJeff Roberson 10545c930c89SJeff Roberson return (ret); 1055f8a47341SAlan Cox } 1056f8a47341SAlan Cox 1057f8a47341SAlan Cox /* 1058f8a47341SAlan Cox * Initializes the reservation management system. Specifically, initializes 1059f8a47341SAlan Cox * the reservation array. 1060f8a47341SAlan Cox * 1061f8a47341SAlan Cox * Requires that vm_page_array and first_page are initialized! 1062f8a47341SAlan Cox */ 1063f8a47341SAlan Cox void 1064f8a47341SAlan Cox vm_reserv_init(void) 1065f8a47341SAlan Cox { 1066f8a47341SAlan Cox vm_paddr_t paddr; 106709e5f3c4SAlan Cox struct vm_phys_seg *seg; 10685c930c89SJeff Roberson struct vm_reserv *rv; 1069b378d296SMark Johnston struct vm_reserv_domain *rvd; 10707988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE 10717988971aSD Scott Phillips vm_pindex_t used; 10727988971aSD Scott Phillips #endif 1073b378d296SMark Johnston int i, j, segind; 1074f8a47341SAlan Cox 1075f8a47341SAlan Cox /* 1076f8a47341SAlan Cox * Initialize the reservation array. Specifically, initialize the 1077f8a47341SAlan Cox * "pages" field for every element that has an underlying superpage. 
1078f8a47341SAlan Cox */ 10797988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE 10807988971aSD Scott Phillips used = 0; 10817988971aSD Scott Phillips #endif 108209e5f3c4SAlan Cox for (segind = 0; segind < vm_phys_nsegs; segind++) { 108309e5f3c4SAlan Cox seg = &vm_phys_segs[segind]; 10847988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE 10857988971aSD Scott Phillips seg->first_reserv = &vm_reserv_array[used]; 10867988971aSD Scott Phillips used += howmany(seg->end, VM_LEVEL_0_SIZE) - 10877988971aSD Scott Phillips seg->start / VM_LEVEL_0_SIZE; 10887988971aSD Scott Phillips #else 10897988971aSD Scott Phillips seg->first_reserv = 10907988971aSD Scott Phillips &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT]; 10917988971aSD Scott Phillips #endif 109209e5f3c4SAlan Cox paddr = roundup2(seg->start, VM_LEVEL_0_SIZE); 10937988971aSD Scott Phillips rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) - 10947988971aSD Scott Phillips (seg->start >> VM_LEVEL_0_SHIFT); 10956b821a74SAleksandr Rybalko while (paddr + VM_LEVEL_0_SIZE > paddr && paddr + 10966b821a74SAleksandr Rybalko VM_LEVEL_0_SIZE <= seg->end) { 10975c930c89SJeff Roberson rv->pages = PHYS_TO_VM_PAGE(paddr); 10985c930c89SJeff Roberson rv->domain = seg->domain; 10995c930c89SJeff Roberson mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF); 1100f8a47341SAlan Cox paddr += VM_LEVEL_0_SIZE; 11017988971aSD Scott Phillips rv++; 1102f8a47341SAlan Cox } 1103f8a47341SAlan Cox } 11045c930c89SJeff Roberson for (i = 0; i < MAXMEMDOM; i++) { 1105b378d296SMark Johnston rvd = &vm_rvd[i]; 1106b378d296SMark Johnston mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF); 1107b378d296SMark Johnston TAILQ_INIT(&rvd->partpop); 1108b378d296SMark Johnston mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF); 1109b378d296SMark Johnston 1110b378d296SMark Johnston /* 1111b378d296SMark Johnston * Fully populated reservations should never be present in the 1112b378d296SMark Johnston * partially populated reservation queues. 1113b378d296SMark Johnston */ 1114b378d296SMark Johnston rvd->marker.popcnt = VM_LEVEL_0_NPAGES; 1115b378d296SMark Johnston for (j = 0; j < NBPOPMAP; j++) 1116b378d296SMark Johnston popmap_set(rvd->marker.popmap, j); 1117f8a47341SAlan Cox } 1118f8a47341SAlan Cox 11195c930c89SJeff Roberson for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++) 11205c930c89SJeff Roberson mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL, 11215c930c89SJeff Roberson MTX_DEF); 11225c930c89SJeff Roberson } 11235c930c89SJeff Roberson 1124f8a47341SAlan Cox /* 1125c869e672SAlan Cox * Returns true if the given page belongs to a reservation and that page is 1126c869e672SAlan Cox * free. Otherwise, returns false. 1127c869e672SAlan Cox */ 1128c869e672SAlan Cox bool 1129c869e672SAlan Cox vm_reserv_is_page_free(vm_page_t m) 1130c869e672SAlan Cox { 1131c869e672SAlan Cox vm_reserv_t rv; 1132c869e672SAlan Cox 1133c869e672SAlan Cox rv = vm_reserv_from_page(m); 1134c869e672SAlan Cox if (rv->object == NULL) 1135c869e672SAlan Cox return (false); 1136c869e672SAlan Cox return (popmap_is_clear(rv->popmap, m - rv->pages)); 1137c869e672SAlan Cox } 1138c869e672SAlan Cox 1139c869e672SAlan Cox /* 1140c869e672SAlan Cox * If the given page belongs to a reservation, returns the level of that 1141c869e672SAlan Cox * reservation. Otherwise, returns -1. 
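 *
 * With a single reservation level (VM_NRESERVLEVEL == 1), which is the
 * common configuration, the only level that can be returned for a page in a
 * reservation is 0.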
1142c869e672SAlan Cox */
1143c869e672SAlan Cox int
1144c869e672SAlan Cox vm_reserv_level(vm_page_t m)
1145c869e672SAlan Cox {
1146c869e672SAlan Cox vm_reserv_t rv;
1147c869e672SAlan Cox 
1148c869e672SAlan Cox rv = vm_reserv_from_page(m);
1149c869e672SAlan Cox return (rv->object != NULL ? 0 : -1);
1150c869e672SAlan Cox }
1151c869e672SAlan Cox 
1152c869e672SAlan Cox /*
11533453bca8SAlan Cox * Returns a reservation level if the given page belongs to a fully populated
1154f8a47341SAlan Cox * reservation and -1 otherwise.
1155f8a47341SAlan Cox */
1156f8a47341SAlan Cox int
1157f8a47341SAlan Cox vm_reserv_level_iffullpop(vm_page_t m)
1158f8a47341SAlan Cox {
1159f8a47341SAlan Cox vm_reserv_t rv;
1160f8a47341SAlan Cox 
1161f8a47341SAlan Cox rv = vm_reserv_from_page(m);
1162f8a47341SAlan Cox return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
1163f8a47341SAlan Cox }
1164f8a47341SAlan Cox 
1165f8a47341SAlan Cox /*
1166b378d296SMark Johnston * Remove a partially populated reservation from the queue.
1167b378d296SMark Johnston */
1168b378d296SMark Johnston static void
1169b378d296SMark Johnston vm_reserv_dequeue(vm_reserv_t rv)
1170b378d296SMark Johnston {
1171b378d296SMark Johnston 
1172b378d296SMark Johnston vm_reserv_domain_assert_locked(rv->domain);
1173b378d296SMark Johnston vm_reserv_assert_locked(rv);
1174b378d296SMark Johnston CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
1175b378d296SMark Johnston __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
1176b378d296SMark Johnston KASSERT(rv->inpartpopq,
1177b378d296SMark Johnston ("vm_reserv_dequeue: reserv %p's inpartpopq is FALSE", rv));
1178b378d296SMark Johnston 
1179b378d296SMark Johnston TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
1180b378d296SMark Johnston rv->inpartpopq = FALSE;
1181b378d296SMark Johnston }
1182b378d296SMark Johnston 
1183b378d296SMark Johnston /*
11843453bca8SAlan Cox * Breaks the given partially populated reservation, releasing its free pages
11853453bca8SAlan Cox * to the physical memory allocator.
1186f8a47341SAlan Cox */
118744aab2c3SAlan Cox static void
118844aab2c3SAlan Cox vm_reserv_reclaim(vm_reserv_t rv)
1189f8a47341SAlan Cox {
1190f8a47341SAlan Cox 
11915c930c89SJeff Roberson vm_reserv_assert_locked(rv);
11925c930c89SJeff Roberson CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
11935c930c89SJeff Roberson __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
1194b378d296SMark Johnston if (rv->inpartpopq) {
11955c930c89SJeff Roberson vm_reserv_domain_lock(rv->domain);
1196b378d296SMark Johnston vm_reserv_dequeue(rv);
11975c930c89SJeff Roberson vm_reserv_domain_unlock(rv->domain);
1198b378d296SMark Johnston }
1199ada27a3bSKonstantin Belousov vm_reserv_break(rv);
12005c930c89SJeff Roberson counter_u64_add(vm_reserv_reclaimed, 1);
120144aab2c3SAlan Cox }
120244aab2c3SAlan Cox 
120344aab2c3SAlan Cox /*
1204b378d296SMark Johnston * Breaks a reservation near the head of the partially populated reservation
12053453bca8SAlan Cox * queue, releasing its free pages to the physical memory allocator. Returns
12063453bca8SAlan Cox * true if a reservation is broken and false otherwise.
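 * This is typically used as a fallback when the physical memory allocator
 * cannot otherwise satisfy a request: breaking an old, partially populated
 * reservation returns its unused pages to the free lists.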
120744aab2c3SAlan Cox */ 1208b378d296SMark Johnston bool 1209ef435ae7SJeff Roberson vm_reserv_reclaim_inactive(int domain) 121044aab2c3SAlan Cox { 121144aab2c3SAlan Cox vm_reserv_t rv; 121244aab2c3SAlan Cox 1213b378d296SMark Johnston vm_reserv_domain_lock(domain); 1214b378d296SMark Johnston TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { 1215b378d296SMark Johnston /* 1216b378d296SMark Johnston * A locked reservation is likely being updated or reclaimed, 1217b378d296SMark Johnston * so just skip ahead. 1218b378d296SMark Johnston */ 1219b378d296SMark Johnston if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) { 1220b378d296SMark Johnston vm_reserv_dequeue(rv); 1221b378d296SMark Johnston break; 12225c930c89SJeff Roberson } 1223b378d296SMark Johnston } 1224b378d296SMark Johnston vm_reserv_domain_unlock(domain); 1225b378d296SMark Johnston if (rv != NULL) { 122644aab2c3SAlan Cox vm_reserv_reclaim(rv); 12275c930c89SJeff Roberson vm_reserv_unlock(rv); 1228b378d296SMark Johnston return (true); 1229f8a47341SAlan Cox } 1230b378d296SMark Johnston return (false); 1231f8a47341SAlan Cox } 1232f8a47341SAlan Cox 1233f8a47341SAlan Cox /* 1234f96e8a0bSDoug Moore * Determine whether this reservation has free pages that satisfy the given 1235f96e8a0bSDoug Moore * request for contiguous physical memory. Start searching from the lower 1236f96e8a0bSDoug Moore * bound, defined by low_index. 1237f96e8a0bSDoug Moore */ 1238f96e8a0bSDoug Moore static bool 1239f96e8a0bSDoug Moore vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low, 1240f96e8a0bSDoug Moore vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 1241f96e8a0bSDoug Moore { 1242f96e8a0bSDoug Moore vm_paddr_t pa, size; 1243f96e8a0bSDoug Moore u_long changes; 1244f96e8a0bSDoug Moore int bitpos, bits_left, i, hi, lo, n; 1245f96e8a0bSDoug Moore 1246f96e8a0bSDoug Moore vm_reserv_assert_locked(rv); 1247f96e8a0bSDoug Moore size = npages << PAGE_SHIFT; 1248f96e8a0bSDoug Moore pa = VM_PAGE_TO_PHYS(&rv->pages[0]); 1249f96e8a0bSDoug Moore lo = (pa < low) ? 1250f96e8a0bSDoug Moore ((low + PAGE_MASK - pa) >> PAGE_SHIFT) : 0; 1251f96e8a0bSDoug Moore i = lo / NBPOPMAP; 1252f96e8a0bSDoug Moore changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1); 1253f96e8a0bSDoug Moore hi = (pa + VM_LEVEL_0_SIZE > high) ? 1254f96e8a0bSDoug Moore ((high + PAGE_MASK - pa) >> PAGE_SHIFT) : VM_LEVEL_0_NPAGES; 1255f96e8a0bSDoug Moore n = hi / NBPOPMAP; 1256f96e8a0bSDoug Moore bits_left = hi % NBPOPMAP; 1257f96e8a0bSDoug Moore hi = lo = -1; 1258f96e8a0bSDoug Moore for (;;) { 1259f96e8a0bSDoug Moore /* 1260f96e8a0bSDoug Moore * "changes" is a bitmask that marks where a new sequence of 1261f96e8a0bSDoug Moore * 0s or 1s begins in popmap[i], with last bit in popmap[i-1] 1262f96e8a0bSDoug Moore * considered to be 1 if and only if lo == hi. The bits of 1263f96e8a0bSDoug Moore * popmap[-1] and popmap[NPOPMAP] are considered all 1s. 1264f96e8a0bSDoug Moore */ 1265f96e8a0bSDoug Moore changes ^= (changes << 1) | (lo == hi); 1266f96e8a0bSDoug Moore while (changes != 0) { 1267f96e8a0bSDoug Moore /* 1268f96e8a0bSDoug Moore * If the next change marked begins a run of 0s, set 1269f96e8a0bSDoug Moore * lo to mark that position. Otherwise set hi and 1270f96e8a0bSDoug Moore * look for a satisfactory first page from lo up to hi. 
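 * For instance, if the low bits of popmap[i] are 0b00100011 (pages 0, 1,
 * and 5 of this word populated, the rest free) and lo == hi, then
 * changes = 0b00100011 ^ ((0b00100011 << 1) | 1) = 0b01100100.  The first
 * ffsl() hit, bit 2, begins a run of 0s and sets lo; the next, bit 5,
 * begins a run of 1s and sets hi, giving the candidate free range
 * [lo, hi); bit 6 then begins another run of 0s and restarts lo.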
1271f96e8a0bSDoug Moore */ 1272f96e8a0bSDoug Moore bitpos = ffsl(changes) - 1; 1273f96e8a0bSDoug Moore changes ^= 1UL << bitpos; 1274f96e8a0bSDoug Moore if (lo == hi) { 1275f96e8a0bSDoug Moore lo = NBPOPMAP * i + bitpos; 1276f96e8a0bSDoug Moore continue; 1277f96e8a0bSDoug Moore } 1278f96e8a0bSDoug Moore hi = NBPOPMAP * i + bitpos; 1279f96e8a0bSDoug Moore pa = VM_PAGE_TO_PHYS(&rv->pages[lo]); 1280f96e8a0bSDoug Moore if ((pa & (alignment - 1)) != 0) { 1281f96e8a0bSDoug Moore /* Skip to next aligned page. */ 1282f96e8a0bSDoug Moore lo += (((pa - 1) | (alignment - 1)) + 1) >> 1283f96e8a0bSDoug Moore PAGE_SHIFT; 1284f96e8a0bSDoug Moore if (lo >= VM_LEVEL_0_NPAGES) 1285f96e8a0bSDoug Moore return (false); 1286f96e8a0bSDoug Moore pa = VM_PAGE_TO_PHYS(&rv->pages[lo]); 1287f96e8a0bSDoug Moore } 1288f96e8a0bSDoug Moore if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) { 1289f96e8a0bSDoug Moore /* Skip to next boundary-matching page. */ 1290f96e8a0bSDoug Moore lo += (((pa - 1) | (boundary - 1)) + 1) >> 1291f96e8a0bSDoug Moore PAGE_SHIFT; 1292f96e8a0bSDoug Moore if (lo >= VM_LEVEL_0_NPAGES) 1293f96e8a0bSDoug Moore return (false); 1294f96e8a0bSDoug Moore pa = VM_PAGE_TO_PHYS(&rv->pages[lo]); 1295f96e8a0bSDoug Moore } 1296f96e8a0bSDoug Moore if (lo * PAGE_SIZE + size <= hi * PAGE_SIZE) 1297f96e8a0bSDoug Moore return (true); 1298f96e8a0bSDoug Moore lo = hi; 1299f96e8a0bSDoug Moore } 1300f96e8a0bSDoug Moore if (++i < n) 1301f96e8a0bSDoug Moore changes = rv->popmap[i]; 1302f96e8a0bSDoug Moore else if (i == n) 1303f96e8a0bSDoug Moore changes = bits_left == 0 ? -1UL : 1304f96e8a0bSDoug Moore (rv->popmap[n] | (-1UL << bits_left)); 1305f96e8a0bSDoug Moore else 1306f96e8a0bSDoug Moore return (false); 1307f96e8a0bSDoug Moore } 1308f96e8a0bSDoug Moore } 1309f96e8a0bSDoug Moore 1310f96e8a0bSDoug Moore /* 13113453bca8SAlan Cox * Searches the partially populated reservation queue for the least recently 13123453bca8SAlan Cox * changed reservation with free pages that satisfy the given request for 13133453bca8SAlan Cox * contiguous physical memory. If a satisfactory reservation is found, it is 1314f96e8a0bSDoug Moore * broken. Returns true if a reservation is broken and false otherwise. 131544aab2c3SAlan Cox */ 1316b378d296SMark Johnston bool 1317ef435ae7SJeff Roberson vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low, 1318ef435ae7SJeff Roberson vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 131944aab2c3SAlan Cox { 1320b378d296SMark Johnston struct vm_reserv_queue *queue; 1321ec179322SAlan Cox vm_paddr_t pa, size; 1322b378d296SMark Johnston vm_reserv_t marker, rv, rvn; 132344aab2c3SAlan Cox 1324c68c3537SAlan Cox if (npages > VM_LEVEL_0_NPAGES - 1) 1325f96e8a0bSDoug Moore return (false); 1326b378d296SMark Johnston marker = &vm_rvd[domain].marker; 1327b378d296SMark Johnston queue = &vm_rvd[domain].partpop; 1328c68c3537SAlan Cox size = npages << PAGE_SHIFT; 1329b378d296SMark Johnston 1330b378d296SMark Johnston vm_reserv_domain_scan_lock(domain); 13315c930c89SJeff Roberson vm_reserv_domain_lock(domain); 1332b378d296SMark Johnston TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) { 1333f96e8a0bSDoug Moore pa = VM_PAGE_TO_PHYS(&rv->pages[0]); 1334f96e8a0bSDoug Moore if (pa + VM_LEVEL_0_SIZE - size < low) { 1335ec179322SAlan Cox /* This entire reservation is too low; go to next. */ 133644aab2c3SAlan Cox continue; 133744aab2c3SAlan Cox } 133844aab2c3SAlan Cox if (pa + size > high) { 1339ec179322SAlan Cox /* This entire reservation is too high; go to next. 
*/ 1340ec179322SAlan Cox continue; 134185f2a0c9SMax Laier } 1342b378d296SMark Johnston 13435c930c89SJeff Roberson if (vm_reserv_trylock(rv) == 0) { 1344b378d296SMark Johnston TAILQ_INSERT_AFTER(queue, rv, marker, partpopq); 13455c930c89SJeff Roberson vm_reserv_domain_unlock(domain); 13465c930c89SJeff Roberson vm_reserv_lock(rv); 1347b378d296SMark Johnston if (!rv->inpartpopq || 1348b378d296SMark Johnston TAILQ_NEXT(rv, partpopq) != marker) { 1349b378d296SMark Johnston vm_reserv_unlock(rv); 13505c930c89SJeff Roberson vm_reserv_domain_lock(domain); 1351b378d296SMark Johnston rvn = TAILQ_NEXT(marker, partpopq); 1352b378d296SMark Johnston TAILQ_REMOVE(queue, marker, partpopq); 13535c930c89SJeff Roberson continue; 13545c930c89SJeff Roberson } 1355b378d296SMark Johnston vm_reserv_domain_lock(domain); 1356b378d296SMark Johnston TAILQ_REMOVE(queue, marker, partpopq); 1357b378d296SMark Johnston } 13585c930c89SJeff Roberson vm_reserv_domain_unlock(domain); 1359f96e8a0bSDoug Moore if (vm_reserv_test_contig(rv, npages, low, high, 1360f96e8a0bSDoug Moore alignment, boundary)) { 1361b378d296SMark Johnston vm_reserv_domain_scan_unlock(domain); 136244aab2c3SAlan Cox vm_reserv_reclaim(rv); 13635c930c89SJeff Roberson vm_reserv_unlock(rv); 1364f96e8a0bSDoug Moore return (true); 136544aab2c3SAlan Cox } 13665c930c89SJeff Roberson vm_reserv_unlock(rv); 13675c930c89SJeff Roberson vm_reserv_domain_lock(domain); 136844aab2c3SAlan Cox } 13695c930c89SJeff Roberson vm_reserv_domain_unlock(domain); 1370b378d296SMark Johnston vm_reserv_domain_scan_unlock(domain); 1371f96e8a0bSDoug Moore return (false); 137244aab2c3SAlan Cox } 137344aab2c3SAlan Cox 137444aab2c3SAlan Cox /* 1375f8a47341SAlan Cox * Transfers the reservation underlying the given page to a new object. 1376f8a47341SAlan Cox * 1377f8a47341SAlan Cox * The object must be locked. 1378f8a47341SAlan Cox */ 1379f8a47341SAlan Cox void 1380f8a47341SAlan Cox vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object, 1381f8a47341SAlan Cox vm_pindex_t old_object_offset) 1382f8a47341SAlan Cox { 1383f8a47341SAlan Cox vm_reserv_t rv; 1384f8a47341SAlan Cox 138589f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(new_object); 1386f8a47341SAlan Cox rv = vm_reserv_from_page(m); 1387f8a47341SAlan Cox if (rv->object == old_object) { 13885c930c89SJeff Roberson vm_reserv_lock(rv); 13895c930c89SJeff Roberson CTR6(KTR_VM, 13905c930c89SJeff Roberson "%s: rv %p object %p new %p popcnt %d inpartpop %d", 13915c930c89SJeff Roberson __FUNCTION__, rv, rv->object, new_object, rv->popcnt, 13925c930c89SJeff Roberson rv->inpartpopq); 1393f8a47341SAlan Cox if (rv->object == old_object) { 1394e2068d0bSJeff Roberson vm_reserv_object_lock(old_object); 1395e2068d0bSJeff Roberson rv->object = NULL; 1396f8a47341SAlan Cox LIST_REMOVE(rv, objq); 1397e2068d0bSJeff Roberson vm_reserv_object_unlock(old_object); 1398e2068d0bSJeff Roberson vm_reserv_object_lock(new_object); 1399f8a47341SAlan Cox rv->object = new_object; 1400f8a47341SAlan Cox rv->pindex -= old_object_offset; 1401e2068d0bSJeff Roberson LIST_INSERT_HEAD(&new_object->rvq, rv, objq); 1402e2068d0bSJeff Roberson vm_reserv_object_unlock(new_object); 1403f8a47341SAlan Cox } 14045c930c89SJeff Roberson vm_reserv_unlock(rv); 1405f8a47341SAlan Cox } 1406f8a47341SAlan Cox } 1407f8a47341SAlan Cox 1408f8a47341SAlan Cox /* 1409c869e672SAlan Cox * Returns the size (in bytes) of a reservation of the specified level. 
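 * The level values accepted here mirror what vm_reserv_level() and
 * vm_reserv_level_iffullpop() return, so their result may be passed
 * directly; -1 denotes an ordinary, unreserved page.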
1410c869e672SAlan Cox */ 1411c869e672SAlan Cox int 1412c869e672SAlan Cox vm_reserv_size(int level) 1413c869e672SAlan Cox { 1414c869e672SAlan Cox 1415c869e672SAlan Cox switch (level) { 1416c869e672SAlan Cox case 0: 1417c869e672SAlan Cox return (VM_LEVEL_0_SIZE); 1418c869e672SAlan Cox case -1: 1419c869e672SAlan Cox return (PAGE_SIZE); 1420c869e672SAlan Cox default: 1421c869e672SAlan Cox return (0); 1422c869e672SAlan Cox } 1423c869e672SAlan Cox } 1424c869e672SAlan Cox 1425c869e672SAlan Cox /* 1426f8a47341SAlan Cox * Allocates the virtual and physical memory required by the reservation 1427f8a47341SAlan Cox * management system's data structures, in particular, the reservation array. 1428f8a47341SAlan Cox */ 1429f8a47341SAlan Cox vm_paddr_t 14303e5e1b51SJeff Roberson vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end) 1431f8a47341SAlan Cox { 14327988971aSD Scott Phillips vm_paddr_t new_end; 14337988971aSD Scott Phillips vm_pindex_t count; 1434f8a47341SAlan Cox size_t size; 14353e5e1b51SJeff Roberson int i; 14363e5e1b51SJeff Roberson 14377988971aSD Scott Phillips count = 0; 14383e5e1b51SJeff Roberson for (i = 0; i < vm_phys_nsegs; i++) { 14397988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE 14407988971aSD Scott Phillips count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) - 14417988971aSD Scott Phillips vm_phys_segs[i].start / VM_LEVEL_0_SIZE; 14427988971aSD Scott Phillips #else 14437988971aSD Scott Phillips count = MAX(count, 14447988971aSD Scott Phillips howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE)); 14457988971aSD Scott Phillips #endif 14463e5e1b51SJeff Roberson } 14473e5e1b51SJeff Roberson 14487988971aSD Scott Phillips for (i = 0; phys_avail[i + 1] != 0; i += 2) { 14497988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE 14507988971aSD Scott Phillips count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) - 14517988971aSD Scott Phillips phys_avail[i] / VM_LEVEL_0_SIZE; 14527988971aSD Scott Phillips #else 14537988971aSD Scott Phillips count = MAX(count, 14547988971aSD Scott Phillips howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE)); 14557988971aSD Scott Phillips #endif 14563e5e1b51SJeff Roberson } 1457f8a47341SAlan Cox 1458f8a47341SAlan Cox /* 14597988971aSD Scott Phillips * Calculate the size (in bytes) of the reservation array. Rounding up 14607988971aSD Scott Phillips * for partial superpages at boundaries, as every small page is mapped 14617988971aSD Scott Phillips * to an element in the reservation array based on its physical address. 14627988971aSD Scott Phillips * Thus, the number of elements in the reservation array can be greater 14637988971aSD Scott Phillips * than the number of superpages. 1464f8a47341SAlan Cox */ 14657988971aSD Scott Phillips size = count * sizeof(struct vm_reserv); 1466f8a47341SAlan Cox 1467f8a47341SAlan Cox /* 1468f8a47341SAlan Cox * Allocate and map the physical memory for the reservation array. The 1469f8a47341SAlan Cox * next available virtual address is returned by reference. 1470f8a47341SAlan Cox */ 1471f8a47341SAlan Cox new_end = end - round_page(size); 1472f8a47341SAlan Cox vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end, 1473f8a47341SAlan Cox VM_PROT_READ | VM_PROT_WRITE); 1474f8a47341SAlan Cox bzero(vm_reserv_array, size); 1475f8a47341SAlan Cox 1476f8a47341SAlan Cox /* 1477f8a47341SAlan Cox * Return the next available physical address. 1478f8a47341SAlan Cox */ 1479f8a47341SAlan Cox return (new_end); 1480f8a47341SAlan Cox } 1481f8a47341SAlan Cox 14828b5e1472SAlan Cox /* 14838b5e1472SAlan Cox * Returns the superpage containing the given page. 
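 * That is, the first page of the reservation is returned when the
 * reservation belongs to the page's object and is fully populated;
 * otherwise the result is NULL.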
14848b5e1472SAlan Cox */ 14858b5e1472SAlan Cox vm_page_t 14868b5e1472SAlan Cox vm_reserv_to_superpage(vm_page_t m) 14878b5e1472SAlan Cox { 14888b5e1472SAlan Cox vm_reserv_t rv; 14898b5e1472SAlan Cox 14908b5e1472SAlan Cox VM_OBJECT_ASSERT_LOCKED(m->object); 14918b5e1472SAlan Cox rv = vm_reserv_from_page(m); 14925c930c89SJeff Roberson if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES) 14935c930c89SJeff Roberson m = rv->pages; 14945c930c89SJeff Roberson else 14955c930c89SJeff Roberson m = NULL; 14965c930c89SJeff Roberson 14975c930c89SJeff Roberson return (m); 14988b5e1472SAlan Cox } 14998b5e1472SAlan Cox 1500f8a47341SAlan Cox #endif /* VM_NRESERVLEVEL > 0 */ 1501