/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;
#endif

int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;
static struct vm_phys_seg vm_phys_early_segs[8];
static int vm_phys_early_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(&vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
    [VM_NFREEORDER_MAX];

static int __read_mostly vm_nfreelists;

/*
 * These "avail lists" are globals used to communicate boot-time physical
 * memory layout to other parts of the kernel.  Each physically contiguous
 * region of memory is defined by a start address at an even index and an
 * end address at the following odd index.  Each list is terminated by a
 * pair of zero entries.
 *
 * dump_avail tells the dump code what regions to include in a crash dump, and
 * phys_avail is all of the remaining physical memory that is available for
 * the vm system.
 *
 * Initially dump_avail and phys_avail are identical.  Boot time memory
 * allocations remove extents from phys_avail that may still be included
 * in dumps.
 */
vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
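
/*
 * Example (illustrative sketch, not part of the original file): summing
 * the physical memory described by phys_avail.  Region i occupies
 * indices 2i (start) and 2i + 1 (end), and a zero pair ends the list:
 *
 *	vm_paddr_t total = 0;
 *	int i;
 *
 *	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 *		total += phys_avail[i + 1] - phys_avail[i];
 */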

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_free, "A",
    "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_segs, "A",
    "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_locality, "A",
    "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
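
/*
 * Example (illustrative): a point lookup is expressed as a degenerate
 * range with end == 0, which the comparison above routes to
 * vm_phys_fictitious_in_range(); vm_phys_fictitious_to_vm_page() below
 * uses exactly this convention:
 *
 *	tmp.start = pa;
 *	tmp.end = 0;
 *	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
 */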

int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf,"\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
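
/*
 * Example (illustrative): from userland, "sysctl vm.phys_free" runs the
 * handler above and prints one table per domain and free list, with a
 * row per order and a column per pool.
 */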

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}
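
/*
 * Example (illustrative): a typical allocation path pops the first page
 * of the smallest non-empty queue at or above the wanted order:
 *
 *	m = TAILQ_FIRST(&fl[oind].pl);
 *	if (m != NULL)
 *		vm_freelist_rem(fl, m, oind);
 */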

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}
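
/*
 * Example (illustrative): with mem_affinity describing [0, 4G) as
 * domain 0 and [4G, 8G) as domain 1, vm_phys_create_seg(3G, 5G)
 * produces two segments: [3G, 4G) in domain 0 and [4G, 5G) in
 * domain 1.
 */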

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
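
/*
 * Example (illustrative): on a configuration with VM_LOWMEM_BOUNDARY at
 * 16M and VM_DMA32_BOUNDARY at 4G, vm_phys_add_seg(8M, 5G) creates
 * three segments: [8M, 16M), [16M, 4G), and [4G, 5G).
 */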

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
#if defined(VM_DMA32_NPAGES_THRESHOLD) || defined(VM_PHYSSEG_SPARSE)
	u_long npages;
#endif
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
#ifdef VM_DMA32_NPAGES_THRESHOLD
	npages = 0;
#endif
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef VM_FREELIST_DMA32
		if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
#ifdef VM_DMA32_NPAGES_THRESHOLD
			npages += atop(seg->end - seg->start);
#endif
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Coalesce physical memory segments that are contiguous and share the
	 * same per-domain free queues.
	 */
	prev_seg = vm_phys_segs;
	seg = &vm_phys_segs[1];
	end_seg = &vm_phys_segs[vm_phys_nsegs];
	while (seg < end_seg) {
		if (prev_seg->end == seg->start &&
		    prev_seg->free_queues == seg->free_queues) {
			prev_seg->end = seg->end;
			KASSERT(prev_seg->domain == seg->domain,
			    ("vm_phys_init: free queues cannot span domains"));
			vm_phys_nsegs--;
			end_seg--;
			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
				*tmp_seg = *(tmp_seg + 1);
		} else {
			prev_seg = seg;
			seg++;
		}
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Register info about the NUMA topology of the system.
 *
 * Invoked by platform-dependent code prior to vm_phys_init().
 */
void
vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
    int *locality)
{
#ifdef NUMA
	int d, i;

	/*
	 * For now the only override value that we support is 1, which
	 * effectively disables NUMA-awareness in the allocators.
	 */
	d = 0;
	TUNABLE_INT_FETCH("vm.numa.disabled", &d);
	if (d)
		ndomains = 1;

	if (ndomains > 1) {
		vm_ndomains = ndomains;
		mem_affinity = affinity;
		mem_locality = locality;
	}

	for (i = 0; i < vm_ndomains; i++)
		DOMAINSET_SET(i, &all_domains);
#else
	(void)ndomains;
	(void)affinity;
	(void)locality;
#endif
}
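
/*
 * Example (illustrative; exact loader syntax may vary): the
 * "vm.numa.disabled" tunable fetched above can be set at boot time to
 * force a single domain:
 *
 *	vm.numa.disabled=1
 */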

/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective being to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, tail);
	}
}

/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective being to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	u_int n;
	int order;

	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
	    m, npages));
	do {
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_enq_range: page %p has unexpected order %d",
		    m, m->order));
		order = ffs(npages) - 1;
		KASSERT(order < VM_NFREEORDER,
		    ("vm_phys_enq_range: order %d is out of range", order));
		vm_freelist_add(fl, m, order, tail);
		n = 1 << order;
		m += n;
		npages -= n;
	} while (npages > 0);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
static void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}
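
/*
 * Example (illustrative): vm_phys_enq_range() peels blocks of
 * increasing size off the front of the range.  For npages = 5 (with
 * the end of the range 4-page aligned, as the KASSERT requires), it
 * queues an order-0 block at m followed by an order-2 block at m + 1.
 */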

/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, need, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = 1 << oind;
				need = imin(npages - i, avail);
				for (end = i + need; i < end;)
					ma[i++] = m++;
				if (need < avail) {
					/*
					 * Return excess pages to fl.  Its
					 * order [0, oind) queues are empty.
					 */
					vm_phys_enq_range(m, avail - need, fl,
					    1);
					return (npages);
				} else if (i == npages)
					return (npages);
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = 1 << oind;
					need = imin(npages - i, avail);
					for (end = i + need; i < end;)
						ma[i++] = m++;
					if (need < avail) {
						/*
						 * Return excess pages to fl.
						 * Its order [0, oind) queues
						 * are empty.
						 */
						vm_phys_enq_range(m, avail -
						    need, fl, 1);
						return (npages);
					} else if (i == npages)
						return (npages);
				}
			}
		}
	}
	return (i);
}
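
/*
 * Example (illustrative sketch): filling a small page array; the free
 * lock for "domain" must be held, and fewer than the requested number
 * of pages may be returned:
 *
 *	vm_page_t ma[4];
 *	int got;
 *
 *	got = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, 4, ma);
 */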

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}
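
/*
 * Example (illustrative): with 4 KB pages, an order-2 request returns
 * 2^2 = 4 physically contiguous pages (16 KB):
 *
 *	m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 2);
 */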

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			/* The order [order, oind) queues are empty. */
			vm_phys_split_pages(m, oind, fl, order, 1);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				/* The order [order, oind) queues are empty. */
				vm_phys_split_pages(m, oind, fl, order, 1);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}
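
/*
 * Example (illustrative sketch): a hypothetical driver exposing a 2 MB
 * MMIO aperture as fictitious pages; "base" and the memory attribute
 * are assumptions, not taken from this file:
 *
 *	error = vm_phys_fictitious_reg_range(base, base + 0x200000,
 *	    VM_MEMATTR_UNCACHEABLE);
 *	...
 *	vm_phys_fictitious_unreg_range(base, base + 0x200000);
 */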

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Return the largest possible order of a set of pages starting at m.
 */
static int
max_order(vm_page_t m)
{

	/*
	 * Unsigned "min" is used here so that "order" is assigned
	 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
	 * or the low-order bits of its physical address are zero
	 * because the size of a physical address exceeds the size of
	 * a long.
	 */
	return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
	    VM_NFREEORDER - 1));
}
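
/*
 * Example (illustrative): a block's buddy differs from it in exactly
 * one physical address bit, so with 4 KB pages the order-0 buddy of the
 * page at 0x1000 is at 0x0000, and the order-1 buddy of the block at
 * 0x2000 is also at 0x0000:
 *
 *	pa_buddy = pa ^ ((vm_paddr_t)1 << (PAGE_SHIFT + order));
 */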
1162b8590daeSDoug Moore */ 1163b8590daeSDoug Moore void 1164b8590daeSDoug Moore vm_phys_enqueue_contig(vm_page_t m, u_long npages) 1165b8590daeSDoug Moore { 1166b8590daeSDoug Moore struct vm_freelist *fl; 1167b8590daeSDoug Moore struct vm_phys_seg *seg; 1168b8590daeSDoug Moore vm_page_t m_end; 1169b8590daeSDoug Moore int order; 1170b8590daeSDoug Moore 1171b8590daeSDoug Moore /* 1172b8590daeSDoug Moore * Avoid unnecessary coalescing by freeing the pages in the largest 1173b8590daeSDoug Moore * possible power-of-two-sized subsets. 1174b8590daeSDoug Moore */ 1175b8590daeSDoug Moore vm_domain_free_assert_locked(vm_pagequeue_domain(m)); 1176b8590daeSDoug Moore seg = &vm_phys_segs[m->segind]; 1177b8590daeSDoug Moore fl = (*seg->free_queues)[m->pool]; 1178b8590daeSDoug Moore m_end = m + npages; 1179b8590daeSDoug Moore /* Free blocks of increasing size. */ 1180b8590daeSDoug Moore while ((order = max_order(m)) < VM_NFREEORDER - 1 && 1181b8590daeSDoug Moore m + (1 << order) <= m_end) { 1182b8590daeSDoug Moore KASSERT(seg == &vm_phys_segs[m->segind], 1183b8590daeSDoug Moore ("%s: page range [%p,%p) spans multiple segments", 1184b8590daeSDoug Moore __func__, m_end - npages, m)); 1185b8590daeSDoug Moore vm_freelist_add(fl, m, order, 1); 1186b8590daeSDoug Moore m += 1 << order; 11875c1f2cc4SAlan Cox } 1188b8590daeSDoug Moore /* Free blocks of maximum size. */ 1189b8590daeSDoug Moore while (m + (1 << order) <= m_end) { 1190b8590daeSDoug Moore KASSERT(seg == &vm_phys_segs[m->segind], 1191b8590daeSDoug Moore ("%s: page range [%p,%p) spans multiple segments", 1192b8590daeSDoug Moore __func__, m_end - npages, m)); 1193b8590daeSDoug Moore vm_freelist_add(fl, m, order, 1); 1194b8590daeSDoug Moore m += 1 << order; 1195b8590daeSDoug Moore } 1196b8590daeSDoug Moore /* Free blocks of diminishing size. */ 1197b8590daeSDoug Moore while (m < m_end) { 1198b8590daeSDoug Moore KASSERT(seg == &vm_phys_segs[m->segind], 1199b8590daeSDoug Moore ("%s: page range [%p,%p) spans multiple segments", 1200b8590daeSDoug Moore __func__, m_end - npages, m)); 1201b8590daeSDoug Moore order = flsl(m_end - m) - 1; 1202b8590daeSDoug Moore vm_freelist_add(fl, m, order, 1); 1203b8590daeSDoug Moore m += 1 << order; 1204b8590daeSDoug Moore } 1205b8590daeSDoug Moore } 1206b8590daeSDoug Moore 1207b8590daeSDoug Moore /* 1208b8590daeSDoug Moore * Free a contiguous, arbitrarily sized set of physical pages. 1209b8590daeSDoug Moore * 1210b8590daeSDoug Moore * The free page queues must be locked. 1211b8590daeSDoug Moore */ 1212b8590daeSDoug Moore void 1213b8590daeSDoug Moore vm_phys_free_contig(vm_page_t m, u_long npages) 1214b8590daeSDoug Moore { 1215b8590daeSDoug Moore int order_start, order_end; 1216b8590daeSDoug Moore vm_page_t m_start, m_end; 1217b8590daeSDoug Moore 1218b8590daeSDoug Moore vm_domain_free_assert_locked(vm_pagequeue_domain(m)); 1219b8590daeSDoug Moore 1220b8590daeSDoug Moore m_start = m; 1221b8590daeSDoug Moore order_start = max_order(m_start); 1222b8590daeSDoug Moore if (order_start < VM_NFREEORDER - 1) 1223b8590daeSDoug Moore m_start += 1 << order_start; 1224b8590daeSDoug Moore m_end = m + npages; 1225b8590daeSDoug Moore order_end = max_order(m_end); 1226b8590daeSDoug Moore if (order_end < VM_NFREEORDER - 1) 1227b8590daeSDoug Moore m_end -= 1 << order_end; 1228b8590daeSDoug Moore /* 1229b8590daeSDoug Moore * Avoid unnecessary coalescing by freeing the pages at the start and 1230b8590daeSDoug Moore * end of the range last. 
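 *
 * For example, a 7-page range whose first page is aligned to two pages,
 * but not four, enqueues its middle four pages without merging, then
 * frees the leading order-1 block (2 pages) and the trailing order-0
 * block (1 page) with coalescing enabled.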
1231b8590daeSDoug Moore */ 1232b8590daeSDoug Moore if (m_start < m_end) 1233b8590daeSDoug Moore vm_phys_enqueue_contig(m_start, m_end - m_start); 1234b8590daeSDoug Moore if (order_start < VM_NFREEORDER - 1) 1235b8590daeSDoug Moore vm_phys_free_pages(m, order_start); 1236b8590daeSDoug Moore if (order_end < VM_NFREEORDER - 1) 1237b8590daeSDoug Moore vm_phys_free_pages(m_end, order_end); 12385c1f2cc4SAlan Cox } 12395c1f2cc4SAlan Cox 12405c1f2cc4SAlan Cox /* 1241c869e672SAlan Cox * Scan physical memory between the specified addresses "low" and "high" for a 1242c869e672SAlan Cox * run of contiguous physical pages that satisfy the specified conditions, and 1243c869e672SAlan Cox * return the lowest page in the run. The specified "alignment" determines 1244c869e672SAlan Cox * the alignment of the lowest physical page in the run. If the specified 1245c869e672SAlan Cox * "boundary" is non-zero, then the run of physical pages cannot span a 1246c869e672SAlan Cox * physical address that is a multiple of "boundary". 1247c869e672SAlan Cox * 1248c869e672SAlan Cox * "npages" must be greater than zero. Both "alignment" and "boundary" must 1249c869e672SAlan Cox * be a power of two. 1250c869e672SAlan Cox */ 1251c869e672SAlan Cox vm_page_t 12523f289c3fSJeff Roberson vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high, 1253c869e672SAlan Cox u_long alignment, vm_paddr_t boundary, int options) 1254c869e672SAlan Cox { 1255c869e672SAlan Cox vm_paddr_t pa_end; 1256c869e672SAlan Cox vm_page_t m_end, m_run, m_start; 1257c869e672SAlan Cox struct vm_phys_seg *seg; 1258c869e672SAlan Cox int segind; 1259c869e672SAlan Cox 1260c869e672SAlan Cox KASSERT(npages > 0, ("npages is 0")); 1261c869e672SAlan Cox KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 1262c869e672SAlan Cox KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 1263c869e672SAlan Cox if (low >= high) 1264c869e672SAlan Cox return (NULL); 1265c869e672SAlan Cox for (segind = 0; segind < vm_phys_nsegs; segind++) { 1266c869e672SAlan Cox seg = &vm_phys_segs[segind]; 12673f289c3fSJeff Roberson if (seg->domain != domain) 12683f289c3fSJeff Roberson continue; 1269c869e672SAlan Cox if (seg->start >= high) 1270c869e672SAlan Cox break; 1271c869e672SAlan Cox if (low >= seg->end) 1272c869e672SAlan Cox continue; 1273c869e672SAlan Cox if (low <= seg->start) 1274c869e672SAlan Cox m_start = seg->first_page; 1275c869e672SAlan Cox else 1276c869e672SAlan Cox m_start = &seg->first_page[atop(low - seg->start)]; 1277c869e672SAlan Cox if (high < seg->end) 1278c869e672SAlan Cox pa_end = high; 1279c869e672SAlan Cox else 1280c869e672SAlan Cox pa_end = seg->end; 1281c869e672SAlan Cox if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages)) 1282c869e672SAlan Cox continue; 1283c869e672SAlan Cox m_end = &seg->first_page[atop(pa_end - seg->start)]; 1284c869e672SAlan Cox m_run = vm_page_scan_contig(npages, m_start, m_end, 1285c869e672SAlan Cox alignment, boundary, options); 1286c869e672SAlan Cox if (m_run != NULL) 1287c869e672SAlan Cox return (m_run); 1288c869e672SAlan Cox } 1289c869e672SAlan Cox return (NULL); 1290c869e672SAlan Cox } 1291c869e672SAlan Cox 1292c869e672SAlan Cox /* 12939742373aSAlan Cox * Search for the given physical page "m" in the free lists. If the search 1294*6062d9faSMark Johnston * succeeds, remove "m" from the free lists and return true. Otherwise, return 1295*6062d9faSMark Johnston * false, indicating that "m" is not in the free lists. 12967bfda801SAlan Cox * 12977bfda801SAlan Cox * The free page queues must be locked. 
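 *
 * A minimal usage sketch ("vmd" is assumed to be the page's vm_domain):
 *
 *	vm_domain_free_lock(vmd);
 *	stolen = vm_phys_unfree_page(m);
 *	vm_domain_free_unlock(vmd);
 *
 * On success "m" has been carved out of its enclosing free block and
 * belongs to the caller; the remainder of the block stays free.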
12987bfda801SAlan Cox */ 1299*6062d9faSMark Johnston bool 13007bfda801SAlan Cox vm_phys_unfree_page(vm_page_t m) 13017bfda801SAlan Cox { 13027bfda801SAlan Cox struct vm_freelist *fl; 13037bfda801SAlan Cox struct vm_phys_seg *seg; 13047bfda801SAlan Cox vm_paddr_t pa, pa_half; 13057bfda801SAlan Cox vm_page_t m_set, m_tmp; 13067bfda801SAlan Cox int order; 13077bfda801SAlan Cox 13087bfda801SAlan Cox /* 13097bfda801SAlan Cox * First, find the contiguous, power of two-sized set of free 13107bfda801SAlan Cox * physical pages containing the given physical page "m" and 13117bfda801SAlan Cox * assign it to "m_set". 13127bfda801SAlan Cox */ 13137bfda801SAlan Cox seg = &vm_phys_segs[m->segind]; 1314e2068d0bSJeff Roberson vm_domain_free_assert_locked(VM_DOMAIN(seg->domain)); 13157bfda801SAlan Cox for (m_set = m, order = 0; m_set->order == VM_NFREEORDER && 1316bc8794a1SAlan Cox order < VM_NFREEORDER - 1; ) { 13177bfda801SAlan Cox order++; 13187bfda801SAlan Cox pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order)); 13192fbced65SAlan Cox if (pa >= seg->start) 13207bfda801SAlan Cox m_set = &seg->first_page[atop(pa - seg->start)]; 1321e35395ceSAlan Cox else 1322*6062d9faSMark Johnston return (false); 13237bfda801SAlan Cox } 1324e35395ceSAlan Cox if (m_set->order < order) 1325*6062d9faSMark Johnston return (false); 1326e35395ceSAlan Cox if (m_set->order == VM_NFREEORDER) 1327*6062d9faSMark Johnston return (false); 13287bfda801SAlan Cox KASSERT(m_set->order < VM_NFREEORDER, 13297bfda801SAlan Cox ("vm_phys_unfree_page: page %p has unexpected order %d", 13307bfda801SAlan Cox m_set, m_set->order)); 13317bfda801SAlan Cox 13327bfda801SAlan Cox /* 13337bfda801SAlan Cox * Next, remove "m_set" from the free lists. Finally, extract 13347bfda801SAlan Cox * "m" from "m_set" using an iterative algorithm: While "m_set" 13357bfda801SAlan Cox * is larger than a page, shrink "m_set" by returning the half 13367bfda801SAlan Cox * of "m_set" that does not contain "m" to the free lists. 13377bfda801SAlan Cox */ 13387bfda801SAlan Cox fl = (*seg->free_queues)[m_set->pool]; 13397bfda801SAlan Cox order = m_set->order; 13407e226537SAttilio Rao vm_freelist_rem(fl, m_set, order); 13417bfda801SAlan Cox while (order > 0) { 13427bfda801SAlan Cox order--; 13437bfda801SAlan Cox pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order)); 13447bfda801SAlan Cox if (m->phys_addr < pa_half) 13457bfda801SAlan Cox m_tmp = &seg->first_page[atop(pa_half - seg->start)]; 13467bfda801SAlan Cox else { 13477bfda801SAlan Cox m_tmp = m_set; 13487bfda801SAlan Cox m_set = &seg->first_page[atop(pa_half - seg->start)]; 13497bfda801SAlan Cox } 13507e226537SAttilio Rao vm_freelist_add(fl, m_tmp, order, 0); 13517bfda801SAlan Cox } 13527bfda801SAlan Cox KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency")); 1353*6062d9faSMark Johnston return (true); 13547bfda801SAlan Cox } 13557bfda801SAlan Cox 13567bfda801SAlan Cox /* 1357fa8a6585SDoug Moore * Find a run of contiguous physical pages from the specified page list. 
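 *
 * This is the fallback for requests that no single free block can
 * satisfy: it looks for adjacent free oind-blocks, possibly completing
 * the run with smaller free buddies at either end.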
1358fa8a6585SDoug Moore */ 1359fa8a6585SDoug Moore static vm_page_t 1360fa8a6585SDoug Moore vm_phys_find_freelist_contig(struct vm_freelist *fl, int oind, u_long npages, 1361fa8a6585SDoug Moore vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 1362fa8a6585SDoug Moore { 1363fa8a6585SDoug Moore struct vm_phys_seg *seg; 1364fa8a6585SDoug Moore vm_paddr_t frag, lbound, pa, page_size, pa_end, pa_pre, size; 1365fa8a6585SDoug Moore vm_page_t m, m_listed, m_ret; 1366fa8a6585SDoug Moore int order; 1367fa8a6585SDoug Moore 1368fa8a6585SDoug Moore KASSERT(npages > 0, ("npages is 0")); 1369fa8a6585SDoug Moore KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 1370fa8a6585SDoug Moore KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 1371fa8a6585SDoug Moore /* Search for a run satisfying the specified conditions. */ 1372fa8a6585SDoug Moore page_size = PAGE_SIZE; 1373fa8a6585SDoug Moore size = npages << PAGE_SHIFT; 1374fa8a6585SDoug Moore frag = (npages & ~(~0UL << oind)) << PAGE_SHIFT; 1375fa8a6585SDoug Moore TAILQ_FOREACH(m_listed, &fl[oind].pl, listq) { 1376fa8a6585SDoug Moore /* 1377fa8a6585SDoug Moore * Determine if the address range starting at pa is 1378fa8a6585SDoug Moore * too low. 1379fa8a6585SDoug Moore */ 1380fa8a6585SDoug Moore pa = VM_PAGE_TO_PHYS(m_listed); 1381fa8a6585SDoug Moore if (pa < low) 1382fa8a6585SDoug Moore continue; 1383fa8a6585SDoug Moore 1384fa8a6585SDoug Moore /* 1385fa8a6585SDoug Moore * If this is not the first free oind-block in this range, bail 1386fa8a6585SDoug Moore * out. We have seen the first free block already, or will see 1387fa8a6585SDoug Moore * it before failing to find an appropriate range. 1388fa8a6585SDoug Moore */ 1389fa8a6585SDoug Moore seg = &vm_phys_segs[m_listed->segind]; 1390fa8a6585SDoug Moore lbound = low > seg->start ? low : seg->start; 1391fa8a6585SDoug Moore pa_pre = pa - (page_size << oind); 1392fa8a6585SDoug Moore m = &seg->first_page[atop(pa_pre - seg->start)]; 1393fa8a6585SDoug Moore if (pa != 0 && pa_pre >= lbound && m->order == oind) 1394fa8a6585SDoug Moore continue; 1395fa8a6585SDoug Moore 1396fa8a6585SDoug Moore if (!vm_addr_align_ok(pa, alignment)) 1397fa8a6585SDoug Moore /* Advance to satisfy alignment condition. */ 1398fa8a6585SDoug Moore pa = roundup2(pa, alignment); 1399fa8a6585SDoug Moore else if (frag != 0 && lbound + frag <= pa) { 1400fa8a6585SDoug Moore /* 1401fa8a6585SDoug Moore * Back up to the first aligned free block in this 1402fa8a6585SDoug Moore * range, without moving below lbound. 1403fa8a6585SDoug Moore */ 1404fa8a6585SDoug Moore pa_end = pa; 1405fa8a6585SDoug Moore for (order = oind - 1; order >= 0; order--) { 1406fa8a6585SDoug Moore pa_pre = pa_end - (page_size << order); 1407fa8a6585SDoug Moore if (!vm_addr_align_ok(pa_pre, alignment)) 1408fa8a6585SDoug Moore break; 1409fa8a6585SDoug Moore m = &seg->first_page[atop(pa_pre - seg->start)]; 1410fa8a6585SDoug Moore if (pa_pre >= lbound && m->order == order) 1411fa8a6585SDoug Moore pa_end = pa_pre; 1412fa8a6585SDoug Moore } 1413fa8a6585SDoug Moore /* 1414fa8a6585SDoug Moore * If the extra small blocks are enough to complete the 1415fa8a6585SDoug Moore * fragment, use them. Otherwise, look to allocate the 1416fa8a6585SDoug Moore * fragment at the other end. 1417fa8a6585SDoug Moore */ 1418fa8a6585SDoug Moore if (pa_end + frag <= pa) 1419fa8a6585SDoug Moore pa = pa_end; 1420fa8a6585SDoug Moore } 1421fa8a6585SDoug Moore 1422fa8a6585SDoug Moore /* Advance as necessary to satisfy boundary conditions. 
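 * For example, with a 2MB "boundary", a candidate range that would
 * straddle a 2MB multiple is pushed up to begin at the next multiple.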
*/ 1423fa8a6585SDoug Moore if (!vm_addr_bound_ok(pa, size, boundary)) 1424fa8a6585SDoug Moore pa = roundup2(pa + 1, boundary); 1425fa8a6585SDoug Moore pa_end = pa + size; 1426fa8a6585SDoug Moore 1427fa8a6585SDoug Moore /* 1428fa8a6585SDoug Moore * Determine if the address range is valid (without overflow in 1429fa8a6585SDoug Moore * pa_end calculation), and fits within the segment. 1430fa8a6585SDoug Moore */ 1431fa8a6585SDoug Moore if (pa_end < pa || seg->end < pa_end) 1432fa8a6585SDoug Moore continue; 1433fa8a6585SDoug Moore 1434fa8a6585SDoug Moore m_ret = &seg->first_page[atop(pa - seg->start)]; 1435fa8a6585SDoug Moore 1436fa8a6585SDoug Moore /* 1437fa8a6585SDoug Moore * Determine whether there are enough free oind-blocks here to 1438fa8a6585SDoug Moore * satisfy the allocation request. 1439fa8a6585SDoug Moore */ 1440fa8a6585SDoug Moore pa = VM_PAGE_TO_PHYS(m_listed); 1441fa8a6585SDoug Moore do { 1442fa8a6585SDoug Moore pa += page_size << oind; 1443fa8a6585SDoug Moore if (pa >= pa_end) 1444fa8a6585SDoug Moore return (m_ret); 1445fa8a6585SDoug Moore m = &seg->first_page[atop(pa - seg->start)]; 1446fa8a6585SDoug Moore } while (oind == m->order); 1447fa8a6585SDoug Moore 1448fa8a6585SDoug Moore /* 1449fa8a6585SDoug Moore * Determine if an additional series of free blocks of 1450fa8a6585SDoug Moore * diminishing size can help to satisfy the allocation request. 1451fa8a6585SDoug Moore */ 1452fa8a6585SDoug Moore while (m->order < oind && 1453fa8a6585SDoug Moore pa + 2 * (page_size << m->order) > pa_end) { 1454fa8a6585SDoug Moore pa += page_size << m->order; 1455fa8a6585SDoug Moore if (pa >= pa_end) 1456fa8a6585SDoug Moore return (m_ret); 1457fa8a6585SDoug Moore m = &seg->first_page[atop(pa - seg->start)]; 1458fa8a6585SDoug Moore } 1459fa8a6585SDoug Moore } 1460fa8a6585SDoug Moore return (NULL); 1461fa8a6585SDoug Moore } 1462fa8a6585SDoug Moore 1463fa8a6585SDoug Moore /* 1464fa8a6585SDoug Moore * Find a run of contiguous physical pages from the specified free list 1465342056faSDoug Moore * table. 1466c869e672SAlan Cox */ 1467c869e672SAlan Cox static vm_page_t 1468fa8a6585SDoug Moore vm_phys_find_queues_contig( 1469342056faSDoug Moore struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX], 1470342056faSDoug Moore u_long npages, vm_paddr_t low, vm_paddr_t high, 1471342056faSDoug Moore u_long alignment, vm_paddr_t boundary) 1472c869e672SAlan Cox { 1473c869e672SAlan Cox struct vm_freelist *fl; 1474fa8a6585SDoug Moore vm_page_t m_ret; 1475c869e672SAlan Cox vm_paddr_t pa, pa_end, size; 1476c869e672SAlan Cox int oind, order, pind; 1477c869e672SAlan Cox 1478c869e672SAlan Cox KASSERT(npages > 0, ("npages is 0")); 1479c869e672SAlan Cox KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 1480c869e672SAlan Cox KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 1481c869e672SAlan Cox /* Compute the queue that is the best fit for npages. */ 14829161b4deSAlan Cox order = flsl(npages - 1); 1483fa8a6585SDoug Moore /* Search for a large enough free block. 
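 * The first queue scanned is the best fit: order == flsl(npages - 1) is
 * the smallest order whose blocks can hold the entire run, e.g.
 * npages == 96 gives order 7, i.e. 128-page blocks.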
*/ 1484c869e672SAlan Cox size = npages << PAGE_SHIFT; 1485fa8a6585SDoug Moore for (oind = order; oind < VM_NFREEORDER; oind++) { 1486c869e672SAlan Cox for (pind = 0; pind < VM_NFREEPOOL; pind++) { 1487342056faSDoug Moore fl = (*queues)[pind]; 14885cd29d0fSMark Johnston TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) { 1489c869e672SAlan Cox /* 1490da92ecbcSDoug Moore * Determine if the address range starting at pa 1491da92ecbcSDoug Moore * is within the given range, satisfies the 1492da92ecbcSDoug Moore * given alignment, and does not cross the given 1493da92ecbcSDoug Moore * boundary. 149411752d88SAlan Cox */ 1495da92ecbcSDoug Moore pa = VM_PAGE_TO_PHYS(m_ret); 1496da92ecbcSDoug Moore pa_end = pa + size; 1497fa8a6585SDoug Moore if (low <= pa && pa_end <= high && 1498fa8a6585SDoug Moore vm_addr_ok(pa, size, alignment, boundary)) 1499fa8a6585SDoug Moore return (m_ret); 1500fa8a6585SDoug Moore } 1501fa8a6585SDoug Moore } 1502fa8a6585SDoug Moore } 1503da92ecbcSDoug Moore if (order < VM_NFREEORDER) 1504fa8a6585SDoug Moore return (NULL); 1505fa8a6585SDoug Moore /* Search for a long-enough sequence of small blocks. */ 1506fa8a6585SDoug Moore oind = VM_NFREEORDER - 1; 1507fa8a6585SDoug Moore for (pind = 0; pind < VM_NFREEPOOL; pind++) { 1508fa8a6585SDoug Moore fl = (*queues)[pind]; 1509fa8a6585SDoug Moore m_ret = vm_phys_find_freelist_contig(fl, oind, npages, 1510fa8a6585SDoug Moore low, high, alignment, boundary); 1511fa8a6585SDoug Moore if (m_ret != NULL) 1512fa8a6585SDoug Moore return (m_ret); 151311752d88SAlan Cox } 151411752d88SAlan Cox return (NULL); 151511752d88SAlan Cox } 151611752d88SAlan Cox 1517b7565d44SJeff Roberson /* 1518342056faSDoug Moore * Allocate a contiguous set of physical pages of the given size 1519342056faSDoug Moore * "npages" from the free lists. All of the physical pages must be at 1520342056faSDoug Moore * or above the given physical address "low" and below the given 1521342056faSDoug Moore * physical address "high". The given value "alignment" determines the 1522342056faSDoug Moore * alignment of the first physical page in the set. If the given value 1523342056faSDoug Moore * "boundary" is non-zero, then the set of physical pages cannot cross 1524342056faSDoug Moore * any physical address boundary that is a multiple of that value. Both 1525342056faSDoug Moore * "alignment" and "boundary" must be a power of two. 
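 *
 * A sketch of a call (values hypothetical): with the domain free lock
 * held, allocate 512 contiguous pages from domain 0 below 4GB, aligned
 * to 2MB, with no boundary restriction:
 *
 *	m = vm_phys_alloc_contig(0, 512, 0, (vm_paddr_t)1 << 32,
 *	    2 * 1024 * 1024, 0);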
1526342056faSDoug Moore */ 1527342056faSDoug Moore vm_page_t 1528342056faSDoug Moore vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high, 1529342056faSDoug Moore u_long alignment, vm_paddr_t boundary) 1530342056faSDoug Moore { 1531342056faSDoug Moore vm_paddr_t pa_end, pa_start; 1532fa8a6585SDoug Moore struct vm_freelist *fl; 1533fa8a6585SDoug Moore vm_page_t m, m_run; 1534342056faSDoug Moore struct vm_phys_seg *seg; 1535342056faSDoug Moore struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX]; 1536fa8a6585SDoug Moore int oind, segind; 1537342056faSDoug Moore 1538342056faSDoug Moore KASSERT(npages > 0, ("npages is 0")); 1539342056faSDoug Moore KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 1540342056faSDoug Moore KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 1541342056faSDoug Moore vm_domain_free_assert_locked(VM_DOMAIN(domain)); 1542342056faSDoug Moore if (low >= high) 1543342056faSDoug Moore return (NULL); 1544342056faSDoug Moore queues = NULL; 1545342056faSDoug Moore m_run = NULL; 1546342056faSDoug Moore for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) { 1547342056faSDoug Moore seg = &vm_phys_segs[segind]; 1548342056faSDoug Moore if (seg->start >= high || seg->domain != domain) 1549342056faSDoug Moore continue; 1550342056faSDoug Moore if (low >= seg->end) 1551342056faSDoug Moore break; 1552342056faSDoug Moore if (low <= seg->start) 1553342056faSDoug Moore pa_start = seg->start; 1554342056faSDoug Moore else 1555342056faSDoug Moore pa_start = low; 1556342056faSDoug Moore if (high < seg->end) 1557342056faSDoug Moore pa_end = high; 1558342056faSDoug Moore else 1559342056faSDoug Moore pa_end = seg->end; 1560342056faSDoug Moore if (pa_end - pa_start < ptoa(npages)) 1561342056faSDoug Moore continue; 1562342056faSDoug Moore /* 1563342056faSDoug Moore * If a previous segment led to a search using 1564342056faSDoug Moore * the same free lists as would this segment, then 1565342056faSDoug Moore * we've actually already searched within this 1566342056faSDoug Moore * too. So skip it. 1567342056faSDoug Moore */ 1568342056faSDoug Moore if (seg->free_queues == queues) 1569342056faSDoug Moore continue; 1570342056faSDoug Moore queues = seg->free_queues; 1571fa8a6585SDoug Moore m_run = vm_phys_find_queues_contig(queues, npages, 1572342056faSDoug Moore low, high, alignment, boundary); 1573342056faSDoug Moore if (m_run != NULL) 1574342056faSDoug Moore break; 1575342056faSDoug Moore } 1576fa8a6585SDoug Moore if (m_run == NULL) 1577fa8a6585SDoug Moore return (NULL); 1578fa8a6585SDoug Moore 1579fa8a6585SDoug Moore /* Allocate pages from the page-range found. */ 1580fa8a6585SDoug Moore for (m = m_run; m < &m_run[npages]; m = &m[1 << oind]) { 1581fa8a6585SDoug Moore fl = (*queues)[m->pool]; 1582fa8a6585SDoug Moore oind = m->order; 1583fa8a6585SDoug Moore vm_freelist_rem(fl, m, oind); 1584fa8a6585SDoug Moore if (m->pool != VM_FREEPOOL_DEFAULT) 1585fa8a6585SDoug Moore vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind); 1586fa8a6585SDoug Moore } 1587fa8a6585SDoug Moore /* Return excess pages to the free lists. 
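 * For example, a 96-page request satisfied by a single order-7
 * (128-page) block leaves 32 trailing pages, which are handed back here
 * via vm_phys_enq_range().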
 */
1588fa8a6585SDoug Moore if (&m_run[npages] < m) {
1589fa8a6585SDoug Moore fl = (*queues)[VM_FREEPOOL_DEFAULT];
1590fa8a6585SDoug Moore vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
1591fa8a6585SDoug Moore }
1592342056faSDoug Moore return (m_run);
1593342056faSDoug Moore }
1594342056faSDoug Moore 
1595342056faSDoug Moore /*
1596b7565d44SJeff Roberson  * Return the index of the first unused slot, which may be the terminating
1597b7565d44SJeff Roberson  * entry.
1598b7565d44SJeff Roberson  */
1599b7565d44SJeff Roberson static int
1600b7565d44SJeff Roberson vm_phys_avail_count(void)
1601b7565d44SJeff Roberson {
1602b7565d44SJeff Roberson int i;
1603b7565d44SJeff Roberson 
1604b7565d44SJeff Roberson for (i = 0; phys_avail[i + 1]; i += 2)
1605b7565d44SJeff Roberson continue;
1606b7565d44SJeff Roberson if (i > PHYS_AVAIL_ENTRIES)
1607b7565d44SJeff Roberson panic("Improperly terminated phys_avail (%d entries)", i);
1608b7565d44SJeff Roberson 
1609b7565d44SJeff Roberson return (i);
1610b7565d44SJeff Roberson }
1611b7565d44SJeff Roberson 
1612b7565d44SJeff Roberson /*
1613b7565d44SJeff Roberson  * Assert that a phys_avail entry is valid.
1614b7565d44SJeff Roberson  */
1615b7565d44SJeff Roberson static void
1616b7565d44SJeff Roberson vm_phys_avail_check(int i)
1617b7565d44SJeff Roberson {
1618b7565d44SJeff Roberson if (phys_avail[i] & PAGE_MASK)
1619b7565d44SJeff Roberson panic("Unaligned phys_avail[%d]: %#jx", i,
1620b7565d44SJeff Roberson (intmax_t)phys_avail[i]);
1621b7565d44SJeff Roberson if (phys_avail[i + 1] & PAGE_MASK)
1622b7565d44SJeff Roberson panic("Unaligned phys_avail[%d + 1]: %#jx", i,
1623b7565d44SJeff Roberson (intmax_t)phys_avail[i + 1]);
1624b7565d44SJeff Roberson if (phys_avail[i + 1] < phys_avail[i])
1625b7565d44SJeff Roberson panic("phys_avail[%d] end %#jx < start %#jx", i,
1626b7565d44SJeff Roberson (intmax_t)phys_avail[i + 1], (intmax_t)phys_avail[i]);
1627b7565d44SJeff Roberson }
1628b7565d44SJeff Roberson 
1629b7565d44SJeff Roberson /*
1630b7565d44SJeff Roberson  * Return the index of an overlapping phys_avail entry or -1.
1631b7565d44SJeff Roberson  */
1632be3f5f29SJeff Roberson #ifdef NUMA
1633b7565d44SJeff Roberson static int
1634b7565d44SJeff Roberson vm_phys_avail_find(vm_paddr_t pa)
1635b7565d44SJeff Roberson {
1636b7565d44SJeff Roberson int i;
1637b7565d44SJeff Roberson 
1638b7565d44SJeff Roberson for (i = 0; phys_avail[i + 1]; i += 2)
1639b7565d44SJeff Roberson if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
1640b7565d44SJeff Roberson return (i);
1641b7565d44SJeff Roberson return (-1);
1642b7565d44SJeff Roberson }
1643be3f5f29SJeff Roberson #endif
1644b7565d44SJeff Roberson 
1645b7565d44SJeff Roberson /*
1646b7565d44SJeff Roberson  * Return the index of the largest entry.
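 *
 * A worked example (hypothetical map): with
 * phys_avail[] = { 1MB, 512MB, 1GB, 4GB, 0, 0 }, vm_phys_avail_count()
 * returns 4 and this function returns 2, the start index of the 3GB
 * entry.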
1647b7565d44SJeff Roberson */ 1648b7565d44SJeff Roberson int 1649b7565d44SJeff Roberson vm_phys_avail_largest(void) 1650b7565d44SJeff Roberson { 1651b7565d44SJeff Roberson vm_paddr_t sz, largesz; 1652b7565d44SJeff Roberson int largest; 1653b7565d44SJeff Roberson int i; 1654b7565d44SJeff Roberson 1655b7565d44SJeff Roberson largest = 0; 1656b7565d44SJeff Roberson largesz = 0; 1657b7565d44SJeff Roberson for (i = 0; phys_avail[i + 1]; i += 2) { 1658b7565d44SJeff Roberson sz = vm_phys_avail_size(i); 1659b7565d44SJeff Roberson if (sz > largesz) { 1660b7565d44SJeff Roberson largesz = sz; 1661b7565d44SJeff Roberson largest = i; 1662b7565d44SJeff Roberson } 1663b7565d44SJeff Roberson } 1664b7565d44SJeff Roberson 1665b7565d44SJeff Roberson return (largest); 1666b7565d44SJeff Roberson } 1667b7565d44SJeff Roberson 1668b7565d44SJeff Roberson vm_paddr_t 1669b7565d44SJeff Roberson vm_phys_avail_size(int i) 1670b7565d44SJeff Roberson { 1671b7565d44SJeff Roberson 1672b7565d44SJeff Roberson return (phys_avail[i + 1] - phys_avail[i]); 1673b7565d44SJeff Roberson } 1674b7565d44SJeff Roberson 1675b7565d44SJeff Roberson /* 1676b7565d44SJeff Roberson * Split an entry at the address 'pa'. Return zero on success or errno. 1677b7565d44SJeff Roberson */ 1678b7565d44SJeff Roberson static int 1679b7565d44SJeff Roberson vm_phys_avail_split(vm_paddr_t pa, int i) 1680b7565d44SJeff Roberson { 1681b7565d44SJeff Roberson int cnt; 1682b7565d44SJeff Roberson 1683b7565d44SJeff Roberson vm_phys_avail_check(i); 1684b7565d44SJeff Roberson if (pa <= phys_avail[i] || pa >= phys_avail[i + 1]) 1685b7565d44SJeff Roberson panic("vm_phys_avail_split: invalid address"); 1686b7565d44SJeff Roberson cnt = vm_phys_avail_count(); 1687b7565d44SJeff Roberson if (cnt >= PHYS_AVAIL_ENTRIES) 1688b7565d44SJeff Roberson return (ENOSPC); 1689b7565d44SJeff Roberson memmove(&phys_avail[i + 2], &phys_avail[i], 1690b7565d44SJeff Roberson (cnt - i) * sizeof(phys_avail[0])); 1691b7565d44SJeff Roberson phys_avail[i + 1] = pa; 1692b7565d44SJeff Roberson phys_avail[i + 2] = pa; 1693b7565d44SJeff Roberson vm_phys_avail_check(i); 1694b7565d44SJeff Roberson vm_phys_avail_check(i+2); 1695b7565d44SJeff Roberson 1696b7565d44SJeff Roberson return (0); 1697b7565d44SJeff Roberson } 1698b7565d44SJeff Roberson 169931991a5aSMitchell Horne /* 170031991a5aSMitchell Horne * Check if a given physical address can be included as part of a crash dump. 
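 *
 * A sketch of typical use while building a minidump bitmap
 * (dump_add_page() is the machine-dependent helper assumed here):
 *
 *	if (vm_phys_is_dumpable(pa))
 *		dump_add_page(pa);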
170131991a5aSMitchell Horne */ 170231991a5aSMitchell Horne bool 170331991a5aSMitchell Horne vm_phys_is_dumpable(vm_paddr_t pa) 170431991a5aSMitchell Horne { 170531991a5aSMitchell Horne vm_page_t m; 170631991a5aSMitchell Horne int i; 170731991a5aSMitchell Horne 170831991a5aSMitchell Horne if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL) 170931991a5aSMitchell Horne return ((m->flags & PG_NODUMP) == 0); 171031991a5aSMitchell Horne 171131991a5aSMitchell Horne for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { 171231991a5aSMitchell Horne if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) 171331991a5aSMitchell Horne return (true); 171431991a5aSMitchell Horne } 171531991a5aSMitchell Horne return (false); 171631991a5aSMitchell Horne } 171731991a5aSMitchell Horne 171881302f1dSMark Johnston void 171981302f1dSMark Johnston vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end) 172081302f1dSMark Johnston { 172181302f1dSMark Johnston struct vm_phys_seg *seg; 172281302f1dSMark Johnston 172381302f1dSMark Johnston if (vm_phys_early_nsegs == -1) 172481302f1dSMark Johnston panic("%s: called after initialization", __func__); 172581302f1dSMark Johnston if (vm_phys_early_nsegs == nitems(vm_phys_early_segs)) 172681302f1dSMark Johnston panic("%s: ran out of early segments", __func__); 172781302f1dSMark Johnston 172881302f1dSMark Johnston seg = &vm_phys_early_segs[vm_phys_early_nsegs++]; 172981302f1dSMark Johnston seg->start = start; 173081302f1dSMark Johnston seg->end = end; 173181302f1dSMark Johnston } 173281302f1dSMark Johnston 1733b7565d44SJeff Roberson /* 1734b7565d44SJeff Roberson * This routine allocates NUMA node specific memory before the page 1735b7565d44SJeff Roberson * allocator is bootstrapped. 1736b7565d44SJeff Roberson */ 1737b7565d44SJeff Roberson vm_paddr_t 1738b7565d44SJeff Roberson vm_phys_early_alloc(int domain, size_t alloc_size) 1739b7565d44SJeff Roberson { 17402e7838aeSJohn Baldwin #ifdef NUMA 17412e7838aeSJohn Baldwin int mem_index; 17422e7838aeSJohn Baldwin #endif 17432e7838aeSJohn Baldwin int i, biggestone; 1744b7565d44SJeff Roberson vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align; 1745b7565d44SJeff Roberson 174681302f1dSMark Johnston KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains), 174781302f1dSMark Johnston ("%s: invalid domain index %d", __func__, domain)); 1748b7565d44SJeff Roberson 1749b7565d44SJeff Roberson /* 1750b7565d44SJeff Roberson * Search the mem_affinity array for the biggest address 1751b7565d44SJeff Roberson * range in the desired domain. This is used to constrain 1752b7565d44SJeff Roberson * the phys_avail selection below. 
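 * For example, if mem_affinity places domain 1 at [4GB, 8GB), only
 * phys_avail chunks whose ends fall inside that window (with room for
 * the allocation) are candidates in the loop below.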
1753b7565d44SJeff Roberson  */
1754b7565d44SJeff Roberson biggestsize = 0;
1755b7565d44SJeff Roberson mem_start = 0;
1756b7565d44SJeff Roberson mem_end = -1;
1757b7565d44SJeff Roberson #ifdef NUMA
17582e7838aeSJohn Baldwin mem_index = 0;
1759b7565d44SJeff Roberson if (mem_affinity != NULL) {
1760b7565d44SJeff Roberson for (i = 0;; i++) {
1761b7565d44SJeff Roberson size = mem_affinity[i].end - mem_affinity[i].start;
1762b7565d44SJeff Roberson if (size == 0)
1763b7565d44SJeff Roberson break;
176481302f1dSMark Johnston if (domain != -1 && mem_affinity[i].domain != domain)
1765b7565d44SJeff Roberson continue;
1766b7565d44SJeff Roberson if (size > biggestsize) {
1767b7565d44SJeff Roberson mem_index = i;
1768b7565d44SJeff Roberson biggestsize = size;
1769b7565d44SJeff Roberson }
1770b7565d44SJeff Roberson }
1771b7565d44SJeff Roberson mem_start = mem_affinity[mem_index].start;
1772b7565d44SJeff Roberson mem_end = mem_affinity[mem_index].end;
1773b7565d44SJeff Roberson }
1774b7565d44SJeff Roberson #endif
1775b7565d44SJeff Roberson 
1776b7565d44SJeff Roberson /*
1777b7565d44SJeff Roberson  * Now find the biggest physical segment within the desired
1778b7565d44SJeff Roberson  * NUMA domain.
1779b7565d44SJeff Roberson  */
1780b7565d44SJeff Roberson biggestsize = 0;
1781b7565d44SJeff Roberson biggestone = 0;
1782b7565d44SJeff Roberson for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1783b7565d44SJeff Roberson /* Skip regions that are out of range. */
1784b7565d44SJeff Roberson if (phys_avail[i + 1] - alloc_size < mem_start ||
1785b7565d44SJeff Roberson phys_avail[i + 1] > mem_end)
1786b7565d44SJeff Roberson continue;
1787b7565d44SJeff Roberson size = vm_phys_avail_size(i);
1788b7565d44SJeff Roberson if (size > biggestsize) {
1789b7565d44SJeff Roberson biggestone = i;
1790b7565d44SJeff Roberson biggestsize = size;
1791b7565d44SJeff Roberson }
1792b7565d44SJeff Roberson }
1793b7565d44SJeff Roberson alloc_size = round_page(alloc_size);
1794b7565d44SJeff Roberson 
1795b7565d44SJeff Roberson /*
1796b7565d44SJeff Roberson  * Grab single pages from the front to reduce fragmentation.
1797b7565d44SJeff Roberson  */
1798b7565d44SJeff Roberson if (alloc_size == PAGE_SIZE) {
1799b7565d44SJeff Roberson pa = phys_avail[biggestone];
1800b7565d44SJeff Roberson phys_avail[biggestone] += PAGE_SIZE;
1801b7565d44SJeff Roberson vm_phys_avail_check(biggestone);
1802b7565d44SJeff Roberson return (pa);
1803b7565d44SJeff Roberson }
1804b7565d44SJeff Roberson 
1805b7565d44SJeff Roberson /*
1806b7565d44SJeff Roberson  * Naturally align large allocations.
1807b7565d44SJeff Roberson  */
1808b7565d44SJeff Roberson align = phys_avail[biggestone + 1] & (alloc_size - 1);
1809b7565d44SJeff Roberson if (alloc_size + align > biggestsize)
1810b7565d44SJeff Roberson panic("cannot find a large enough region");
1811b7565d44SJeff Roberson if (align != 0 &&
1812b7565d44SJeff Roberson vm_phys_avail_split(phys_avail[biggestone + 1] - align,
1813b7565d44SJeff Roberson biggestone) != 0)
1814b7565d44SJeff Roberson /* Wasting memory.
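 * The split can fail only for lack of a free phys_avail slot; in that
 * case the unaligned tail is dropped from the map entirely instead of
 * being kept as a separate free chunk.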
*/ 1815b7565d44SJeff Roberson phys_avail[biggestone + 1] -= align; 1816b7565d44SJeff Roberson 1817b7565d44SJeff Roberson phys_avail[biggestone + 1] -= alloc_size; 1818b7565d44SJeff Roberson vm_phys_avail_check(biggestone); 1819b7565d44SJeff Roberson pa = phys_avail[biggestone + 1]; 1820b7565d44SJeff Roberson return (pa); 1821b7565d44SJeff Roberson } 1822b7565d44SJeff Roberson 1823b7565d44SJeff Roberson void 1824b7565d44SJeff Roberson vm_phys_early_startup(void) 1825b7565d44SJeff Roberson { 182681302f1dSMark Johnston struct vm_phys_seg *seg; 1827b7565d44SJeff Roberson int i; 1828b7565d44SJeff Roberson 1829b7565d44SJeff Roberson for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1830b7565d44SJeff Roberson phys_avail[i] = round_page(phys_avail[i]); 1831b7565d44SJeff Roberson phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 1832b7565d44SJeff Roberson } 1833b7565d44SJeff Roberson 183481302f1dSMark Johnston for (i = 0; i < vm_phys_early_nsegs; i++) { 183581302f1dSMark Johnston seg = &vm_phys_early_segs[i]; 183681302f1dSMark Johnston vm_phys_add_seg(seg->start, seg->end); 183781302f1dSMark Johnston } 183881302f1dSMark Johnston vm_phys_early_nsegs = -1; 183981302f1dSMark Johnston 1840b7565d44SJeff Roberson #ifdef NUMA 1841b7565d44SJeff Roberson /* Force phys_avail to be split by domain. */ 1842b7565d44SJeff Roberson if (mem_affinity != NULL) { 1843b7565d44SJeff Roberson int idx; 1844b7565d44SJeff Roberson 1845b7565d44SJeff Roberson for (i = 0; mem_affinity[i].end != 0; i++) { 1846b7565d44SJeff Roberson idx = vm_phys_avail_find(mem_affinity[i].start); 1847b7565d44SJeff Roberson if (idx != -1 && 1848b7565d44SJeff Roberson phys_avail[idx] != mem_affinity[i].start) 1849b7565d44SJeff Roberson vm_phys_avail_split(mem_affinity[i].start, idx); 1850b7565d44SJeff Roberson idx = vm_phys_avail_find(mem_affinity[i].end); 1851b7565d44SJeff Roberson if (idx != -1 && 1852b7565d44SJeff Roberson phys_avail[idx] != mem_affinity[i].end) 1853b7565d44SJeff Roberson vm_phys_avail_split(mem_affinity[i].end, idx); 1854b7565d44SJeff Roberson } 1855b7565d44SJeff Roberson } 1856b7565d44SJeff Roberson #endif 1857b7565d44SJeff Roberson } 1858b7565d44SJeff Roberson 185911752d88SAlan Cox #ifdef DDB 186011752d88SAlan Cox /* 186111752d88SAlan Cox * Show the number of physical pages in each of the free lists. 
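 *
 * Invoked from the kernel debugger, e.g.:
 *
 *	db> show freepages
 *
 * One table is printed per (domain, free list) pair, with a column per
 * pool and a row per order.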
186211752d88SAlan Cox */ 1863c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(freepages, db_show_freepages, DB_CMD_MEMSAFE) 186411752d88SAlan Cox { 186511752d88SAlan Cox struct vm_freelist *fl; 18667e226537SAttilio Rao int flind, oind, pind, dom; 186711752d88SAlan Cox 18687e226537SAttilio Rao for (dom = 0; dom < vm_ndomains; dom++) { 18697e226537SAttilio Rao db_printf("DOMAIN: %d\n", dom); 187011752d88SAlan Cox for (flind = 0; flind < vm_nfreelists; flind++) { 187111752d88SAlan Cox db_printf("FREE LIST %d:\n" 187211752d88SAlan Cox "\n ORDER (SIZE) | NUMBER" 187311752d88SAlan Cox "\n ", flind); 187411752d88SAlan Cox for (pind = 0; pind < VM_NFREEPOOL; pind++) 187511752d88SAlan Cox db_printf(" | POOL %d", pind); 187611752d88SAlan Cox db_printf("\n-- "); 187711752d88SAlan Cox for (pind = 0; pind < VM_NFREEPOOL; pind++) 187811752d88SAlan Cox db_printf("-- -- "); 187911752d88SAlan Cox db_printf("--\n"); 188011752d88SAlan Cox for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { 188111752d88SAlan Cox db_printf(" %2.2d (%6.6dK)", oind, 188211752d88SAlan Cox 1 << (PAGE_SHIFT - 10 + oind)); 188311752d88SAlan Cox for (pind = 0; pind < VM_NFREEPOOL; pind++) { 18847e226537SAttilio Rao fl = vm_phys_free_queues[dom][flind][pind]; 188511752d88SAlan Cox db_printf(" | %6.6d", fl[oind].lcnt); 188611752d88SAlan Cox } 188711752d88SAlan Cox db_printf("\n"); 188811752d88SAlan Cox } 188911752d88SAlan Cox db_printf("\n"); 189011752d88SAlan Cox } 18917e226537SAttilio Rao db_printf("\n"); 18927e226537SAttilio Rao } 189311752d88SAlan Cox } 189411752d88SAlan Cox #endif 1895