/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;
#endif

int __read_mostly vm_ndomains = 1;

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int __read_mostly vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

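/*
 * For example, a lookup by physical address is encoded as a degenerate
 * segment whose "end" is zero, which the comparator above hands off to
 * vm_phys_fictitious_in_range(); vm_phys_fictitious_to_vm_page() below
 * queries the tree this way:
 *
 *	struct vm_phys_fictitious_seg tmp;
 *
 *	tmp.start = pa;		(the address being looked up)
 *	tmp.end = 0;		(marks a point query rather than a range)
 *	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
 *
 * Inserting a real segment, by contrast, passes a fully populated range and
 * panics if it overlaps an existing registration.
 */
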
int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

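/*
 * For example, assuming 4 KB pages (PAGE_SHIFT == 12), the "(SIZE)" column
 * printed above is 1 << (12 - 10 + oind) KB: order 0 is 4 KB, order 1 is
 * 8 KB, and order 9 is 2048 KB (2 MB); each order doubles the size of the
 * preceding one.
 */
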
/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

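/*
 * The queues themselves are indexed as
 * vm_phys_free_queues[domain][flind][pool][order].  A typical caller, with
 * the domain's free queue lock held, picks one (domain, free list, pool)
 * triple and then works on the per-order array, for example:
 *
 *	fl = vm_phys_free_queues[domain][flind][pool];
 *	vm_freelist_add(fl, m, order, 0);
 */
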
/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

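/*
 * For example, on a platform that defines both VM_LOWMEM_BOUNDARY (16 MB
 * where it is defined, e.g. on x86) and VM_DMA32_BOUNDARY (4 GB), a memory
 * segment spanning [2 MB, 5 GB) is registered by vm_phys_add_seg() as three
 * segments: [2 MB, 16 MB), [16 MB, 4 GB), and [4 GB, 5 GB), so that each
 * resulting segment falls entirely within a single free list.
 */
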
/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef VM_FREELIST_DMA32
		if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective is to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, tail);
	}
}

/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective is to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	u_int n;
	int order;

	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
	    m, npages));
	do {
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_enq_range: page %p has unexpected order %d",
		    m, m->order));
		order = ffs(npages) - 1;
		KASSERT(order < VM_NFREEORDER,
		    ("vm_phys_enq_range: order %d is out of range", order));
		vm_freelist_add(fl, m, order, tail);
		n = 1 << order;
		m += n;
		npages -= n;
	} while (npages > 0);
}

/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, need, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = 1 << oind;
				need = imin(npages - i, avail);
				for (end = i + need; i < end;)
					ma[i++] = m++;
				if (need < avail) {
					/*
					 * Return excess pages to fl.  Its
					 * order [0, oind) queues are empty.
					 */
					vm_phys_enq_range(m, avail - need, fl,
					    1);
					return (npages);
				} else if (i == npages)
					return (npages);
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = 1 << oind;
					need = imin(npages - i, avail);
					for (end = i + need; i < end;)
						ma[i++] = m++;
					if (need < avail) {
						/*
						 * Return excess pages to fl.
						 * Its order [0, oind) queues
						 * are empty.
						 */
						vm_phys_enq_range(m, avail -
						    need, fl, 1);
						return (npages);
					} else if (i == npages)
						return (npages);
				}
			}
		}
	}
	return (i);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			/* The order [order, oind) queues are empty. */
			vm_phys_split_pages(m, oind, fl, order, 1);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				/* The order [order, oind) queues are empty. */
				vm_phys_split_pages(m, oind, fl, order, 1);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

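/*
 * A minimal usage sketch (assuming a caller-chosen physical range "start"
 * .. "end" that is not backed by ordinary RAM): the range is registered
 * once, after which individual pages can be looked up by physical address,
 * and it is unregistered when it is no longer needed:
 *
 *	if (vm_phys_fictitious_reg_range(start, end,
 *	    VM_MEMATTR_DEFAULT) == 0) {
 *		m = vm_phys_fictitious_to_vm_page(start);
 *		...
 *		vm_phys_fictitious_unreg_range(start, end);
 *	}
 */
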
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that extends both
		 * before and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

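/*
 * For example, with VM_PHYSSEG_DENSE, a registered range that began inside
 * vm_page_array but extended past it had only its tail, beyond
 * ptoa(first_page + vm_page_array_size), malloc()ed and inserted into the
 * tree by vm_phys_fictitious_reg_range(); the unregister path above
 * therefore advances "start" to that address before the tree lookup, so the
 * entry found and the pages freed are exactly the ones that were allocated.
 */
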
/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

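/*
 * The buddy computation above is pure address arithmetic: assuming 4 KB
 * pages, an order-0 page at physical address 0x5000 has its buddy at
 * 0x5000 ^ 0x1000 == 0x4000.  If that buddy is itself a free order-0 page,
 * the two merge; masking off the low-order bits then yields 0x4000 as the
 * start of the combined order-1 block, and the loop retries at the next
 * order.
 */
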
10835c1f2cc4SAlan Cox */ 10845c1f2cc4SAlan Cox order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1, 10855c1f2cc4SAlan Cox VM_NFREEORDER - 1); 10865c1f2cc4SAlan Cox n = 1 << order; 10875c1f2cc4SAlan Cox if (npages < n) 10885c1f2cc4SAlan Cox break; 10895c1f2cc4SAlan Cox vm_phys_free_pages(m, order); 10905c1f2cc4SAlan Cox m += n; 10915c1f2cc4SAlan Cox } 10925c1f2cc4SAlan Cox /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */ 10935c1f2cc4SAlan Cox for (; npages > 0; npages -= n) { 10945c1f2cc4SAlan Cox order = flsl(npages) - 1; 10955c1f2cc4SAlan Cox n = 1 << order; 10965c1f2cc4SAlan Cox vm_phys_free_pages(m, order); 10975c1f2cc4SAlan Cox m += n; 10985c1f2cc4SAlan Cox } 10995c1f2cc4SAlan Cox } 11005c1f2cc4SAlan Cox 11015c1f2cc4SAlan Cox /* 1102c869e672SAlan Cox * Scan physical memory between the specified addresses "low" and "high" for a 1103c869e672SAlan Cox * run of contiguous physical pages that satisfy the specified conditions, and 1104c869e672SAlan Cox * return the lowest page in the run. The specified "alignment" determines 1105c869e672SAlan Cox * the alignment of the lowest physical page in the run. If the specified 1106c869e672SAlan Cox * "boundary" is non-zero, then the run of physical pages cannot span a 1107c869e672SAlan Cox * physical address that is a multiple of "boundary". 1108c869e672SAlan Cox * 1109c869e672SAlan Cox * "npages" must be greater than zero. Both "alignment" and "boundary" must 1110c869e672SAlan Cox * be a power of two. 1111c869e672SAlan Cox */ 1112c869e672SAlan Cox vm_page_t 11133f289c3fSJeff Roberson vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high, 1114c869e672SAlan Cox u_long alignment, vm_paddr_t boundary, int options) 1115c869e672SAlan Cox { 1116c869e672SAlan Cox vm_paddr_t pa_end; 1117c869e672SAlan Cox vm_page_t m_end, m_run, m_start; 1118c869e672SAlan Cox struct vm_phys_seg *seg; 1119c869e672SAlan Cox int segind; 1120c869e672SAlan Cox 1121c869e672SAlan Cox KASSERT(npages > 0, ("npages is 0")); 1122c869e672SAlan Cox KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 1123c869e672SAlan Cox KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 1124c869e672SAlan Cox if (low >= high) 1125c869e672SAlan Cox return (NULL); 1126c869e672SAlan Cox for (segind = 0; segind < vm_phys_nsegs; segind++) { 1127c869e672SAlan Cox seg = &vm_phys_segs[segind]; 11283f289c3fSJeff Roberson if (seg->domain != domain) 11293f289c3fSJeff Roberson continue; 1130c869e672SAlan Cox if (seg->start >= high) 1131c869e672SAlan Cox break; 1132c869e672SAlan Cox if (low >= seg->end) 1133c869e672SAlan Cox continue; 1134c869e672SAlan Cox if (low <= seg->start) 1135c869e672SAlan Cox m_start = seg->first_page; 1136c869e672SAlan Cox else 1137c869e672SAlan Cox m_start = &seg->first_page[atop(low - seg->start)]; 1138c869e672SAlan Cox if (high < seg->end) 1139c869e672SAlan Cox pa_end = high; 1140c869e672SAlan Cox else 1141c869e672SAlan Cox pa_end = seg->end; 1142c869e672SAlan Cox if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages)) 1143c869e672SAlan Cox continue; 1144c869e672SAlan Cox m_end = &seg->first_page[atop(pa_end - seg->start)]; 1145c869e672SAlan Cox m_run = vm_page_scan_contig(npages, m_start, m_end, 1146c869e672SAlan Cox alignment, boundary, options); 1147c869e672SAlan Cox if (m_run != NULL) 1148c869e672SAlan Cox return (m_run); 1149c869e672SAlan Cox } 1150c869e672SAlan Cox return (NULL); 1151c869e672SAlan Cox } 1152c869e672SAlan Cox 1153c869e672SAlan Cox /* 115411752d88SAlan Cox * Set the pool for a contiguous, power of 
11015c1f2cc4SAlan Cox /*
1102c869e672SAlan Cox  * Scan physical memory between the specified addresses "low" and "high" for a
1103c869e672SAlan Cox  * run of contiguous physical pages that satisfy the specified conditions, and
1104c869e672SAlan Cox  * return the lowest page in the run. The specified "alignment" determines
1105c869e672SAlan Cox  * the alignment of the lowest physical page in the run. If the specified
1106c869e672SAlan Cox  * "boundary" is non-zero, then the run of physical pages cannot span a
1107c869e672SAlan Cox  * physical address that is a multiple of "boundary".
1108c869e672SAlan Cox  *
1109c869e672SAlan Cox  * "npages" must be greater than zero. Both "alignment" and "boundary" must
1110c869e672SAlan Cox  * be a power of two.
1111c869e672SAlan Cox  */
1112c869e672SAlan Cox vm_page_t
11133f289c3fSJeff Roberson vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1114c869e672SAlan Cox     u_long alignment, vm_paddr_t boundary, int options)
1115c869e672SAlan Cox {
1116c869e672SAlan Cox     vm_paddr_t pa_end;
1117c869e672SAlan Cox     vm_page_t m_end, m_run, m_start;
1118c869e672SAlan Cox     struct vm_phys_seg *seg;
1119c869e672SAlan Cox     int segind;
1120c869e672SAlan Cox
1121c869e672SAlan Cox     KASSERT(npages > 0, ("npages is 0"));
1122c869e672SAlan Cox     KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1123c869e672SAlan Cox     KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1124c869e672SAlan Cox     if (low >= high)
1125c869e672SAlan Cox         return (NULL);
1126c869e672SAlan Cox     for (segind = 0; segind < vm_phys_nsegs; segind++) {
1127c869e672SAlan Cox         seg = &vm_phys_segs[segind];
11283f289c3fSJeff Roberson         if (seg->domain != domain)
11293f289c3fSJeff Roberson             continue;
1130c869e672SAlan Cox         if (seg->start >= high)
1131c869e672SAlan Cox             break;
1132c869e672SAlan Cox         if (low >= seg->end)
1133c869e672SAlan Cox             continue;
1134c869e672SAlan Cox         if (low <= seg->start)
1135c869e672SAlan Cox             m_start = seg->first_page;
1136c869e672SAlan Cox         else
1137c869e672SAlan Cox             m_start = &seg->first_page[atop(low - seg->start)];
1138c869e672SAlan Cox         if (high < seg->end)
1139c869e672SAlan Cox             pa_end = high;
1140c869e672SAlan Cox         else
1141c869e672SAlan Cox             pa_end = seg->end;
1142c869e672SAlan Cox         if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
1143c869e672SAlan Cox             continue;
1144c869e672SAlan Cox         m_end = &seg->first_page[atop(pa_end - seg->start)];
1145c869e672SAlan Cox         m_run = vm_page_scan_contig(npages, m_start, m_end,
1146c869e672SAlan Cox             alignment, boundary, options);
1147c869e672SAlan Cox         if (m_run != NULL)
1148c869e672SAlan Cox             return (m_run);
1149c869e672SAlan Cox     }
1150c869e672SAlan Cox     return (NULL);
1151c869e672SAlan Cox }
1152c869e672SAlan Cox
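/*
 * For illustration: given a segment spanning [0x100000, 0x200000), a scan
 * with low == 0x180000 and high == 0x400000 is clipped to that segment,
 * so only the pages backing [0x180000, 0x200000) are examined.  Segments
 * that end at or below "low" are skipped, and the loop stops at the first
 * segment that starts at or above "high".
 */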
1153c869e672SAlan Cox /*
115411752d88SAlan Cox  * Set the pool for a contiguous, power of two-sized set of physical pages.
115511752d88SAlan Cox  */
11567bfda801SAlan Cox void
115711752d88SAlan Cox vm_phys_set_pool(int pool, vm_page_t m, int order)
115811752d88SAlan Cox {
115911752d88SAlan Cox     vm_page_t m_tmp;
116011752d88SAlan Cox
116111752d88SAlan Cox     for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
116211752d88SAlan Cox         m_tmp->pool = pool;
116311752d88SAlan Cox }
116411752d88SAlan Cox
116511752d88SAlan Cox /*
11669742373aSAlan Cox  * Search for the given physical page "m" in the free lists. If the search
11679742373aSAlan Cox  * succeeds, remove "m" from the free lists and return TRUE. Otherwise, return
11689742373aSAlan Cox  * FALSE, indicating that "m" is not in the free lists.
11697bfda801SAlan Cox  *
11707bfda801SAlan Cox  * The free page queues must be locked.
11717bfda801SAlan Cox  */
1172e35395ceSAlan Cox boolean_t
11737bfda801SAlan Cox vm_phys_unfree_page(vm_page_t m)
11747bfda801SAlan Cox {
11757bfda801SAlan Cox     struct vm_freelist *fl;
11767bfda801SAlan Cox     struct vm_phys_seg *seg;
11777bfda801SAlan Cox     vm_paddr_t pa, pa_half;
11787bfda801SAlan Cox     vm_page_t m_set, m_tmp;
11797bfda801SAlan Cox     int order;
11807bfda801SAlan Cox
11817bfda801SAlan Cox     /*
11827bfda801SAlan Cox      * First, find the contiguous, power of two-sized set of free
11837bfda801SAlan Cox      * physical pages containing the given physical page "m" and
11847bfda801SAlan Cox      * assign it to "m_set".
11857bfda801SAlan Cox      */
11867bfda801SAlan Cox     seg = &vm_phys_segs[m->segind];
1187e2068d0bSJeff Roberson     vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
11887bfda801SAlan Cox     for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1189bc8794a1SAlan Cox         order < VM_NFREEORDER - 1; ) {
11907bfda801SAlan Cox         order++;
11917bfda801SAlan Cox         pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
11922fbced65SAlan Cox         if (pa >= seg->start)
11937bfda801SAlan Cox             m_set = &seg->first_page[atop(pa - seg->start)];
1194e35395ceSAlan Cox         else
1195e35395ceSAlan Cox             return (FALSE);
11967bfda801SAlan Cox     }
1197e35395ceSAlan Cox     if (m_set->order < order)
1198e35395ceSAlan Cox         return (FALSE);
1199e35395ceSAlan Cox     if (m_set->order == VM_NFREEORDER)
1200e35395ceSAlan Cox         return (FALSE);
12017bfda801SAlan Cox     KASSERT(m_set->order < VM_NFREEORDER,
12027bfda801SAlan Cox         ("vm_phys_unfree_page: page %p has unexpected order %d",
12037bfda801SAlan Cox         m_set, m_set->order));
12047bfda801SAlan Cox
12057bfda801SAlan Cox     /*
12067bfda801SAlan Cox      * Next, remove "m_set" from the free lists. Finally, extract
12077bfda801SAlan Cox      * "m" from "m_set" using an iterative algorithm: While "m_set"
12087bfda801SAlan Cox      * is larger than a page, shrink "m_set" by returning the half
12097bfda801SAlan Cox      * of "m_set" that does not contain "m" to the free lists.
12107bfda801SAlan Cox      */
12117bfda801SAlan Cox     fl = (*seg->free_queues)[m_set->pool];
12127bfda801SAlan Cox     order = m_set->order;
12137e226537SAttilio Rao     vm_freelist_rem(fl, m_set, order);
12147bfda801SAlan Cox     while (order > 0) {
12157bfda801SAlan Cox         order--;
12167bfda801SAlan Cox         pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
12177bfda801SAlan Cox         if (m->phys_addr < pa_half)
12187bfda801SAlan Cox             m_tmp = &seg->first_page[atop(pa_half - seg->start)];
12197bfda801SAlan Cox         else {
12207bfda801SAlan Cox             m_tmp = m_set;
12217bfda801SAlan Cox             m_set = &seg->first_page[atop(pa_half - seg->start)];
12227bfda801SAlan Cox         }
12237e226537SAttilio Rao         vm_freelist_add(fl, m_tmp, order, 0);
12247bfda801SAlan Cox     }
12257bfda801SAlan Cox     KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
1226e35395ceSAlan Cox     return (TRUE);
12277bfda801SAlan Cox }
12287bfda801SAlan Cox
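/*
 * Worked example (assuming 4 KB pages): if "m" is the page at 0x13000 and
 * lies within a free order-2 block starting at 0x10000, the first loop
 * masks the address to 0x12000 and then to 0x10000, where the recorded
 * order (2) matches and the search stops.  The second loop removes that
 * order-2 block and returns the halves that do not contain "m": first the
 * order-1 block at 0x10000, then the order-0 page at 0x12000, leaving
 * only the page at 0x13000 withdrawn from the free lists.
 */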
12297bfda801SAlan Cox /*
12302f9f48d6SAlan Cox  * Allocate a contiguous set of physical pages of the given size
12312f9f48d6SAlan Cox  * "npages" from the free lists. All of the physical pages must be at
12322f9f48d6SAlan Cox  * or above the given physical address "low" and below the given
12332f9f48d6SAlan Cox  * physical address "high". The given value "alignment" determines the
12342f9f48d6SAlan Cox  * alignment of the first physical page in the set. If the given value
12352f9f48d6SAlan Cox  * "boundary" is non-zero, then the set of physical pages cannot cross
12362f9f48d6SAlan Cox  * any physical address boundary that is a multiple of that value. Both
123711752d88SAlan Cox  * "alignment" and "boundary" must be a power of two.
123811752d88SAlan Cox  */
123911752d88SAlan Cox vm_page_t
1240ef435ae7SJeff Roberson vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
12415c1f2cc4SAlan Cox     u_long alignment, vm_paddr_t boundary)
124211752d88SAlan Cox {
1243c869e672SAlan Cox     vm_paddr_t pa_end, pa_start;
1244c869e672SAlan Cox     vm_page_t m_run;
1245c869e672SAlan Cox     struct vm_phys_seg *seg;
1246ef435ae7SJeff Roberson     int segind;
124711752d88SAlan Cox
1248c869e672SAlan Cox     KASSERT(npages > 0, ("npages is 0"));
1249c869e672SAlan Cox     KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1250c869e672SAlan Cox     KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1251e2068d0bSJeff Roberson     vm_domain_free_assert_locked(VM_DOMAIN(domain));
1252c869e672SAlan Cox     if (low >= high)
1253c869e672SAlan Cox         return (NULL);
1254c869e672SAlan Cox     m_run = NULL;
1255477bffbeSAlan Cox     for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1256c869e672SAlan Cox         seg = &vm_phys_segs[segind];
1257477bffbeSAlan Cox         if (seg->start >= high || seg->domain != domain)
125811752d88SAlan Cox             continue;
1259477bffbeSAlan Cox         if (low >= seg->end)
1260477bffbeSAlan Cox             break;
1261c869e672SAlan Cox         if (low <= seg->start)
1262c869e672SAlan Cox             pa_start = seg->start;
1263c869e672SAlan Cox         else
1264c869e672SAlan Cox             pa_start = low;
1265c869e672SAlan Cox         if (high < seg->end)
1266c869e672SAlan Cox             pa_end = high;
1267c869e672SAlan Cox         else
1268c869e672SAlan Cox             pa_end = seg->end;
1269c869e672SAlan Cox         if (pa_end - pa_start < ptoa(npages))
1270c869e672SAlan Cox             continue;
1271c869e672SAlan Cox         m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
1272c869e672SAlan Cox             alignment, boundary);
1273c869e672SAlan Cox         if (m_run != NULL)
1274c869e672SAlan Cox             break;
1275c869e672SAlan Cox     }
1276c869e672SAlan Cox     return (m_run);
1277c869e672SAlan Cox }
127811752d88SAlan Cox
127911752d88SAlan Cox /*
1280c869e672SAlan Cox  * Allocate a run of contiguous physical pages from the free list for the
1281c869e672SAlan Cox  * specified segment.
1282c869e672SAlan Cox  */
1283c869e672SAlan Cox static vm_page_t
1284c869e672SAlan Cox vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
1285c869e672SAlan Cox     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1286c869e672SAlan Cox {
1287c869e672SAlan Cox     struct vm_freelist *fl;
1288c869e672SAlan Cox     vm_paddr_t pa, pa_end, size;
1289c869e672SAlan Cox     vm_page_t m, m_ret;
1290c869e672SAlan Cox     u_long npages_end;
1291c869e672SAlan Cox     int oind, order, pind;
1292c869e672SAlan Cox
1293c869e672SAlan Cox     KASSERT(npages > 0, ("npages is 0"));
1294c869e672SAlan Cox     KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1295c869e672SAlan Cox     KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1296e2068d0bSJeff Roberson     vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1297c869e672SAlan Cox     /* Compute the queue that is the best fit for npages. */
12989161b4deSAlan Cox     order = flsl(npages - 1);
1299c869e672SAlan Cox     /* Search for a run satisfying the specified conditions. */
1300c869e672SAlan Cox     size = npages << PAGE_SHIFT;
1301c869e672SAlan Cox     for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
1302c869e672SAlan Cox         oind++) {
1303c869e672SAlan Cox         for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1304c869e672SAlan Cox             fl = (*seg->free_queues)[pind];
13055cd29d0fSMark Johnston             TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
1306c869e672SAlan Cox                 /*
130711752d88SAlan Cox                  * Is the size of this allocation request
130811752d88SAlan Cox                  * larger than the largest block size?
130911752d88SAlan Cox                  */
131011752d88SAlan Cox                 if (order >= VM_NFREEORDER) {
131111752d88SAlan Cox                     /*
1312c869e672SAlan Cox                      * Determine if a sufficient number of
1313c869e672SAlan Cox                      * subsequent blocks to satisfy the
1314c869e672SAlan Cox                      * allocation request are free.
131511752d88SAlan Cox                      */
131611752d88SAlan Cox                     pa = VM_PAGE_TO_PHYS(m_ret);
1317c869e672SAlan Cox                     pa_end = pa + size;
131879e9552eSKonstantin Belousov                     if (pa_end < pa)
131979e9552eSKonstantin Belousov                         continue;
132011752d88SAlan Cox                     for (;;) {
1321c869e672SAlan Cox                         pa += 1 << (PAGE_SHIFT +
1322c869e672SAlan Cox                             VM_NFREEORDER - 1);
1323c869e672SAlan Cox                         if (pa >= pa_end ||
1324c869e672SAlan Cox                             pa < seg->start ||
132511752d88SAlan Cox                             pa >= seg->end)
132611752d88SAlan Cox                             break;
1327c869e672SAlan Cox                         m = &seg->first_page[atop(pa -
1328c869e672SAlan Cox                             seg->start)];
1329c869e672SAlan Cox                         if (m->order != VM_NFREEORDER -
1330c869e672SAlan Cox                             1)
133111752d88SAlan Cox                             break;
133211752d88SAlan Cox                     }
1333c869e672SAlan Cox                     /* If not, go to the next block. */
1334c869e672SAlan Cox                     if (pa < pa_end)
133511752d88SAlan Cox                         continue;
133611752d88SAlan Cox                 }
133711752d88SAlan Cox
133811752d88SAlan Cox                 /*
1339c869e672SAlan Cox                  * Determine if the blocks are within the
1340c869e672SAlan Cox                  * given range, satisfy the given alignment,
1341c869e672SAlan Cox                  * and do not cross the given boundary.
134211752d88SAlan Cox                  */
134311752d88SAlan Cox                 pa = VM_PAGE_TO_PHYS(m_ret);
1344c869e672SAlan Cox                 pa_end = pa + size;
1345d9c9c81cSPedro F. Giffuni                 if (pa >= low && pa_end <= high &&
1346d9c9c81cSPedro F. Giffuni                     (pa & (alignment - 1)) == 0 &&
1347d9c9c81cSPedro F. Giffuni                     rounddown2(pa ^ (pa_end - 1), boundary) == 0)
134811752d88SAlan Cox                     goto done;
134911752d88SAlan Cox             }
135011752d88SAlan Cox         }
135111752d88SAlan Cox     }
135211752d88SAlan Cox     return (NULL);
135311752d88SAlan Cox done:
135411752d88SAlan Cox     for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
135511752d88SAlan Cox         fl = (*seg->free_queues)[m->pool];
13569161b4deSAlan Cox         vm_freelist_rem(fl, m, oind);
13579161b4deSAlan Cox         if (m->pool != VM_FREEPOOL_DEFAULT)
13589161b4deSAlan Cox             vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
135911752d88SAlan Cox     }
13605c1f2cc4SAlan Cox     /* Return excess pages to the free lists. */
13619161b4deSAlan Cox     npages_end = roundup2(npages, 1 << oind);
13627493904eSAlan Cox     if (npages < npages_end) {
13637493904eSAlan Cox         fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
13647493904eSAlan Cox         vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
13657493904eSAlan Cox     }
136611752d88SAlan Cox     return (m_ret);
136711752d88SAlan Cox }
136811752d88SAlan Cox
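/*
 * A note on the boundary test above: rounddown2(pa ^ (pa_end - 1),
 * boundary) is zero exactly when the first and last bytes of the run
 * agree in every bit at or above the boundary bit, that is, when the run
 * fits within a single boundary-sized window.  For example, with
 * boundary == 0x10000 (64 KB), the run [0x4000, 0xC000) passes because
 * 0x4000 ^ 0xBFFF == 0xFFFF rounds down to zero, while [0xC000, 0x14000)
 * fails because it crosses the boundary at 0x10000.  A boundary of zero
 * always passes, since rounddown2(x, 0) masks away every bit.  The
 * alignment test simply requires "pa" to be a multiple of "alignment".
 */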
136911752d88SAlan Cox #ifdef DDB
137011752d88SAlan Cox /*
137111752d88SAlan Cox  * Show the number of physical pages in each of the free lists.
137211752d88SAlan Cox  */
137311752d88SAlan Cox DB_SHOW_COMMAND(freepages, db_show_freepages)
137411752d88SAlan Cox {
137511752d88SAlan Cox     struct vm_freelist *fl;
13767e226537SAttilio Rao     int flind, oind, pind, dom;
137711752d88SAlan Cox
13787e226537SAttilio Rao     for (dom = 0; dom < vm_ndomains; dom++) {
13797e226537SAttilio Rao         db_printf("DOMAIN: %d\n", dom);
138011752d88SAlan Cox         for (flind = 0; flind < vm_nfreelists; flind++) {
138111752d88SAlan Cox             db_printf("FREE LIST %d:\n"
138211752d88SAlan Cox                 "\n ORDER (SIZE) | NUMBER"
138311752d88SAlan Cox                 "\n ", flind);
138411752d88SAlan Cox             for (pind = 0; pind < VM_NFREEPOOL; pind++)
138511752d88SAlan Cox                 db_printf(" | POOL %d", pind);
138611752d88SAlan Cox             db_printf("\n-- ");
138711752d88SAlan Cox             for (pind = 0; pind < VM_NFREEPOOL; pind++)
138811752d88SAlan Cox                 db_printf("-- -- ");
138911752d88SAlan Cox             db_printf("--\n");
139011752d88SAlan Cox             for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
139111752d88SAlan Cox                 db_printf(" %2.2d (%6.6dK)", oind,
139211752d88SAlan Cox                     1 << (PAGE_SHIFT - 10 + oind));
139311752d88SAlan Cox                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
13947e226537SAttilio Rao                     fl = vm_phys_free_queues[dom][flind][pind];
139511752d88SAlan Cox                     db_printf(" | %6.6d", fl[oind].lcnt);
139611752d88SAlan Cox                 }
139711752d88SAlan Cox                 db_printf("\n");
139811752d88SAlan Cox             }
139911752d88SAlan Cox             db_printf("\n");
140011752d88SAlan Cox         }
14017e226537SAttilio Rao         db_printf("\n");
14027e226537SAttilio Rao     }
140311752d88SAlan Cox }
140411752d88SAlan Cox #endif
1405
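/*
 * Usage note: DB_SHOW_COMMAND(freepages, ...) makes this report available
 * from the in-kernel debugger as "show freepages".  For each domain and
 * free list it prints one row per order, largest first, giving the block
 * size in kilobytes and the number of free blocks in each pool.
 */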