xref: /freebsd/sys/vm/vm_phys.c (revision 69cbb18746b69cbcdf79f1728d0435a1c86fff58)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");
_Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
    "vm_paddr_t too big for ffsll, flsll.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;

static int numa_disabled;
static SYSCTL_NODE(_vm, OID_AUTO, numa, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "NUMA options");
SYSCTL_INT(_vm_numa, OID_AUTO, disabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numa_disabled, 0, "NUMA-awareness in the allocators is disabled");
#endif

int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;
static struct vm_phys_seg vm_phys_early_segs[8];
static int vm_phys_early_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(&vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
    [VM_NFREEORDER_MAX];

static int __read_mostly vm_nfreelists;

/*
 * These "avail lists" are globals used to communicate boot-time physical
 * memory layout to other parts of the kernel.  Each physically contiguous
 * region of memory is defined by a start address at an even index and an
 * end address at the following odd index.  Each list is terminated by a
 * pair of zero entries.
 *
 * dump_avail tells the dump code what regions to include in a crash dump, and
 * phys_avail is all of the remaining physical memory that is available for
 * the vm system.
 *
 * Initially dump_avail and phys_avail are identical.  Boot time memory
 * allocations remove extents from phys_avail that may still be included
 * in dumps.
 */
vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];

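/*
 * Illustrative sketch (editorial note, not part of this file): how a
 * consumer walks the {start, end} pairs described above.  The helper
 * name is hypothetical.
 */
#if 0
static vm_paddr_t
phys_avail_total_bytes(void)
{
	vm_paddr_t total;
	int i;

	/* Regions occupy even/odd index pairs; a zero entry terminates. */
	total = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		total += phys_avail[i + 1] - phys_avail[i];
	return (total);
}
#endif
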
/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_free, "A",
    "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_segs, "A",
    "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_locality, "A",
    "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

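/*
 * Worked example (editorial note) of the comparator's two modes: an
 * RB_FIND() key of { .start = pa, .end = 0 } is a point lookup that
 * returns the segment whose [start, end) range contains pa, while a key
 * with a nonzero end is treated as a candidate range for insertion and
 * must not overlap any registered segment.
 */
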
int
vm_phys_domain_match(int prefer __numa_used, vm_paddr_t low __numa_used,
    vm_paddr_t high __numa_used)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f __numa_used, int t __numa_used)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

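/*
 * Worked example (editorial note; boundary values assume a typical amd64
 * configuration): with VM_LOWMEM_BOUNDARY at 16 MB and VM_DMA32_BOUNDARY
 * at 4 GB, a segment [8 MB, 6 GB) is registered as three segments,
 * [8 MB, 16 MB), [16 MB, 4 GB), and [4 GB, 6 GB), so that no segment
 * spans a free list boundary.
 */
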
487271f0f12SAlan Cox /*
48811752d88SAlan Cox  * Initialize the physical memory allocator.
489d866a563SAlan Cox  *
490d866a563SAlan Cox  * Requires that vm_page_array is initialized!
49111752d88SAlan Cox  */
49211752d88SAlan Cox void
49311752d88SAlan Cox vm_phys_init(void)
49411752d88SAlan Cox {
49511752d88SAlan Cox 	struct vm_freelist *fl;
49672aebdd7SAlan Cox 	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
49752526922SJohn Baldwin #if defined(VM_DMA32_NPAGES_THRESHOLD) || defined(VM_PHYSSEG_SPARSE)
498d866a563SAlan Cox 	u_long npages;
49952526922SJohn Baldwin #endif
500d866a563SAlan Cox 	int dom, flind, freelist, oind, pind, segind;
50111752d88SAlan Cox 
502d866a563SAlan Cox 	/*
503d866a563SAlan Cox 	 * Compute the number of free lists, and generate the mapping from the
504d866a563SAlan Cox 	 * manifest constants VM_FREELIST_* to the free list indices.
505d866a563SAlan Cox 	 *
506d866a563SAlan Cox 	 * Initially, the entries of vm_freelist_to_flind[] are set to either
507d866a563SAlan Cox 	 * 0 or 1 to indicate which free lists should be created.
508d866a563SAlan Cox 	 */
50952526922SJohn Baldwin #ifdef	VM_DMA32_NPAGES_THRESHOLD
510d866a563SAlan Cox 	npages = 0;
51152526922SJohn Baldwin #endif
512d866a563SAlan Cox 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
513d866a563SAlan Cox 		seg = &vm_phys_segs[segind];
514d866a563SAlan Cox #ifdef	VM_FREELIST_LOWMEM
515d866a563SAlan Cox 		if (seg->end <= VM_LOWMEM_BOUNDARY)
516d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
517d866a563SAlan Cox 		else
518d866a563SAlan Cox #endif
519d866a563SAlan Cox #ifdef	VM_FREELIST_DMA32
520d866a563SAlan Cox 		if (
521d866a563SAlan Cox #ifdef	VM_DMA32_NPAGES_THRESHOLD
522d866a563SAlan Cox 		    /*
523d866a563SAlan Cox 		     * Create the DMA32 free list only if the amount of
524d866a563SAlan Cox 		     * physical memory above physical address 4G exceeds the
525d866a563SAlan Cox 		     * given threshold.
526d866a563SAlan Cox 		     */
527d866a563SAlan Cox 		    npages > VM_DMA32_NPAGES_THRESHOLD &&
528d866a563SAlan Cox #endif
529d866a563SAlan Cox 		    seg->end <= VM_DMA32_BOUNDARY)
530d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
531d866a563SAlan Cox 		else
532d866a563SAlan Cox #endif
533d866a563SAlan Cox 		{
53452526922SJohn Baldwin #ifdef	VM_DMA32_NPAGES_THRESHOLD
535d866a563SAlan Cox 			npages += atop(seg->end - seg->start);
53652526922SJohn Baldwin #endif
537d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
538d866a563SAlan Cox 		}
539d866a563SAlan Cox 	}
540d866a563SAlan Cox 	/* Change each entry into a running total of the free lists. */
541d866a563SAlan Cox 	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
542d866a563SAlan Cox 		vm_freelist_to_flind[freelist] +=
543d866a563SAlan Cox 		    vm_freelist_to_flind[freelist - 1];
544d866a563SAlan Cox 	}
545d866a563SAlan Cox 	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
546d866a563SAlan Cox 	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
547d866a563SAlan Cox 	/* Change each entry into a free list index. */
548d866a563SAlan Cox 	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
549d866a563SAlan Cox 		vm_freelist_to_flind[freelist]--;
550d866a563SAlan Cox 
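	/*
	 * Worked example (editorial note; constant ordering assumes an
	 * amd64-style layout): with all three lists populated, the 0/1
	 * flags indexed by { DEFAULT, DMA32, LOWMEM } start as { 1, 1, 1 },
	 * become running totals { 1, 2, 3 } (so vm_nfreelists is 3), and
	 * after the decrement map those manifest constants to flinds
	 * { 0, 1, 2 }.
	 */
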
	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Coalesce physical memory segments that are contiguous and share the
	 * same per-domain free queues.
	 */
	prev_seg = vm_phys_segs;
	seg = &vm_phys_segs[1];
	end_seg = &vm_phys_segs[vm_phys_nsegs];
	while (seg < end_seg) {
		if (prev_seg->end == seg->start &&
		    prev_seg->free_queues == seg->free_queues) {
			prev_seg->end = seg->end;
			KASSERT(prev_seg->domain == seg->domain,
			    ("vm_phys_init: free queues cannot span domains"));
			vm_phys_nsegs--;
			end_seg--;
			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
				*tmp_seg = *(tmp_seg + 1);
		} else {
			prev_seg = seg;
			seg++;
		}
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Register info about the NUMA topology of the system.
 *
 * Invoked by platform-dependent code prior to vm_phys_init().
 */
void
vm_phys_register_domains(int ndomains __numa_used,
    struct mem_affinity *affinity __numa_used, int *locality __numa_used)
{
#ifdef NUMA
	int i;

	/*
	 * For now the only override value that we support is 1, which
	 * effectively disables NUMA-awareness in the allocators.
	 */
	TUNABLE_INT_FETCH("vm.numa.disabled", &numa_disabled);
	if (numa_disabled)
		ndomains = 1;

	if (ndomains > 1) {
		vm_ndomains = ndomains;
		mem_affinity = affinity;
		mem_locality = locality;
	}

	for (i = 0; i < vm_ndomains; i++)
		DOMAINSET_SET(i, &all_domains);
#endif
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective is to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, tail);
	}
}

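/*
 * Worked example (editorial note): splitting an order-3 chunk at m down
 * to order 0 frees the buddies m[4] (order 2), m[2] (order 1), and m[1]
 * (order 0), leaving the single page m for the caller.
 */
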
static void
vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{
	KASSERT(order >= 0 && order < VM_NFREEORDER,
	    ("%s: invalid order %d", __func__, order));

	vm_freelist_add(fl, m, order, tail);
}

/*
 * Add the physical pages [m, m + npages) at the beginning of a power-of-two
 * aligned and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective is to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	int order;

	KASSERT(npages == 0 ||
	    (VM_PAGE_TO_PHYS(m) &
	    ((PAGE_SIZE << ilog2(npages)) - 1)) == 0,
	    ("%s: page %p and npages %u are misaligned",
	    __func__, m, npages));
	while (npages > 0) {
		KASSERT(m->order == VM_NFREEORDER,
		    ("%s: page %p has unexpected order %d",
		    __func__, m, m->order));
		order = ilog2(npages);
		KASSERT(order < VM_NFREEORDER,
		    ("%s: order %d is out of range", __func__, order));
		vm_phys_enq_chunk(fl, m, order, tail);
		m += 1 << order;
		npages -= 1 << order;
	}
}

/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective is to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * If npages is zero, this function does nothing and ignores the physical page
 * parameter m.  Otherwise, the physical page m's buddy must not be free.
 */
static vm_page_t
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	int order;

	KASSERT(npages == 0 ||
	    ((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
	    ((PAGE_SIZE << ilog2(npages)) - 1)) == 0,
	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
	    m, npages));
	while (npages > 0) {
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_enq_range: page %p has unexpected order %d",
		    m, m->order));
		order = ffs(npages) - 1;
		vm_phys_enq_chunk(fl, m, order, tail);
		m += 1 << order;
		npages -= 1 << order;
	}
	return (m);
}

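/*
 * Worked example (editorial note) contrasting the two enqueue helpers:
 * for npages = 11 (binary 1011), vm_phys_enq_beg() carves chunks of 8,
 * 2, and 1 pages (largest first, via ilog2()), while vm_phys_enq_range()
 * carves chunks of 1, 2, and 8 pages (smallest first, via ffs()),
 * matching the alignment guaranteed at the low and high ends of the
 * power-of-two set, respectively.
 */
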
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
static void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = i + (1 << oind);
				end = imin(npages, avail);
				while (i < end)
					ma[i++] = m++;
				if (i == npages) {
					/*
					 * Return excess pages to fl.  Its order
					 * [0, oind) queues are empty.
					 */
					vm_phys_enq_range(m, avail - i, fl, 1);
					return (npages);
				}
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = i + (1 << oind);
					end = imin(npages, avail);
					while (i < end)
						ma[i++] = m++;
					if (i == npages) {
						/*
						 * Return excess pages to fl.
						 * Its order [0, oind) queues
						 * are empty.
						 */
						vm_phys_enq_range(m, avail - i,
						    fl, 1);
						return (npages);
					}
				}
			}
		}
	}
	return (i);
}

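/*
 * Usage sketch (editorial note; hypothetical caller, not part of this
 * file): the domain free lock must be held across the call, and fewer
 * pages than requested may be returned.
 */
#if 0
static int
example_grab_pages(void)
{
	vm_page_t ma[16];
	int got;

	vm_domain_free_lock(VM_DOMAIN(0));
	got = vm_phys_alloc_npages(0, VM_FREEPOOL_DEFAULT, 16, ma);
	vm_domain_free_unlock(VM_DOMAIN(0));
	return (got);
}
#endif
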
85189ea39a7SAlan Cox /*
85211752d88SAlan Cox  * Allocate a contiguous, power of two-sized set of physical pages
853e3537f92SDoug Moore  * from the free lists.
8548941dc44SAlan Cox  *
8558941dc44SAlan Cox  * The free page queues must be locked.
85611752d88SAlan Cox  */
85711752d88SAlan Cox vm_page_t
858ef435ae7SJeff Roberson vm_phys_alloc_pages(int domain, int pool, int order)
85911752d88SAlan Cox {
86049ca10d4SJayachandran C. 	vm_page_t m;
8610db2102aSMichael Zhilin 	int freelist;
86249ca10d4SJayachandran C. 
8630db2102aSMichael Zhilin 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
8640db2102aSMichael Zhilin 		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
86549ca10d4SJayachandran C. 		if (m != NULL)
86649ca10d4SJayachandran C. 			return (m);
86749ca10d4SJayachandran C. 	}
86849ca10d4SJayachandran C. 	return (NULL);
86949ca10d4SJayachandran C. }
87049ca10d4SJayachandran C. 
87149ca10d4SJayachandran C. /*
872d866a563SAlan Cox  * Allocate a contiguous, power of two-sized set of physical pages from the
873d866a563SAlan Cox  * specified free list.  The free list must be specified using one of the
874e3537f92SDoug Moore  * manifest constants VM_FREELIST_*.
875d866a563SAlan Cox  *
876d866a563SAlan Cox  * The free page queues must be locked.
87749ca10d4SJayachandran C.  */
87849ca10d4SJayachandran C. vm_page_t
8790db2102aSMichael Zhilin vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
88049ca10d4SJayachandran C. {
881ef435ae7SJeff Roberson 	struct vm_freelist *alt, *fl;
88211752d88SAlan Cox 	vm_page_t m;
8830db2102aSMichael Zhilin 	int oind, pind, flind;
88411752d88SAlan Cox 
885ef435ae7SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
886ef435ae7SJeff Roberson 	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
887ef435ae7SJeff Roberson 	    domain));
8880db2102aSMichael Zhilin 	KASSERT(freelist < VM_NFREELIST,
889d866a563SAlan Cox 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
8905be93778SAndrew Turner 	    freelist));
89111752d88SAlan Cox 	KASSERT(pool < VM_NFREEPOOL,
89249ca10d4SJayachandran C. 	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
89311752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
89449ca10d4SJayachandran C. 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
8956520495aSAdrian Chadd 
8960db2102aSMichael Zhilin 	flind = vm_freelist_to_flind[freelist];
8970db2102aSMichael Zhilin 	/* Check if freelist is present */
8980db2102aSMichael Zhilin 	if (flind < 0)
8990db2102aSMichael Zhilin 		return (NULL);
9000db2102aSMichael Zhilin 
901e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
9027e226537SAttilio Rao 	fl = &vm_phys_free_queues[domain][flind][pool][0];
90311752d88SAlan Cox 	for (oind = order; oind < VM_NFREEORDER; oind++) {
90411752d88SAlan Cox 		m = TAILQ_FIRST(&fl[oind].pl);
90511752d88SAlan Cox 		if (m != NULL) {
9067e226537SAttilio Rao 			vm_freelist_rem(fl, m, oind);
907370a338aSAlan Cox 			/* The order [order, oind) queues are empty. */
908370a338aSAlan Cox 			vm_phys_split_pages(m, oind, fl, order, 1);
90911752d88SAlan Cox 			return (m);
91011752d88SAlan Cox 		}
91111752d88SAlan Cox 	}
91211752d88SAlan Cox 
91311752d88SAlan Cox 	/*
91411752d88SAlan Cox 	 * The given pool was empty.  Find the largest
91511752d88SAlan Cox 	 * contiguous, power-of-two-sized set of pages in any
91611752d88SAlan Cox 	 * pool.  Transfer these pages to the given pool, and
91711752d88SAlan Cox 	 * use them to satisfy the allocation.
91811752d88SAlan Cox 	 */
91911752d88SAlan Cox 	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
92011752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
9217e226537SAttilio Rao 			alt = &vm_phys_free_queues[domain][flind][pind][0];
92211752d88SAlan Cox 			m = TAILQ_FIRST(&alt[oind].pl);
92311752d88SAlan Cox 			if (m != NULL) {
9247e226537SAttilio Rao 				vm_freelist_rem(alt, m, oind);
925e3537f92SDoug Moore 				vm_phys_set_pool(pool, m, oind);
926370a338aSAlan Cox 				/* The order [order, oind) queues are empty. */
927370a338aSAlan Cox 				vm_phys_split_pages(m, oind, fl, order, 1);
92811752d88SAlan Cox 				return (m);
92911752d88SAlan Cox 			}
93011752d88SAlan Cox 		}
93111752d88SAlan Cox 	}
93211752d88SAlan Cox 	return (NULL);
93311752d88SAlan Cox }
93411752d88SAlan Cox 
93511752d88SAlan Cox /*
936*69cbb187SMark Johnston  * Find the vm_page corresponding to the given physical address, which must lie
937*69cbb187SMark Johnston  * within the given physical memory segment.
938*69cbb187SMark Johnston  */
939*69cbb187SMark Johnston vm_page_t
940*69cbb187SMark Johnston vm_phys_seg_paddr_to_vm_page(struct vm_phys_seg *seg, vm_paddr_t pa)
941*69cbb187SMark Johnston {
942*69cbb187SMark Johnston 	KASSERT(pa >= seg->start && pa < seg->end,
943*69cbb187SMark Johnston 	    ("%s: pa %#jx is out of range", __func__, (uintmax_t)pa));
944*69cbb187SMark Johnston 
945*69cbb187SMark Johnston 	return (&seg->first_page[atop(pa - seg->start)]);
946*69cbb187SMark Johnston }
947*69cbb187SMark Johnston 
948*69cbb187SMark Johnston /*
94911752d88SAlan Cox  * Find the vm_page corresponding to the given physical address.
95011752d88SAlan Cox  */
95111752d88SAlan Cox vm_page_t
95211752d88SAlan Cox vm_phys_paddr_to_vm_page(vm_paddr_t pa)
95311752d88SAlan Cox {
95411752d88SAlan Cox 	struct vm_phys_seg *seg;
95511752d88SAlan Cox 
9569e817428SDoug Moore 	if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
957*69cbb187SMark Johnston 		return (vm_phys_seg_paddr_to_vm_page(seg, pa));
958f06a3a36SAndrew Thompson 	return (NULL);
95911752d88SAlan Cox }
96011752d88SAlan Cox 
961b6de32bdSKonstantin Belousov vm_page_t
962b6de32bdSKonstantin Belousov vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
963b6de32bdSKonstantin Belousov {
96438d6b2dcSRoger Pau Monné 	struct vm_phys_fictitious_seg tmp, *seg;
965b6de32bdSKonstantin Belousov 	vm_page_t m;
966b6de32bdSKonstantin Belousov 
967b6de32bdSKonstantin Belousov 	m = NULL;
96838d6b2dcSRoger Pau Monné 	tmp.start = pa;
96938d6b2dcSRoger Pau Monné 	tmp.end = 0;
97038d6b2dcSRoger Pau Monné 
97138d6b2dcSRoger Pau Monné 	rw_rlock(&vm_phys_fictitious_reg_lock);
97238d6b2dcSRoger Pau Monné 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
97338d6b2dcSRoger Pau Monné 	rw_runlock(&vm_phys_fictitious_reg_lock);
97438d6b2dcSRoger Pau Monné 	if (seg == NULL)
97538d6b2dcSRoger Pau Monné 		return (NULL);
97638d6b2dcSRoger Pau Monné 
977b6de32bdSKonstantin Belousov 	m = &seg->first_page[atop(pa - seg->start)];
97838d6b2dcSRoger Pau Monné 	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));
97938d6b2dcSRoger Pau Monné 
980b6de32bdSKonstantin Belousov 	return (m);
981b6de32bdSKonstantin Belousov }
982b6de32bdSKonstantin Belousov 
9835ebe728dSRoger Pau Monné static inline void
9845ebe728dSRoger Pau Monné vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
9855ebe728dSRoger Pau Monné     long page_count, vm_memattr_t memattr)
9865ebe728dSRoger Pau Monné {
9875ebe728dSRoger Pau Monné 	long i;
9885ebe728dSRoger Pau Monné 
989f93f7cf1SMark Johnston 	bzero(range, page_count * sizeof(*range));
9905ebe728dSRoger Pau Monné 	for (i = 0; i < page_count; i++) {
9915ebe728dSRoger Pau Monné 		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
9925ebe728dSRoger Pau Monné 		range[i].oflags &= ~VPO_UNMANAGED;
9935ebe728dSRoger Pau Monné 		range[i].busy_lock = VPB_UNBUSIED;
9945ebe728dSRoger Pau Monné 	}
9955ebe728dSRoger Pau Monné }
9965ebe728dSRoger Pau Monné 
997b6de32bdSKonstantin Belousov int
998b6de32bdSKonstantin Belousov vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
999b6de32bdSKonstantin Belousov     vm_memattr_t memattr)
1000b6de32bdSKonstantin Belousov {
1001b6de32bdSKonstantin Belousov 	struct vm_phys_fictitious_seg *seg;
1002b6de32bdSKonstantin Belousov 	vm_page_t fp;
10035ebe728dSRoger Pau Monné 	long page_count;
1004b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
10055ebe728dSRoger Pau Monné 	long pi, pe;
10065ebe728dSRoger Pau Monné 	long dpage_count;
1007b6de32bdSKonstantin Belousov #endif
1008b6de32bdSKonstantin Belousov 
10095ebe728dSRoger Pau Monné 	KASSERT(start < end,
10105ebe728dSRoger Pau Monné 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
10115ebe728dSRoger Pau Monné 	    (uintmax_t)start, (uintmax_t)end));
10125ebe728dSRoger Pau Monné 
1013b6de32bdSKonstantin Belousov 	page_count = (end - start) / PAGE_SIZE;
1014b6de32bdSKonstantin Belousov 
1015b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
1016b6de32bdSKonstantin Belousov 	pi = atop(start);
10175ebe728dSRoger Pau Monné 	pe = atop(end);
10185ebe728dSRoger Pau Monné 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1019b6de32bdSKonstantin Belousov 		fp = &vm_page_array[pi - first_page];
10205ebe728dSRoger Pau Monné 		if ((pe - first_page) > vm_page_array_size) {
10215ebe728dSRoger Pau Monné 			/*
10225ebe728dSRoger Pau Monné 			 * We have a segment that starts inside
10235ebe728dSRoger Pau Monné 			 * of vm_page_array, but ends outside of it.
10245ebe728dSRoger Pau Monné 			 *
10255ebe728dSRoger Pau Monné 			 * Use vm_page_array pages for those that are
10265ebe728dSRoger Pau Monné 			 * inside of the vm_page_array range, and
10275ebe728dSRoger Pau Monné 			 * allocate the remaining ones.
10285ebe728dSRoger Pau Monné 			 */
10295ebe728dSRoger Pau Monné 			dpage_count = vm_page_array_size - (pi - first_page);
10305ebe728dSRoger Pau Monné 			vm_phys_fictitious_init_range(fp, start, dpage_count,
10315ebe728dSRoger Pau Monné 			    memattr);
10325ebe728dSRoger Pau Monné 			page_count -= dpage_count;
10335ebe728dSRoger Pau Monné 			start += ptoa(dpage_count);
10345ebe728dSRoger Pau Monné 			goto alloc;
10355ebe728dSRoger Pau Monné 		}
10365ebe728dSRoger Pau Monné 		/*
10375ebe728dSRoger Pau Monné 		 * We can allocate the full range from vm_page_array,
10385ebe728dSRoger Pau Monné 		 * so there's no need to register the range in the tree.
10395ebe728dSRoger Pau Monné 		 */
10405ebe728dSRoger Pau Monné 		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
10415ebe728dSRoger Pau Monné 		return (0);
10425ebe728dSRoger Pau Monné 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
10435ebe728dSRoger Pau Monné 		/*
10445ebe728dSRoger Pau Monné 		 * We have a segment that ends inside of vm_page_array,
10455ebe728dSRoger Pau Monné 		 * but starts outside of it.
10465ebe728dSRoger Pau Monné 		 */
10475ebe728dSRoger Pau Monné 		fp = &vm_page_array[0];
10485ebe728dSRoger Pau Monné 		dpage_count = pe - first_page;
10495ebe728dSRoger Pau Monné 		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
10505ebe728dSRoger Pau Monné 		    memattr);
10515ebe728dSRoger Pau Monné 		end -= ptoa(dpage_count);
10525ebe728dSRoger Pau Monné 		page_count -= dpage_count;
10535ebe728dSRoger Pau Monné 		goto alloc;
10545ebe728dSRoger Pau Monné 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
10555ebe728dSRoger Pau Monné 		/*
		 * Trying to register a fictitious range that extends both
		 * below and above vm_page_array.
10585ebe728dSRoger Pau Monné 		 */
10595ebe728dSRoger Pau Monné 		return (EINVAL);
10605ebe728dSRoger Pau Monné 	} else {
10615ebe728dSRoger Pau Monné alloc:
1062b6de32bdSKonstantin Belousov #endif
1063b6de32bdSKonstantin Belousov 		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
1064f93f7cf1SMark Johnston 		    M_WAITOK);
10655ebe728dSRoger Pau Monné #ifdef VM_PHYSSEG_DENSE
1066b6de32bdSKonstantin Belousov 	}
10675ebe728dSRoger Pau Monné #endif
10685ebe728dSRoger Pau Monné 	vm_phys_fictitious_init_range(fp, start, page_count, memattr);
106938d6b2dcSRoger Pau Monné 
107038d6b2dcSRoger Pau Monné 	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
1071b6de32bdSKonstantin Belousov 	seg->start = start;
1072b6de32bdSKonstantin Belousov 	seg->end = end;
1073b6de32bdSKonstantin Belousov 	seg->first_page = fp;
107438d6b2dcSRoger Pau Monné 
107538d6b2dcSRoger Pau Monné 	rw_wlock(&vm_phys_fictitious_reg_lock);
107638d6b2dcSRoger Pau Monné 	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
107738d6b2dcSRoger Pau Monné 	rw_wunlock(&vm_phys_fictitious_reg_lock);
107838d6b2dcSRoger Pau Monné 
1079b6de32bdSKonstantin Belousov 	return (0);
1080b6de32bdSKonstantin Belousov }
1081b6de32bdSKonstantin Belousov 
1082b6de32bdSKonstantin Belousov void
1083b6de32bdSKonstantin Belousov vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
1084b6de32bdSKonstantin Belousov {
108538d6b2dcSRoger Pau Monné 	struct vm_phys_fictitious_seg *seg, tmp;
1086b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
10875ebe728dSRoger Pau Monné 	long pi, pe;
1088b6de32bdSKonstantin Belousov #endif
1089b6de32bdSKonstantin Belousov 
10905ebe728dSRoger Pau Monné 	KASSERT(start < end,
10915ebe728dSRoger Pau Monné 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
10925ebe728dSRoger Pau Monné 	    (uintmax_t)start, (uintmax_t)end));
10935ebe728dSRoger Pau Monné 
1094b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
1095b6de32bdSKonstantin Belousov 	pi = atop(start);
10965ebe728dSRoger Pau Monné 	pe = atop(end);
10975ebe728dSRoger Pau Monné 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
10985ebe728dSRoger Pau Monné 		if ((pe - first_page) <= vm_page_array_size) {
10995ebe728dSRoger Pau Monné 			/*
11005ebe728dSRoger Pau Monné 			 * This segment was allocated using vm_page_array
11015ebe728dSRoger Pau Monné 			 * only; there's nothing to do, since those pages
11025ebe728dSRoger Pau Monné 			 * were never added to the tree.
11035ebe728dSRoger Pau Monné 			 */
11045ebe728dSRoger Pau Monné 			return;
11055ebe728dSRoger Pau Monné 		}
11065ebe728dSRoger Pau Monné 		/*
11075ebe728dSRoger Pau Monné 		 * We have a segment that starts inside
11085ebe728dSRoger Pau Monné 		 * of vm_page_array, but ends outside of it.
11095ebe728dSRoger Pau Monné 		 *
11105ebe728dSRoger Pau Monné 		 * Calculate how many pages were added to the
11115ebe728dSRoger Pau Monné 		 * tree and free them.
11125ebe728dSRoger Pau Monné 		 */
11135ebe728dSRoger Pau Monné 		start = ptoa(first_page + vm_page_array_size);
11145ebe728dSRoger Pau Monné 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
11155ebe728dSRoger Pau Monné 		/*
11165ebe728dSRoger Pau Monné 		 * We have a segment that ends inside of vm_page_array,
11175ebe728dSRoger Pau Monné 		 * but starts outside of it.
11185ebe728dSRoger Pau Monné 		 */
11195ebe728dSRoger Pau Monné 		end = ptoa(first_page);
11205ebe728dSRoger Pau Monné 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
11215ebe728dSRoger Pau Monné 		/* Since it's not possible to register such a range, panic. */
11225ebe728dSRoger Pau Monné 		panic(
11235ebe728dSRoger Pau Monné 		    "Unregistering a never-registered fictitious range [%#jx:%#jx]",
11245ebe728dSRoger Pau Monné 		    (uintmax_t)start, (uintmax_t)end);
11255ebe728dSRoger Pau Monné 	}
1126b6de32bdSKonstantin Belousov #endif
112738d6b2dcSRoger Pau Monné 	tmp.start = start;
112838d6b2dcSRoger Pau Monné 	tmp.end = 0;
1129b6de32bdSKonstantin Belousov 
113038d6b2dcSRoger Pau Monné 	rw_wlock(&vm_phys_fictitious_reg_lock);
113138d6b2dcSRoger Pau Monné 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
113238d6b2dcSRoger Pau Monné 	if (seg == NULL || seg->start != start || seg->end != end) {
113338d6b2dcSRoger Pau Monné 		rw_wunlock(&vm_phys_fictitious_reg_lock);
113438d6b2dcSRoger Pau Monné 		panic(
113538d6b2dcSRoger Pau Monné 		    "Unregistering a never-registered fictitious range [%#jx:%#jx]",
113638d6b2dcSRoger Pau Monné 		    (uintmax_t)start, (uintmax_t)end);
113738d6b2dcSRoger Pau Monné 	}
113838d6b2dcSRoger Pau Monné 	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
113938d6b2dcSRoger Pau Monné 	rw_wunlock(&vm_phys_fictitious_reg_lock);
114038d6b2dcSRoger Pau Monné 	free(seg->first_page, M_FICT_PAGES);
114138d6b2dcSRoger Pau Monné 	free(seg, M_FICT_PAGES);
1142b6de32bdSKonstantin Belousov }
1143b6de32bdSKonstantin Belousov 
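/*
 * Example (an illustrative sketch with hypothetical addresses, not code
 * from this file): a driver that needs vm_page_t structures for a 1 MB
 * device aperture could pair the two functions above like this:
 *
 *	if (vm_phys_fictitious_reg_range(0xd0000000, 0xd0100000,
 *	    VM_MEMATTR_UNCACHEABLE) != 0)
 *		return (ENXIO);
 *	...
 *	vm_phys_fictitious_unreg_range(0xd0000000, 0xd0100000);
 */
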
114411752d88SAlan Cox /*
1145e3537f92SDoug Moore  * Free a contiguous, power-of-two-sized set of physical pages.
11468941dc44SAlan Cox  *
11478941dc44SAlan Cox  * The free page queues must be locked.
114811752d88SAlan Cox  */
114911752d88SAlan Cox void
115011752d88SAlan Cox vm_phys_free_pages(vm_page_t m, int order)
115111752d88SAlan Cox {
115211752d88SAlan Cox 	struct vm_freelist *fl;
115311752d88SAlan Cox 	struct vm_phys_seg *seg;
11545c1f2cc4SAlan Cox 	vm_paddr_t pa;
115511752d88SAlan Cox 	vm_page_t m_buddy;
115611752d88SAlan Cox 
115711752d88SAlan Cox 	KASSERT(m->order == VM_NFREEORDER,
11583921068fSJeff Roberson 	    ("vm_phys_free_pages: page %p has unexpected order %d",
11593921068fSJeff Roberson 	    m, m->order));
1160e3537f92SDoug Moore 	KASSERT(m->pool < VM_NFREEPOOL,
1161e3537f92SDoug Moore 	    ("vm_phys_free_pages: page %p has unexpected pool %d",
1162e3537f92SDoug Moore 	    m, m->pool));
116311752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
11648941dc44SAlan Cox 	    ("vm_phys_free_pages: order %d is out of range", order));
116511752d88SAlan Cox 	seg = &vm_phys_segs[m->segind];
1166e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
11675c1f2cc4SAlan Cox 	if (order < VM_NFREEORDER - 1) {
11685c1f2cc4SAlan Cox 		pa = VM_PAGE_TO_PHYS(m);
11695c1f2cc4SAlan Cox 		do {
11705c1f2cc4SAlan Cox 			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
11715c1f2cc4SAlan Cox 			if (pa < seg->start || pa >= seg->end)
117211752d88SAlan Cox 				break;
1173*69cbb187SMark Johnston 			m_buddy = vm_phys_seg_paddr_to_vm_page(seg, pa);
117411752d88SAlan Cox 			if (m_buddy->order != order)
117511752d88SAlan Cox 				break;
117611752d88SAlan Cox 			fl = (*seg->free_queues)[m_buddy->pool];
11777e226537SAttilio Rao 			vm_freelist_rem(fl, m_buddy, order);
1178e3537f92SDoug Moore 			if (m_buddy->pool != m->pool)
1179e3537f92SDoug Moore 				vm_phys_set_pool(m->pool, m_buddy, order);
118011752d88SAlan Cox 			order++;
11815c1f2cc4SAlan Cox 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
1182*69cbb187SMark Johnston 			m = vm_phys_seg_paddr_to_vm_page(seg, pa);
11835c1f2cc4SAlan Cox 		} while (order < VM_NFREEORDER - 1);
118411752d88SAlan Cox 	}
1185e3537f92SDoug Moore 	fl = (*seg->free_queues)[m->pool];
11867e226537SAttilio Rao 	vm_freelist_add(fl, m, order, 1);
118711752d88SAlan Cox }
118811752d88SAlan Cox 
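/*
 * The buddy computation in vm_phys_free_pages() is pure address
 * arithmetic: the buddy of a 2^order-page block at physical address pa
 * is found by flipping one address bit.  A worked example, assuming
 * 4 KB pages (PAGE_SHIFT == 12) and order 2 (a 16 KB block):
 *
 *	pa    = 0x00008000
 *	buddy = pa ^ ((vm_paddr_t)1 << (12 + 2)) = 0x0000c000
 *
 * and, if the buddy is free, the merged order-3 block starts at
 *
 *	pa & ~(((vm_paddr_t)1 << (12 + 3)) - 1) = 0x00008000.
 */
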
118911752d88SAlan Cox /*
1190e3537f92SDoug Moore  * Free a contiguous, arbitrarily sized set of physical pages, without
1191e3537f92SDoug Moore  * merging across set boundaries.
1192b8590daeSDoug Moore  *
1193b8590daeSDoug Moore  * The free page queues must be locked.
1194b8590daeSDoug Moore  */
1195b8590daeSDoug Moore void
1196e3537f92SDoug Moore vm_phys_enqueue_contig(vm_page_t m, u_long npages)
1197b8590daeSDoug Moore {
1198b8590daeSDoug Moore 	struct vm_freelist *fl;
1199b8590daeSDoug Moore 	struct vm_phys_seg *seg;
1200b8590daeSDoug Moore 	vm_page_t m_end;
1201c9b06fa5SDoug Moore 	vm_paddr_t diff, lo;
1202b8590daeSDoug Moore 	int order;
1203b8590daeSDoug Moore 
1204b8590daeSDoug Moore 	/*
1205b8590daeSDoug Moore 	 * Avoid unnecessary coalescing by freeing the pages in the largest
1206b8590daeSDoug Moore 	 * possible power-of-two-sized subsets.
1207b8590daeSDoug Moore 	 */
1208b8590daeSDoug Moore 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1209b8590daeSDoug Moore 	seg = &vm_phys_segs[m->segind];
1210e3537f92SDoug Moore 	fl = (*seg->free_queues)[m->pool];
1211b8590daeSDoug Moore 	m_end = m + npages;
1212b8590daeSDoug Moore 	/* Free blocks of increasing size. */
12136dd15b7aSDoug Moore 	lo = atop(VM_PAGE_TO_PHYS(m));
1214c9b06fa5SDoug Moore 	if (m < m_end &&
1215c9b06fa5SDoug Moore 	    (diff = lo ^ (lo + npages - 1)) != 0) {
1216543d55d7SDoug Moore 		order = min(ilog2(diff), VM_NFREEORDER - 1);
1217e3537f92SDoug Moore 		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
12185c1f2cc4SAlan Cox 	}
1219c9b06fa5SDoug Moore 
1220b8590daeSDoug Moore 	/* Free blocks of maximum size. */
1221c9b06fa5SDoug Moore 	order = VM_NFREEORDER - 1;
1222b8590daeSDoug Moore 	while (m + (1 << order) <= m_end) {
1223b8590daeSDoug Moore 		KASSERT(seg == &vm_phys_segs[m->segind],
1224b8590daeSDoug Moore 		    ("%s: page range [%p,%p) spans multiple segments",
1225b8590daeSDoug Moore 		    __func__, m_end - npages, m));
1226d7ec4a88SMark Johnston 		vm_phys_enq_chunk(fl, m, order, 1);
1227b8590daeSDoug Moore 		m += 1 << order;
1228b8590daeSDoug Moore 	}
1229b8590daeSDoug Moore 	/* Free blocks of diminishing size. */
1230e3537f92SDoug Moore 	vm_phys_enq_beg(m, m_end - m, fl, 1);
1231b8590daeSDoug Moore }
1232b8590daeSDoug Moore 
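/*
 * A worked example of the decomposition above (assuming VM_NFREEORDER
 * is large enough that it does not cap the order): enqueueing
 * npages == 13 starting at page frame lo == 3, i.e. frames [3, 16),
 * frees an order-0 block at frame 3, an order-2 block at frame 4, and
 * an order-3 block at frame 8, so every block is naturally aligned for
 * its order and no buddy merging is attempted.
 */
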
1233b8590daeSDoug Moore /*
1234b8590daeSDoug Moore  * Free a contiguous, arbitrarily sized set of physical pages.
1235b8590daeSDoug Moore  *
1236b8590daeSDoug Moore  * The free page queues must be locked.
1237b8590daeSDoug Moore  */
1238b8590daeSDoug Moore void
1239b8590daeSDoug Moore vm_phys_free_contig(vm_page_t m, u_long npages)
1240b8590daeSDoug Moore {
12416dd15b7aSDoug Moore 	vm_paddr_t lo;
1242b8590daeSDoug Moore 	vm_page_t m_start, m_end;
12436dd15b7aSDoug Moore 	unsigned max_order, order_start, order_end;
1244b8590daeSDoug Moore 
1245b8590daeSDoug Moore 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1246b8590daeSDoug Moore 
12476dd15b7aSDoug Moore 	lo = atop(VM_PAGE_TO_PHYS(m));
1248543d55d7SDoug Moore 	max_order = min(ilog2(lo ^ (lo + npages)), VM_NFREEORDER - 1);
1249e3537f92SDoug Moore 
1250e3537f92SDoug Moore 	m_start = m;
12516dd15b7aSDoug Moore 	order_start = ffsll(lo) - 1;
12526dd15b7aSDoug Moore 	if (order_start < max_order)
1253b8590daeSDoug Moore 		m_start += 1 << order_start;
1254e3537f92SDoug Moore 	m_end = m + npages;
12556dd15b7aSDoug Moore 	order_end = ffsll(lo + npages) - 1;
12566dd15b7aSDoug Moore 	if (order_end < max_order)
1257b8590daeSDoug Moore 		m_end -= 1 << order_end;
1258b8590daeSDoug Moore 	/*
1259b8590daeSDoug Moore 	 * Avoid unnecessary coalescing by freeing the pages at the start and
1260b8590daeSDoug Moore 	 * end of the range last.
1261b8590daeSDoug Moore 	 */
1262b8590daeSDoug Moore 	if (m_start < m_end)
1263e3537f92SDoug Moore 		vm_phys_enqueue_contig(m_start, m_end - m_start);
1264e3537f92SDoug Moore 	if (order_start < max_order)
1265b8590daeSDoug Moore 		vm_phys_free_pages(m, order_start);
1266e3537f92SDoug Moore 	if (order_end < max_order)
1267b8590daeSDoug Moore 		vm_phys_free_pages(m_end, order_end);
12685c1f2cc4SAlan Cox }
12695c1f2cc4SAlan Cox 
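/*
 * For example (a sketch, again assuming VM_NFREEORDER does not cap
 * max_order): freeing frames [3, 16) gives max_order = ilog2(3 ^ 16) =
 * 4, order_start = 0, and order_end = 4.  Only order_start < max_order,
 * so the interior [4, 16) is enqueued first without merging, and the
 * single order-0 page at frame 3 is freed last via vm_phys_free_pages(),
 * where it may coalesce with already-free neighbors.
 */
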
12705c1f2cc4SAlan Cox /*
12719e817428SDoug Moore  * Identify the first address range within segment segind or greater
12729e817428SDoug Moore  * that matches the domain, lies within the low/high range, and has
12739e817428SDoug Moore  * enough pages.  Return -1 if there is none.
1274c869e672SAlan Cox  */
12759e817428SDoug Moore int
12769e817428SDoug Moore vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
12779e817428SDoug Moore     u_long npages, vm_paddr_t low, vm_paddr_t high)
1278c869e672SAlan Cox {
12799e817428SDoug Moore 	vm_paddr_t pa_end, pa_start;
12809e817428SDoug Moore 	struct vm_phys_seg *end_seg, *seg;
1281c869e672SAlan Cox 
12829e817428SDoug Moore 	KASSERT(npages > 0, ("npages is zero"));
128358d42717SAlan Cox 	KASSERT(domain >= 0 && domain < vm_ndomains, ("domain out of range"));
12849e817428SDoug Moore 	end_seg = &vm_phys_segs[vm_phys_nsegs];
12859e817428SDoug Moore 	for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) {
12863f289c3fSJeff Roberson 		if (seg->domain != domain)
12873f289c3fSJeff Roberson 			continue;
1288c869e672SAlan Cox 		if (seg->start >= high)
12899e817428SDoug Moore 			return (-1);
12909e817428SDoug Moore 		pa_start = MAX(low, seg->start);
12919e817428SDoug Moore 		pa_end = MIN(high, seg->end);
12929e817428SDoug Moore 		if (pa_end - pa_start < ptoa(npages))
1293c869e672SAlan Cox 			continue;
1294*69cbb187SMark Johnston 		bounds[0] = vm_phys_seg_paddr_to_vm_page(seg, pa_start);
1295*69cbb187SMark Johnston 		bounds[1] = vm_phys_seg_paddr_to_vm_page(seg, pa_end);
12969e817428SDoug Moore 		return (seg - vm_phys_segs);
1297c869e672SAlan Cox 	}
12989e817428SDoug Moore 	return (-1);
1299c869e672SAlan Cox }
1300c869e672SAlan Cox 
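/*
 * An illustrative caller loop (a sketch, not a fragment of this file):
 * walk every matching range in a domain by restarting the search at the
 * next segment index:
 *
 *	vm_page_t bounds[2];
 *	int segind = 0;
 *
 *	while ((segind = vm_phys_find_range(bounds, segind, domain,
 *	    npages, low, high)) != -1) {
 *		... examine pages in [bounds[0], bounds[1]) ...
 *		segind++;
 *	}
 */
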
1301c869e672SAlan Cox /*
13029742373aSAlan Cox  * Search for the given physical page "m" in the free lists.  If the search
13036062d9faSMark Johnston  * succeeds, remove "m" from the free lists and return true.  Otherwise, return
13046062d9faSMark Johnston  * false, indicating that "m" is not in the free lists.
13057bfda801SAlan Cox  *
13067bfda801SAlan Cox  * The free page queues must be locked.
13077bfda801SAlan Cox  */
13086062d9faSMark Johnston bool
13097bfda801SAlan Cox vm_phys_unfree_page(vm_page_t m)
13107bfda801SAlan Cox {
13117bfda801SAlan Cox 	struct vm_freelist *fl;
13127bfda801SAlan Cox 	struct vm_phys_seg *seg;
13137bfda801SAlan Cox 	vm_paddr_t pa, pa_half;
13147bfda801SAlan Cox 	vm_page_t m_set, m_tmp;
1315e3537f92SDoug Moore 	int order;
13167bfda801SAlan Cox 
13177bfda801SAlan Cox 	/*
13187bfda801SAlan Cox 	 * First, find the contiguous, power-of-two-sized set of free
13197bfda801SAlan Cox 	 * physical pages containing the given physical page "m" and
13207bfda801SAlan Cox 	 * assign it to "m_set".
13217bfda801SAlan Cox 	 */
13227bfda801SAlan Cox 	seg = &vm_phys_segs[m->segind];
1323e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
13247bfda801SAlan Cox 	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1325bc8794a1SAlan Cox 	    order < VM_NFREEORDER - 1; ) {
13267bfda801SAlan Cox 		order++;
13277bfda801SAlan Cox 		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
13282fbced65SAlan Cox 		if (pa >= seg->start)
1329*69cbb187SMark Johnston 			m_set = vm_phys_seg_paddr_to_vm_page(seg, pa);
1330e35395ceSAlan Cox 		else
13316062d9faSMark Johnston 			return (false);
13327bfda801SAlan Cox 	}
1333e35395ceSAlan Cox 	if (m_set->order < order)
13346062d9faSMark Johnston 		return (false);
1335e35395ceSAlan Cox 	if (m_set->order == VM_NFREEORDER)
13366062d9faSMark Johnston 		return (false);
13377bfda801SAlan Cox 	KASSERT(m_set->order < VM_NFREEORDER,
13387bfda801SAlan Cox 	    ("vm_phys_unfree_page: page %p has unexpected order %d",
13397bfda801SAlan Cox 	    m_set, m_set->order));
13407bfda801SAlan Cox 
13417bfda801SAlan Cox 	/*
13427bfda801SAlan Cox 	 * Next, remove "m_set" from the free lists.  Finally, extract
13437bfda801SAlan Cox 	 * "m" from "m_set" using an iterative algorithm: While "m_set"
13447bfda801SAlan Cox 	 * is larger than a page, shrink "m_set" by returning the half
13457bfda801SAlan Cox 	 * of "m_set" that does not contain "m" to the free lists.
13467bfda801SAlan Cox 	 */
1347e3537f92SDoug Moore 	fl = (*seg->free_queues)[m_set->pool];
13487bfda801SAlan Cox 	order = m_set->order;
13497e226537SAttilio Rao 	vm_freelist_rem(fl, m_set, order);
13507bfda801SAlan Cox 	while (order > 0) {
13517bfda801SAlan Cox 		order--;
13527bfda801SAlan Cox 		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
13537bfda801SAlan Cox 		if (m->phys_addr < pa_half)
1354*69cbb187SMark Johnston 			m_tmp = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
13557bfda801SAlan Cox 		else {
13567bfda801SAlan Cox 			m_tmp = m_set;
1357*69cbb187SMark Johnston 			m_set = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
13587bfda801SAlan Cox 		}
13597e226537SAttilio Rao 		vm_freelist_add(fl, m_tmp, order, 0);
13607bfda801SAlan Cox 	}
13617bfda801SAlan Cox 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
13626062d9faSMark Johnston 	return (true);
13637bfda801SAlan Cox }
13647bfda801SAlan Cox 
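/*
 * A worked example of the halving loop above: suppose "m" is frame 5
 * and it lies inside a free order-3 block at frame 0 (frames 0-7).  The
 * loop returns the unused halves to the free lists: an order-2 block at
 * frame 0, an order-1 block at frame 6, and an order-0 block at frame
 * 4, leaving exactly frame 5 removed.
 */
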
13657bfda801SAlan Cox /*
13662a4897bdSDoug Moore  * Find a run of contiguous physical pages, meeting alignment requirements, from
13672a4897bdSDoug Moore  * a list of max-sized page blocks, where we need at least two consecutive
13682a4897bdSDoug Moore  * blocks to satisfy the (large) page request.
1369fa8a6585SDoug Moore  */
1370fa8a6585SDoug Moore static vm_page_t
13712a4897bdSDoug Moore vm_phys_find_freelist_contig(struct vm_freelist *fl, u_long npages,
1372fa8a6585SDoug Moore     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1373fa8a6585SDoug Moore {
1374fa8a6585SDoug Moore 	struct vm_phys_seg *seg;
13752a4897bdSDoug Moore 	vm_page_t m, m_iter, m_ret;
13762a4897bdSDoug Moore 	vm_paddr_t max_size, size;
13772a4897bdSDoug Moore 	int max_order;
1378fa8a6585SDoug Moore 
13792a4897bdSDoug Moore 	max_order = VM_NFREEORDER - 1;
1380fa8a6585SDoug Moore 	size = npages << PAGE_SHIFT;
13812a4897bdSDoug Moore 	max_size = (vm_paddr_t)1 << (PAGE_SHIFT + max_order);
13822a4897bdSDoug Moore 	KASSERT(size > max_size, ("size is too small"));
13832a4897bdSDoug Moore 
1384fa8a6585SDoug Moore 	/*
13852a4897bdSDoug Moore 	 * In order to avoid examining any free max-sized page block more than
13862a4897bdSDoug Moore 	 * twice, identify the ones that are first in a physically-contiguous
13872a4897bdSDoug Moore 	 * sequence of such blocks, and only for those walk the sequence to
13882a4897bdSDoug Moore 	 * check if there are enough free blocks starting at a properly aligned
13892a4897bdSDoug Moore 	 * block.  Thus, no block is checked for free-ness more than twice.
1390fa8a6585SDoug Moore 	 */
13912a4897bdSDoug Moore 	TAILQ_FOREACH(m, &fl[max_order].pl, listq) {
13922a4897bdSDoug Moore 		/*
13932a4897bdSDoug Moore 		 * Skip m unless it is first in a sequence of free max page
13942a4897bdSDoug Moore 		 * blocks >= low in its segment.
13952a4897bdSDoug Moore 		 */
13962a4897bdSDoug Moore 		seg = &vm_phys_segs[m->segind];
13972a4897bdSDoug Moore 		if (VM_PAGE_TO_PHYS(m) < MAX(low, seg->start))
13982a4897bdSDoug Moore 			continue;
13992a4897bdSDoug Moore 		if (VM_PAGE_TO_PHYS(m) >= max_size &&
14002a4897bdSDoug Moore 		    VM_PAGE_TO_PHYS(m) - max_size >= MAX(low, seg->start) &&
14012a4897bdSDoug Moore 		    max_order == m[-1 << max_order].order)
1402fa8a6585SDoug Moore 			continue;
1403fa8a6585SDoug Moore 
1404fa8a6585SDoug Moore 		/*
14052a4897bdSDoug Moore 		 * Advance m_ret from m to the first of the sequence, if any,
14062a4897bdSDoug Moore 		 * that satisfies alignment conditions and might leave enough
14072a4897bdSDoug Moore 		 * space.
1408fa8a6585SDoug Moore 		 */
14092a4897bdSDoug Moore 		m_ret = m;
14102a4897bdSDoug Moore 		while (!vm_addr_ok(VM_PAGE_TO_PHYS(m_ret),
14112a4897bdSDoug Moore 		    size, alignment, boundary) &&
14122a4897bdSDoug Moore 		    VM_PAGE_TO_PHYS(m_ret) + size <= MIN(high, seg->end) &&
14132a4897bdSDoug Moore 		    max_order == m_ret[1 << max_order].order)
14142a4897bdSDoug Moore 			m_ret += 1 << max_order;
14152a4897bdSDoug Moore 
14162a4897bdSDoug Moore 		/*
14172a4897bdSDoug Moore 		 * Skip m unless some block m_ret in the sequence is properly
14182a4897bdSDoug Moore 		 * aligned and begins a run of enough pages that stays below
14192a4897bdSDoug Moore 		 * high and within the same segment.
14202a4897bdSDoug Moore 		 */
14212a4897bdSDoug Moore 		if (VM_PAGE_TO_PHYS(m_ret) + size > MIN(high, seg->end))
1422fa8a6585SDoug Moore 			continue;
1423fa8a6585SDoug Moore 
1424fa8a6585SDoug Moore 		/*
14252a4897bdSDoug Moore 		 * Skip m unless the blocks to allocate starting at m_ret are
14262a4897bdSDoug Moore 		 * all free.
1427fa8a6585SDoug Moore 		 */
14282a4897bdSDoug Moore 		for (m_iter = m_ret;
14292a4897bdSDoug Moore 		    m_iter < m_ret + npages && max_order == m_iter->order;
14302a4897bdSDoug Moore 		    m_iter += 1 << max_order) {
1431fa8a6585SDoug Moore 		}
14322a4897bdSDoug Moore 		if (m_iter < m_ret + npages)
1433fa8a6585SDoug Moore 			continue;
1434fa8a6585SDoug Moore 		return (m_ret);
1435fa8a6585SDoug Moore 	}
1436fa8a6585SDoug Moore 	return (NULL);
1437fa8a6585SDoug Moore }
1438fa8a6585SDoug Moore 
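/*
 * For scale (using amd64 as an example, where VM_NFREEORDER is 13): the
 * largest buddy block is 4096 pages, or 16 MB with 4 KB pages, so a
 * request for, say, npages == 6000 can only be satisfied by two
 * physically consecutive free max-sized blocks, which is exactly what
 * vm_phys_find_freelist_contig() searches for.
 */
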
1439fa8a6585SDoug Moore /*
1440fa8a6585SDoug Moore  * Find a run of contiguous physical pages from the specified free list
1441342056faSDoug Moore  * table.
1442c869e672SAlan Cox  */
1443c869e672SAlan Cox static vm_page_t
1444fa8a6585SDoug Moore vm_phys_find_queues_contig(
1445342056faSDoug Moore     struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
1446342056faSDoug Moore     u_long npages, vm_paddr_t low, vm_paddr_t high,
1447342056faSDoug Moore     u_long alignment, vm_paddr_t boundary)
1448c869e672SAlan Cox {
1449c869e672SAlan Cox 	struct vm_freelist *fl;
1450fa8a6585SDoug Moore 	vm_page_t m_ret;
1451c869e672SAlan Cox 	vm_paddr_t pa, pa_end, size;
1452c869e672SAlan Cox 	int oind, order, pind;
1453c869e672SAlan Cox 
1454c869e672SAlan Cox 	KASSERT(npages > 0, ("npages is 0"));
1455c869e672SAlan Cox 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1456c869e672SAlan Cox 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1457c869e672SAlan Cox 	/* Compute the queue that is the best fit for npages. */
14589161b4deSAlan Cox 	order = flsl(npages - 1);
1459fa8a6585SDoug Moore 	/* Search for a large enough free block. */
1460c869e672SAlan Cox 	size = npages << PAGE_SHIFT;
1461fa8a6585SDoug Moore 	for (oind = order; oind < VM_NFREEORDER; oind++) {
1462c869e672SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1463342056faSDoug Moore 			fl = (*queues)[pind];
14645cd29d0fSMark Johnston 			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
1465c869e672SAlan Cox 				/*
1466da92ecbcSDoug Moore 				 * Determine if the address range starting at pa
1467da92ecbcSDoug Moore 				 * is within the given range, satisfies the
1468da92ecbcSDoug Moore 				 * given alignment, and does not cross the given
1469da92ecbcSDoug Moore 				 * boundary.
147011752d88SAlan Cox 				 */
1471da92ecbcSDoug Moore 				pa = VM_PAGE_TO_PHYS(m_ret);
1472da92ecbcSDoug Moore 				pa_end = pa + size;
1473fa8a6585SDoug Moore 				if (low <= pa && pa_end <= high &&
1474fa8a6585SDoug Moore 				    vm_addr_ok(pa, size, alignment, boundary))
1475fa8a6585SDoug Moore 					return (m_ret);
1476fa8a6585SDoug Moore 			}
1477fa8a6585SDoug Moore 		}
1478fa8a6585SDoug Moore 	}
1479da92ecbcSDoug Moore 	if (order < VM_NFREEORDER)
1480fa8a6585SDoug Moore 		return (NULL);
14812a4897bdSDoug Moore 	/* Search for a long-enough sequence of max-order blocks. */
1482fa8a6585SDoug Moore 	for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1483fa8a6585SDoug Moore 		fl = (*queues)[pind];
14842a4897bdSDoug Moore 		m_ret = vm_phys_find_freelist_contig(fl, npages,
1485fa8a6585SDoug Moore 		    low, high, alignment, boundary);
1486fa8a6585SDoug Moore 		if (m_ret != NULL)
1487fa8a6585SDoug Moore 			return (m_ret);
148811752d88SAlan Cox 	}
148911752d88SAlan Cox 	return (NULL);
149011752d88SAlan Cox }
149111752d88SAlan Cox 
1492b7565d44SJeff Roberson /*
1493342056faSDoug Moore  * Allocate a contiguous set of physical pages of the given size
1494342056faSDoug Moore  * "npages" from the free lists.  All of the physical pages must be at
1495342056faSDoug Moore  * or above the given physical address "low" and below the given
1496342056faSDoug Moore  * physical address "high".  The given value "alignment" determines the
1497342056faSDoug Moore  * alignment of the first physical page in the set.  If the given value
1498342056faSDoug Moore  * "boundary" is non-zero, then the set of physical pages cannot cross
1499342056faSDoug Moore  * any physical address boundary that is a multiple of that value.  Both
1500e3537f92SDoug Moore  * "alignment" and "boundary" must be powers of two.
1501342056faSDoug Moore  */
1502342056faSDoug Moore vm_page_t
1503342056faSDoug Moore vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1504342056faSDoug Moore     u_long alignment, vm_paddr_t boundary)
1505342056faSDoug Moore {
1506342056faSDoug Moore 	vm_paddr_t pa_end, pa_start;
1507fa8a6585SDoug Moore 	struct vm_freelist *fl;
1508fa8a6585SDoug Moore 	vm_page_t m, m_run;
1509342056faSDoug Moore 	struct vm_phys_seg *seg;
1510342056faSDoug Moore 	struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
1511fa8a6585SDoug Moore 	int oind, segind;
1512342056faSDoug Moore 
1513342056faSDoug Moore 	KASSERT(npages > 0, ("npages is 0"));
1514342056faSDoug Moore 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1515342056faSDoug Moore 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1516342056faSDoug Moore 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
1517342056faSDoug Moore 	if (low >= high)
1518342056faSDoug Moore 		return (NULL);
1519342056faSDoug Moore 	queues = NULL;
1520342056faSDoug Moore 	m_run = NULL;
1521342056faSDoug Moore 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1522342056faSDoug Moore 		seg = &vm_phys_segs[segind];
1523342056faSDoug Moore 		if (seg->start >= high || seg->domain != domain)
1524342056faSDoug Moore 			continue;
1525342056faSDoug Moore 		if (low >= seg->end)
1526342056faSDoug Moore 			break;
1527342056faSDoug Moore 		if (low <= seg->start)
1528342056faSDoug Moore 			pa_start = seg->start;
1529342056faSDoug Moore 		else
1530342056faSDoug Moore 			pa_start = low;
1531342056faSDoug Moore 		if (high < seg->end)
1532342056faSDoug Moore 			pa_end = high;
1533342056faSDoug Moore 		else
1534342056faSDoug Moore 			pa_end = seg->end;
1535342056faSDoug Moore 		if (pa_end - pa_start < ptoa(npages))
1536342056faSDoug Moore 			continue;
1537342056faSDoug Moore 		/*
1538342056faSDoug Moore 		 * If a previous segment led to a search using
1539342056faSDoug Moore 		 * the same free lists as would this segment, then
1540342056faSDoug Moore 		 * we've effectively already searched this segment
1541342056faSDoug Moore 		 * too.  So skip it.
1542342056faSDoug Moore 		 */
1543342056faSDoug Moore 		if (seg->free_queues == queues)
1544342056faSDoug Moore 			continue;
1545342056faSDoug Moore 		queues = seg->free_queues;
1546fa8a6585SDoug Moore 		m_run = vm_phys_find_queues_contig(queues, npages,
1547342056faSDoug Moore 		    low, high, alignment, boundary);
1548342056faSDoug Moore 		if (m_run != NULL)
1549342056faSDoug Moore 			break;
1550342056faSDoug Moore 	}
1551fa8a6585SDoug Moore 	if (m_run == NULL)
1552fa8a6585SDoug Moore 		return (NULL);
1553fa8a6585SDoug Moore 
1554fa8a6585SDoug Moore 	/* Allocate pages from the page-range found. */
1555fa8a6585SDoug Moore 	for (m = m_run; m < &m_run[npages]; m = &m[1 << oind]) {
1556fa8a6585SDoug Moore 		fl = (*queues)[m->pool];
1557fa8a6585SDoug Moore 		oind = m->order;
1558fa8a6585SDoug Moore 		vm_freelist_rem(fl, m, oind);
1559e3537f92SDoug Moore 		if (m->pool != VM_FREEPOOL_DEFAULT)
1560e3537f92SDoug Moore 			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
1561fa8a6585SDoug Moore 	}
1562fa8a6585SDoug Moore 	/* Return excess pages to the free lists. */
1563fa8a6585SDoug Moore 	fl = (*queues)[VM_FREEPOOL_DEFAULT];
1564e3537f92SDoug Moore 	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
15652a4897bdSDoug Moore 
15662a4897bdSDoug Moore 	/* Return page verified to satisfy conditions of request. */
15672a4897bdSDoug Moore 	pa_start = VM_PAGE_TO_PHYS(m_run);
15682a4897bdSDoug Moore 	KASSERT(low <= pa_start,
15692a4897bdSDoug Moore 	    ("memory allocated below minimum requested range"));
15702a4897bdSDoug Moore 	KASSERT(pa_start + ptoa(npages) <= high,
15712a4897bdSDoug Moore 	    ("memory allocated above maximum requested range"));
15722a4897bdSDoug Moore 	seg = &vm_phys_segs[m_run->segind];
15732a4897bdSDoug Moore 	KASSERT(seg->domain == domain,
15742a4897bdSDoug Moore 	    ("memory not allocated from specified domain"));
15752a4897bdSDoug Moore 	KASSERT(vm_addr_ok(pa_start, ptoa(npages), alignment, boundary),
15762a4897bdSDoug Moore 	    ("memory alignment/boundary constraints not satisfied"));
1577342056faSDoug Moore 	return (m_run);
1578342056faSDoug Moore }
1579342056faSDoug Moore 
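/*
 * An illustrative call (a sketch; real callers live in vm_page.c):
 * allocate 16 contiguous pages below 4 GB, aligned to 64 KB and not
 * crossing a 1 MB boundary, with the domain's free lock held:
 *
 *	vm_domain_free_lock(VM_DOMAIN(domain));
 *	m = vm_phys_alloc_contig(domain, 16, 0, (vm_paddr_t)1 << 32,
 *	    64 * 1024, 1024 * 1024);
 *	vm_domain_free_unlock(VM_DOMAIN(domain));
 *	if (m == NULL)
 *		... fall back to another domain or fail ...
 */
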
1580342056faSDoug Moore /*
1581b7565d44SJeff Roberson  * Return the index of the first unused slot, which may be the terminating
1582b7565d44SJeff Roberson  * entry.
1583b7565d44SJeff Roberson  */
1584b7565d44SJeff Roberson static int
1585b7565d44SJeff Roberson vm_phys_avail_count(void)
1586b7565d44SJeff Roberson {
1587b7565d44SJeff Roberson 	int i;
1588b7565d44SJeff Roberson 
1589b7565d44SJeff Roberson 	for (i = 0; phys_avail[i + 1]; i += 2)
1590b7565d44SJeff Roberson 		continue;
1591b7565d44SJeff Roberson 	if (i > PHYS_AVAIL_ENTRIES)
1592b7565d44SJeff Roberson 		panic("Improperly terminated phys_avail %d entries", i);
1593b7565d44SJeff Roberson 
1594b7565d44SJeff Roberson 	return (i);
1595b7565d44SJeff Roberson }
1596b7565d44SJeff Roberson 
1597b7565d44SJeff Roberson /*
1598b7565d44SJeff Roberson  * Assert that a phys_avail entry is valid.
1599b7565d44SJeff Roberson  */
1600b7565d44SJeff Roberson static void
1601b7565d44SJeff Roberson vm_phys_avail_check(int i)
1602b7565d44SJeff Roberson {
1603b7565d44SJeff Roberson 	if (phys_avail[i] & PAGE_MASK)
1604b7565d44SJeff Roberson 		panic("Unaligned phys_avail[%d]: %#jx", i,
1605b7565d44SJeff Roberson 		    (intmax_t)phys_avail[i]);
1606b7565d44SJeff Roberson 	if (phys_avail[i + 1] & PAGE_MASK)
1607b7565d44SJeff Roberson 		panic("Unaligned phys_avail[%d + 1]: %#jx", i,
1608b7565d44SJeff Roberson 		    (intmax_t)phys_avail[i + 1]);
1609b7565d44SJeff Roberson 	if (phys_avail[i + 1] < phys_avail[i])
1610b7565d44SJeff Roberson 		panic("phys_avail[%d] start %#jx > end %#jx", i,
1611b7565d44SJeff Roberson 		    (intmax_t)phys_avail[i], (intmax_t)phys_avail[i + 1]);
1612b7565d44SJeff Roberson }
1613b7565d44SJeff Roberson 
1614b7565d44SJeff Roberson /*
1615b7565d44SJeff Roberson  * Return the index of an overlapping phys_avail entry or -1.
1616b7565d44SJeff Roberson  */
1617be3f5f29SJeff Roberson #ifdef NUMA
1618b7565d44SJeff Roberson static int
1619b7565d44SJeff Roberson vm_phys_avail_find(vm_paddr_t pa)
1620b7565d44SJeff Roberson {
1621b7565d44SJeff Roberson 	int i;
1622b7565d44SJeff Roberson 
1623b7565d44SJeff Roberson 	for (i = 0; phys_avail[i + 1]; i += 2)
1624b7565d44SJeff Roberson 		if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
1625b7565d44SJeff Roberson 			return (i);
1626b7565d44SJeff Roberson 	return (-1);
1627b7565d44SJeff Roberson }
1628be3f5f29SJeff Roberson #endif
1629b7565d44SJeff Roberson 
1630b7565d44SJeff Roberson /*
1631b7565d44SJeff Roberson  * Return the index of the largest entry.
1632b7565d44SJeff Roberson  */
1633b7565d44SJeff Roberson int
1634b7565d44SJeff Roberson vm_phys_avail_largest(void)
1635b7565d44SJeff Roberson {
1636b7565d44SJeff Roberson 	vm_paddr_t sz, largesz;
1637b7565d44SJeff Roberson 	int largest;
1638b7565d44SJeff Roberson 	int i;
1639b7565d44SJeff Roberson 
1640b7565d44SJeff Roberson 	largest = 0;
1641b7565d44SJeff Roberson 	largesz = 0;
1642b7565d44SJeff Roberson 	for (i = 0; phys_avail[i + 1]; i += 2) {
1643b7565d44SJeff Roberson 		sz = vm_phys_avail_size(i);
1644b7565d44SJeff Roberson 		if (sz > largesz) {
1645b7565d44SJeff Roberson 			largesz = sz;
1646b7565d44SJeff Roberson 			largest = i;
1647b7565d44SJeff Roberson 		}
1648b7565d44SJeff Roberson 	}
1649b7565d44SJeff Roberson 
1650b7565d44SJeff Roberson 	return (largest);
1651b7565d44SJeff Roberson }
1652b7565d44SJeff Roberson 
1653b7565d44SJeff Roberson vm_paddr_t
1654b7565d44SJeff Roberson vm_phys_avail_size(int i)
1655b7565d44SJeff Roberson {
1656b7565d44SJeff Roberson 
1657b7565d44SJeff Roberson 	return (phys_avail[i + 1] - phys_avail[i]);
1658b7565d44SJeff Roberson }
1659b7565d44SJeff Roberson 
1660b7565d44SJeff Roberson /*
1661b7565d44SJeff Roberson  * Split an entry at the address 'pa'.  Return zero on success or errno.
1662b7565d44SJeff Roberson  */
1663b7565d44SJeff Roberson static int
1664b7565d44SJeff Roberson vm_phys_avail_split(vm_paddr_t pa, int i)
1665b7565d44SJeff Roberson {
1666b7565d44SJeff Roberson 	int cnt;
1667b7565d44SJeff Roberson 
1668b7565d44SJeff Roberson 	vm_phys_avail_check(i);
1669b7565d44SJeff Roberson 	if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
1670b7565d44SJeff Roberson 		panic("vm_phys_avail_split: invalid address");
1671b7565d44SJeff Roberson 	cnt = vm_phys_avail_count();
1672b7565d44SJeff Roberson 	if (cnt >= PHYS_AVAIL_ENTRIES)
1673b7565d44SJeff Roberson 		return (ENOSPC);
1674b7565d44SJeff Roberson 	memmove(&phys_avail[i + 2], &phys_avail[i],
1675b7565d44SJeff Roberson 	    (cnt - i) * sizeof(phys_avail[0]));
1676b7565d44SJeff Roberson 	phys_avail[i + 1] = pa;
1677b7565d44SJeff Roberson 	phys_avail[i + 2] = pa;
1678b7565d44SJeff Roberson 	vm_phys_avail_check(i);
1679b7565d44SJeff Roberson 	vm_phys_avail_check(i + 2);
1680b7565d44SJeff Roberson 
1681b7565d44SJeff Roberson 	return (0);
1682b7565d44SJeff Roberson }
1683b7565d44SJeff Roberson 
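/*
 * For example, splitting the entry {phys_avail[i] == 0x1000,
 * phys_avail[i + 1] == 0x8000} at pa == 0x4000 shifts the remaining
 * entries up by one pair and leaves the two adjacent entries
 * [0x1000, 0x4000) and [0x4000, 0x8000).
 */
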
168431991a5aSMitchell Horne /*
168531991a5aSMitchell Horne  * Check if a given physical address can be included as part of a crash dump.
168631991a5aSMitchell Horne  */
168731991a5aSMitchell Horne bool
168831991a5aSMitchell Horne vm_phys_is_dumpable(vm_paddr_t pa)
168931991a5aSMitchell Horne {
169031991a5aSMitchell Horne 	vm_page_t m;
169131991a5aSMitchell Horne 	int i;
169231991a5aSMitchell Horne 
169331991a5aSMitchell Horne 	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
169431991a5aSMitchell Horne 		return ((m->flags & PG_NODUMP) == 0);
169531991a5aSMitchell Horne 
169631991a5aSMitchell Horne 	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
169731991a5aSMitchell Horne 		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
169831991a5aSMitchell Horne 			return (true);
169931991a5aSMitchell Horne 	}
170031991a5aSMitchell Horne 	return (false);
170131991a5aSMitchell Horne }
170231991a5aSMitchell Horne 
170381302f1dSMark Johnston void
170481302f1dSMark Johnston vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
170581302f1dSMark Johnston {
170681302f1dSMark Johnston 	struct vm_phys_seg *seg;
170781302f1dSMark Johnston 
170881302f1dSMark Johnston 	if (vm_phys_early_nsegs == -1)
170981302f1dSMark Johnston 		panic("%s: called after initialization", __func__);
171081302f1dSMark Johnston 	if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
171181302f1dSMark Johnston 		panic("%s: ran out of early segments", __func__);
171281302f1dSMark Johnston 
171381302f1dSMark Johnston 	seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
171481302f1dSMark Johnston 	seg->start = start;
171581302f1dSMark Johnston 	seg->end = end;
171681302f1dSMark Johnston }
171781302f1dSMark Johnston 
1718b7565d44SJeff Roberson /*
1719b7565d44SJeff Roberson  * This routine allocates NUMA node-specific memory before the page
1720b7565d44SJeff Roberson  * allocator is bootstrapped.
1721b7565d44SJeff Roberson  */
1722b7565d44SJeff Roberson vm_paddr_t
1723b7565d44SJeff Roberson vm_phys_early_alloc(int domain, size_t alloc_size)
1724b7565d44SJeff Roberson {
17252e7838aeSJohn Baldwin #ifdef NUMA
17262e7838aeSJohn Baldwin 	int mem_index;
17272e7838aeSJohn Baldwin #endif
17282e7838aeSJohn Baldwin 	int i, biggestone;
1729b7565d44SJeff Roberson 	vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;
1730b7565d44SJeff Roberson 
173181302f1dSMark Johnston 	KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
173281302f1dSMark Johnston 	    ("%s: invalid domain index %d", __func__, domain));
1733b7565d44SJeff Roberson 
1734b7565d44SJeff Roberson 	/*
1735b7565d44SJeff Roberson 	 * Search the mem_affinity array for the biggest address
1736b7565d44SJeff Roberson 	 * range in the desired domain.  This is used to constrain
1737b7565d44SJeff Roberson 	 * the phys_avail selection below.
1738b7565d44SJeff Roberson 	 */
1739b7565d44SJeff Roberson 	biggestsize = 0;
1740b7565d44SJeff Roberson 	mem_start = 0;
1741b7565d44SJeff Roberson 	mem_end = -1;
1742b7565d44SJeff Roberson #ifdef NUMA
17432e7838aeSJohn Baldwin 	mem_index = 0;
1744b7565d44SJeff Roberson 	if (mem_affinity != NULL) {
1745b7565d44SJeff Roberson 		for (i = 0;; i++) {
1746b7565d44SJeff Roberson 			size = mem_affinity[i].end - mem_affinity[i].start;
1747b7565d44SJeff Roberson 			if (size == 0)
1748b7565d44SJeff Roberson 				break;
174981302f1dSMark Johnston 			if (domain != -1 && mem_affinity[i].domain != domain)
1750b7565d44SJeff Roberson 				continue;
1751b7565d44SJeff Roberson 			if (size > biggestsize) {
1752b7565d44SJeff Roberson 				mem_index = i;
1753b7565d44SJeff Roberson 				biggestsize = size;
1754b7565d44SJeff Roberson 			}
1755b7565d44SJeff Roberson 		}
1756b7565d44SJeff Roberson 		mem_start = mem_affinity[mem_index].start;
1757b7565d44SJeff Roberson 		mem_end = mem_affinity[mem_index].end;
1758b7565d44SJeff Roberson 	}
1759b7565d44SJeff Roberson #endif
1760b7565d44SJeff Roberson 
1761b7565d44SJeff Roberson 	/*
1762b7565d44SJeff Roberson 	 * Now find the biggest physical segment within the desired
1763b7565d44SJeff Roberson 	 * NUMA domain.
1764b7565d44SJeff Roberson 	 */
1765b7565d44SJeff Roberson 	biggestsize = 0;
1766b7565d44SJeff Roberson 	biggestone = 0;
1767b7565d44SJeff Roberson 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1768b7565d44SJeff Roberson 		/* skip regions that are out of range */
1769b7565d44SJeff Roberson 		if (phys_avail[i+1] - alloc_size < mem_start ||
1770b7565d44SJeff Roberson 		    phys_avail[i+1] > mem_end)
1771b7565d44SJeff Roberson 			continue;
1772b7565d44SJeff Roberson 		size = vm_phys_avail_size(i);
1773b7565d44SJeff Roberson 		if (size > biggestsize) {
1774b7565d44SJeff Roberson 			biggestone = i;
1775b7565d44SJeff Roberson 			biggestsize = size;
1776b7565d44SJeff Roberson 		}
1777b7565d44SJeff Roberson 	}
1778b7565d44SJeff Roberson 	alloc_size = round_page(alloc_size);
1779b7565d44SJeff Roberson 
1780b7565d44SJeff Roberson 	/*
1781b7565d44SJeff Roberson 	 * Grab single pages from the front to reduce fragmentation.
1782b7565d44SJeff Roberson 	 */
1783b7565d44SJeff Roberson 	if (alloc_size == PAGE_SIZE) {
1784b7565d44SJeff Roberson 		pa = phys_avail[biggestone];
1785b7565d44SJeff Roberson 		phys_avail[biggestone] += PAGE_SIZE;
1786b7565d44SJeff Roberson 		vm_phys_avail_check(biggestone);
1787b7565d44SJeff Roberson 		return (pa);
1788b7565d44SJeff Roberson 	}
1789b7565d44SJeff Roberson 
1790b7565d44SJeff Roberson 	/*
1791b7565d44SJeff Roberson 	 * Naturally align large allocations.
1792b7565d44SJeff Roberson 	 */
1793b7565d44SJeff Roberson 	align = phys_avail[biggestone + 1] & (alloc_size - 1);
1794b7565d44SJeff Roberson 	if (alloc_size + align > biggestsize)
1795b7565d44SJeff Roberson 		panic("cannot find a large enough free region");
1796b7565d44SJeff Roberson 	if (align != 0 &&
1797b7565d44SJeff Roberson 	    vm_phys_avail_split(phys_avail[biggestone + 1] - align,
1798b7565d44SJeff Roberson 	    biggestone) != 0)
1799b7565d44SJeff Roberson 		/* Wasting memory. */
1800b7565d44SJeff Roberson 		phys_avail[biggestone + 1] -= align;
1801b7565d44SJeff Roberson 
1802b7565d44SJeff Roberson 	phys_avail[biggestone + 1] -= alloc_size;
1803b7565d44SJeff Roberson 	vm_phys_avail_check(biggestone);
1804b7565d44SJeff Roberson 	pa = phys_avail[biggestone + 1];
1805b7565d44SJeff Roberson 	return (pa);
1806b7565d44SJeff Roberson }
1807b7565d44SJeff Roberson 
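/*
 * A worked example of the natural-alignment trim above: with
 * phys_avail[biggestone + 1] == 0x12345000 and alloc_size == 2 MB,
 * align == 0x12345000 & 0x1fffff == 0x145000.  Trimming the tail moves
 * the end down to 0x12200000, and subtracting alloc_size yields
 * pa == 0x12000000, which is naturally 2 MB-aligned.
 */
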
1808b7565d44SJeff Roberson void
1809b7565d44SJeff Roberson vm_phys_early_startup(void)
1810b7565d44SJeff Roberson {
181181302f1dSMark Johnston 	struct vm_phys_seg *seg;
1812b7565d44SJeff Roberson 	int i;
1813b7565d44SJeff Roberson 
1814b7565d44SJeff Roberson 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1815b7565d44SJeff Roberson 		phys_avail[i] = round_page(phys_avail[i]);
1816b7565d44SJeff Roberson 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
1817b7565d44SJeff Roberson 	}
1818b7565d44SJeff Roberson 
181981302f1dSMark Johnston 	for (i = 0; i < vm_phys_early_nsegs; i++) {
182081302f1dSMark Johnston 		seg = &vm_phys_early_segs[i];
182181302f1dSMark Johnston 		vm_phys_add_seg(seg->start, seg->end);
182281302f1dSMark Johnston 	}
182381302f1dSMark Johnston 	vm_phys_early_nsegs = -1;
182481302f1dSMark Johnston 
1825b7565d44SJeff Roberson #ifdef NUMA
1826b7565d44SJeff Roberson 	/* Force phys_avail to be split by domain. */
1827b7565d44SJeff Roberson 	if (mem_affinity != NULL) {
1828b7565d44SJeff Roberson 		int idx;
1829b7565d44SJeff Roberson 
1830b7565d44SJeff Roberson 		for (i = 0; mem_affinity[i].end != 0; i++) {
1831b7565d44SJeff Roberson 			idx = vm_phys_avail_find(mem_affinity[i].start);
1832b7565d44SJeff Roberson 			if (idx != -1 &&
1833b7565d44SJeff Roberson 			    phys_avail[idx] != mem_affinity[i].start)
1834b7565d44SJeff Roberson 				vm_phys_avail_split(mem_affinity[i].start, idx);
1835b7565d44SJeff Roberson 			idx = vm_phys_avail_find(mem_affinity[i].end);
1836b7565d44SJeff Roberson 			if (idx != -1 &&
1837b7565d44SJeff Roberson 			    phys_avail[idx] != mem_affinity[i].end)
1838b7565d44SJeff Roberson 				vm_phys_avail_split(mem_affinity[i].end, idx);
1839b7565d44SJeff Roberson 		}
1840b7565d44SJeff Roberson 	}
1841b7565d44SJeff Roberson #endif
1842b7565d44SJeff Roberson }
1843b7565d44SJeff Roberson 
184411752d88SAlan Cox #ifdef DDB
184511752d88SAlan Cox /*
184611752d88SAlan Cox  * Show the number of physical pages in each of the free lists.
184711752d88SAlan Cox  */
1848c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(freepages, db_show_freepages, DB_CMD_MEMSAFE)
184911752d88SAlan Cox {
185011752d88SAlan Cox 	struct vm_freelist *fl;
18517e226537SAttilio Rao 	int flind, oind, pind, dom;
185211752d88SAlan Cox 
18537e226537SAttilio Rao 	for (dom = 0; dom < vm_ndomains; dom++) {
18547e226537SAttilio Rao 		db_printf("DOMAIN: %d\n", dom);
185511752d88SAlan Cox 		for (flind = 0; flind < vm_nfreelists; flind++) {
185611752d88SAlan Cox 			db_printf("FREE LIST %d:\n"
185711752d88SAlan Cox 			    "\n  ORDER (SIZE)  |  NUMBER"
185811752d88SAlan Cox 			    "\n              ", flind);
185911752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
186011752d88SAlan Cox 				db_printf("  |  POOL %d", pind);
186111752d88SAlan Cox 			db_printf("\n--            ");
186211752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
186311752d88SAlan Cox 				db_printf("-- --      ");
186411752d88SAlan Cox 			db_printf("--\n");
186511752d88SAlan Cox 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
186611752d88SAlan Cox 				db_printf("  %2.2d (%6.6dK)", oind,
186711752d88SAlan Cox 				    1 << (PAGE_SHIFT - 10 + oind));
186811752d88SAlan Cox 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
18697e226537SAttilio Rao 					fl = vm_phys_free_queues[dom][flind][pind];
187011752d88SAlan Cox 					db_printf("  |  %6.6d", fl[oind].lcnt);
187111752d88SAlan Cox 				}
187211752d88SAlan Cox 				db_printf("\n");
187311752d88SAlan Cox 			}
187411752d88SAlan Cox 			db_printf("\n");
187511752d88SAlan Cox 		}
18767e226537SAttilio Rao 		db_printf("\n");
18777e226537SAttilio Rao 	}
187811752d88SAlan Cox }
187911752d88SAlan Cox #endif
1880