xref: /freebsd/sys/vm/vm_phys.c (revision 72aebdd7428fa905d50f884ec2a8310eb14f5aef)
111752d88SAlan Cox /*-
2fe267a55SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3fe267a55SPedro F. Giffuni  *
411752d88SAlan Cox  * Copyright (c) 2002-2006 Rice University
511752d88SAlan Cox  * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
611752d88SAlan Cox  * All rights reserved.
711752d88SAlan Cox  *
811752d88SAlan Cox  * This software was developed for the FreeBSD Project by Alan L. Cox,
911752d88SAlan Cox  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
1011752d88SAlan Cox  *
1111752d88SAlan Cox  * Redistribution and use in source and binary forms, with or without
1211752d88SAlan Cox  * modification, are permitted provided that the following conditions
1311752d88SAlan Cox  * are met:
1411752d88SAlan Cox  * 1. Redistributions of source code must retain the above copyright
1511752d88SAlan Cox  *    notice, this list of conditions and the following disclaimer.
1611752d88SAlan Cox  * 2. Redistributions in binary form must reproduce the above copyright
1711752d88SAlan Cox  *    notice, this list of conditions and the following disclaimer in the
1811752d88SAlan Cox  *    documentation and/or other materials provided with the distribution.
1911752d88SAlan Cox  *
2011752d88SAlan Cox  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2111752d88SAlan Cox  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2211752d88SAlan Cox  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2311752d88SAlan Cox  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
2411752d88SAlan Cox  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
2511752d88SAlan Cox  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
2611752d88SAlan Cox  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
2711752d88SAlan Cox  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
2811752d88SAlan Cox  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2911752d88SAlan Cox  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
3011752d88SAlan Cox  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3111752d88SAlan Cox  * POSSIBILITY OF SUCH DAMAGE.
3211752d88SAlan Cox  */
3311752d88SAlan Cox 
34fbd80bd0SAlan Cox /*
35fbd80bd0SAlan Cox  *	Physical memory system implementation
36fbd80bd0SAlan Cox  *
37fbd80bd0SAlan Cox  * The external functions defined by this module are for use only by the
38fbd80bd0SAlan Cox  * virtual memory system.
39fbd80bd0SAlan Cox  */
40fbd80bd0SAlan Cox 
4111752d88SAlan Cox #include <sys/cdefs.h>
4211752d88SAlan Cox __FBSDID("$FreeBSD$");
4311752d88SAlan Cox 
4411752d88SAlan Cox #include "opt_ddb.h"
45174b5f38SJohn Baldwin #include "opt_vm.h"
4611752d88SAlan Cox 
4711752d88SAlan Cox #include <sys/param.h>
4811752d88SAlan Cox #include <sys/systm.h>
4911752d88SAlan Cox #include <sys/lock.h>
5011752d88SAlan Cox #include <sys/kernel.h>
5111752d88SAlan Cox #include <sys/malloc.h>
5211752d88SAlan Cox #include <sys/mutex.h>
537e226537SAttilio Rao #include <sys/proc.h>
5411752d88SAlan Cox #include <sys/queue.h>
5538d6b2dcSRoger Pau Monné #include <sys/rwlock.h>
5611752d88SAlan Cox #include <sys/sbuf.h>
5711752d88SAlan Cox #include <sys/sysctl.h>
5838d6b2dcSRoger Pau Monné #include <sys/tree.h>
5911752d88SAlan Cox #include <sys/vmmeter.h>
606520495aSAdrian Chadd #include <sys/seq.h>
6111752d88SAlan Cox 
6211752d88SAlan Cox #include <ddb/ddb.h>
6311752d88SAlan Cox 
6411752d88SAlan Cox #include <vm/vm.h>
6511752d88SAlan Cox #include <vm/vm_param.h>
6611752d88SAlan Cox #include <vm/vm_kern.h>
6711752d88SAlan Cox #include <vm/vm_object.h>
6811752d88SAlan Cox #include <vm/vm_page.h>
6911752d88SAlan Cox #include <vm/vm_phys.h>
70e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
7111752d88SAlan Cox 
72449c2e92SKonstantin Belousov _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
73449c2e92SKonstantin Belousov     "Too many physsegs.");
7411752d88SAlan Cox 
75b6715dabSJeff Roberson #ifdef NUMA
76cdfeced8SJeff Roberson struct mem_affinity __read_mostly *mem_affinity;
77cdfeced8SJeff Roberson int __read_mostly *mem_locality;
7862d70a81SJohn Baldwin #endif
79a3870a18SJohn Baldwin 
80cdfeced8SJeff Roberson int __read_mostly vm_ndomains = 1;
817e226537SAttilio Rao 
82cdfeced8SJeff Roberson struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
83cdfeced8SJeff Roberson int __read_mostly vm_phys_nsegs;
8411752d88SAlan Cox 
8538d6b2dcSRoger Pau Monné struct vm_phys_fictitious_seg;
8638d6b2dcSRoger Pau Monné static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
8738d6b2dcSRoger Pau Monné     struct vm_phys_fictitious_seg *);
8838d6b2dcSRoger Pau Monné 
8938d6b2dcSRoger Pau Monné RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
9038d6b2dcSRoger Pau Monné     RB_INITIALIZER(_vm_phys_fictitious_tree);
9138d6b2dcSRoger Pau Monné 
9238d6b2dcSRoger Pau Monné struct vm_phys_fictitious_seg {
9338d6b2dcSRoger Pau Monné 	RB_ENTRY(vm_phys_fictitious_seg) node;
9438d6b2dcSRoger Pau Monné 	/* Memory region data */
95b6de32bdSKonstantin Belousov 	vm_paddr_t	start;
96b6de32bdSKonstantin Belousov 	vm_paddr_t	end;
97b6de32bdSKonstantin Belousov 	vm_page_t	first_page;
9838d6b2dcSRoger Pau Monné };
9938d6b2dcSRoger Pau Monné 
10038d6b2dcSRoger Pau Monné RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
10138d6b2dcSRoger Pau Monné     vm_phys_fictitious_cmp);
10238d6b2dcSRoger Pau Monné 
103cdfeced8SJeff Roberson static struct rwlock_padalign vm_phys_fictitious_reg_lock;
104c0432fc3SMark Johnston MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
105b6de32bdSKonstantin Belousov 
106cdfeced8SJeff Roberson static struct vm_freelist __aligned(CACHE_LINE_SIZE)
1077e226537SAttilio Rao     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
10811752d88SAlan Cox 
109cdfeced8SJeff Roberson static int __read_mostly vm_nfreelists;
110d866a563SAlan Cox 
111d866a563SAlan Cox /*
112d866a563SAlan Cox  * Provides the mapping from VM_FREELIST_* to free list indices (flind).
113d866a563SAlan Cox  */
114cdfeced8SJeff Roberson static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];
115d866a563SAlan Cox 
116d866a563SAlan Cox CTASSERT(VM_FREELIST_DEFAULT == 0);
117d866a563SAlan Cox 
118d866a563SAlan Cox #ifdef VM_FREELIST_DMA32
119d866a563SAlan Cox #define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
120d866a563SAlan Cox #endif
121d866a563SAlan Cox 
122d866a563SAlan Cox /*
123d866a563SAlan Cox  * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
124d866a563SAlan Cox  * the ordering of the free list boundaries.
125d866a563SAlan Cox  */
126d866a563SAlan Cox #if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
127d866a563SAlan Cox CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
128d866a563SAlan Cox #endif
12911752d88SAlan Cox 
13011752d88SAlan Cox static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
13111752d88SAlan Cox SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
13211752d88SAlan Cox     NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");
13311752d88SAlan Cox 
13411752d88SAlan Cox static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
13511752d88SAlan Cox SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
13611752d88SAlan Cox     NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");
13711752d88SAlan Cox 
138b6715dabSJeff Roberson #ifdef NUMA
139415d7ccaSAdrian Chadd static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
140415d7ccaSAdrian Chadd SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
141415d7ccaSAdrian Chadd     NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
1426520495aSAdrian Chadd #endif
143415d7ccaSAdrian Chadd 
1447e226537SAttilio Rao SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
1457e226537SAttilio Rao     &vm_ndomains, 0, "Number of physical memory domains available.");
146a3870a18SJohn Baldwin 
147c869e672SAlan Cox static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
148c869e672SAlan Cox     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
149c869e672SAlan Cox     vm_paddr_t boundary);
150d866a563SAlan Cox static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
151d866a563SAlan Cox static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
15211752d88SAlan Cox static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
153370a338aSAlan Cox     int order, int tail);
15411752d88SAlan Cox 
15538d6b2dcSRoger Pau Monné /*
15638d6b2dcSRoger Pau Monné  * Red-black tree helpers for vm fictitious range management.
15738d6b2dcSRoger Pau Monné  */
15838d6b2dcSRoger Pau Monné static inline int
15938d6b2dcSRoger Pau Monné vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
16038d6b2dcSRoger Pau Monné     struct vm_phys_fictitious_seg *range)
16138d6b2dcSRoger Pau Monné {
16238d6b2dcSRoger Pau Monné 
16338d6b2dcSRoger Pau Monné 	KASSERT(range->start != 0 && range->end != 0,
16438d6b2dcSRoger Pau Monné 	    ("Invalid range passed when searching for vm_fictitious page"));
16538d6b2dcSRoger Pau Monné 	if (p->start >= range->end)
16638d6b2dcSRoger Pau Monné 		return (1);
16738d6b2dcSRoger Pau Monné 	if (p->start < range->start)
16838d6b2dcSRoger Pau Monné 		return (-1);
16938d6b2dcSRoger Pau Monné 
17038d6b2dcSRoger Pau Monné 	return (0);
17138d6b2dcSRoger Pau Monné }
17238d6b2dcSRoger Pau Monné 
17338d6b2dcSRoger Pau Monné static int
17438d6b2dcSRoger Pau Monné vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
17538d6b2dcSRoger Pau Monné     struct vm_phys_fictitious_seg *p2)
17638d6b2dcSRoger Pau Monné {
17738d6b2dcSRoger Pau Monné 
17838d6b2dcSRoger Pau Monné 	/* Check if this is a search for a page */
17938d6b2dcSRoger Pau Monné 	if (p1->end == 0)
18038d6b2dcSRoger Pau Monné 		return (vm_phys_fictitious_in_range(p1, p2));
18138d6b2dcSRoger Pau Monné 
18238d6b2dcSRoger Pau Monné 	KASSERT(p2->end != 0,
18338d6b2dcSRoger Pau Monné     ("Invalid range passed as second parameter to vm fictitious comparison"));
18438d6b2dcSRoger Pau Monné 
18538d6b2dcSRoger Pau Monné 	/* Searching to add a new range */
18638d6b2dcSRoger Pau Monné 	if (p1->end <= p2->start)
18738d6b2dcSRoger Pau Monné 		return (-1);
18838d6b2dcSRoger Pau Monné 	if (p1->start >= p2->end)
18938d6b2dcSRoger Pau Monné 		return (1);
19038d6b2dcSRoger Pau Monné 
19138d6b2dcSRoger Pau Monné 	panic("Trying to add overlapping vm fictitious ranges:\n"
19238d6b2dcSRoger Pau Monné 	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
19338d6b2dcSRoger Pau Monné 	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
19438d6b2dcSRoger Pau Monné }
19538d6b2dcSRoger Pau Monné 
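/*
 * Editor's note: lookups encode "find the segment containing an address"
 * by passing a key with start set to the address and end set to zero (see
 * vm_phys_fictitious_to_vm_page() below), which routes the comparison
 * through vm_phys_fictitious_in_range().  Insertions pass a fully formed
 * range, and any overlap with an existing node panics.
 */
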
1966f4acaf4SJeff Roberson int
1976f4acaf4SJeff Roberson vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
198449c2e92SKonstantin Belousov {
199b6715dabSJeff Roberson #ifdef NUMA
2006f4acaf4SJeff Roberson 	domainset_t mask;
2016f4acaf4SJeff Roberson 	int i;
202449c2e92SKonstantin Belousov 
2036f4acaf4SJeff Roberson 	if (vm_ndomains == 1 || mem_affinity == NULL)
2046f4acaf4SJeff Roberson 		return (0);
2056f4acaf4SJeff Roberson 
2066f4acaf4SJeff Roberson 	DOMAINSET_ZERO(&mask);
2076f4acaf4SJeff Roberson 	/*
2086f4acaf4SJeff Roberson 	 * Check for any memory that overlaps low, high.
2096f4acaf4SJeff Roberson 	 */
2106f4acaf4SJeff Roberson 	for (i = 0; mem_affinity[i].end != 0; i++)
2116f4acaf4SJeff Roberson 		if (mem_affinity[i].start <= high &&
2126f4acaf4SJeff Roberson 		    mem_affinity[i].end >= low)
2136f4acaf4SJeff Roberson 			DOMAINSET_SET(mem_affinity[i].domain, &mask);
2146f4acaf4SJeff Roberson 	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
2156f4acaf4SJeff Roberson 		return (prefer);
2166f4acaf4SJeff Roberson 	if (DOMAINSET_EMPTY(&mask))
2176f4acaf4SJeff Roberson 		panic("vm_phys_domain_match: Impossible constraint");
2186f4acaf4SJeff Roberson 	return (DOMAINSET_FFS(&mask) - 1);
2196f4acaf4SJeff Roberson #else
2206f4acaf4SJeff Roberson 	return (0);
2216f4acaf4SJeff Roberson #endif
222449c2e92SKonstantin Belousov }
223449c2e92SKonstantin Belousov 
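/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller might use vm_phys_domain_match() to pick a domain for an
 * allocation that must fall below 4G.  The helper name and the preference
 * for domain 0 are hypothetical.
 */
static __unused int
vm_phys_domain_below_4g_example(void)
{

	return (vm_phys_domain_match(0, 0, (vm_paddr_t)1 << 32));
}
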
22411752d88SAlan Cox /*
22511752d88SAlan Cox  * Outputs the state of the physical memory allocator, specifically,
22611752d88SAlan Cox  * the amount of physical memory in each free list.
22711752d88SAlan Cox  */
22811752d88SAlan Cox static int
22911752d88SAlan Cox sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
23011752d88SAlan Cox {
23111752d88SAlan Cox 	struct sbuf sbuf;
23211752d88SAlan Cox 	struct vm_freelist *fl;
2337e226537SAttilio Rao 	int dom, error, flind, oind, pind;
23411752d88SAlan Cox 
23500f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
23600f0e671SMatthew D Fleming 	if (error != 0)
23700f0e671SMatthew D Fleming 		return (error);
2387e226537SAttilio Rao 	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
2397e226537SAttilio Rao 	for (dom = 0; dom < vm_ndomains; dom++) {
240eb2f42fbSAlan Cox 		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
24111752d88SAlan Cox 		for (flind = 0; flind < vm_nfreelists; flind++) {
242eb2f42fbSAlan Cox 			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
24311752d88SAlan Cox 			    "\n  ORDER (SIZE)  |  NUMBER"
24411752d88SAlan Cox 			    "\n              ", flind);
24511752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
24611752d88SAlan Cox 				sbuf_printf(&sbuf, "  |  POOL %d", pind);
24711752d88SAlan Cox 			sbuf_printf(&sbuf, "\n--            ");
24811752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
24911752d88SAlan Cox 				sbuf_printf(&sbuf, "-- --      ");
25011752d88SAlan Cox 			sbuf_printf(&sbuf, "--\n");
25111752d88SAlan Cox 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
252d689bc00SAlan Cox 				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
25311752d88SAlan Cox 				    1 << (PAGE_SHIFT - 10 + oind));
25411752d88SAlan Cox 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
2557e226537SAttilio Rao 				fl = vm_phys_free_queues[dom][flind][pind];
256eb2f42fbSAlan Cox 					sbuf_printf(&sbuf, "  |  %6d",
2577e226537SAttilio Rao 					    fl[oind].lcnt);
25811752d88SAlan Cox 				}
25911752d88SAlan Cox 				sbuf_printf(&sbuf, "\n");
26011752d88SAlan Cox 			}
2617e226537SAttilio Rao 		}
26211752d88SAlan Cox 	}
2634e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
26411752d88SAlan Cox 	sbuf_delete(&sbuf);
26511752d88SAlan Cox 	return (error);
26611752d88SAlan Cox }
26711752d88SAlan Cox 
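/*
 * Editor's note: the handler above emits one table per (domain, free list)
 * pair.  An illustrative fragment of "sysctl vm.phys_free" output, with
 * hypothetical counts, two pools, and a 4K page size, looks like:
 *
 * DOMAIN 0:
 *
 * FREE LIST 0:
 *
 *   ORDER (SIZE)  |  NUMBER
 *                 |  POOL 0  |  POOL 1
 * --            -- --      -- --      --
 *   12 ( 16384K)  |       3  |       0
 *   11 (  8192K)  |       1  |       2
 */
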
26811752d88SAlan Cox /*
26911752d88SAlan Cox  * Outputs the set of physical memory segments.
27011752d88SAlan Cox  */
27111752d88SAlan Cox static int
27211752d88SAlan Cox sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
27311752d88SAlan Cox {
27411752d88SAlan Cox 	struct sbuf sbuf;
27511752d88SAlan Cox 	struct vm_phys_seg *seg;
27611752d88SAlan Cox 	int error, segind;
27711752d88SAlan Cox 
27800f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
27900f0e671SMatthew D Fleming 	if (error != 0)
28000f0e671SMatthew D Fleming 		return (error);
2814e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
28211752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
28311752d88SAlan Cox 		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
28411752d88SAlan Cox 		seg = &vm_phys_segs[segind];
28511752d88SAlan Cox 		sbuf_printf(&sbuf, "start:     %#jx\n",
28611752d88SAlan Cox 		    (uintmax_t)seg->start);
28711752d88SAlan Cox 		sbuf_printf(&sbuf, "end:       %#jx\n",
28811752d88SAlan Cox 		    (uintmax_t)seg->end);
289a3870a18SJohn Baldwin 		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
29011752d88SAlan Cox 		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
29111752d88SAlan Cox 	}
2924e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
29311752d88SAlan Cox 	sbuf_delete(&sbuf);
29411752d88SAlan Cox 	return (error);
29511752d88SAlan Cox }
29611752d88SAlan Cox 
297415d7ccaSAdrian Chadd /*
298415d7ccaSAdrian Chadd  * Return affinity, or -1 if there's no affinity information.
299415d7ccaSAdrian Chadd  */
3006520495aSAdrian Chadd int
301415d7ccaSAdrian Chadd vm_phys_mem_affinity(int f, int t)
302415d7ccaSAdrian Chadd {
303415d7ccaSAdrian Chadd 
304b6715dabSJeff Roberson #ifdef NUMA
305415d7ccaSAdrian Chadd 	if (mem_locality == NULL)
306415d7ccaSAdrian Chadd 		return (-1);
307415d7ccaSAdrian Chadd 	if (f >= vm_ndomains || t >= vm_ndomains)
308415d7ccaSAdrian Chadd 		return (-1);
309415d7ccaSAdrian Chadd 	return (mem_locality[f * vm_ndomains + t]);
3106520495aSAdrian Chadd #else
3116520495aSAdrian Chadd 	return (-1);
3126520495aSAdrian Chadd #endif
313415d7ccaSAdrian Chadd }
314415d7ccaSAdrian Chadd 
315b6715dabSJeff Roberson #ifdef NUMA
316415d7ccaSAdrian Chadd /*
317415d7ccaSAdrian Chadd  * Outputs the VM locality table.
318415d7ccaSAdrian Chadd  */
319415d7ccaSAdrian Chadd static int
320415d7ccaSAdrian Chadd sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
321415d7ccaSAdrian Chadd {
322415d7ccaSAdrian Chadd 	struct sbuf sbuf;
323415d7ccaSAdrian Chadd 	int error, i, j;
324415d7ccaSAdrian Chadd 
325415d7ccaSAdrian Chadd 	error = sysctl_wire_old_buffer(req, 0);
326415d7ccaSAdrian Chadd 	if (error != 0)
327415d7ccaSAdrian Chadd 		return (error);
328415d7ccaSAdrian Chadd 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
329415d7ccaSAdrian Chadd 
330415d7ccaSAdrian Chadd 	sbuf_printf(&sbuf, "\n");
331415d7ccaSAdrian Chadd 
332415d7ccaSAdrian Chadd 	for (i = 0; i < vm_ndomains; i++) {
333415d7ccaSAdrian Chadd 		sbuf_printf(&sbuf, "%d: ", i);
334415d7ccaSAdrian Chadd 		for (j = 0; j < vm_ndomains; j++) {
335415d7ccaSAdrian Chadd 			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
336415d7ccaSAdrian Chadd 		}
337415d7ccaSAdrian Chadd 		sbuf_printf(&sbuf, "\n");
338415d7ccaSAdrian Chadd 	}
339415d7ccaSAdrian Chadd 	error = sbuf_finish(&sbuf);
340415d7ccaSAdrian Chadd 	sbuf_delete(&sbuf);
341415d7ccaSAdrian Chadd 	return (error);
342415d7ccaSAdrian Chadd }
3436520495aSAdrian Chadd #endif
344415d7ccaSAdrian Chadd 
3457e226537SAttilio Rao static void
3467e226537SAttilio Rao vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
347a3870a18SJohn Baldwin {
348a3870a18SJohn Baldwin 
3497e226537SAttilio Rao 	m->order = order;
3507e226537SAttilio Rao 	if (tail)
3515cd29d0fSMark Johnston 		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
3527e226537SAttilio Rao 	else
3535cd29d0fSMark Johnston 		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
3547e226537SAttilio Rao 	fl[order].lcnt++;
355a3870a18SJohn Baldwin }
3567e226537SAttilio Rao 
3577e226537SAttilio Rao static void
3587e226537SAttilio Rao vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
3597e226537SAttilio Rao {
3607e226537SAttilio Rao 
3615cd29d0fSMark Johnston 	TAILQ_REMOVE(&fl[order].pl, m, listq);
3627e226537SAttilio Rao 	fl[order].lcnt--;
3637e226537SAttilio Rao 	m->order = VM_NFREEORDER;
364a3870a18SJohn Baldwin }
365a3870a18SJohn Baldwin 
36611752d88SAlan Cox /*
36711752d88SAlan Cox  * Create a physical memory segment.
36811752d88SAlan Cox  */
36911752d88SAlan Cox static void
370d866a563SAlan Cox _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
37111752d88SAlan Cox {
37211752d88SAlan Cox 	struct vm_phys_seg *seg;
37311752d88SAlan Cox 
37411752d88SAlan Cox 	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
37511752d88SAlan Cox 	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
376ef435ae7SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
3777e226537SAttilio Rao 	    ("vm_phys_create_seg: invalid domain provided"));
37811752d88SAlan Cox 	seg = &vm_phys_segs[vm_phys_nsegs++];
379271f0f12SAlan Cox 	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
380271f0f12SAlan Cox 		*seg = *(seg - 1);
381271f0f12SAlan Cox 		seg--;
382271f0f12SAlan Cox 	}
38311752d88SAlan Cox 	seg->start = start;
38411752d88SAlan Cox 	seg->end = end;
385a3870a18SJohn Baldwin 	seg->domain = domain;
38611752d88SAlan Cox }
38711752d88SAlan Cox 
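/*
 * Editor's note: a worked example of the insertion loop above, with
 * hypothetical addresses.  If vm_phys_segs[] holds [0, 1G) and [3G, 4G),
 * creating [2G, 3G) shifts [3G, 4G) up one slot and stores the new segment
 * in the vacated position, keeping the array sorted by start address.
 */
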
388a3870a18SJohn Baldwin static void
389d866a563SAlan Cox vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
390a3870a18SJohn Baldwin {
391b6715dabSJeff Roberson #ifdef NUMA
392a3870a18SJohn Baldwin 	int i;
393a3870a18SJohn Baldwin 
394a3870a18SJohn Baldwin 	if (mem_affinity == NULL) {
395d866a563SAlan Cox 		_vm_phys_create_seg(start, end, 0);
396a3870a18SJohn Baldwin 		return;
397a3870a18SJohn Baldwin 	}
398a3870a18SJohn Baldwin 
399a3870a18SJohn Baldwin 	for (i = 0;; i++) {
400a3870a18SJohn Baldwin 		if (mem_affinity[i].end == 0)
401a3870a18SJohn Baldwin 			panic("Reached end of affinity info");
402a3870a18SJohn Baldwin 		if (mem_affinity[i].end <= start)
403a3870a18SJohn Baldwin 			continue;
404a3870a18SJohn Baldwin 		if (mem_affinity[i].start > start)
405a3870a18SJohn Baldwin 			panic("No affinity info for start %jx",
406a3870a18SJohn Baldwin 			    (uintmax_t)start);
407a3870a18SJohn Baldwin 		if (mem_affinity[i].end >= end) {
408d866a563SAlan Cox 			_vm_phys_create_seg(start, end,
409a3870a18SJohn Baldwin 			    mem_affinity[i].domain);
410a3870a18SJohn Baldwin 			break;
411a3870a18SJohn Baldwin 		}
412d866a563SAlan Cox 		_vm_phys_create_seg(start, mem_affinity[i].end,
413a3870a18SJohn Baldwin 		    mem_affinity[i].domain);
414a3870a18SJohn Baldwin 		start = mem_affinity[i].end;
415a3870a18SJohn Baldwin 	}
41662d70a81SJohn Baldwin #else
41762d70a81SJohn Baldwin 	_vm_phys_create_seg(start, end, 0);
41862d70a81SJohn Baldwin #endif
419a3870a18SJohn Baldwin }
420a3870a18SJohn Baldwin 
42111752d88SAlan Cox /*
422271f0f12SAlan Cox  * Add a physical memory segment.
423271f0f12SAlan Cox  */
424271f0f12SAlan Cox void
425271f0f12SAlan Cox vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
426271f0f12SAlan Cox {
427d866a563SAlan Cox 	vm_paddr_t paddr;
428271f0f12SAlan Cox 
429271f0f12SAlan Cox 	KASSERT((start & PAGE_MASK) == 0,
430271f0f12SAlan Cox 	    ("vm_phys_add_seg: start is not page aligned"));
431271f0f12SAlan Cox 	KASSERT((end & PAGE_MASK) == 0,
432271f0f12SAlan Cox 	    ("vm_phys_add_seg: end is not page aligned"));
433d866a563SAlan Cox 
434d866a563SAlan Cox 	/*
435d866a563SAlan Cox 	 * Split the physical memory segment if it spans two or more free
436d866a563SAlan Cox 	 * lists, so that no single segment crosses a free list boundary.
437d866a563SAlan Cox 	 */
438d866a563SAlan Cox 	paddr = start;
439d866a563SAlan Cox #ifdef	VM_FREELIST_LOWMEM
440d866a563SAlan Cox 	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
441d866a563SAlan Cox 		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
442d866a563SAlan Cox 		paddr = VM_LOWMEM_BOUNDARY;
443d866a563SAlan Cox 	}
444271f0f12SAlan Cox #endif
445d866a563SAlan Cox #ifdef	VM_FREELIST_DMA32
446d866a563SAlan Cox 	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
447d866a563SAlan Cox 		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
448d866a563SAlan Cox 		paddr = VM_DMA32_BOUNDARY;
449d866a563SAlan Cox 	}
450d866a563SAlan Cox #endif
451d866a563SAlan Cox 	vm_phys_create_seg(paddr, end);
452271f0f12SAlan Cox }
453271f0f12SAlan Cox 
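/*
 * Editor's note: a worked example with hypothetical boundary values.  If
 * VM_LOWMEM_BOUNDARY is 16M and VM_DMA32_BOUNDARY is 4G, then
 * vm_phys_add_seg(8M, 5G) creates three segments, [8M, 16M), [16M, 4G),
 * and [4G, 5G), so that no single segment spans a free list boundary.
 */
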
454271f0f12SAlan Cox /*
45511752d88SAlan Cox  * Initialize the physical memory allocator.
456d866a563SAlan Cox  *
457d866a563SAlan Cox  * Requires that vm_page_array is initialized!
45811752d88SAlan Cox  */
45911752d88SAlan Cox void
46011752d88SAlan Cox vm_phys_init(void)
46111752d88SAlan Cox {
46211752d88SAlan Cox 	struct vm_freelist *fl;
463*72aebdd7SAlan Cox 	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
464d866a563SAlan Cox 	u_long npages;
465d866a563SAlan Cox 	int dom, flind, freelist, oind, pind, segind;
46611752d88SAlan Cox 
467d866a563SAlan Cox 	/*
468d866a563SAlan Cox 	 * Compute the number of free lists, and generate the mapping from the
469d866a563SAlan Cox 	 * manifest constants VM_FREELIST_* to the free list indices.
470d866a563SAlan Cox 	 *
471d866a563SAlan Cox 	 * Initially, the entries of vm_freelist_to_flind[] are set to either
472d866a563SAlan Cox 	 * 0 or 1 to indicate which free lists should be created.
473d866a563SAlan Cox 	 */
474d866a563SAlan Cox 	npages = 0;
475d866a563SAlan Cox 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
476d866a563SAlan Cox 		seg = &vm_phys_segs[segind];
477d866a563SAlan Cox #ifdef	VM_FREELIST_LOWMEM
478d866a563SAlan Cox 		if (seg->end <= VM_LOWMEM_BOUNDARY)
479d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
480d866a563SAlan Cox 		else
481d866a563SAlan Cox #endif
482d866a563SAlan Cox #ifdef	VM_FREELIST_DMA32
483d866a563SAlan Cox 		if (
484d866a563SAlan Cox #ifdef	VM_DMA32_NPAGES_THRESHOLD
485d866a563SAlan Cox 		    /*
486d866a563SAlan Cox 		     * Create the DMA32 free list only if the amount of
487d866a563SAlan Cox 		     * physical memory above physical address 4G exceeds the
488d866a563SAlan Cox 		     * given threshold.
489d866a563SAlan Cox 		     */
490d866a563SAlan Cox 		    npages > VM_DMA32_NPAGES_THRESHOLD &&
491d866a563SAlan Cox #endif
492d866a563SAlan Cox 		    seg->end <= VM_DMA32_BOUNDARY)
493d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
494d866a563SAlan Cox 		else
495d866a563SAlan Cox #endif
496d866a563SAlan Cox 		{
497d866a563SAlan Cox 			npages += atop(seg->end - seg->start);
498d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
499d866a563SAlan Cox 		}
500d866a563SAlan Cox 	}
501d866a563SAlan Cox 	/* Change each entry into a running total of the free lists. */
502d866a563SAlan Cox 	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
503d866a563SAlan Cox 		vm_freelist_to_flind[freelist] +=
504d866a563SAlan Cox 		    vm_freelist_to_flind[freelist - 1];
505d866a563SAlan Cox 	}
506d866a563SAlan Cox 	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
507d866a563SAlan Cox 	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
508d866a563SAlan Cox 	/* Change each entry into a free list index. */
509d866a563SAlan Cox 	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
510d866a563SAlan Cox 		vm_freelist_to_flind[freelist]--;
511d866a563SAlan Cox 
512d866a563SAlan Cox 	/*
513d866a563SAlan Cox 	 * Initialize the first_page and free_queues fields of each physical
514d866a563SAlan Cox 	 * memory segment.
515d866a563SAlan Cox 	 */
516271f0f12SAlan Cox #ifdef VM_PHYSSEG_SPARSE
517d866a563SAlan Cox 	npages = 0;
51811752d88SAlan Cox #endif
519271f0f12SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
520271f0f12SAlan Cox 		seg = &vm_phys_segs[segind];
521271f0f12SAlan Cox #ifdef VM_PHYSSEG_SPARSE
522d866a563SAlan Cox 		seg->first_page = &vm_page_array[npages];
523d866a563SAlan Cox 		npages += atop(seg->end - seg->start);
524271f0f12SAlan Cox #else
525271f0f12SAlan Cox 		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
52611752d88SAlan Cox #endif
527d866a563SAlan Cox #ifdef	VM_FREELIST_LOWMEM
528d866a563SAlan Cox 		if (seg->end <= VM_LOWMEM_BOUNDARY) {
529d866a563SAlan Cox 			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
530d866a563SAlan Cox 			KASSERT(flind >= 0,
531d866a563SAlan Cox 			    ("vm_phys_init: LOWMEM flind < 0"));
532d866a563SAlan Cox 		} else
533d866a563SAlan Cox #endif
534d866a563SAlan Cox #ifdef	VM_FREELIST_DMA32
535d866a563SAlan Cox 		if (seg->end <= VM_DMA32_BOUNDARY) {
536d866a563SAlan Cox 			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
537d866a563SAlan Cox 			KASSERT(flind >= 0,
538d866a563SAlan Cox 			    ("vm_phys_init: DMA32 flind < 0"));
539d866a563SAlan Cox 		} else
540d866a563SAlan Cox #endif
541d866a563SAlan Cox 		{
542d866a563SAlan Cox 			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
543d866a563SAlan Cox 			KASSERT(flind >= 0,
544d866a563SAlan Cox 			    ("vm_phys_init: DEFAULT flind < 0"));
54511752d88SAlan Cox 		}
546d866a563SAlan Cox 		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
547d866a563SAlan Cox 	}
548d866a563SAlan Cox 
549d866a563SAlan Cox 	/*
550*72aebdd7SAlan Cox 	 * Coalesce physical memory segments that are contiguous and share the
551*72aebdd7SAlan Cox 	 * same per-domain free queues.
552*72aebdd7SAlan Cox 	 */
553*72aebdd7SAlan Cox 	prev_seg = vm_phys_segs;
554*72aebdd7SAlan Cox 	seg = &vm_phys_segs[1];
555*72aebdd7SAlan Cox 	end_seg = &vm_phys_segs[vm_phys_nsegs];
556*72aebdd7SAlan Cox 	while (seg < end_seg) {
557*72aebdd7SAlan Cox 		if (prev_seg->end == seg->start &&
558*72aebdd7SAlan Cox 		    prev_seg->free_queues == seg->free_queues) {
559*72aebdd7SAlan Cox 			prev_seg->end = seg->end;
560*72aebdd7SAlan Cox 			KASSERT(prev_seg->domain == seg->domain,
561*72aebdd7SAlan Cox 			    ("vm_phys_init: free queues cannot span domains"));
562*72aebdd7SAlan Cox 			vm_phys_nsegs--;
563*72aebdd7SAlan Cox 			end_seg--;
564*72aebdd7SAlan Cox 			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
565*72aebdd7SAlan Cox 				*tmp_seg = *(tmp_seg + 1);
566*72aebdd7SAlan Cox 		} else {
567*72aebdd7SAlan Cox 			prev_seg = seg;
568*72aebdd7SAlan Cox 			seg++;
569*72aebdd7SAlan Cox 		}
570*72aebdd7SAlan Cox 	}
571*72aebdd7SAlan Cox 
572*72aebdd7SAlan Cox 	/*
573d866a563SAlan Cox 	 * Initialize the free queues.
574d866a563SAlan Cox 	 */
5757e226537SAttilio Rao 	for (dom = 0; dom < vm_ndomains; dom++) {
57611752d88SAlan Cox 		for (flind = 0; flind < vm_nfreelists; flind++) {
57711752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
5787e226537SAttilio Rao 				fl = vm_phys_free_queues[dom][flind][pind];
57911752d88SAlan Cox 				for (oind = 0; oind < VM_NFREEORDER; oind++)
58011752d88SAlan Cox 					TAILQ_INIT(&fl[oind].pl);
58111752d88SAlan Cox 			}
58211752d88SAlan Cox 		}
583a3870a18SJohn Baldwin 	}
584d866a563SAlan Cox 
58538d6b2dcSRoger Pau Monné 	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
58611752d88SAlan Cox }
58711752d88SAlan Cox 
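/*
 * Editor's note: a worked example of the vm_freelist_to_flind[] computation
 * above, assuming a hypothetical amd64-like configuration in which all
 * three of the DEFAULT, DMA32, and LOWMEM lists are populated.  The flag
 * pass yields { 1, 1, 1 }, the running-total pass { 1, 2, 3 } (so
 * vm_nfreelists is 3), and the final decrement gives the indices
 * { 0, 1, 2 }.  A leading free list whose flag stayed 0 would decrement to
 * -1, which vm_phys_alloc_freelist_pages() treats as "not present".
 */
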
58811752d88SAlan Cox /*
58911752d88SAlan Cox  * Split a contiguous, power of two-sized set of physical pages.
590370a338aSAlan Cox  *
591370a338aSAlan Cox  * When this function is called by a page allocation function, the caller
592370a338aSAlan Cox  * should request insertion at the head unless the order [order, oind) queues
593370a338aSAlan Cox  * are known to be empty.  The objective is to reduce the likelihood of
594370a338aSAlan Cox  * long-term fragmentation by promoting contemporaneous allocation and
595370a338aSAlan Cox  * (hopefully) deallocation.
59611752d88SAlan Cox  */
59711752d88SAlan Cox static __inline void
598370a338aSAlan Cox vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
599370a338aSAlan Cox     int tail)
60011752d88SAlan Cox {
60111752d88SAlan Cox 	vm_page_t m_buddy;
60211752d88SAlan Cox 
60311752d88SAlan Cox 	while (oind > order) {
60411752d88SAlan Cox 		oind--;
60511752d88SAlan Cox 		m_buddy = &m[1 << oind];
60611752d88SAlan Cox 		KASSERT(m_buddy->order == VM_NFREEORDER,
60711752d88SAlan Cox 		    ("vm_phys_split_pages: page %p has unexpected order %d",
60811752d88SAlan Cox 		    m_buddy, m_buddy->order));
609370a338aSAlan Cox 		vm_freelist_add(fl, m_buddy, oind, tail);
61011752d88SAlan Cox 	}
61111752d88SAlan Cox }
61211752d88SAlan Cox 
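/*
 * Editor's note: a worked example.  Splitting an order-3 block (eight
 * pages) to satisfy an order-0 request frees the upper buddies of order 2,
 * 1, and 0 in successive iterations; the caller keeps the remaining
 * order-0 page at the block's original address.
 */
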
61311752d88SAlan Cox /*
6147493904eSAlan Cox  * Add the physical pages [m, m + npages), which form the tail of a
6157493904eSAlan Cox  * power-of-two aligned and sized set, to the specified free list.
6167493904eSAlan Cox  *
6177493904eSAlan Cox  * When this function is called by a page allocation function, the caller
6187493904eSAlan Cox  * known to be empty.  The objective is to reduce the likelihood of long-
6197493904eSAlan Cox  * known to be empty.  The objective being to reduce the likelihood of long-
6207493904eSAlan Cox  * term fragmentation by promoting contemporaneous allocation and (hopefully)
6217493904eSAlan Cox  * deallocation.
6227493904eSAlan Cox  *
6237493904eSAlan Cox  * The physical page m's buddy must not be free.
6247493904eSAlan Cox  */
6257493904eSAlan Cox static void
6267493904eSAlan Cox vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
6277493904eSAlan Cox {
6287493904eSAlan Cox 	u_int n;
6297493904eSAlan Cox 	int order;
6307493904eSAlan Cox 
6317493904eSAlan Cox 	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
6327493904eSAlan Cox 	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
6337493904eSAlan Cox 	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
6347493904eSAlan Cox 	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
6357493904eSAlan Cox 	    m, npages));
6367493904eSAlan Cox 	do {
6377493904eSAlan Cox 		KASSERT(m->order == VM_NFREEORDER,
6387493904eSAlan Cox 		    ("vm_phys_enq_range: page %p has unexpected order %d",
6397493904eSAlan Cox 		    m, m->order));
6407493904eSAlan Cox 		order = ffs(npages) - 1;
6417493904eSAlan Cox 		KASSERT(order < VM_NFREEORDER,
6427493904eSAlan Cox 		    ("vm_phys_enq_range: order %d is out of range", order));
6437493904eSAlan Cox 		vm_freelist_add(fl, m, order, tail);
6447493904eSAlan Cox 		n = 1 << order;
6457493904eSAlan Cox 		m += n;
6467493904eSAlan Cox 		npages -= n;
6477493904eSAlan Cox 	} while (npages > 0);
6487493904eSAlan Cox }
6497493904eSAlan Cox 
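/*
 * Editor's note: a worked example.  With npages = 5 and the end-alignment
 * required by the assertion above, the loop frees an order-0 block
 * (ffs(5) - 1 == 0) and then an order-2 block (ffs(4) - 1 == 2), consuming
 * all five pages from the lowest address upward.
 */
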
6507493904eSAlan Cox /*
65189ea39a7SAlan Cox  * Tries to allocate the specified number of pages from the specified pool
65289ea39a7SAlan Cox  * within the specified domain.  Returns the actual number of allocated pages
65389ea39a7SAlan Cox  * and a pointer to each page through the array ma[].
65489ea39a7SAlan Cox  *
65532d81f21SAlan Cox  * The returned pages may not be physically contiguous.  However, in contrast
65632d81f21SAlan Cox  * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
65732d81f21SAlan Cox  * calling this function once to allocate the desired number of pages will
65832d81f21SAlan Cox  * avoid wasted time in vm_phys_split_pages().
65989ea39a7SAlan Cox  *
66089ea39a7SAlan Cox  * The free page queues for the specified domain must be locked.
66189ea39a7SAlan Cox  */
66289ea39a7SAlan Cox int
66389ea39a7SAlan Cox vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
66489ea39a7SAlan Cox {
66589ea39a7SAlan Cox 	struct vm_freelist *alt, *fl;
66689ea39a7SAlan Cox 	vm_page_t m;
66789ea39a7SAlan Cox 	int avail, end, flind, freelist, i, need, oind, pind;
66889ea39a7SAlan Cox 
66989ea39a7SAlan Cox 	KASSERT(domain >= 0 && domain < vm_ndomains,
67089ea39a7SAlan Cox 	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
67189ea39a7SAlan Cox 	KASSERT(pool < VM_NFREEPOOL,
67289ea39a7SAlan Cox 	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
67389ea39a7SAlan Cox 	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
67489ea39a7SAlan Cox 	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
67589ea39a7SAlan Cox 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
67689ea39a7SAlan Cox 	i = 0;
67789ea39a7SAlan Cox 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
67889ea39a7SAlan Cox 		flind = vm_freelist_to_flind[freelist];
67989ea39a7SAlan Cox 		if (flind < 0)
68089ea39a7SAlan Cox 			continue;
68189ea39a7SAlan Cox 		fl = vm_phys_free_queues[domain][flind][pool];
68289ea39a7SAlan Cox 		for (oind = 0; oind < VM_NFREEORDER; oind++) {
68389ea39a7SAlan Cox 			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
68489ea39a7SAlan Cox 				vm_freelist_rem(fl, m, oind);
68589ea39a7SAlan Cox 				avail = 1 << oind;
68689ea39a7SAlan Cox 				need = imin(npages - i, avail);
68789ea39a7SAlan Cox 				for (end = i + need; i < end;)
68889ea39a7SAlan Cox 					ma[i++] = m++;
68989ea39a7SAlan Cox 				if (need < avail) {
6907493904eSAlan Cox 					/*
6917493904eSAlan Cox 					 * Return excess pages to fl.  Its
6927493904eSAlan Cox 					 * order [0, oind) queues are empty.
6937493904eSAlan Cox 					 */
6947493904eSAlan Cox 					vm_phys_enq_range(m, avail - need, fl,
6957493904eSAlan Cox 					    1);
69689ea39a7SAlan Cox 					return (npages);
69789ea39a7SAlan Cox 				} else if (i == npages)
69889ea39a7SAlan Cox 					return (npages);
69989ea39a7SAlan Cox 			}
70089ea39a7SAlan Cox 		}
70189ea39a7SAlan Cox 		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
70289ea39a7SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
70389ea39a7SAlan Cox 				alt = vm_phys_free_queues[domain][flind][pind];
70489ea39a7SAlan Cox 				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
70589ea39a7SAlan Cox 				    NULL) {
70689ea39a7SAlan Cox 					vm_freelist_rem(alt, m, oind);
70789ea39a7SAlan Cox 					vm_phys_set_pool(pool, m, oind);
70889ea39a7SAlan Cox 					avail = 1 << oind;
70989ea39a7SAlan Cox 					need = imin(npages - i, avail);
71089ea39a7SAlan Cox 					for (end = i + need; i < end;)
71189ea39a7SAlan Cox 						ma[i++] = m++;
71289ea39a7SAlan Cox 					if (need < avail) {
7137493904eSAlan Cox 						/*
7147493904eSAlan Cox 						 * Return excess pages to fl.
7157493904eSAlan Cox 						 * Its order [0, oind) queues
7167493904eSAlan Cox 						 * are empty.
7177493904eSAlan Cox 						 */
7187493904eSAlan Cox 						vm_phys_enq_range(m, avail -
7197493904eSAlan Cox 						    need, fl, 1);
72089ea39a7SAlan Cox 						return (npages);
72189ea39a7SAlan Cox 					} else if (i == npages)
72289ea39a7SAlan Cox 						return (npages);
72389ea39a7SAlan Cox 				}
72489ea39a7SAlan Cox 			}
72589ea39a7SAlan Cox 		}
72689ea39a7SAlan Cox 	}
72789ea39a7SAlan Cox 	return (i);
72889ea39a7SAlan Cox }
72989ea39a7SAlan Cox 
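/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * batching an allocation through vm_phys_alloc_npages().  The helper name
 * and batch size are hypothetical; the real callers live in vm_page.c and
 * hold the domain's free queue lock, as sketched here.
 */
static __unused int
vm_phys_alloc_batch_example(int domain, vm_page_t ma[8])
{
	int got;

	vm_domain_free_lock(VM_DOMAIN(domain));
	got = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, 8, ma);
	vm_domain_free_unlock(VM_DOMAIN(domain));

	/* "got" may be less than 8 if free memory is scarce. */
	return (got);
}
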
73089ea39a7SAlan Cox /*
73111752d88SAlan Cox  * Allocate a contiguous, power of two-sized set of physical pages
73211752d88SAlan Cox  * from the free lists.
7338941dc44SAlan Cox  *
7348941dc44SAlan Cox  * The free page queues must be locked.
73511752d88SAlan Cox  */
73611752d88SAlan Cox vm_page_t
737ef435ae7SJeff Roberson vm_phys_alloc_pages(int domain, int pool, int order)
73811752d88SAlan Cox {
73949ca10d4SJayachandran C. 	vm_page_t m;
7400db2102aSMichael Zhilin 	int freelist;
74149ca10d4SJayachandran C. 
7420db2102aSMichael Zhilin 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
7430db2102aSMichael Zhilin 		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
74449ca10d4SJayachandran C. 		if (m != NULL)
74549ca10d4SJayachandran C. 			return (m);
74649ca10d4SJayachandran C. 	}
74749ca10d4SJayachandran C. 	return (NULL);
74849ca10d4SJayachandran C. }
74949ca10d4SJayachandran C. 
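/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * allocating and immediately freeing one order-2 (four page) block from
 * the default pool.  The helper name is hypothetical; the locking follows
 * the "free page queues must be locked" requirement documented above.
 */
static __unused void
vm_phys_alloc_free_example(int domain)
{
	vm_page_t m;

	vm_domain_free_lock(VM_DOMAIN(domain));
	m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 2);
	if (m != NULL)
		vm_phys_free_pages(m, 2);	/* Return the block intact. */
	vm_domain_free_unlock(VM_DOMAIN(domain));
}
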
75049ca10d4SJayachandran C. /*
751d866a563SAlan Cox  * Allocate a contiguous, power of two-sized set of physical pages from the
752d866a563SAlan Cox  * specified free list.  The free list must be specified using one of the
753d866a563SAlan Cox  * manifest constants VM_FREELIST_*.
754d866a563SAlan Cox  *
755d866a563SAlan Cox  * The free page queues must be locked.
75649ca10d4SJayachandran C.  */
75749ca10d4SJayachandran C. vm_page_t
7580db2102aSMichael Zhilin vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
75949ca10d4SJayachandran C. {
760ef435ae7SJeff Roberson 	struct vm_freelist *alt, *fl;
76111752d88SAlan Cox 	vm_page_t m;
7620db2102aSMichael Zhilin 	int oind, pind, flind;
76311752d88SAlan Cox 
764ef435ae7SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
765ef435ae7SJeff Roberson 	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
766ef435ae7SJeff Roberson 	    domain));
7670db2102aSMichael Zhilin 	KASSERT(freelist < VM_NFREELIST,
768d866a563SAlan Cox 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
7695be93778SAndrew Turner 	    freelist));
77011752d88SAlan Cox 	KASSERT(pool < VM_NFREEPOOL,
77149ca10d4SJayachandran C. 	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
77211752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
77349ca10d4SJayachandran C. 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
7746520495aSAdrian Chadd 
7750db2102aSMichael Zhilin 	flind = vm_freelist_to_flind[freelist];
7760db2102aSMichael Zhilin 	/* Check if freelist is present */
7770db2102aSMichael Zhilin 	if (flind < 0)
7780db2102aSMichael Zhilin 		return (NULL);
7790db2102aSMichael Zhilin 
780e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
7817e226537SAttilio Rao 	fl = &vm_phys_free_queues[domain][flind][pool][0];
78211752d88SAlan Cox 	for (oind = order; oind < VM_NFREEORDER; oind++) {
78311752d88SAlan Cox 		m = TAILQ_FIRST(&fl[oind].pl);
78411752d88SAlan Cox 		if (m != NULL) {
7857e226537SAttilio Rao 			vm_freelist_rem(fl, m, oind);
786370a338aSAlan Cox 			/* The order [order, oind) queues are empty. */
787370a338aSAlan Cox 			vm_phys_split_pages(m, oind, fl, order, 1);
78811752d88SAlan Cox 			return (m);
78911752d88SAlan Cox 		}
79011752d88SAlan Cox 	}
79111752d88SAlan Cox 
79211752d88SAlan Cox 	/*
79311752d88SAlan Cox 	 * The given pool was empty.  Find the largest
79411752d88SAlan Cox 	 * contiguous, power-of-two-sized set of pages in any
79511752d88SAlan Cox 	 * pool.  Transfer these pages to the given pool, and
79611752d88SAlan Cox 	 * use them to satisfy the allocation.
79711752d88SAlan Cox 	 */
79811752d88SAlan Cox 	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
79911752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
8007e226537SAttilio Rao 			alt = &vm_phys_free_queues[domain][flind][pind][0];
80111752d88SAlan Cox 			m = TAILQ_FIRST(&alt[oind].pl);
80211752d88SAlan Cox 			if (m != NULL) {
8037e226537SAttilio Rao 				vm_freelist_rem(alt, m, oind);
80411752d88SAlan Cox 				vm_phys_set_pool(pool, m, oind);
805370a338aSAlan Cox 				/* The order [order, oind) queues are empty. */
806370a338aSAlan Cox 				vm_phys_split_pages(m, oind, fl, order, 1);
80711752d88SAlan Cox 				return (m);
80811752d88SAlan Cox 			}
80911752d88SAlan Cox 		}
81011752d88SAlan Cox 	}
81111752d88SAlan Cox 	return (NULL);
81211752d88SAlan Cox }
81311752d88SAlan Cox 
81411752d88SAlan Cox /*
81511752d88SAlan Cox  * Find the vm_page corresponding to the given physical address.
81611752d88SAlan Cox  */
81711752d88SAlan Cox vm_page_t
81811752d88SAlan Cox vm_phys_paddr_to_vm_page(vm_paddr_t pa)
81911752d88SAlan Cox {
82011752d88SAlan Cox 	struct vm_phys_seg *seg;
82111752d88SAlan Cox 	int segind;
82211752d88SAlan Cox 
82311752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
82411752d88SAlan Cox 		seg = &vm_phys_segs[segind];
82511752d88SAlan Cox 		if (pa >= seg->start && pa < seg->end)
82611752d88SAlan Cox 			return (&seg->first_page[atop(pa - seg->start)]);
82711752d88SAlan Cox 	}
828f06a3a36SAndrew Thompson 	return (NULL);
82911752d88SAlan Cox }
83011752d88SAlan Cox 
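/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * using vm_phys_paddr_to_vm_page() to test whether a physical address is
 * covered by some segment.  The helper name is hypothetical.
 */
static __unused bool
vm_phys_paddr_is_managed_example(vm_paddr_t pa)
{

	/* A NULL result means "pa" lies outside every registered segment. */
	return (vm_phys_paddr_to_vm_page(pa) != NULL);
}
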
831b6de32bdSKonstantin Belousov vm_page_t
832b6de32bdSKonstantin Belousov vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
833b6de32bdSKonstantin Belousov {
83438d6b2dcSRoger Pau Monné 	struct vm_phys_fictitious_seg tmp, *seg;
835b6de32bdSKonstantin Belousov 	vm_page_t m;
836b6de32bdSKonstantin Belousov 
837b6de32bdSKonstantin Belousov 	m = NULL;
83838d6b2dcSRoger Pau Monné 	tmp.start = pa;
83938d6b2dcSRoger Pau Monné 	tmp.end = 0;
84038d6b2dcSRoger Pau Monné 
84138d6b2dcSRoger Pau Monné 	rw_rlock(&vm_phys_fictitious_reg_lock);
84238d6b2dcSRoger Pau Monné 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
84338d6b2dcSRoger Pau Monné 	rw_runlock(&vm_phys_fictitious_reg_lock);
84438d6b2dcSRoger Pau Monné 	if (seg == NULL)
84538d6b2dcSRoger Pau Monné 		return (NULL);
84638d6b2dcSRoger Pau Monné 
847b6de32bdSKonstantin Belousov 	m = &seg->first_page[atop(pa - seg->start)];
84838d6b2dcSRoger Pau Monné 	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));
84938d6b2dcSRoger Pau Monné 
850b6de32bdSKonstantin Belousov 	return (m);
851b6de32bdSKonstantin Belousov }
852b6de32bdSKonstantin Belousov 
8535ebe728dSRoger Pau Monné static inline void
8545ebe728dSRoger Pau Monné vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
8555ebe728dSRoger Pau Monné     long page_count, vm_memattr_t memattr)
8565ebe728dSRoger Pau Monné {
8575ebe728dSRoger Pau Monné 	long i;
8585ebe728dSRoger Pau Monné 
859f93f7cf1SMark Johnston 	bzero(range, page_count * sizeof(*range));
8605ebe728dSRoger Pau Monné 	for (i = 0; i < page_count; i++) {
8615ebe728dSRoger Pau Monné 		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
8625ebe728dSRoger Pau Monné 		range[i].oflags &= ~VPO_UNMANAGED;
8635ebe728dSRoger Pau Monné 		range[i].busy_lock = VPB_UNBUSIED;
8645ebe728dSRoger Pau Monné 	}
8655ebe728dSRoger Pau Monné }
8665ebe728dSRoger Pau Monné 
867b6de32bdSKonstantin Belousov int
868b6de32bdSKonstantin Belousov vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
869b6de32bdSKonstantin Belousov     vm_memattr_t memattr)
870b6de32bdSKonstantin Belousov {
871b6de32bdSKonstantin Belousov 	struct vm_phys_fictitious_seg *seg;
872b6de32bdSKonstantin Belousov 	vm_page_t fp;
8735ebe728dSRoger Pau Monné 	long page_count;
874b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
8755ebe728dSRoger Pau Monné 	long pi, pe;
8765ebe728dSRoger Pau Monné 	long dpage_count;
877b6de32bdSKonstantin Belousov #endif
878b6de32bdSKonstantin Belousov 
8795ebe728dSRoger Pau Monné 	KASSERT(start < end,
8805ebe728dSRoger Pau Monné 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
8815ebe728dSRoger Pau Monné 	    (uintmax_t)start, (uintmax_t)end));
8825ebe728dSRoger Pau Monné 
883b6de32bdSKonstantin Belousov 	page_count = (end - start) / PAGE_SIZE;
884b6de32bdSKonstantin Belousov 
885b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
886b6de32bdSKonstantin Belousov 	pi = atop(start);
8875ebe728dSRoger Pau Monné 	pe = atop(end);
8885ebe728dSRoger Pau Monné 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
889b6de32bdSKonstantin Belousov 		fp = &vm_page_array[pi - first_page];
8905ebe728dSRoger Pau Monné 		if ((pe - first_page) > vm_page_array_size) {
8915ebe728dSRoger Pau Monné 			/*
8925ebe728dSRoger Pau Monné 			 * We have a segment that starts inside
8935ebe728dSRoger Pau Monné 			 * of vm_page_array, but ends outside of it.
8945ebe728dSRoger Pau Monné 			 *
8955ebe728dSRoger Pau Monné 			 * Use vm_page_array pages for those that are
8965ebe728dSRoger Pau Monné 			 * inside of the vm_page_array range, and
8975ebe728dSRoger Pau Monné 			 * allocate the remaining ones.
8985ebe728dSRoger Pau Monné 			 */
8995ebe728dSRoger Pau Monné 			dpage_count = vm_page_array_size - (pi - first_page);
9005ebe728dSRoger Pau Monné 			vm_phys_fictitious_init_range(fp, start, dpage_count,
9015ebe728dSRoger Pau Monné 			    memattr);
9025ebe728dSRoger Pau Monné 			page_count -= dpage_count;
9035ebe728dSRoger Pau Monné 			start += ptoa(dpage_count);
9045ebe728dSRoger Pau Monné 			goto alloc;
9055ebe728dSRoger Pau Monné 		}
9065ebe728dSRoger Pau Monné 		/*
9075ebe728dSRoger Pau Monné 		 * We can allocate the full range from vm_page_array,
9085ebe728dSRoger Pau Monné 		 * so there's no need to register the range in the tree.
9095ebe728dSRoger Pau Monné 		 */
9105ebe728dSRoger Pau Monné 		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
9115ebe728dSRoger Pau Monné 		return (0);
9125ebe728dSRoger Pau Monné 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
9135ebe728dSRoger Pau Monné 		/*
9145ebe728dSRoger Pau Monné 		 * We have a segment that ends inside of vm_page_array,
9155ebe728dSRoger Pau Monné 		 * but starts outside of it.
9165ebe728dSRoger Pau Monné 		 */
9175ebe728dSRoger Pau Monné 		fp = &vm_page_array[0];
9185ebe728dSRoger Pau Monné 		dpage_count = pe - first_page;
9195ebe728dSRoger Pau Monné 		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
9205ebe728dSRoger Pau Monné 		    memattr);
9215ebe728dSRoger Pau Monné 		end -= ptoa(dpage_count);
9225ebe728dSRoger Pau Monné 		page_count -= dpage_count;
9235ebe728dSRoger Pau Monné 		goto alloc;
9245ebe728dSRoger Pau Monné 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
9255ebe728dSRoger Pau Monné 		/*
9265ebe728dSRoger Pau Monné 		 * Trying to register a fictitious range that extends both
9275ebe728dSRoger Pau Monné 		 * before and after vm_page_array.
9285ebe728dSRoger Pau Monné 		 */
9295ebe728dSRoger Pau Monné 		return (EINVAL);
9305ebe728dSRoger Pau Monné 	} else {
9315ebe728dSRoger Pau Monné alloc:
932b6de32bdSKonstantin Belousov #endif
933b6de32bdSKonstantin Belousov 		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
934f93f7cf1SMark Johnston 		    M_WAITOK);
9355ebe728dSRoger Pau Monné #ifdef VM_PHYSSEG_DENSE
936b6de32bdSKonstantin Belousov 	}
9375ebe728dSRoger Pau Monné #endif
9385ebe728dSRoger Pau Monné 	vm_phys_fictitious_init_range(fp, start, page_count, memattr);
93938d6b2dcSRoger Pau Monné 
94038d6b2dcSRoger Pau Monné 	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
941b6de32bdSKonstantin Belousov 	seg->start = start;
942b6de32bdSKonstantin Belousov 	seg->end = end;
943b6de32bdSKonstantin Belousov 	seg->first_page = fp;
94438d6b2dcSRoger Pau Monné 
94538d6b2dcSRoger Pau Monné 	rw_wlock(&vm_phys_fictitious_reg_lock);
94638d6b2dcSRoger Pau Monné 	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
94738d6b2dcSRoger Pau Monné 	rw_wunlock(&vm_phys_fictitious_reg_lock);
94838d6b2dcSRoger Pau Monné 
949b6de32bdSKonstantin Belousov 	return (0);
950b6de32bdSKonstantin Belousov }
951b6de32bdSKonstantin Belousov 
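/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * registering a hypothetical 16M device aperture as fictitious pages.  The
 * base address is made up; a real driver would use its BAR address and a
 * memory attribute appropriate for the device.
 */
static __unused int
vm_phys_fictitious_reg_example(void)
{
	vm_paddr_t start, end;

	start = 0xd0000000;		/* Hypothetical aperture base. */
	end = start + 16 * 1024 * 1024;
	return (vm_phys_fictitious_reg_range(start, end, VM_MEMATTR_DEFAULT));
}
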
952b6de32bdSKonstantin Belousov void
953b6de32bdSKonstantin Belousov vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
954b6de32bdSKonstantin Belousov {
95538d6b2dcSRoger Pau Monné 	struct vm_phys_fictitious_seg *seg, tmp;
956b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
9575ebe728dSRoger Pau Monné 	long pi, pe;
958b6de32bdSKonstantin Belousov #endif
959b6de32bdSKonstantin Belousov 
9605ebe728dSRoger Pau Monné 	KASSERT(start < end,
9615ebe728dSRoger Pau Monné 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
9625ebe728dSRoger Pau Monné 	    (uintmax_t)start, (uintmax_t)end));
9635ebe728dSRoger Pau Monné 
964b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
965b6de32bdSKonstantin Belousov 	pi = atop(start);
9665ebe728dSRoger Pau Monné 	pe = atop(end);
9675ebe728dSRoger Pau Monné 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
9685ebe728dSRoger Pau Monné 		if ((pe - first_page) <= vm_page_array_size) {
9695ebe728dSRoger Pau Monné 			/*
9705ebe728dSRoger Pau Monné 			 * This segment was allocated using vm_page_array
9715ebe728dSRoger Pau Monné 			 * only, there's nothing to do since those pages
9725ebe728dSRoger Pau Monné 			 * were never added to the tree.
9735ebe728dSRoger Pau Monné 			 */
9745ebe728dSRoger Pau Monné 			return;
9755ebe728dSRoger Pau Monné 		}
9765ebe728dSRoger Pau Monné 		/*
9775ebe728dSRoger Pau Monné 		 * We have a segment that starts inside
9785ebe728dSRoger Pau Monné 		 * of vm_page_array, but ends outside of it.
9795ebe728dSRoger Pau Monné 		 *
9805ebe728dSRoger Pau Monné 		 * Calculate how many pages were added to the
9815ebe728dSRoger Pau Monné 		 * tree and free them.
9825ebe728dSRoger Pau Monné 		 */
9835ebe728dSRoger Pau Monné 		start = ptoa(first_page + vm_page_array_size);
9845ebe728dSRoger Pau Monné 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
9855ebe728dSRoger Pau Monné 		/*
9865ebe728dSRoger Pau Monné 		 * We have a segment that ends inside of vm_page_array,
9875ebe728dSRoger Pau Monné 		 * but starts outside of it.
9885ebe728dSRoger Pau Monné 		 */
9895ebe728dSRoger Pau Monné 		end = ptoa(first_page);
9905ebe728dSRoger Pau Monné 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
9915ebe728dSRoger Pau Monné 		/* Since it's not possible to register such a range, panic. */
9925ebe728dSRoger Pau Monné 		panic(
9935ebe728dSRoger Pau Monné 		    "Unregistering an unregistered fictitious range [%#jx:%#jx]",
9945ebe728dSRoger Pau Monné 		    (uintmax_t)start, (uintmax_t)end);
9955ebe728dSRoger Pau Monné 	}
996b6de32bdSKonstantin Belousov #endif
99738d6b2dcSRoger Pau Monné 	tmp.start = start;
99838d6b2dcSRoger Pau Monné 	tmp.end = 0;
999b6de32bdSKonstantin Belousov 
100038d6b2dcSRoger Pau Monné 	rw_wlock(&vm_phys_fictitious_reg_lock);
100138d6b2dcSRoger Pau Monné 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
100238d6b2dcSRoger Pau Monné 	if (seg == NULL || seg->start != start || seg->end != end) {
100338d6b2dcSRoger Pau Monné 		rw_wunlock(&vm_phys_fictitious_reg_lock);
100438d6b2dcSRoger Pau Monné 		panic(
100538d6b2dcSRoger Pau Monné 		    "Unregistering an unregistered fictitious range [%#jx:%#jx]",
100638d6b2dcSRoger Pau Monné 		    (uintmax_t)start, (uintmax_t)end);
100738d6b2dcSRoger Pau Monné 	}
100838d6b2dcSRoger Pau Monné 	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
100938d6b2dcSRoger Pau Monné 	rw_wunlock(&vm_phys_fictitious_reg_lock);
101038d6b2dcSRoger Pau Monné 	free(seg->first_page, M_FICT_PAGES);
101138d6b2dcSRoger Pau Monné 	free(seg, M_FICT_PAGES);
1012b6de32bdSKonstantin Belousov }
1013b6de32bdSKonstantin Belousov 
101411752d88SAlan Cox /*
101511752d88SAlan Cox  * Free a contiguous, power of two-sized set of physical pages.
10168941dc44SAlan Cox  *
10178941dc44SAlan Cox  * The free page queues must be locked.
101811752d88SAlan Cox  */
101911752d88SAlan Cox void
102011752d88SAlan Cox vm_phys_free_pages(vm_page_t m, int order)
102111752d88SAlan Cox {
102211752d88SAlan Cox 	struct vm_freelist *fl;
102311752d88SAlan Cox 	struct vm_phys_seg *seg;
10245c1f2cc4SAlan Cox 	vm_paddr_t pa;
102511752d88SAlan Cox 	vm_page_t m_buddy;
102611752d88SAlan Cox 
102711752d88SAlan Cox 	KASSERT(m->order == VM_NFREEORDER,
10288941dc44SAlan Cox 	    ("vm_phys_free_pages: page %p has unexpected order %d",
102911752d88SAlan Cox 	    m, m->order));
103011752d88SAlan Cox 	KASSERT(m->pool < VM_NFREEPOOL,
10318941dc44SAlan Cox 	    ("vm_phys_free_pages: page %p has unexpected pool %d",
103211752d88SAlan Cox 	    m, m->pool));
103311752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
10348941dc44SAlan Cox 	    ("vm_phys_free_pages: order %d is out of range", order));
103511752d88SAlan Cox 	seg = &vm_phys_segs[m->segind];
1036e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
10375c1f2cc4SAlan Cox 	if (order < VM_NFREEORDER - 1) {
10385c1f2cc4SAlan Cox 		pa = VM_PAGE_TO_PHYS(m);
10395c1f2cc4SAlan Cox 		do {
10405c1f2cc4SAlan Cox 			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
10415c1f2cc4SAlan Cox 			if (pa < seg->start || pa >= seg->end)
104211752d88SAlan Cox 				break;
10435c1f2cc4SAlan Cox 			m_buddy = &seg->first_page[atop(pa - seg->start)];
104411752d88SAlan Cox 			if (m_buddy->order != order)
104511752d88SAlan Cox 				break;
104611752d88SAlan Cox 			fl = (*seg->free_queues)[m_buddy->pool];
10477e226537SAttilio Rao 			vm_freelist_rem(fl, m_buddy, order);
104811752d88SAlan Cox 			if (m_buddy->pool != m->pool)
104911752d88SAlan Cox 				vm_phys_set_pool(m->pool, m_buddy, order);
105011752d88SAlan Cox 			order++;
10515c1f2cc4SAlan Cox 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
105211752d88SAlan Cox 			m = &seg->first_page[atop(pa - seg->start)];
10535c1f2cc4SAlan Cox 		} while (order < VM_NFREEORDER - 1);
105411752d88SAlan Cox 	}
105511752d88SAlan Cox 	fl = (*seg->free_queues)[m->pool];
10567e226537SAttilio Rao 	vm_freelist_add(fl, m, order, 1);
105711752d88SAlan Cox }
105811752d88SAlan Cox 
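/*
 * Editor's note: a worked example of the coalescing loop above, with
 * hypothetical addresses and a 4K page size.  Freeing an order-0 page at
 * 0x1000 whose buddy at 0x0 is free merges them into an order-1 block at
 * 0x0; if that block's buddy at 0x2000 is also a free order-1 block, the
 * merge repeats, and the result is finally queued at the highest order
 * reached.
 */
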
105911752d88SAlan Cox /*
10605c1f2cc4SAlan Cox  * Free a contiguous, arbitrarily sized set of physical pages.
10615c1f2cc4SAlan Cox  *
10625c1f2cc4SAlan Cox  * The free page queues must be locked.
10635c1f2cc4SAlan Cox  */
10645c1f2cc4SAlan Cox void
10655c1f2cc4SAlan Cox vm_phys_free_contig(vm_page_t m, u_long npages)
10665c1f2cc4SAlan Cox {
10675c1f2cc4SAlan Cox 	u_int n;
10685c1f2cc4SAlan Cox 	int order;
10695c1f2cc4SAlan Cox 
10705c1f2cc4SAlan Cox 	/*
10715c1f2cc4SAlan Cox 	 * Avoid unnecessary coalescing by freeing the pages in the largest
10725c1f2cc4SAlan Cox 	 * possible power-of-two-sized subsets.
10735c1f2cc4SAlan Cox 	 */
1074e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
10755c1f2cc4SAlan Cox 	for (;; npages -= n) {
10765c1f2cc4SAlan Cox 		/*
10775c1f2cc4SAlan Cox 		 * Unsigned "min" is used here so that "order" is assigned
10785c1f2cc4SAlan Cox 		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
10795c1f2cc4SAlan Cox 		 * or the low-order bits of its physical address are zero
10805c1f2cc4SAlan Cox 		 * because the size of a physical address exceeds the size of
10815c1f2cc4SAlan Cox 		 * a long.
10825c1f2cc4SAlan Cox 		 */
10835c1f2cc4SAlan Cox 		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
10845c1f2cc4SAlan Cox 		    VM_NFREEORDER - 1);
10855c1f2cc4SAlan Cox 		n = 1 << order;
10865c1f2cc4SAlan Cox 		if (npages < n)
10875c1f2cc4SAlan Cox 			break;
10885c1f2cc4SAlan Cox 		vm_phys_free_pages(m, order);
10895c1f2cc4SAlan Cox 		m += n;
10905c1f2cc4SAlan Cox 	}
10915c1f2cc4SAlan Cox 	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
10925c1f2cc4SAlan Cox 	for (; npages > 0; npages -= n) {
10935c1f2cc4SAlan Cox 		order = flsl(npages) - 1;
10945c1f2cc4SAlan Cox 		n = 1 << order;
10955c1f2cc4SAlan Cox 		vm_phys_free_pages(m, order);
10965c1f2cc4SAlan Cox 		m += n;
10975c1f2cc4SAlan Cox 	}
10985c1f2cc4SAlan Cox }
10995c1f2cc4SAlan Cox 
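/*
 * Editor's note: a worked example with hypothetical values.  For
 * npages = 6 starting at physical address 0, the first loop exits
 * immediately (six pages is smaller than the maximal block that such
 * alignment would permit), and the second loop frees an order-2 block
 * (four pages) followed by an order-1 block (two pages).
 */
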
11005c1f2cc4SAlan Cox /*
1101c869e672SAlan Cox  * Scan physical memory between the specified addresses "low" and "high" for a
1102c869e672SAlan Cox  * run of contiguous physical pages that satisfy the specified conditions, and
1103c869e672SAlan Cox  * return the lowest page in the run.  The specified "alignment" determines
1104c869e672SAlan Cox  * the alignment of the lowest physical page in the run.  If the specified
1105c869e672SAlan Cox  * "boundary" is non-zero, then the run of physical pages cannot span a
1106c869e672SAlan Cox  * physical address that is a multiple of "boundary".
1107c869e672SAlan Cox  *
1108c869e672SAlan Cox  * "npages" must be greater than zero.  Both "alignment" and "boundary" must
1109c869e672SAlan Cox  * be powers of two.
1110c869e672SAlan Cox  */
1111c869e672SAlan Cox vm_page_t
11123f289c3fSJeff Roberson vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1113c869e672SAlan Cox     u_long alignment, vm_paddr_t boundary, int options)
1114c869e672SAlan Cox {
1115c869e672SAlan Cox 	vm_paddr_t pa_end;
1116c869e672SAlan Cox 	vm_page_t m_end, m_run, m_start;
1117c869e672SAlan Cox 	struct vm_phys_seg *seg;
1118c869e672SAlan Cox 	int segind;
1119c869e672SAlan Cox 
1120c869e672SAlan Cox 	KASSERT(npages > 0, ("npages is 0"));
1121c869e672SAlan Cox 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1122c869e672SAlan Cox 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1123c869e672SAlan Cox 	if (low >= high)
1124c869e672SAlan Cox 		return (NULL);
1125c869e672SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
1126c869e672SAlan Cox 		seg = &vm_phys_segs[segind];
11273f289c3fSJeff Roberson 		if (seg->domain != domain)
11283f289c3fSJeff Roberson 			continue;
1129c869e672SAlan Cox 		if (seg->start >= high)
1130c869e672SAlan Cox 			break;
1131c869e672SAlan Cox 		if (low >= seg->end)
1132c869e672SAlan Cox 			continue;
1133c869e672SAlan Cox 		if (low <= seg->start)
1134c869e672SAlan Cox 			m_start = seg->first_page;
1135c869e672SAlan Cox 		else
1136c869e672SAlan Cox 			m_start = &seg->first_page[atop(low - seg->start)];
1137c869e672SAlan Cox 		if (high < seg->end)
1138c869e672SAlan Cox 			pa_end = high;
1139c869e672SAlan Cox 		else
1140c869e672SAlan Cox 			pa_end = seg->end;
1141c869e672SAlan Cox 		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
1142c869e672SAlan Cox 			continue;
1143c869e672SAlan Cox 		m_end = &seg->first_page[atop(pa_end - seg->start)];
1144c869e672SAlan Cox 		m_run = vm_page_scan_contig(npages, m_start, m_end,
1145c869e672SAlan Cox 		    alignment, boundary, options);
1146c869e672SAlan Cox 		if (m_run != NULL)
1147c869e672SAlan Cox 			return (m_run);
1148c869e672SAlan Cox 	}
1149c869e672SAlan Cox 	return (NULL);
1150c869e672SAlan Cox }
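
/*
 * The "alignment" and "boundary" constraints described above reduce to two
 * bit tests on a candidate run's first and last byte addresses.  A minimal
 * userland sketch follows; the run_satisfies/SKETCH_* names are invented
 * for the example, 4 KB pages and power-of-two arguments are assumed
 * (boundary may also be zero, meaning no constraint), and the block is
 * disabled, so it is not part of this file's build.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PAGE_SIZE	4096UL	/* assumed */

static bool
run_satisfies(uint64_t pa, unsigned long npages, uint64_t alignment,
    uint64_t boundary)
{
	uint64_t size;

	size = npages * SKETCH_PAGE_SIZE;
	/* The first physical page must be aligned. */
	if ((pa & (alignment - 1)) != 0)
		return (false);
	/*
	 * The first and last bytes must lie between the same pair of
	 * boundary multiples, i.e., their addresses may differ only in
	 * bits below log2(boundary).
	 */
	if (boundary != 0 && ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (false);
	return (true);
}

int
main(void)
{

	/* A 16-page run at 1 MB: 64 KB-aligned, inside one 2 MB window. */
	printf("%d\n", run_satisfies(0x100000, 16, 0x10000, 0x200000));
	/* The same 64 KB run necessarily crosses a 32 KB boundary. */
	printf("%d\n", run_satisfies(0x100000, 16, 0x10000, 0x8000));
	return (0);
}
#endif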
1151c869e672SAlan Cox 
1152c869e672SAlan Cox /*
115311752d88SAlan Cox  * Set the pool for a contiguous, power-of-two-sized set of physical pages.
115411752d88SAlan Cox  */
11557bfda801SAlan Cox void
115611752d88SAlan Cox vm_phys_set_pool(int pool, vm_page_t m, int order)
115711752d88SAlan Cox {
115811752d88SAlan Cox 	vm_page_t m_tmp;
115911752d88SAlan Cox 
116011752d88SAlan Cox 	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
116111752d88SAlan Cox 		m_tmp->pool = pool;
116211752d88SAlan Cox }
116311752d88SAlan Cox 
116411752d88SAlan Cox /*
11659742373aSAlan Cox  * Search for the given physical page "m" in the free lists.  If the search
11669742373aSAlan Cox  * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
11679742373aSAlan Cox  * FALSE, indicating that "m" is not in the free lists.
11687bfda801SAlan Cox  *
11697bfda801SAlan Cox  * The free page queues must be locked.
11707bfda801SAlan Cox  */
1171e35395ceSAlan Cox boolean_t
11727bfda801SAlan Cox vm_phys_unfree_page(vm_page_t m)
11737bfda801SAlan Cox {
11747bfda801SAlan Cox 	struct vm_freelist *fl;
11757bfda801SAlan Cox 	struct vm_phys_seg *seg;
11767bfda801SAlan Cox 	vm_paddr_t pa, pa_half;
11777bfda801SAlan Cox 	vm_page_t m_set, m_tmp;
11787bfda801SAlan Cox 	int order;
11797bfda801SAlan Cox 
11807bfda801SAlan Cox 	/*
11817bfda801SAlan Cox 	 * First, find the contiguous, power-of-two-sized set of free
11827bfda801SAlan Cox 	 * physical pages containing the given physical page "m" and
11837bfda801SAlan Cox 	 * assign it to "m_set".
11847bfda801SAlan Cox 	 */
11857bfda801SAlan Cox 	seg = &vm_phys_segs[m->segind];
1186e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
11877bfda801SAlan Cox 	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1188bc8794a1SAlan Cox 	    order < VM_NFREEORDER - 1; ) {
11897bfda801SAlan Cox 		order++;
11907bfda801SAlan Cox 		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
11912fbced65SAlan Cox 		if (pa >= seg->start)
11927bfda801SAlan Cox 			m_set = &seg->first_page[atop(pa - seg->start)];
1193e35395ceSAlan Cox 		else
1194e35395ceSAlan Cox 			return (FALSE);
11957bfda801SAlan Cox 	}
1196e35395ceSAlan Cox 	if (m_set->order < order)
1197e35395ceSAlan Cox 		return (FALSE);
1198e35395ceSAlan Cox 	if (m_set->order == VM_NFREEORDER)
1199e35395ceSAlan Cox 		return (FALSE);
12007bfda801SAlan Cox 	KASSERT(m_set->order < VM_NFREEORDER,
12017bfda801SAlan Cox 	    ("vm_phys_unfree_page: page %p has unexpected order %d",
12027bfda801SAlan Cox 	    m_set, m_set->order));
12037bfda801SAlan Cox 
12047bfda801SAlan Cox 	/*
12057bfda801SAlan Cox 	 * Next, remove "m_set" from the free lists.  Finally, extract
12067bfda801SAlan Cox 	 * "m" from "m_set" using an iterative algorithm: While "m_set"
12077bfda801SAlan Cox 	 * is larger than a page, shrink "m_set" by returning the half
12087bfda801SAlan Cox 	 * of "m_set" that does not contain "m" to the free lists.
12097bfda801SAlan Cox 	 */
12107bfda801SAlan Cox 	fl = (*seg->free_queues)[m_set->pool];
12117bfda801SAlan Cox 	order = m_set->order;
12127e226537SAttilio Rao 	vm_freelist_rem(fl, m_set, order);
12137bfda801SAlan Cox 	while (order > 0) {
12147bfda801SAlan Cox 		order--;
12157bfda801SAlan Cox 		pa_half = m_set->phys_addr ^ ((vm_paddr_t)1 << (PAGE_SHIFT + order));
12167bfda801SAlan Cox 		if (m->phys_addr < pa_half)
12177bfda801SAlan Cox 			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
12187bfda801SAlan Cox 		else {
12197bfda801SAlan Cox 			m_tmp = m_set;
12207bfda801SAlan Cox 			m_set = &seg->first_page[atop(pa_half - seg->start)];
12217bfda801SAlan Cox 		}
12227e226537SAttilio Rao 		vm_freelist_add(fl, m_tmp, order, 0);
12237bfda801SAlan Cox 	}
12247bfda801SAlan Cox 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
1225e35395ceSAlan Cox 	return (TRUE);
12267bfda801SAlan Cox }
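
/*
 * A minimal userland model of the halving loop above: at each step the
 * block containing the target page is split, the half not containing the
 * target is conceptually returned to the free lists, and the search
 * continues in the other half.  The addresses, the order, and the
 * SKETCH_* name are invented for the example; 4 KB pages are assumed; the
 * block is disabled and not part of this file's build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PAGE_SHIFT	12	/* assumed */

int
main(void)
{
	uint64_t set, target, half;
	int order;

	set = 0x40000;			/* start of the free block */
	target = 0x43000;		/* page to extract */
	order = 3;			/* the block spans 8 pages */

	while (order > 0) {
		order--;
		/* The upper half starts at bit (PAGE_SHIFT + order). */
		half = set ^ ((uint64_t)1 << (SKETCH_PAGE_SHIFT + order));
		if (target < half) {
			/* Target in the lower half; return the upper. */
			printf("return %#jx order %d\n", (uintmax_t)half,
			    order);
		} else {
			/* Target in the upper half; return the lower. */
			printf("return %#jx order %d\n", (uintmax_t)set,
			    order);
			set = half;
		}
	}
	printf("extracted page at %#jx\n", (uintmax_t)set);
	return (0);
}
#endif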
12277bfda801SAlan Cox 
12287bfda801SAlan Cox /*
12292f9f48d6SAlan Cox  * Allocate a contiguous set of physical pages of the given size
12302f9f48d6SAlan Cox  * "npages" from the free lists.  All of the physical pages must be at
12312f9f48d6SAlan Cox  * or above the given physical address "low" and below the given
12322f9f48d6SAlan Cox  * physical address "high".  The given value "alignment" determines the
12332f9f48d6SAlan Cox  * alignment of the first physical page in the set.  If the given value
12342f9f48d6SAlan Cox  * "boundary" is non-zero, then the set of physical pages cannot cross
12352f9f48d6SAlan Cox  * any physical address boundary that is a multiple of that value.  Both
123611752d88SAlan Cox  * "alignment" and "boundary" must be powers of two.
123711752d88SAlan Cox  */
123811752d88SAlan Cox vm_page_t
1239ef435ae7SJeff Roberson vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
12405c1f2cc4SAlan Cox     u_long alignment, vm_paddr_t boundary)
124111752d88SAlan Cox {
1242c869e672SAlan Cox 	vm_paddr_t pa_end, pa_start;
1243c869e672SAlan Cox 	vm_page_t m_run;
1244c869e672SAlan Cox 	struct vm_phys_seg *seg;
1245ef435ae7SJeff Roberson 	int segind;
124611752d88SAlan Cox 
1247c869e672SAlan Cox 	KASSERT(npages > 0, ("npages is 0"));
1248c869e672SAlan Cox 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1249c869e672SAlan Cox 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1250e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
1251c869e672SAlan Cox 	if (low >= high)
1252c869e672SAlan Cox 		return (NULL);
1253c869e672SAlan Cox 	m_run = NULL;
1254477bffbeSAlan Cox 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1255c869e672SAlan Cox 		seg = &vm_phys_segs[segind];
1256477bffbeSAlan Cox 		if (seg->start >= high || seg->domain != domain)
125711752d88SAlan Cox 			continue;
1258477bffbeSAlan Cox 		if (low >= seg->end)
1259477bffbeSAlan Cox 			break;
1260c869e672SAlan Cox 		if (low <= seg->start)
1261c869e672SAlan Cox 			pa_start = seg->start;
1262c869e672SAlan Cox 		else
1263c869e672SAlan Cox 			pa_start = low;
1264c869e672SAlan Cox 		if (high < seg->end)
1265c869e672SAlan Cox 			pa_end = high;
1266c869e672SAlan Cox 		else
1267c869e672SAlan Cox 			pa_end = seg->end;
1268c869e672SAlan Cox 		if (pa_end - pa_start < ptoa(npages))
1269c869e672SAlan Cox 			continue;
1270c869e672SAlan Cox 		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
1271c869e672SAlan Cox 		    alignment, boundary);
1272c869e672SAlan Cox 		if (m_run != NULL)
1273c869e672SAlan Cox 			break;
1274c869e672SAlan Cox 	}
1275c869e672SAlan Cox 	return (m_run);
1276c869e672SAlan Cox }
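
/*
 * A minimal userland model of the segment loop above: the caller's
 * [low, high) window is intersected with each segment's [start, end)
 * range, and a segment is viable only if the intersection can hold
 * npages.  The seg_can_hold/SKETCH_* names and all values are invented
 * for the example; 4 KB pages are assumed; the block is disabled and not
 * part of this file's build.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PAGE_SIZE	4096UL	/* assumed */

static bool
seg_can_hold(uint64_t seg_start, uint64_t seg_end, uint64_t low,
    uint64_t high, unsigned long npages)
{
	uint64_t pa_start, pa_end;

	if (seg_start >= high || low >= seg_end)
		return (false);		/* no overlap at all */
	pa_start = low <= seg_start ? seg_start : low;
	pa_end = high < seg_end ? high : seg_end;
	return (pa_end - pa_start >= npages * SKETCH_PAGE_SIZE);
}

int
main(void)
{

	/* A 1 MB segment at 16 MB; only 128 pages fit below 16.5 MB. */
	printf("%d\n", seg_can_hold(0x1000000, 0x1100000, 0, 0x1080000,
	    200));
	printf("%d\n", seg_can_hold(0x1000000, 0x1100000, 0, 0x1080000,
	    128));
	return (0);
}
#endif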
127711752d88SAlan Cox 
127811752d88SAlan Cox /*
1279c869e672SAlan Cox  * Allocate a run of contiguous physical pages from the free list for the
1280c869e672SAlan Cox  * specified segment.
1281c869e672SAlan Cox  */
1282c869e672SAlan Cox static vm_page_t
1283c869e672SAlan Cox vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
1284c869e672SAlan Cox     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1285c869e672SAlan Cox {
1286c869e672SAlan Cox 	struct vm_freelist *fl;
1287c869e672SAlan Cox 	vm_paddr_t pa, pa_end, size;
1288c869e672SAlan Cox 	vm_page_t m, m_ret;
1289c869e672SAlan Cox 	u_long npages_end;
1290c869e672SAlan Cox 	int oind, order, pind;
1291c869e672SAlan Cox 
1292c869e672SAlan Cox 	KASSERT(npages > 0, ("npages is 0"));
1293c869e672SAlan Cox 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1294c869e672SAlan Cox 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1295e2068d0bSJeff Roberson 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1296c869e672SAlan Cox 	/* Compute the queue that is the best fit for npages. */
12979161b4deSAlan Cox 	order = flsl(npages - 1);
1298c869e672SAlan Cox 	/* Search for a run satisfying the specified conditions. */
1299c869e672SAlan Cox 	size = npages << PAGE_SHIFT;
1300c869e672SAlan Cox 	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
1301c869e672SAlan Cox 	    oind++) {
1302c869e672SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1303c869e672SAlan Cox 			fl = (*seg->free_queues)[pind];
13045cd29d0fSMark Johnston 			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
1305c869e672SAlan Cox 				/*
130611752d88SAlan Cox 				 * Is the size of this allocation request
130711752d88SAlan Cox 				 * larger than the largest block size?
130811752d88SAlan Cox 				 */
130911752d88SAlan Cox 				if (order >= VM_NFREEORDER) {
131011752d88SAlan Cox 					/*
1311c869e672SAlan Cox 					 * Determine if a sufficient number of
1312c869e672SAlan Cox 					 * subsequent blocks to satisfy the
1313c869e672SAlan Cox 					 * allocation request are free.
131411752d88SAlan Cox 					 */
131511752d88SAlan Cox 					pa = VM_PAGE_TO_PHYS(m_ret);
1316c869e672SAlan Cox 					pa_end = pa + size;
131779e9552eSKonstantin Belousov 					if (pa_end < pa)
131879e9552eSKonstantin Belousov 						continue;
131911752d88SAlan Cox 					for (;;) {
1320c869e672SAlan Cox 						pa += 1 << (PAGE_SHIFT +
1321c869e672SAlan Cox 						    VM_NFREEORDER - 1);
1322c869e672SAlan Cox 						if (pa >= pa_end ||
1323c869e672SAlan Cox 						    pa < seg->start ||
132411752d88SAlan Cox 						    pa >= seg->end)
132511752d88SAlan Cox 							break;
1326c869e672SAlan Cox 						m = &seg->first_page[atop(pa -
1327c869e672SAlan Cox 						    seg->start)];
1328c869e672SAlan Cox 						if (m->order != VM_NFREEORDER -
1329c869e672SAlan Cox 						    1)
133011752d88SAlan Cox 							break;
133111752d88SAlan Cox 					}
1332c869e672SAlan Cox 					/* If not, go to the next block. */
1333c869e672SAlan Cox 					if (pa < pa_end)
133411752d88SAlan Cox 						continue;
133511752d88SAlan Cox 				}
133611752d88SAlan Cox 
133711752d88SAlan Cox 				/*
1338c869e672SAlan Cox 				 * Determine if the blocks are within the
1339c869e672SAlan Cox 				 * given range, satisfy the given alignment,
1340c869e672SAlan Cox 				 * and do not cross the given boundary.
134111752d88SAlan Cox 				 */
134211752d88SAlan Cox 				pa = VM_PAGE_TO_PHYS(m_ret);
1343c869e672SAlan Cox 				pa_end = pa + size;
1344d9c9c81cSPedro F. Giffuni 				if (pa >= low && pa_end <= high &&
1345d9c9c81cSPedro F. Giffuni 				    (pa & (alignment - 1)) == 0 &&
1346d9c9c81cSPedro F. Giffuni 				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
134711752d88SAlan Cox 					goto done;
134811752d88SAlan Cox 			}
134911752d88SAlan Cox 		}
135011752d88SAlan Cox 	}
135111752d88SAlan Cox 	return (NULL);
135211752d88SAlan Cox done:
135311752d88SAlan Cox 	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
135411752d88SAlan Cox 		fl = (*seg->free_queues)[m->pool];
13559161b4deSAlan Cox 		vm_freelist_rem(fl, m, oind);
13569161b4deSAlan Cox 		if (m->pool != VM_FREEPOOL_DEFAULT)
13579161b4deSAlan Cox 			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
135811752d88SAlan Cox 	}
13595c1f2cc4SAlan Cox 	/* Return excess pages to the free lists. */
13609161b4deSAlan Cox 	npages_end = roundup2(npages, 1 << oind);
13617493904eSAlan Cox 	if (npages < npages_end) {
13627493904eSAlan Cox 		fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
13637493904eSAlan Cox 		vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
13647493904eSAlan Cox 	}
136511752d88SAlan Cox 	return (m_ret);
136611752d88SAlan Cox }
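
/*
 * Two computations above are worth seeing in isolation: flsl(npages - 1)
 * is the smallest order whose block size is >= npages (the best-fit
 * starting queue), and roundup2(npages, 1 << oind) - npages is the excess
 * returned to the free lists once a block of order oind is carved up
 * (here oind is taken equal to the starting order).  A minimal userland
 * sketch with invented example values; disabled and not part of this
 * file's build.
 */
#if 0
#include <stdio.h>
#include <strings.h>	/* flsl() */

int
main(void)
{
	unsigned long npages, block, excess;
	int order;

	npages = 300;			/* example request */
	order = flsl(npages - 1);	/* 1 << order >= npages */
	block = 1UL << order;
	excess = block - npages;	/* roundup2(npages, block) - npages */
	printf("npages %lu: start at order %d (block %lu), excess %lu\n",
	    npages, order, block, excess);
	return (0);
}
#endif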
136711752d88SAlan Cox 
136811752d88SAlan Cox #ifdef DDB
136911752d88SAlan Cox /*
137011752d88SAlan Cox  * Show the number of physical pages in each of the free lists.
137111752d88SAlan Cox  */
137211752d88SAlan Cox DB_SHOW_COMMAND(freepages, db_show_freepages)
137311752d88SAlan Cox {
137411752d88SAlan Cox 	struct vm_freelist *fl;
13757e226537SAttilio Rao 	int flind, oind, pind, dom;
137611752d88SAlan Cox 
13777e226537SAttilio Rao 	for (dom = 0; dom < vm_ndomains; dom++) {
13787e226537SAttilio Rao 		db_printf("DOMAIN: %d\n", dom);
137911752d88SAlan Cox 		for (flind = 0; flind < vm_nfreelists; flind++) {
138011752d88SAlan Cox 			db_printf("FREE LIST %d:\n"
138111752d88SAlan Cox 			    "\n  ORDER (SIZE)  |  NUMBER"
138211752d88SAlan Cox 			    "\n              ", flind);
138311752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
138411752d88SAlan Cox 				db_printf("  |  POOL %d", pind);
138511752d88SAlan Cox 			db_printf("\n--            ");
138611752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
138711752d88SAlan Cox 				db_printf("-- --      ");
138811752d88SAlan Cox 			db_printf("--\n");
138911752d88SAlan Cox 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
139011752d88SAlan Cox 				db_printf("  %2.2d (%6.6dK)", oind,
139111752d88SAlan Cox 				    1 << (PAGE_SHIFT - 10 + oind));
139211752d88SAlan Cox 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
13937e226537SAttilio Rao 					fl = vm_phys_free_queues[dom][flind][pind];
139411752d88SAlan Cox 					db_printf("  |  %6.6d", fl[oind].lcnt);
139511752d88SAlan Cox 				}
139611752d88SAlan Cox 				db_printf("\n");
139711752d88SAlan Cox 			}
139811752d88SAlan Cox 			db_printf("\n");
139911752d88SAlan Cox 		}
14007e226537SAttilio Rao 		db_printf("\n");
14017e226537SAttilio Rao 	}
140211752d88SAlan Cox }
140311752d88SAlan Cox #endif
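
/*
 * Usage note: from the in-kernel debugger the command defined above is
 * invoked as
 *
 *	db> show freepages
 *
 * and prints, for each domain and free list, a table with one row per
 * order and one column per pool (each entry is that queue's lcnt).
 */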
1404