xref: /freebsd/sys/vm/vm_phys.c (revision 6f4acaf4c9eaacdc7cfb8f804c45404d919ef45c)
111752d88SAlan Cox /*-
2fe267a55SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3fe267a55SPedro F. Giffuni  *
411752d88SAlan Cox  * Copyright (c) 2002-2006 Rice University
511752d88SAlan Cox  * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
611752d88SAlan Cox  * All rights reserved.
711752d88SAlan Cox  *
811752d88SAlan Cox  * This software was developed for the FreeBSD Project by Alan L. Cox,
911752d88SAlan Cox  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
1011752d88SAlan Cox  *
1111752d88SAlan Cox  * Redistribution and use in source and binary forms, with or without
1211752d88SAlan Cox  * modification, are permitted provided that the following conditions
1311752d88SAlan Cox  * are met:
1411752d88SAlan Cox  * 1. Redistributions of source code must retain the above copyright
1511752d88SAlan Cox  *    notice, this list of conditions and the following disclaimer.
1611752d88SAlan Cox  * 2. Redistributions in binary form must reproduce the above copyright
1711752d88SAlan Cox  *    notice, this list of conditions and the following disclaimer in the
1811752d88SAlan Cox  *    documentation and/or other materials provided with the distribution.
1911752d88SAlan Cox  *
2011752d88SAlan Cox  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2111752d88SAlan Cox  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2211752d88SAlan Cox  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2311752d88SAlan Cox  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
2411752d88SAlan Cox  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
2511752d88SAlan Cox  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
2611752d88SAlan Cox  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
2711752d88SAlan Cox  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
2811752d88SAlan Cox  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2911752d88SAlan Cox  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
3011752d88SAlan Cox  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3111752d88SAlan Cox  * POSSIBILITY OF SUCH DAMAGE.
3211752d88SAlan Cox  */
3311752d88SAlan Cox 
34fbd80bd0SAlan Cox /*
35fbd80bd0SAlan Cox  *	Physical memory system implementation
36fbd80bd0SAlan Cox  *
37fbd80bd0SAlan Cox  * Any external functions defined by this module are only to be used by the
38fbd80bd0SAlan Cox  * virtual memory system.
39fbd80bd0SAlan Cox  */
40fbd80bd0SAlan Cox 
4111752d88SAlan Cox #include <sys/cdefs.h>
4211752d88SAlan Cox __FBSDID("$FreeBSD$");
4311752d88SAlan Cox 
4411752d88SAlan Cox #include "opt_ddb.h"
45174b5f38SJohn Baldwin #include "opt_vm.h"
4611752d88SAlan Cox 
4711752d88SAlan Cox #include <sys/param.h>
4811752d88SAlan Cox #include <sys/systm.h>
4911752d88SAlan Cox #include <sys/lock.h>
5011752d88SAlan Cox #include <sys/kernel.h>
5111752d88SAlan Cox #include <sys/malloc.h>
5211752d88SAlan Cox #include <sys/mutex.h>
537e226537SAttilio Rao #include <sys/proc.h>
5411752d88SAlan Cox #include <sys/queue.h>
5538d6b2dcSRoger Pau Monné #include <sys/rwlock.h>
5611752d88SAlan Cox #include <sys/sbuf.h>
5711752d88SAlan Cox #include <sys/sysctl.h>
5838d6b2dcSRoger Pau Monné #include <sys/tree.h>
5911752d88SAlan Cox #include <sys/vmmeter.h>
606520495aSAdrian Chadd #include <sys/seq.h>
6111752d88SAlan Cox 
6211752d88SAlan Cox #include <ddb/ddb.h>
6311752d88SAlan Cox 
6411752d88SAlan Cox #include <vm/vm.h>
6511752d88SAlan Cox #include <vm/vm_param.h>
6611752d88SAlan Cox #include <vm/vm_kern.h>
6711752d88SAlan Cox #include <vm/vm_object.h>
6811752d88SAlan Cox #include <vm/vm_page.h>
6911752d88SAlan Cox #include <vm/vm_phys.h>
7011752d88SAlan Cox 
71449c2e92SKonstantin Belousov _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
72449c2e92SKonstantin Belousov     "Too many physsegs.");
7311752d88SAlan Cox 
7462d70a81SJohn Baldwin #ifdef VM_NUMA_ALLOC
75a3870a18SJohn Baldwin struct mem_affinity *mem_affinity;
76415d7ccaSAdrian Chadd int *mem_locality;
7762d70a81SJohn Baldwin #endif
78a3870a18SJohn Baldwin 
797e226537SAttilio Rao int vm_ndomains = 1;
807e226537SAttilio Rao 
81449c2e92SKonstantin Belousov struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
82449c2e92SKonstantin Belousov int vm_phys_nsegs;
8311752d88SAlan Cox 
8438d6b2dcSRoger Pau Monné struct vm_phys_fictitious_seg;
8538d6b2dcSRoger Pau Monné static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
8638d6b2dcSRoger Pau Monné     struct vm_phys_fictitious_seg *);
8738d6b2dcSRoger Pau Monné 
8838d6b2dcSRoger Pau Monné RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
8938d6b2dcSRoger Pau Monné     RB_INITIALIZER(_vm_phys_fictitious_tree);
9038d6b2dcSRoger Pau Monné 
9138d6b2dcSRoger Pau Monné struct vm_phys_fictitious_seg {
9238d6b2dcSRoger Pau Monné 	RB_ENTRY(vm_phys_fictitious_seg) node;
9338d6b2dcSRoger Pau Monné 	/* Memory region data */
94b6de32bdSKonstantin Belousov 	vm_paddr_t	start;
95b6de32bdSKonstantin Belousov 	vm_paddr_t	end;
96b6de32bdSKonstantin Belousov 	vm_page_t	first_page;
9738d6b2dcSRoger Pau Monné };
9838d6b2dcSRoger Pau Monné 
9938d6b2dcSRoger Pau Monné RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
10038d6b2dcSRoger Pau Monné     vm_phys_fictitious_cmp);
10138d6b2dcSRoger Pau Monné 
10238d6b2dcSRoger Pau Monné static struct rwlock vm_phys_fictitious_reg_lock;
103c0432fc3SMark Johnston MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
104b6de32bdSKonstantin Belousov 
10511752d88SAlan Cox static struct vm_freelist
1067e226537SAttilio Rao     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
10711752d88SAlan Cox 
108d866a563SAlan Cox static int vm_nfreelists;
109d866a563SAlan Cox 
110d866a563SAlan Cox /*
111d866a563SAlan Cox  * Provides the mapping from VM_FREELIST_* to free list indices (flind).
112d866a563SAlan Cox  */
113d866a563SAlan Cox static int vm_freelist_to_flind[VM_NFREELIST];
114d866a563SAlan Cox 
115d866a563SAlan Cox CTASSERT(VM_FREELIST_DEFAULT == 0);
116d866a563SAlan Cox 
117d866a563SAlan Cox #ifdef VM_FREELIST_ISADMA
118d866a563SAlan Cox #define	VM_ISADMA_BOUNDARY	16777216
119d866a563SAlan Cox #endif
120d866a563SAlan Cox #ifdef VM_FREELIST_DMA32
121d866a563SAlan Cox #define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
122d866a563SAlan Cox #endif
123d866a563SAlan Cox 
124d866a563SAlan Cox /*
125d866a563SAlan Cox  * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
126d866a563SAlan Cox  * the ordering of the free list boundaries.
127d866a563SAlan Cox  */
128d866a563SAlan Cox #if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
129d866a563SAlan Cox CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
130d866a563SAlan Cox #endif
131d866a563SAlan Cox #if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
132d866a563SAlan Cox CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
133d866a563SAlan Cox #endif
13411752d88SAlan Cox 
13511752d88SAlan Cox static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
13611752d88SAlan Cox SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
13711752d88SAlan Cox     NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");
13811752d88SAlan Cox 
13911752d88SAlan Cox static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
14011752d88SAlan Cox SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
14111752d88SAlan Cox     NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");
14211752d88SAlan Cox 
14362d70a81SJohn Baldwin #ifdef VM_NUMA_ALLOC
144415d7ccaSAdrian Chadd static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
145415d7ccaSAdrian Chadd SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
146415d7ccaSAdrian Chadd     NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
1476520495aSAdrian Chadd #endif
148415d7ccaSAdrian Chadd 
1497e226537SAttilio Rao SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
1507e226537SAttilio Rao     &vm_ndomains, 0, "Number of physical memory domains available.");
151a3870a18SJohn Baldwin 
152c869e672SAlan Cox static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
153c869e672SAlan Cox     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
154c869e672SAlan Cox     vm_paddr_t boundary);
155d866a563SAlan Cox static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
156d866a563SAlan Cox static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
15711752d88SAlan Cox static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
15811752d88SAlan Cox     int order);
15911752d88SAlan Cox 
16038d6b2dcSRoger Pau Monné /*
16138d6b2dcSRoger Pau Monné  * Red-black tree helpers for vm fictitious range management.
16238d6b2dcSRoger Pau Monné  */
16338d6b2dcSRoger Pau Monné static inline int
16438d6b2dcSRoger Pau Monné vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
16538d6b2dcSRoger Pau Monné     struct vm_phys_fictitious_seg *range)
16638d6b2dcSRoger Pau Monné {
16738d6b2dcSRoger Pau Monné 
16838d6b2dcSRoger Pau Monné 	KASSERT(range->start != 0 && range->end != 0,
16938d6b2dcSRoger Pau Monné 	    ("Invalid range passed on search for vm_fictitious page"));
17038d6b2dcSRoger Pau Monné 	if (p->start >= range->end)
17138d6b2dcSRoger Pau Monné 		return (1);
17238d6b2dcSRoger Pau Monné 	if (p->start < range->start)
17338d6b2dcSRoger Pau Monné 		return (-1);
17438d6b2dcSRoger Pau Monné 
17538d6b2dcSRoger Pau Monné 	return (0);
17638d6b2dcSRoger Pau Monné }
17738d6b2dcSRoger Pau Monné 
17838d6b2dcSRoger Pau Monné static int
17938d6b2dcSRoger Pau Monné vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
18038d6b2dcSRoger Pau Monné     struct vm_phys_fictitious_seg *p2)
18138d6b2dcSRoger Pau Monné {
18238d6b2dcSRoger Pau Monné 
18338d6b2dcSRoger Pau Monné 	/* Check if this is a search for a page */
18438d6b2dcSRoger Pau Monné 	if (p1->end == 0)
18538d6b2dcSRoger Pau Monné 		return (vm_phys_fictitious_in_range(p1, p2));
18638d6b2dcSRoger Pau Monné 
18738d6b2dcSRoger Pau Monné 	KASSERT(p2->end != 0,
18838d6b2dcSRoger Pau Monné     ("Invalid range passed as second parameter to vm fictitious comparison"));
18938d6b2dcSRoger Pau Monné 
19038d6b2dcSRoger Pau Monné 	/* Searching to add a new range */
19138d6b2dcSRoger Pau Monné 	if (p1->end <= p2->start)
19238d6b2dcSRoger Pau Monné 		return (-1);
19338d6b2dcSRoger Pau Monné 	if (p1->start >= p2->end)
19438d6b2dcSRoger Pau Monné 		return (1);
19538d6b2dcSRoger Pau Monné 
19638d6b2dcSRoger Pau Monné 	panic("Trying to add overlapping vm fictitious ranges:\n"
19738d6b2dcSRoger Pau Monné 	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
19838d6b2dcSRoger Pau Monné 	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
19938d6b2dcSRoger Pau Monné }
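
/*
 * Illustrative note (not part of the original comments): point lookups reuse
 * this comparator by passing a degenerate "range" whose end is 0, as
 * vm_phys_fictitious_to_vm_page() does below.  The comparator then defers to
 * vm_phys_fictitious_in_range(), so RB_FIND() effectively performs an
 * interval search and returns the registered segment containing the given
 * address, if any.
 */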
20038d6b2dcSRoger Pau Monné 
201*6f4acaf4SJeff Roberson int
202*6f4acaf4SJeff Roberson vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
203449c2e92SKonstantin Belousov {
204*6f4acaf4SJeff Roberson #ifdef VM_NUMA_ALLOC
205*6f4acaf4SJeff Roberson 	domainset_t mask;
206*6f4acaf4SJeff Roberson 	int i;
207449c2e92SKonstantin Belousov 
208*6f4acaf4SJeff Roberson 	if (vm_ndomains == 1 || mem_affinity == NULL)
209*6f4acaf4SJeff Roberson 		return (0);
210*6f4acaf4SJeff Roberson 
211*6f4acaf4SJeff Roberson 	DOMAINSET_ZERO(&mask);
212*6f4acaf4SJeff Roberson 	/*
213*6f4acaf4SJeff Roberson 	 * Check for any memory that overlaps low, high.
214*6f4acaf4SJeff Roberson 	 */
215*6f4acaf4SJeff Roberson 	for (i = 0; mem_affinity[i].end != 0; i++)
216*6f4acaf4SJeff Roberson 		if (mem_affinity[i].start <= high &&
217*6f4acaf4SJeff Roberson 		    mem_affinity[i].end >= low)
218*6f4acaf4SJeff Roberson 			DOMAINSET_SET(mem_affinity[i].domain, &mask);
219*6f4acaf4SJeff Roberson 	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
220*6f4acaf4SJeff Roberson 		return (prefer);
221*6f4acaf4SJeff Roberson 	if (DOMAINSET_EMPTY(&mask))
222*6f4acaf4SJeff Roberson 		panic("vm_phys_domain_match: Impossible constraint");
223*6f4acaf4SJeff Roberson 	return (DOMAINSET_FFS(&mask) - 1);
224*6f4acaf4SJeff Roberson #else
225*6f4acaf4SJeff Roberson 	return (0);
226*6f4acaf4SJeff Roberson #endif
227449c2e92SKonstantin Belousov }
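
/*
 * Illustrative example (hypothetical layout, not from the sources): if
 * mem_affinity describes domain 0 as [0, 4GB) and domain 1 as [4GB, 8GB),
 * a request with low = 5GB and high = 6GB overlaps only domain 1, so 1 is
 * returned regardless of "prefer".  With low = 0, high = 8GB, and
 * prefer = 1, both domains overlap the range and the preferred domain, 1,
 * is returned.
 */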
228449c2e92SKonstantin Belousov 
22911752d88SAlan Cox /*
23011752d88SAlan Cox  * Outputs the state of the physical memory allocator, specifically,
23111752d88SAlan Cox  * the amount of physical memory in each free list.
23211752d88SAlan Cox  */
23311752d88SAlan Cox static int
23411752d88SAlan Cox sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
23511752d88SAlan Cox {
23611752d88SAlan Cox 	struct sbuf sbuf;
23711752d88SAlan Cox 	struct vm_freelist *fl;
2387e226537SAttilio Rao 	int dom, error, flind, oind, pind;
23911752d88SAlan Cox 
24000f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
24100f0e671SMatthew D Fleming 	if (error != 0)
24200f0e671SMatthew D Fleming 		return (error);
2437e226537SAttilio Rao 	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
2447e226537SAttilio Rao 	for (dom = 0; dom < vm_ndomains; dom++) {
245eb2f42fbSAlan Cox 		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
24611752d88SAlan Cox 		for (flind = 0; flind < vm_nfreelists; flind++) {
247eb2f42fbSAlan Cox 			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
24811752d88SAlan Cox 			    "\n  ORDER (SIZE)  |  NUMBER"
24911752d88SAlan Cox 			    "\n              ", flind);
25011752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
25111752d88SAlan Cox 				sbuf_printf(&sbuf, "  |  POOL %d", pind);
25211752d88SAlan Cox 			sbuf_printf(&sbuf, "\n--            ");
25311752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
25411752d88SAlan Cox 				sbuf_printf(&sbuf, "-- --      ");
25511752d88SAlan Cox 			sbuf_printf(&sbuf, "--\n");
25611752d88SAlan Cox 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
257d689bc00SAlan Cox 				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
25811752d88SAlan Cox 				    1 << (PAGE_SHIFT - 10 + oind));
25911752d88SAlan Cox 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
2607e226537SAttilio Rao 				fl = vm_phys_free_queues[dom][flind][pind];
261eb2f42fbSAlan Cox 					sbuf_printf(&sbuf, "  |  %6d",
2627e226537SAttilio Rao 					    fl[oind].lcnt);
26311752d88SAlan Cox 				}
26411752d88SAlan Cox 				sbuf_printf(&sbuf, "\n");
26511752d88SAlan Cox 			}
2667e226537SAttilio Rao 		}
26711752d88SAlan Cox 	}
2684e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
26911752d88SAlan Cox 	sbuf_delete(&sbuf);
27011752d88SAlan Cox 	return (error);
27111752d88SAlan Cox }
27211752d88SAlan Cox 
27311752d88SAlan Cox /*
27411752d88SAlan Cox  * Outputs the set of physical memory segments.
27511752d88SAlan Cox  */
27611752d88SAlan Cox static int
27711752d88SAlan Cox sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
27811752d88SAlan Cox {
27911752d88SAlan Cox 	struct sbuf sbuf;
28011752d88SAlan Cox 	struct vm_phys_seg *seg;
28111752d88SAlan Cox 	int error, segind;
28211752d88SAlan Cox 
28300f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
28400f0e671SMatthew D Fleming 	if (error != 0)
28500f0e671SMatthew D Fleming 		return (error);
2864e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
28711752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
28811752d88SAlan Cox 		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
28911752d88SAlan Cox 		seg = &vm_phys_segs[segind];
29011752d88SAlan Cox 		sbuf_printf(&sbuf, "start:     %#jx\n",
29111752d88SAlan Cox 		    (uintmax_t)seg->start);
29211752d88SAlan Cox 		sbuf_printf(&sbuf, "end:       %#jx\n",
29311752d88SAlan Cox 		    (uintmax_t)seg->end);
294a3870a18SJohn Baldwin 		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
29511752d88SAlan Cox 		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
29611752d88SAlan Cox 	}
2974e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
29811752d88SAlan Cox 	sbuf_delete(&sbuf);
29911752d88SAlan Cox 	return (error);
30011752d88SAlan Cox }
30111752d88SAlan Cox 
302415d7ccaSAdrian Chadd /*
303415d7ccaSAdrian Chadd  * Return affinity, or -1 if there's no affinity information.
304415d7ccaSAdrian Chadd  */
3056520495aSAdrian Chadd int
306415d7ccaSAdrian Chadd vm_phys_mem_affinity(int f, int t)
307415d7ccaSAdrian Chadd {
308415d7ccaSAdrian Chadd 
30962d70a81SJohn Baldwin #ifdef VM_NUMA_ALLOC
310415d7ccaSAdrian Chadd 	if (mem_locality == NULL)
311415d7ccaSAdrian Chadd 		return (-1);
312415d7ccaSAdrian Chadd 	if (f >= vm_ndomains || t >= vm_ndomains)
313415d7ccaSAdrian Chadd 		return (-1);
314415d7ccaSAdrian Chadd 	return (mem_locality[f * vm_ndomains + t]);
3156520495aSAdrian Chadd #else
3166520495aSAdrian Chadd 	return (-1);
3176520495aSAdrian Chadd #endif
318415d7ccaSAdrian Chadd }
319415d7ccaSAdrian Chadd 
32062d70a81SJohn Baldwin #ifdef VM_NUMA_ALLOC
321415d7ccaSAdrian Chadd /*
322415d7ccaSAdrian Chadd  * Outputs the VM locality table.
323415d7ccaSAdrian Chadd  */
324415d7ccaSAdrian Chadd static int
325415d7ccaSAdrian Chadd sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
326415d7ccaSAdrian Chadd {
327415d7ccaSAdrian Chadd 	struct sbuf sbuf;
328415d7ccaSAdrian Chadd 	int error, i, j;
329415d7ccaSAdrian Chadd 
330415d7ccaSAdrian Chadd 	error = sysctl_wire_old_buffer(req, 0);
331415d7ccaSAdrian Chadd 	if (error != 0)
332415d7ccaSAdrian Chadd 		return (error);
333415d7ccaSAdrian Chadd 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
334415d7ccaSAdrian Chadd 
335415d7ccaSAdrian Chadd 	sbuf_printf(&sbuf, "\n");
336415d7ccaSAdrian Chadd 
337415d7ccaSAdrian Chadd 	for (i = 0; i < vm_ndomains; i++) {
338415d7ccaSAdrian Chadd 		sbuf_printf(&sbuf, "%d: ", i);
339415d7ccaSAdrian Chadd 		for (j = 0; j < vm_ndomains; j++) {
340415d7ccaSAdrian Chadd 			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
341415d7ccaSAdrian Chadd 		}
342415d7ccaSAdrian Chadd 		sbuf_printf(&sbuf, "\n");
343415d7ccaSAdrian Chadd 	}
344415d7ccaSAdrian Chadd 	error = sbuf_finish(&sbuf);
345415d7ccaSAdrian Chadd 	sbuf_delete(&sbuf);
346415d7ccaSAdrian Chadd 	return (error);
347415d7ccaSAdrian Chadd }
3486520495aSAdrian Chadd #endif
349415d7ccaSAdrian Chadd 
3507e226537SAttilio Rao static void
3517e226537SAttilio Rao vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
352a3870a18SJohn Baldwin {
353a3870a18SJohn Baldwin 
3547e226537SAttilio Rao 	m->order = order;
3557e226537SAttilio Rao 	if (tail)
356c325e866SKonstantin Belousov 		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
3577e226537SAttilio Rao 	else
358c325e866SKonstantin Belousov 		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
3597e226537SAttilio Rao 	fl[order].lcnt++;
360a3870a18SJohn Baldwin }
3617e226537SAttilio Rao 
3627e226537SAttilio Rao static void
3637e226537SAttilio Rao vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
3647e226537SAttilio Rao {
3657e226537SAttilio Rao 
366c325e866SKonstantin Belousov 	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
3677e226537SAttilio Rao 	fl[order].lcnt--;
3687e226537SAttilio Rao 	m->order = VM_NFREEORDER;
369a3870a18SJohn Baldwin }
370a3870a18SJohn Baldwin 
37111752d88SAlan Cox /*
37211752d88SAlan Cox  * Create a physical memory segment.
37311752d88SAlan Cox  */
37411752d88SAlan Cox static void
375d866a563SAlan Cox _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
37611752d88SAlan Cox {
37711752d88SAlan Cox 	struct vm_phys_seg *seg;
37811752d88SAlan Cox 
37911752d88SAlan Cox 	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
38011752d88SAlan Cox 	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
381ef435ae7SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
3827e226537SAttilio Rao 	    ("vm_phys_create_seg: invalid domain provided"));
38311752d88SAlan Cox 	seg = &vm_phys_segs[vm_phys_nsegs++];
384271f0f12SAlan Cox 	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
385271f0f12SAlan Cox 		*seg = *(seg - 1);
386271f0f12SAlan Cox 		seg--;
387271f0f12SAlan Cox 	}
38811752d88SAlan Cox 	seg->start = start;
38911752d88SAlan Cox 	seg->end = end;
390a3870a18SJohn Baldwin 	seg->domain = domain;
39111752d88SAlan Cox }
39211752d88SAlan Cox 
393a3870a18SJohn Baldwin static void
394d866a563SAlan Cox vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
395a3870a18SJohn Baldwin {
39662d70a81SJohn Baldwin #ifdef VM_NUMA_ALLOC
397a3870a18SJohn Baldwin 	int i;
398a3870a18SJohn Baldwin 
399a3870a18SJohn Baldwin 	if (mem_affinity == NULL) {
400d866a563SAlan Cox 		_vm_phys_create_seg(start, end, 0);
401a3870a18SJohn Baldwin 		return;
402a3870a18SJohn Baldwin 	}
403a3870a18SJohn Baldwin 
404a3870a18SJohn Baldwin 	for (i = 0;; i++) {
405a3870a18SJohn Baldwin 		if (mem_affinity[i].end == 0)
406a3870a18SJohn Baldwin 			panic("Reached end of affinity info");
407a3870a18SJohn Baldwin 		if (mem_affinity[i].end <= start)
408a3870a18SJohn Baldwin 			continue;
409a3870a18SJohn Baldwin 		if (mem_affinity[i].start > start)
410a3870a18SJohn Baldwin 			panic("No affinity info for start %jx",
411a3870a18SJohn Baldwin 			    (uintmax_t)start);
412a3870a18SJohn Baldwin 		if (mem_affinity[i].end >= end) {
413d866a563SAlan Cox 			_vm_phys_create_seg(start, end,
414a3870a18SJohn Baldwin 			    mem_affinity[i].domain);
415a3870a18SJohn Baldwin 			break;
416a3870a18SJohn Baldwin 		}
417d866a563SAlan Cox 		_vm_phys_create_seg(start, mem_affinity[i].end,
418a3870a18SJohn Baldwin 		    mem_affinity[i].domain);
419a3870a18SJohn Baldwin 		start = mem_affinity[i].end;
420a3870a18SJohn Baldwin 	}
42162d70a81SJohn Baldwin #else
42262d70a81SJohn Baldwin 	_vm_phys_create_seg(start, end, 0);
42362d70a81SJohn Baldwin #endif
424a3870a18SJohn Baldwin }
425a3870a18SJohn Baldwin 
42611752d88SAlan Cox /*
427271f0f12SAlan Cox  * Add a physical memory segment.
428271f0f12SAlan Cox  */
429271f0f12SAlan Cox void
430271f0f12SAlan Cox vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
431271f0f12SAlan Cox {
432d866a563SAlan Cox 	vm_paddr_t paddr;
433271f0f12SAlan Cox 
434271f0f12SAlan Cox 	KASSERT((start & PAGE_MASK) == 0,
435271f0f12SAlan Cox 	    ("vm_phys_add_seg: start is not page aligned"));
436271f0f12SAlan Cox 	KASSERT((end & PAGE_MASK) == 0,
437271f0f12SAlan Cox 	    ("vm_phys_add_seg: end is not page aligned"));
438d866a563SAlan Cox 
439d866a563SAlan Cox 	/*
440d866a563SAlan Cox 	 * Split the physical memory segment if it spans two or more free
441d866a563SAlan Cox 	 * list boundaries.
442d866a563SAlan Cox 	 */
443d866a563SAlan Cox 	paddr = start;
444271f0f12SAlan Cox #ifdef	VM_FREELIST_ISADMA
445d866a563SAlan Cox 	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
446d866a563SAlan Cox 		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
447d866a563SAlan Cox 		paddr = VM_ISADMA_BOUNDARY;
448d866a563SAlan Cox 	}
449271f0f12SAlan Cox #endif
450d866a563SAlan Cox #ifdef	VM_FREELIST_LOWMEM
451d866a563SAlan Cox 	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
452d866a563SAlan Cox 		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
453d866a563SAlan Cox 		paddr = VM_LOWMEM_BOUNDARY;
454d866a563SAlan Cox 	}
455271f0f12SAlan Cox #endif
456d866a563SAlan Cox #ifdef	VM_FREELIST_DMA32
457d866a563SAlan Cox 	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
458d866a563SAlan Cox 		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
459d866a563SAlan Cox 		paddr = VM_DMA32_BOUNDARY;
460d866a563SAlan Cox 	}
461d866a563SAlan Cox #endif
462d866a563SAlan Cox 	vm_phys_create_seg(paddr, end);
463271f0f12SAlan Cox }
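
/*
 * Illustrative example (which boundaries exist is configuration dependent):
 * on a platform that defines VM_FREELIST_ISADMA (16MB boundary) and
 * VM_FREELIST_DMA32 (4GB boundary) and no other boundaries, adding the range
 * [8MB, 6GB) creates three segments, [8MB, 16MB), [16MB, 4GB), and
 * [4GB, 6GB), so that no segment straddles a free list boundary.
 */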
464271f0f12SAlan Cox 
465271f0f12SAlan Cox /*
46611752d88SAlan Cox  * Initialize the physical memory allocator.
467d866a563SAlan Cox  *
468d866a563SAlan Cox  * Requires that vm_page_array is initialized!
46911752d88SAlan Cox  */
47011752d88SAlan Cox void
47111752d88SAlan Cox vm_phys_init(void)
47211752d88SAlan Cox {
47311752d88SAlan Cox 	struct vm_freelist *fl;
474271f0f12SAlan Cox 	struct vm_phys_seg *seg;
475d866a563SAlan Cox 	u_long npages;
476d866a563SAlan Cox 	int dom, flind, freelist, oind, pind, segind;
47711752d88SAlan Cox 
478d866a563SAlan Cox 	/*
479d866a563SAlan Cox 	 * Compute the number of free lists, and generate the mapping from the
480d866a563SAlan Cox 	 * manifest constants VM_FREELIST_* to the free list indices.
481d866a563SAlan Cox 	 *
482d866a563SAlan Cox 	 * Initially, the entries of vm_freelist_to_flind[] are set to either
483d866a563SAlan Cox 	 * 0 or 1 to indicate which free lists should be created.
484d866a563SAlan Cox 	 */
485d866a563SAlan Cox 	npages = 0;
486d866a563SAlan Cox 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
487d866a563SAlan Cox 		seg = &vm_phys_segs[segind];
488d866a563SAlan Cox #ifdef	VM_FREELIST_ISADMA
489d866a563SAlan Cox 		if (seg->end <= VM_ISADMA_BOUNDARY)
490d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
491d866a563SAlan Cox 		else
492d866a563SAlan Cox #endif
493d866a563SAlan Cox #ifdef	VM_FREELIST_LOWMEM
494d866a563SAlan Cox 		if (seg->end <= VM_LOWMEM_BOUNDARY)
495d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
496d866a563SAlan Cox 		else
497d866a563SAlan Cox #endif
498d866a563SAlan Cox #ifdef	VM_FREELIST_DMA32
499d866a563SAlan Cox 		if (
500d866a563SAlan Cox #ifdef	VM_DMA32_NPAGES_THRESHOLD
501d866a563SAlan Cox 		    /*
502d866a563SAlan Cox 		     * Create the DMA32 free list only if the amount of
503d866a563SAlan Cox 		     * physical memory above physical address 4G exceeds the
504d866a563SAlan Cox 		     * given threshold.
505d866a563SAlan Cox 		     */
506d866a563SAlan Cox 		    npages > VM_DMA32_NPAGES_THRESHOLD &&
507d866a563SAlan Cox #endif
508d866a563SAlan Cox 		    seg->end <= VM_DMA32_BOUNDARY)
509d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
510d866a563SAlan Cox 		else
511d866a563SAlan Cox #endif
512d866a563SAlan Cox 		{
513d866a563SAlan Cox 			npages += atop(seg->end - seg->start);
514d866a563SAlan Cox 			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
515d866a563SAlan Cox 		}
516d866a563SAlan Cox 	}
517d866a563SAlan Cox 	/* Change each entry into a running total of the free lists. */
518d866a563SAlan Cox 	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
519d866a563SAlan Cox 		vm_freelist_to_flind[freelist] +=
520d866a563SAlan Cox 		    vm_freelist_to_flind[freelist - 1];
521d866a563SAlan Cox 	}
522d866a563SAlan Cox 	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
523d866a563SAlan Cox 	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
524d866a563SAlan Cox 	/* Change each entry into a free list index. */
525d866a563SAlan Cox 	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
526d866a563SAlan Cox 		vm_freelist_to_flind[freelist]--;
527d866a563SAlan Cox 
528d866a563SAlan Cox 	/*
529d866a563SAlan Cox 	 * Initialize the first_page and free_queues fields of each physical
530d866a563SAlan Cox 	 * memory segment.
531d866a563SAlan Cox 	 */
532271f0f12SAlan Cox #ifdef VM_PHYSSEG_SPARSE
533d866a563SAlan Cox 	npages = 0;
53411752d88SAlan Cox #endif
535271f0f12SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
536271f0f12SAlan Cox 		seg = &vm_phys_segs[segind];
537271f0f12SAlan Cox #ifdef VM_PHYSSEG_SPARSE
538d866a563SAlan Cox 		seg->first_page = &vm_page_array[npages];
539d866a563SAlan Cox 		npages += atop(seg->end - seg->start);
540271f0f12SAlan Cox #else
541271f0f12SAlan Cox 		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
54211752d88SAlan Cox #endif
543d866a563SAlan Cox #ifdef	VM_FREELIST_ISADMA
544d866a563SAlan Cox 		if (seg->end <= VM_ISADMA_BOUNDARY) {
545d866a563SAlan Cox 			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
546d866a563SAlan Cox 			KASSERT(flind >= 0,
547d866a563SAlan Cox 			    ("vm_phys_init: ISADMA flind < 0"));
548d866a563SAlan Cox 		} else
549d866a563SAlan Cox #endif
550d866a563SAlan Cox #ifdef	VM_FREELIST_LOWMEM
551d866a563SAlan Cox 		if (seg->end <= VM_LOWMEM_BOUNDARY) {
552d866a563SAlan Cox 			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
553d866a563SAlan Cox 			KASSERT(flind >= 0,
554d866a563SAlan Cox 			    ("vm_phys_init: LOWMEM flind < 0"));
555d866a563SAlan Cox 		} else
556d866a563SAlan Cox #endif
557d866a563SAlan Cox #ifdef	VM_FREELIST_DMA32
558d866a563SAlan Cox 		if (seg->end <= VM_DMA32_BOUNDARY) {
559d866a563SAlan Cox 			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
560d866a563SAlan Cox 			KASSERT(flind >= 0,
561d866a563SAlan Cox 			    ("vm_phys_init: DMA32 flind < 0"));
562d866a563SAlan Cox 		} else
563d866a563SAlan Cox #endif
564d866a563SAlan Cox 		{
565d866a563SAlan Cox 			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
566d866a563SAlan Cox 			KASSERT(flind >= 0,
567d866a563SAlan Cox 			    ("vm_phys_init: DEFAULT flind < 0"));
56811752d88SAlan Cox 		}
569d866a563SAlan Cox 		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
570d866a563SAlan Cox 	}
571d866a563SAlan Cox 
572d866a563SAlan Cox 	/*
573d866a563SAlan Cox 	 * Initialize the free queues.
574d866a563SAlan Cox 	 */
5757e226537SAttilio Rao 	for (dom = 0; dom < vm_ndomains; dom++) {
57611752d88SAlan Cox 		for (flind = 0; flind < vm_nfreelists; flind++) {
57711752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
5787e226537SAttilio Rao 				fl = vm_phys_free_queues[dom][flind][pind];
57911752d88SAlan Cox 				for (oind = 0; oind < VM_NFREEORDER; oind++)
58011752d88SAlan Cox 					TAILQ_INIT(&fl[oind].pl);
58111752d88SAlan Cox 			}
58211752d88SAlan Cox 		}
583a3870a18SJohn Baldwin 	}
584d866a563SAlan Cox 
58538d6b2dcSRoger Pau Monné 	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
58611752d88SAlan Cox }
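
/*
 * Illustrative example of the vm_freelist_to_flind[] computation above
 * (hypothetical constant values): suppose VM_NFREELIST is 3 with
 * VM_FREELIST_DEFAULT = 0, VM_FREELIST_DMA32 = 1, and VM_FREELIST_LOWMEM = 2,
 * and only the DEFAULT and DMA32 lists are flagged for creation.  The flags
 * [1, 1, 0] become the running totals [1, 2, 2], vm_nfreelists becomes 2, and
 * the final decrement yields [0, 1, 1]; the two created lists thus occupy
 * flind 0 and 1 in VM_FREELIST_* order.
 */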
58711752d88SAlan Cox 
58811752d88SAlan Cox /*
58911752d88SAlan Cox  * Split a contiguous, power of two-sized set of physical pages.
59011752d88SAlan Cox  */
59111752d88SAlan Cox static __inline void
59211752d88SAlan Cox vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
59311752d88SAlan Cox {
59411752d88SAlan Cox 	vm_page_t m_buddy;
59511752d88SAlan Cox 
59611752d88SAlan Cox 	while (oind > order) {
59711752d88SAlan Cox 		oind--;
59811752d88SAlan Cox 		m_buddy = &m[1 << oind];
59911752d88SAlan Cox 		KASSERT(m_buddy->order == VM_NFREEORDER,
60011752d88SAlan Cox 		    ("vm_phys_split_pages: page %p has unexpected order %d",
60111752d88SAlan Cox 		    m_buddy, m_buddy->order));
6027e226537SAttilio Rao 		vm_freelist_add(fl, m_buddy, oind, 0);
60311752d88SAlan Cox 	}
60411752d88SAlan Cox }
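
/*
 * Illustrative example: splitting an order-3 block (eight pages) to satisfy
 * an order-1 request first returns the upper order-2 buddy (pages 4-7) to
 * "fl", then the order-1 buddy (pages 2-3), leaving pages 0-1 as the
 * requested order-1 allocation.
 */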
60511752d88SAlan Cox 
60611752d88SAlan Cox /*
60711752d88SAlan Cox  * Allocate a contiguous, power of two-sized set of physical pages
60811752d88SAlan Cox  * from the free lists.
6098941dc44SAlan Cox  *
6108941dc44SAlan Cox  * The free page queues must be locked.
61111752d88SAlan Cox  */
61211752d88SAlan Cox vm_page_t
613ef435ae7SJeff Roberson vm_phys_alloc_pages(int domain, int pool, int order)
61411752d88SAlan Cox {
61549ca10d4SJayachandran C. 	vm_page_t m;
6160db2102aSMichael Zhilin 	int freelist;
61749ca10d4SJayachandran C. 
6180db2102aSMichael Zhilin 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
6190db2102aSMichael Zhilin 		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
62049ca10d4SJayachandran C. 		if (m != NULL)
62149ca10d4SJayachandran C. 			return (m);
62249ca10d4SJayachandran C. 	}
62349ca10d4SJayachandran C. 	return (NULL);
62449ca10d4SJayachandran C. }
62549ca10d4SJayachandran C. 
62649ca10d4SJayachandran C. /*
627d866a563SAlan Cox  * Allocate a contiguous, power of two-sized set of physical pages from the
628d866a563SAlan Cox  * specified free list.  The free list must be specified using one of the
629d866a563SAlan Cox  * manifest constants VM_FREELIST_*.
630d866a563SAlan Cox  *
631d866a563SAlan Cox  * The free page queues must be locked.
63249ca10d4SJayachandran C.  */
63349ca10d4SJayachandran C. vm_page_t
6340db2102aSMichael Zhilin vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
63549ca10d4SJayachandran C. {
636ef435ae7SJeff Roberson 	struct vm_freelist *alt, *fl;
63711752d88SAlan Cox 	vm_page_t m;
6380db2102aSMichael Zhilin 	int oind, pind, flind;
63911752d88SAlan Cox 
640ef435ae7SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
641ef435ae7SJeff Roberson 	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
642ef435ae7SJeff Roberson 	    domain));
6430db2102aSMichael Zhilin 	KASSERT(freelist < VM_NFREELIST,
644d866a563SAlan Cox 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
6455be93778SAndrew Turner 	    freelist));
64611752d88SAlan Cox 	KASSERT(pool < VM_NFREEPOOL,
64749ca10d4SJayachandran C. 	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
64811752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
64949ca10d4SJayachandran C. 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
6506520495aSAdrian Chadd 
6510db2102aSMichael Zhilin 	flind = vm_freelist_to_flind[freelist];
6520db2102aSMichael Zhilin 	/* Check if freelist is present */
6530db2102aSMichael Zhilin 	if (flind < 0)
6540db2102aSMichael Zhilin 		return (NULL);
6550db2102aSMichael Zhilin 
65611752d88SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
6577e226537SAttilio Rao 	fl = &vm_phys_free_queues[domain][flind][pool][0];
65811752d88SAlan Cox 	for (oind = order; oind < VM_NFREEORDER; oind++) {
65911752d88SAlan Cox 		m = TAILQ_FIRST(&fl[oind].pl);
66011752d88SAlan Cox 		if (m != NULL) {
6617e226537SAttilio Rao 			vm_freelist_rem(fl, m, oind);
66211752d88SAlan Cox 			vm_phys_split_pages(m, oind, fl, order);
66311752d88SAlan Cox 			return (m);
66411752d88SAlan Cox 		}
66511752d88SAlan Cox 	}
66611752d88SAlan Cox 
66711752d88SAlan Cox 	/*
66811752d88SAlan Cox 	 * The given pool was empty.  Find the largest
66911752d88SAlan Cox 	 * contiguous, power-of-two-sized set of pages in any
67011752d88SAlan Cox 	 * pool.  Transfer these pages to the given pool, and
67111752d88SAlan Cox 	 * use them to satisfy the allocation.
67211752d88SAlan Cox 	 */
67311752d88SAlan Cox 	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
67411752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
6757e226537SAttilio Rao 			alt = &vm_phys_free_queues[domain][flind][pind][0];
67611752d88SAlan Cox 			m = TAILQ_FIRST(&alt[oind].pl);
67711752d88SAlan Cox 			if (m != NULL) {
6787e226537SAttilio Rao 				vm_freelist_rem(alt, m, oind);
67911752d88SAlan Cox 				vm_phys_set_pool(pool, m, oind);
68011752d88SAlan Cox 				vm_phys_split_pages(m, oind, fl, order);
68111752d88SAlan Cox 				return (m);
68211752d88SAlan Cox 			}
68311752d88SAlan Cox 		}
68411752d88SAlan Cox 	}
68511752d88SAlan Cox 	return (NULL);
68611752d88SAlan Cox }
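
/*
 * Illustrative note on the pool fallback above: if, say, the requested pool's
 * queues are empty but another pool holds an order-5 block and the request is
 * for order 3, that block is removed from the other pool, relabelled with
 * vm_phys_set_pool(), and split; the unused pieces are queued on the
 * requested pool's free lists and the order-3 prefix is returned.
 */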
68711752d88SAlan Cox 
68811752d88SAlan Cox /*
68911752d88SAlan Cox  * Find the vm_page corresponding to the given physical address.
69011752d88SAlan Cox  */
69111752d88SAlan Cox vm_page_t
69211752d88SAlan Cox vm_phys_paddr_to_vm_page(vm_paddr_t pa)
69311752d88SAlan Cox {
69411752d88SAlan Cox 	struct vm_phys_seg *seg;
69511752d88SAlan Cox 	int segind;
69611752d88SAlan Cox 
69711752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
69811752d88SAlan Cox 		seg = &vm_phys_segs[segind];
69911752d88SAlan Cox 		if (pa >= seg->start && pa < seg->end)
70011752d88SAlan Cox 			return (&seg->first_page[atop(pa - seg->start)]);
70111752d88SAlan Cox 	}
702f06a3a36SAndrew Thompson 	return (NULL);
70311752d88SAlan Cox }
70411752d88SAlan Cox 
705b6de32bdSKonstantin Belousov vm_page_t
706b6de32bdSKonstantin Belousov vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
707b6de32bdSKonstantin Belousov {
70838d6b2dcSRoger Pau Monné 	struct vm_phys_fictitious_seg tmp, *seg;
709b6de32bdSKonstantin Belousov 	vm_page_t m;
710b6de32bdSKonstantin Belousov 
711b6de32bdSKonstantin Belousov 	m = NULL;
71238d6b2dcSRoger Pau Monné 	tmp.start = pa;
71338d6b2dcSRoger Pau Monné 	tmp.end = 0;
71438d6b2dcSRoger Pau Monné 
71538d6b2dcSRoger Pau Monné 	rw_rlock(&vm_phys_fictitious_reg_lock);
71638d6b2dcSRoger Pau Monné 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
71738d6b2dcSRoger Pau Monné 	rw_runlock(&vm_phys_fictitious_reg_lock);
71838d6b2dcSRoger Pau Monné 	if (seg == NULL)
71938d6b2dcSRoger Pau Monné 		return (NULL);
72038d6b2dcSRoger Pau Monné 
721b6de32bdSKonstantin Belousov 	m = &seg->first_page[atop(pa - seg->start)];
72238d6b2dcSRoger Pau Monné 	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));
72338d6b2dcSRoger Pau Monné 
724b6de32bdSKonstantin Belousov 	return (m);
725b6de32bdSKonstantin Belousov }
726b6de32bdSKonstantin Belousov 
7275ebe728dSRoger Pau Monné static inline void
7285ebe728dSRoger Pau Monné vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
7295ebe728dSRoger Pau Monné     long page_count, vm_memattr_t memattr)
7305ebe728dSRoger Pau Monné {
7315ebe728dSRoger Pau Monné 	long i;
7325ebe728dSRoger Pau Monné 
733f93f7cf1SMark Johnston 	bzero(range, page_count * sizeof(*range));
7345ebe728dSRoger Pau Monné 	for (i = 0; i < page_count; i++) {
7355ebe728dSRoger Pau Monné 		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
7365ebe728dSRoger Pau Monné 		range[i].oflags &= ~VPO_UNMANAGED;
7375ebe728dSRoger Pau Monné 		range[i].busy_lock = VPB_UNBUSIED;
7385ebe728dSRoger Pau Monné 	}
7395ebe728dSRoger Pau Monné }
7405ebe728dSRoger Pau Monné 
741b6de32bdSKonstantin Belousov int
742b6de32bdSKonstantin Belousov vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
743b6de32bdSKonstantin Belousov     vm_memattr_t memattr)
744b6de32bdSKonstantin Belousov {
745b6de32bdSKonstantin Belousov 	struct vm_phys_fictitious_seg *seg;
746b6de32bdSKonstantin Belousov 	vm_page_t fp;
7475ebe728dSRoger Pau Monné 	long page_count;
748b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
7495ebe728dSRoger Pau Monné 	long pi, pe;
7505ebe728dSRoger Pau Monné 	long dpage_count;
751b6de32bdSKonstantin Belousov #endif
752b6de32bdSKonstantin Belousov 
7535ebe728dSRoger Pau Monné 	KASSERT(start < end,
7545ebe728dSRoger Pau Monné 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
7555ebe728dSRoger Pau Monné 	    (uintmax_t)start, (uintmax_t)end));
7565ebe728dSRoger Pau Monné 
757b6de32bdSKonstantin Belousov 	page_count = (end - start) / PAGE_SIZE;
758b6de32bdSKonstantin Belousov 
759b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
760b6de32bdSKonstantin Belousov 	pi = atop(start);
7615ebe728dSRoger Pau Monné 	pe = atop(end);
7625ebe728dSRoger Pau Monné 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
763b6de32bdSKonstantin Belousov 		fp = &vm_page_array[pi - first_page];
7645ebe728dSRoger Pau Monné 		if ((pe - first_page) > vm_page_array_size) {
7655ebe728dSRoger Pau Monné 			/*
7665ebe728dSRoger Pau Monné 			 * We have a segment that starts inside
7675ebe728dSRoger Pau Monné 			 * of vm_page_array, but ends outside of it.
7685ebe728dSRoger Pau Monné 			 *
7695ebe728dSRoger Pau Monné 			 * Use vm_page_array pages for those that are
7705ebe728dSRoger Pau Monné 			 * inside of the vm_page_array range, and
7715ebe728dSRoger Pau Monné 			 * allocate the remaining ones.
7725ebe728dSRoger Pau Monné 			 */
7735ebe728dSRoger Pau Monné 			dpage_count = vm_page_array_size - (pi - first_page);
7745ebe728dSRoger Pau Monné 			vm_phys_fictitious_init_range(fp, start, dpage_count,
7755ebe728dSRoger Pau Monné 			    memattr);
7765ebe728dSRoger Pau Monné 			page_count -= dpage_count;
7775ebe728dSRoger Pau Monné 			start += ptoa(dpage_count);
7785ebe728dSRoger Pau Monné 			goto alloc;
7795ebe728dSRoger Pau Monné 		}
7805ebe728dSRoger Pau Monné 		/*
7815ebe728dSRoger Pau Monné 		 * We can allocate the full range from vm_page_array,
7825ebe728dSRoger Pau Monné 		 * so there's no need to register the range in the tree.
7835ebe728dSRoger Pau Monné 		 */
7845ebe728dSRoger Pau Monné 		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
7855ebe728dSRoger Pau Monné 		return (0);
7865ebe728dSRoger Pau Monné 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
7875ebe728dSRoger Pau Monné 		/*
7885ebe728dSRoger Pau Monné 		 * We have a segment that ends inside of vm_page_array,
7895ebe728dSRoger Pau Monné 		 * but starts outside of it.
7905ebe728dSRoger Pau Monné 		 */
7915ebe728dSRoger Pau Monné 		fp = &vm_page_array[0];
7925ebe728dSRoger Pau Monné 		dpage_count = pe - first_page;
7935ebe728dSRoger Pau Monné 		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
7945ebe728dSRoger Pau Monné 		    memattr);
7955ebe728dSRoger Pau Monné 		end -= ptoa(dpage_count);
7965ebe728dSRoger Pau Monné 		page_count -= dpage_count;
7975ebe728dSRoger Pau Monné 		goto alloc;
7985ebe728dSRoger Pau Monné 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
7995ebe728dSRoger Pau Monné 		/*
8005ebe728dSRoger Pau Monné 		 * Trying to register a fictitious range that extends both
8015ebe728dSRoger Pau Monné 		 * below and above vm_page_array.
8025ebe728dSRoger Pau Monné 		 */
8035ebe728dSRoger Pau Monné 		return (EINVAL);
8045ebe728dSRoger Pau Monné 	} else {
8055ebe728dSRoger Pau Monné alloc:
806b6de32bdSKonstantin Belousov #endif
807b6de32bdSKonstantin Belousov 		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
808f93f7cf1SMark Johnston 		    M_WAITOK);
8095ebe728dSRoger Pau Monné #ifdef VM_PHYSSEG_DENSE
810b6de32bdSKonstantin Belousov 	}
8115ebe728dSRoger Pau Monné #endif
8125ebe728dSRoger Pau Monné 	vm_phys_fictitious_init_range(fp, start, page_count, memattr);
81338d6b2dcSRoger Pau Monné 
81438d6b2dcSRoger Pau Monné 	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
815b6de32bdSKonstantin Belousov 	seg->start = start;
816b6de32bdSKonstantin Belousov 	seg->end = end;
817b6de32bdSKonstantin Belousov 	seg->first_page = fp;
81838d6b2dcSRoger Pau Monné 
81938d6b2dcSRoger Pau Monné 	rw_wlock(&vm_phys_fictitious_reg_lock);
82038d6b2dcSRoger Pau Monné 	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
82138d6b2dcSRoger Pau Monné 	rw_wunlock(&vm_phys_fictitious_reg_lock);
82238d6b2dcSRoger Pau Monné 
823b6de32bdSKonstantin Belousov 	return (0);
824b6de32bdSKonstantin Belousov }
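
/*
 * Illustrative usage sketch (hypothetical caller, not from the sources): a
 * driver exporting a device aperture might register it with
 *
 *	error = vm_phys_fictitious_reg_range(start, start + size,
 *	    VM_MEMATTR_UNCACHEABLE);
 *
 * translate faulting physical addresses with
 * vm_phys_fictitious_to_vm_page(), and call
 * vm_phys_fictitious_unreg_range(start, start + size) on detach.
 */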
825b6de32bdSKonstantin Belousov 
826b6de32bdSKonstantin Belousov void
827b6de32bdSKonstantin Belousov vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
828b6de32bdSKonstantin Belousov {
82938d6b2dcSRoger Pau Monné 	struct vm_phys_fictitious_seg *seg, tmp;
830b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
8315ebe728dSRoger Pau Monné 	long pi, pe;
832b6de32bdSKonstantin Belousov #endif
833b6de32bdSKonstantin Belousov 
8345ebe728dSRoger Pau Monné 	KASSERT(start < end,
8355ebe728dSRoger Pau Monné 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
8365ebe728dSRoger Pau Monné 	    (uintmax_t)start, (uintmax_t)end));
8375ebe728dSRoger Pau Monné 
838b6de32bdSKonstantin Belousov #ifdef VM_PHYSSEG_DENSE
839b6de32bdSKonstantin Belousov 	pi = atop(start);
8405ebe728dSRoger Pau Monné 	pe = atop(end);
8415ebe728dSRoger Pau Monné 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
8425ebe728dSRoger Pau Monné 		if ((pe - first_page) <= vm_page_array_size) {
8435ebe728dSRoger Pau Monné 			/*
8445ebe728dSRoger Pau Monné 			 * This segment was allocated using vm_page_array
8455ebe728dSRoger Pau Monné 			 * only, there's nothing to do since those pages
8465ebe728dSRoger Pau Monné 			 * were never added to the tree.
8475ebe728dSRoger Pau Monné 			 */
8485ebe728dSRoger Pau Monné 			return;
8495ebe728dSRoger Pau Monné 		}
8505ebe728dSRoger Pau Monné 		/*
8515ebe728dSRoger Pau Monné 		 * We have a segment that starts inside
8525ebe728dSRoger Pau Monné 		 * of vm_page_array, but ends outside of it.
8535ebe728dSRoger Pau Monné 		 *
8545ebe728dSRoger Pau Monné 		 * Calculate how many pages were added to the
8555ebe728dSRoger Pau Monné 		 * tree and free them.
8565ebe728dSRoger Pau Monné 		 */
8575ebe728dSRoger Pau Monné 		start = ptoa(first_page + vm_page_array_size);
8585ebe728dSRoger Pau Monné 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
8595ebe728dSRoger Pau Monné 		/*
8605ebe728dSRoger Pau Monné 		 * We have a segment that ends inside of vm_page_array,
8615ebe728dSRoger Pau Monné 		 * but starts outside of it.
8625ebe728dSRoger Pau Monné 		 */
8635ebe728dSRoger Pau Monné 		end = ptoa(first_page);
8645ebe728dSRoger Pau Monné 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
8655ebe728dSRoger Pau Monné 		/* Since it's not possible to register such a range, panic. */
8665ebe728dSRoger Pau Monné 		panic(
8675ebe728dSRoger Pau Monné 		    "Unregistering not registered fictitious range [%#jx:%#jx]",
8685ebe728dSRoger Pau Monné 		    (uintmax_t)start, (uintmax_t)end);
8695ebe728dSRoger Pau Monné 	}
870b6de32bdSKonstantin Belousov #endif
87138d6b2dcSRoger Pau Monné 	tmp.start = start;
87238d6b2dcSRoger Pau Monné 	tmp.end = 0;
873b6de32bdSKonstantin Belousov 
87438d6b2dcSRoger Pau Monné 	rw_wlock(&vm_phys_fictitious_reg_lock);
87538d6b2dcSRoger Pau Monné 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
87638d6b2dcSRoger Pau Monné 	if (seg->start != start || seg->end != end) {
87738d6b2dcSRoger Pau Monné 		rw_wunlock(&vm_phys_fictitious_reg_lock);
87838d6b2dcSRoger Pau Monné 		panic(
87938d6b2dcSRoger Pau Monné 		    "Unregistering not registered fictitious range [%#jx:%#jx]",
88038d6b2dcSRoger Pau Monné 		    (uintmax_t)start, (uintmax_t)end);
88138d6b2dcSRoger Pau Monné 	}
88238d6b2dcSRoger Pau Monné 	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
88338d6b2dcSRoger Pau Monné 	rw_wunlock(&vm_phys_fictitious_reg_lock);
88438d6b2dcSRoger Pau Monné 	free(seg->first_page, M_FICT_PAGES);
88538d6b2dcSRoger Pau Monné 	free(seg, M_FICT_PAGES);
886b6de32bdSKonstantin Belousov }
887b6de32bdSKonstantin Belousov 
88811752d88SAlan Cox /*
88911752d88SAlan Cox  * Free a contiguous, power of two-sized set of physical pages.
8908941dc44SAlan Cox  *
8918941dc44SAlan Cox  * The free page queues must be locked.
89211752d88SAlan Cox  */
89311752d88SAlan Cox void
89411752d88SAlan Cox vm_phys_free_pages(vm_page_t m, int order)
89511752d88SAlan Cox {
89611752d88SAlan Cox 	struct vm_freelist *fl;
89711752d88SAlan Cox 	struct vm_phys_seg *seg;
8985c1f2cc4SAlan Cox 	vm_paddr_t pa;
89911752d88SAlan Cox 	vm_page_t m_buddy;
90011752d88SAlan Cox 
90111752d88SAlan Cox 	KASSERT(m->order == VM_NFREEORDER,
9028941dc44SAlan Cox 	    ("vm_phys_free_pages: page %p has unexpected order %d",
90311752d88SAlan Cox 	    m, m->order));
90411752d88SAlan Cox 	KASSERT(m->pool < VM_NFREEPOOL,
9058941dc44SAlan Cox 	    ("vm_phys_free_pages: page %p has unexpected pool %d",
90611752d88SAlan Cox 	    m, m->pool));
90711752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
9088941dc44SAlan Cox 	    ("vm_phys_free_pages: order %d is out of range", order));
90911752d88SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
91011752d88SAlan Cox 	seg = &vm_phys_segs[m->segind];
9115c1f2cc4SAlan Cox 	if (order < VM_NFREEORDER - 1) {
9125c1f2cc4SAlan Cox 		pa = VM_PAGE_TO_PHYS(m);
9135c1f2cc4SAlan Cox 		do {
9145c1f2cc4SAlan Cox 			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
9155c1f2cc4SAlan Cox 			if (pa < seg->start || pa >= seg->end)
91611752d88SAlan Cox 				break;
9175c1f2cc4SAlan Cox 			m_buddy = &seg->first_page[atop(pa - seg->start)];
91811752d88SAlan Cox 			if (m_buddy->order != order)
91911752d88SAlan Cox 				break;
92011752d88SAlan Cox 			fl = (*seg->free_queues)[m_buddy->pool];
9217e226537SAttilio Rao 			vm_freelist_rem(fl, m_buddy, order);
92211752d88SAlan Cox 			if (m_buddy->pool != m->pool)
92311752d88SAlan Cox 				vm_phys_set_pool(m->pool, m_buddy, order);
92411752d88SAlan Cox 			order++;
9255c1f2cc4SAlan Cox 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
92611752d88SAlan Cox 			m = &seg->first_page[atop(pa - seg->start)];
9275c1f2cc4SAlan Cox 		} while (order < VM_NFREEORDER - 1);
92811752d88SAlan Cox 	}
92911752d88SAlan Cox 	fl = (*seg->free_queues)[m->pool];
9307e226537SAttilio Rao 	vm_freelist_add(fl, m, order, 1);
93111752d88SAlan Cox }
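
/*
 * Illustrative example of the coalescing loop above: freeing an order-0 page
 * whose order-0 buddy is already free merges the pair into an order-1 block;
 * if that block's order-1 buddy is free as well, merging continues, up to the
 * maximum order, VM_NFREEORDER - 1, before the surviving block is queued.  A
 * buddy found in a different pool is first moved into the freed page's pool.
 */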
93211752d88SAlan Cox 
93311752d88SAlan Cox /*
9345c1f2cc4SAlan Cox  * Free a contiguous, arbitrarily sized set of physical pages.
9355c1f2cc4SAlan Cox  *
9365c1f2cc4SAlan Cox  * The free page queues must be locked.
9375c1f2cc4SAlan Cox  */
9385c1f2cc4SAlan Cox void
9395c1f2cc4SAlan Cox vm_phys_free_contig(vm_page_t m, u_long npages)
9405c1f2cc4SAlan Cox {
9415c1f2cc4SAlan Cox 	u_int n;
9425c1f2cc4SAlan Cox 	int order;
9435c1f2cc4SAlan Cox 
9445c1f2cc4SAlan Cox 	/*
9455c1f2cc4SAlan Cox 	 * Avoid unnecessary coalescing by freeing the pages in the largest
9465c1f2cc4SAlan Cox 	 * possible power-of-two-sized subsets.
9475c1f2cc4SAlan Cox 	 */
9485c1f2cc4SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
9495c1f2cc4SAlan Cox 	for (;; npages -= n) {
9505c1f2cc4SAlan Cox 		/*
9515c1f2cc4SAlan Cox 		 * Unsigned "min" is used here so that "order" is assigned
9525c1f2cc4SAlan Cox 		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
9535c1f2cc4SAlan Cox 		 * or the low-order bits of its physical address are zero
9545c1f2cc4SAlan Cox 		 * because the size of a physical address exceeds the size of
9555c1f2cc4SAlan Cox 		 * a long.
9565c1f2cc4SAlan Cox 		 */
9575c1f2cc4SAlan Cox 		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
9585c1f2cc4SAlan Cox 		    VM_NFREEORDER - 1);
9595c1f2cc4SAlan Cox 		n = 1 << order;
9605c1f2cc4SAlan Cox 		if (npages < n)
9615c1f2cc4SAlan Cox 			break;
9625c1f2cc4SAlan Cox 		vm_phys_free_pages(m, order);
9635c1f2cc4SAlan Cox 		m += n;
9645c1f2cc4SAlan Cox 	}
9655c1f2cc4SAlan Cox 	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
9665c1f2cc4SAlan Cox 	for (; npages > 0; npages -= n) {
9675c1f2cc4SAlan Cox 		order = flsl(npages) - 1;
9685c1f2cc4SAlan Cox 		n = 1 << order;
9695c1f2cc4SAlan Cox 		vm_phys_free_pages(m, order);
9705c1f2cc4SAlan Cox 		m += n;
9715c1f2cc4SAlan Cox 	}
9725c1f2cc4SAlan Cox }
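
/*
 * Illustrative example: freeing npages = 13 starting at a page whose frame
 * number is an odd multiple of 8: the first loop frees an order-3 block (the
 * address alignment caps the order at 3) and then stops because the remaining
 * 5 pages are fewer than the next block size permitted by the new alignment;
 * the second loop frees an order-2 block (flsl(5) - 1 == 2) followed by a
 * single order-0 page, covering all 13 pages.
 */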
9735c1f2cc4SAlan Cox 
9745c1f2cc4SAlan Cox /*
975c869e672SAlan Cox  * Scan physical memory between the specified addresses "low" and "high" for a
976c869e672SAlan Cox  * run of contiguous physical pages that satisfy the specified conditions, and
977c869e672SAlan Cox  * return the lowest page in the run.  The specified "alignment" determines
978c869e672SAlan Cox  * the alignment of the lowest physical page in the run.  If the specified
979c869e672SAlan Cox  * "boundary" is non-zero, then the run of physical pages cannot span a
980c869e672SAlan Cox  * physical address that is a multiple of "boundary".
981c869e672SAlan Cox  *
982c869e672SAlan Cox  * "npages" must be greater than zero.  Both "alignment" and "boundary" must
983c869e672SAlan Cox  * be a power of two.
984c869e672SAlan Cox  */
985c869e672SAlan Cox vm_page_t
9863f289c3fSJeff Roberson vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
987c869e672SAlan Cox     u_long alignment, vm_paddr_t boundary, int options)
988c869e672SAlan Cox {
989c869e672SAlan Cox 	vm_paddr_t pa_end;
990c869e672SAlan Cox 	vm_page_t m_end, m_run, m_start;
991c869e672SAlan Cox 	struct vm_phys_seg *seg;
992c869e672SAlan Cox 	int segind;
993c869e672SAlan Cox 
994c869e672SAlan Cox 	KASSERT(npages > 0, ("npages is 0"));
995c869e672SAlan Cox 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
996c869e672SAlan Cox 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
997c869e672SAlan Cox 	if (low >= high)
998c869e672SAlan Cox 		return (NULL);
999c869e672SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
1000c869e672SAlan Cox 		seg = &vm_phys_segs[segind];
10013f289c3fSJeff Roberson 		if (seg->domain != domain)
10023f289c3fSJeff Roberson 			continue;
1003c869e672SAlan Cox 		if (seg->start >= high)
1004c869e672SAlan Cox 			break;
1005c869e672SAlan Cox 		if (low >= seg->end)
1006c869e672SAlan Cox 			continue;
1007c869e672SAlan Cox 		if (low <= seg->start)
1008c869e672SAlan Cox 			m_start = seg->first_page;
1009c869e672SAlan Cox 		else
1010c869e672SAlan Cox 			m_start = &seg->first_page[atop(low - seg->start)];
1011c869e672SAlan Cox 		if (high < seg->end)
1012c869e672SAlan Cox 			pa_end = high;
1013c869e672SAlan Cox 		else
1014c869e672SAlan Cox 			pa_end = seg->end;
1015c869e672SAlan Cox 		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
1016c869e672SAlan Cox 			continue;
1017c869e672SAlan Cox 		m_end = &seg->first_page[atop(pa_end - seg->start)];
1018c869e672SAlan Cox 		m_run = vm_page_scan_contig(npages, m_start, m_end,
1019c869e672SAlan Cox 		    alignment, boundary, options);
1020c869e672SAlan Cox 		if (m_run != NULL)
1021c869e672SAlan Cox 			return (m_run);
1022c869e672SAlan Cox 	}
1023c869e672SAlan Cox 	return (NULL);
1024c869e672SAlan Cox }
1025c869e672SAlan Cox 
1026c869e672SAlan Cox /*
102711752d88SAlan Cox  * Set the pool for a contiguous, power of two-sized set of physical pages.
102811752d88SAlan Cox  */
10297bfda801SAlan Cox void
103011752d88SAlan Cox vm_phys_set_pool(int pool, vm_page_t m, int order)
103111752d88SAlan Cox {
103211752d88SAlan Cox 	vm_page_t m_tmp;
103311752d88SAlan Cox 
103411752d88SAlan Cox 	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
103511752d88SAlan Cox 		m_tmp->pool = pool;
103611752d88SAlan Cox }
103711752d88SAlan Cox 
103811752d88SAlan Cox /*
10399742373aSAlan Cox  * Search for the given physical page "m" in the free lists.  If the search
10409742373aSAlan Cox  * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
10419742373aSAlan Cox  * FALSE, indicating that "m" is not in the free lists.
10427bfda801SAlan Cox  *
10437bfda801SAlan Cox  * The free page queues must be locked.
10447bfda801SAlan Cox  */
1045e35395ceSAlan Cox boolean_t
10467bfda801SAlan Cox vm_phys_unfree_page(vm_page_t m)
10477bfda801SAlan Cox {
10487bfda801SAlan Cox 	struct vm_freelist *fl;
10497bfda801SAlan Cox 	struct vm_phys_seg *seg;
10507bfda801SAlan Cox 	vm_paddr_t pa, pa_half;
10517bfda801SAlan Cox 	vm_page_t m_set, m_tmp;
10527bfda801SAlan Cox 	int order;
10537bfda801SAlan Cox 
10547bfda801SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
10557bfda801SAlan Cox 
10567bfda801SAlan Cox 	/*
10577bfda801SAlan Cox 	 * First, find the contiguous, power of two-sized set of free
10587bfda801SAlan Cox 	 * physical pages containing the given physical page "m" and
10597bfda801SAlan Cox 	 * assign it to "m_set".
10607bfda801SAlan Cox 	 */
10617bfda801SAlan Cox 	seg = &vm_phys_segs[m->segind];
10627bfda801SAlan Cox 	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1063bc8794a1SAlan Cox 	    order < VM_NFREEORDER - 1; ) {
10647bfda801SAlan Cox 		order++;
10657bfda801SAlan Cox 		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
10662fbced65SAlan Cox 		if (pa >= seg->start)
10677bfda801SAlan Cox 			m_set = &seg->first_page[atop(pa - seg->start)];
1068e35395ceSAlan Cox 		else
1069e35395ceSAlan Cox 			return (FALSE);
10707bfda801SAlan Cox 	}
1071e35395ceSAlan Cox 	if (m_set->order < order)
1072e35395ceSAlan Cox 		return (FALSE);
1073e35395ceSAlan Cox 	if (m_set->order == VM_NFREEORDER)
1074e35395ceSAlan Cox 		return (FALSE);
10757bfda801SAlan Cox 	KASSERT(m_set->order < VM_NFREEORDER,
10767bfda801SAlan Cox 	    ("vm_phys_unfree_page: page %p has unexpected order %d",
10777bfda801SAlan Cox 	    m_set, m_set->order));
10787bfda801SAlan Cox 
10797bfda801SAlan Cox 	/*
10807bfda801SAlan Cox 	 * Next, remove "m_set" from the free lists.  Finally, extract
10817bfda801SAlan Cox 	 * "m" from "m_set" using an iterative algorithm: While "m_set"
10827bfda801SAlan Cox 	 * is larger than a page, shrink "m_set" by returning the half
10837bfda801SAlan Cox 	 * of "m_set" that does not contain "m" to the free lists.
10847bfda801SAlan Cox 	 */
10857bfda801SAlan Cox 	fl = (*seg->free_queues)[m_set->pool];
10867bfda801SAlan Cox 	order = m_set->order;
10877e226537SAttilio Rao 	vm_freelist_rem(fl, m_set, order);
10887bfda801SAlan Cox 	while (order > 0) {
10897bfda801SAlan Cox 		order--;
10907bfda801SAlan Cox 		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
10917bfda801SAlan Cox 		if (m->phys_addr < pa_half)
10927bfda801SAlan Cox 			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
10937bfda801SAlan Cox 		else {
10947bfda801SAlan Cox 			m_tmp = m_set;
10957bfda801SAlan Cox 			m_set = &seg->first_page[atop(pa_half - seg->start)];
10967bfda801SAlan Cox 		}
10977e226537SAttilio Rao 		vm_freelist_add(fl, m_tmp, order, 0);
10987bfda801SAlan Cox 	}
10997bfda801SAlan Cox 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
1100e35395ceSAlan Cox 	return (TRUE);
11017bfda801SAlan Cox }
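
/*
 * Illustrative example of the halving loop above: if "m" is the page at
 * offset 2 within a free order-2 block, the block is removed from its queue
 * and split twice: the lower order-1 half (offsets 0-1) is returned to the
 * free lists, then the order-0 buddy at offset 3, leaving only "m" removed
 * from the free lists.
 */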
11027bfda801SAlan Cox 
11037bfda801SAlan Cox /*
11042f9f48d6SAlan Cox  * Allocate a contiguous set of physical pages of the given size
11052f9f48d6SAlan Cox  * "npages" from the free lists of the given domain "domain".  All of
11062f9f48d6SAlan Cox  * the physical pages must be at or above the given physical address
11072f9f48d6SAlan Cox  * "low" and below the given physical address "high".  The given value
11082f9f48d6SAlan Cox  * "alignment" determines the alignment of the first physical page in
11092f9f48d6SAlan Cox  * the set.  If the given value "boundary" is non-zero, then the set
11102f9f48d6SAlan Cox  * cannot cross any physical address boundary that is a multiple of
111111752d88SAlan Cox  * that value.  Both "alignment" and "boundary" must be powers of two.
111211752d88SAlan Cox  */
111311752d88SAlan Cox vm_page_t
1114ef435ae7SJeff Roberson vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
11155c1f2cc4SAlan Cox     u_long alignment, vm_paddr_t boundary)
111611752d88SAlan Cox {
1117c869e672SAlan Cox 	vm_paddr_t pa_end, pa_start;
1118c869e672SAlan Cox 	vm_page_t m_run;
1119c869e672SAlan Cox 	struct vm_phys_seg *seg;
1120ef435ae7SJeff Roberson 	int segind;
112111752d88SAlan Cox 
1122c869e672SAlan Cox 	KASSERT(npages > 0, ("npages is 0"));
1123c869e672SAlan Cox 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1124c869e672SAlan Cox 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1125fbd80bd0SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1126c869e672SAlan Cox 	if (low >= high)
1127c869e672SAlan Cox 		return (NULL);
1128c869e672SAlan Cox 	m_run = NULL;
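	/*
	 * The segment array is ordered by address, so this loop scans the
	 * segments from the highest-addressed one downward and can stop as
	 * soon as a segment ends at or below "low": every remaining segment
	 * lies entirely below the requested range as well.
	 */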
1129477bffbeSAlan Cox 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1130c869e672SAlan Cox 		seg = &vm_phys_segs[segind];
1131477bffbeSAlan Cox 		if (seg->start >= high || seg->domain != domain)
113211752d88SAlan Cox 			continue;
1133477bffbeSAlan Cox 		if (low >= seg->end)
1134477bffbeSAlan Cox 			break;
1135c869e672SAlan Cox 		if (low <= seg->start)
1136c869e672SAlan Cox 			pa_start = seg->start;
1137c869e672SAlan Cox 		else
1138c869e672SAlan Cox 			pa_start = low;
1139c869e672SAlan Cox 		if (high < seg->end)
1140c869e672SAlan Cox 			pa_end = high;
1141c869e672SAlan Cox 		else
1142c869e672SAlan Cox 			pa_end = seg->end;
1143c869e672SAlan Cox 		if (pa_end - pa_start < ptoa(npages))
1144c869e672SAlan Cox 			continue;
1145c869e672SAlan Cox 		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
1146c869e672SAlan Cox 		    alignment, boundary);
1147c869e672SAlan Cox 		if (m_run != NULL)
1148c869e672SAlan Cox 			break;
1149c869e672SAlan Cox 	}
1150c869e672SAlan Cox 	return (m_run);
1151c869e672SAlan Cox }
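
/*
 * Editorial sketch, not part of the original file: a hypothetical caller
 * asking for a 64KB, 64KB-aligned run of physical memory below 4GB from
 * domain 0.  As asserted above, the free page queue mutex must be held
 * across the call.  The function name is illustrative only, and error
 * handling, free-count accounting, and page initialization are omitted;
 * ordinary kernel code allocates through the vm_page layer rather than
 * calling vm_phys_alloc_contig() directly.
 */
#if 0
static vm_page_t
example_alloc_64k_run(void)
{
	vm_page_t m_run;

	mtx_lock(&vm_page_queue_free_mtx);
	m_run = vm_phys_alloc_contig(0, atop(64 * 1024), 0,
	    (vm_paddr_t)1 << 32, 64 * 1024, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
	return (m_run);
}
#endif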
115211752d88SAlan Cox 
115311752d88SAlan Cox /*
1154c869e672SAlan Cox  * Allocate a run of contiguous physical pages from the free lists of the
1155c869e672SAlan Cox  * specified segment.
1156c869e672SAlan Cox  */
1157c869e672SAlan Cox static vm_page_t
1158c869e672SAlan Cox vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
1159c869e672SAlan Cox     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1160c869e672SAlan Cox {
1161c869e672SAlan Cox 	struct vm_freelist *fl;
1162c869e672SAlan Cox 	vm_paddr_t pa, pa_end, size;
1163c869e672SAlan Cox 	vm_page_t m, m_ret;
1164c869e672SAlan Cox 	u_long npages_end;
1165c869e672SAlan Cox 	int oind, order, pind;
1166c869e672SAlan Cox 
1167c869e672SAlan Cox 	KASSERT(npages > 0, ("npages is 0"));
1168c869e672SAlan Cox 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1169c869e672SAlan Cox 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1170c869e672SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1171c869e672SAlan Cox 	/* Compute the order of the smallest queue that can hold npages. */
1172c869e672SAlan Cox 	for (order = 0; (1 << order) < npages; order++);
1173c869e672SAlan Cox 	/* Search for a run satisfying the specified conditions. */
1174c869e672SAlan Cox 	size = npages << PAGE_SHIFT;
1175c869e672SAlan Cox 	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
1176c869e672SAlan Cox 	    oind++) {
1177c869e672SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1178c869e672SAlan Cox 			fl = (*seg->free_queues)[pind];
1179c869e672SAlan Cox 			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
1180c869e672SAlan Cox 				/*
118111752d88SAlan Cox 				 * Is the size of this allocation request
118211752d88SAlan Cox 				 * larger than the largest block size?
118311752d88SAlan Cox 				 */
118411752d88SAlan Cox 				if (order >= VM_NFREEORDER) {
118511752d88SAlan Cox 					/*
1186c869e672SAlan Cox 					 * Determine whether enough of the
1187c869e672SAlan Cox 					 * following maximum-order blocks are
1188c869e672SAlan Cox 					 * free to satisfy the request.
118911752d88SAlan Cox 					 */
119011752d88SAlan Cox 					pa = VM_PAGE_TO_PHYS(m_ret);
1191c869e672SAlan Cox 					pa_end = pa + size;
119211752d88SAlan Cox 					for (;;) {
1193c869e672SAlan Cox 						pa += 1 << (PAGE_SHIFT +
1194c869e672SAlan Cox 						    VM_NFREEORDER - 1);
1195c869e672SAlan Cox 						if (pa >= pa_end ||
1196c869e672SAlan Cox 						    pa < seg->start ||
119711752d88SAlan Cox 						    pa >= seg->end)
119811752d88SAlan Cox 							break;
1199c869e672SAlan Cox 						m = &seg->first_page[atop(pa -
1200c869e672SAlan Cox 						    seg->start)];
1201c869e672SAlan Cox 						if (m->order != VM_NFREEORDER -
1202c869e672SAlan Cox 						    1)
120311752d88SAlan Cox 							break;
120411752d88SAlan Cox 					}
1205c869e672SAlan Cox 					/* If not, go to the next block. */
1206c869e672SAlan Cox 					if (pa < pa_end)
120711752d88SAlan Cox 						continue;
120811752d88SAlan Cox 				}
120911752d88SAlan Cox 
121011752d88SAlan Cox 				/*
1211c869e672SAlan Cox 				 * Determine if the blocks are within the
1212c869e672SAlan Cox 				 * given range, satisfy the given alignment,
1213c869e672SAlan Cox 				 * and do not cross the given boundary.
121411752d88SAlan Cox 				 */
121511752d88SAlan Cox 				pa = VM_PAGE_TO_PHYS(m_ret);
1216c869e672SAlan Cox 				pa_end = pa + size;
1217d9c9c81cSPedro F. Giffuni 				if (pa >= low && pa_end <= high &&
1218d9c9c81cSPedro F. Giffuni 				    (pa & (alignment - 1)) == 0 &&
1219d9c9c81cSPedro F. Giffuni 				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
122011752d88SAlan Cox 					goto done;
122111752d88SAlan Cox 			}
122211752d88SAlan Cox 		}
122311752d88SAlan Cox 	}
122411752d88SAlan Cox 	return (NULL);
122511752d88SAlan Cox done:
122611752d88SAlan Cox 	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
122711752d88SAlan Cox 		fl = (*seg->free_queues)[m->pool];
12287e226537SAttilio Rao 		vm_freelist_rem(fl, m, m->order);
122911752d88SAlan Cox 	}
123011752d88SAlan Cox 	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
123111752d88SAlan Cox 		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
123211752d88SAlan Cox 	fl = (*seg->free_queues)[m_ret->pool];
123311752d88SAlan Cox 	vm_phys_split_pages(m_ret, oind, fl, order);
12345c1f2cc4SAlan Cox 	/* Return excess pages to the free lists. */
12355c1f2cc4SAlan Cox 	npages_end = roundup2(npages, 1 << imin(oind, order));
12365c1f2cc4SAlan Cox 	if (npages < npages_end)
12375c1f2cc4SAlan Cox 		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
123811752d88SAlan Cox 	return (m_ret);
123911752d88SAlan Cox }
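
/*
 * Worked example (editorial illustration, not part of the original file):
 * the boundary test above relies on the fact that a run [pa, pa_end) stays
 * within a single "boundary"-aligned block exactly when pa and pa_end - 1
 * agree in every bit at or above log2(boundary), which is what
 * rounddown2(pa ^ (pa_end - 1), boundary) == 0 checks.  For instance, with
 * boundary = 0x10000:
 *
 *   pa = 0x10000, pa_end = 0x14000: pa ^ (pa_end - 1) = 0x3fff and
 *   rounddown2(0x3fff, 0x10000) == 0, so the run stays within one 64KB
 *   block and is accepted.
 *
 *   pa = 0x0c000, pa_end = 0x14000: pa ^ (pa_end - 1) = 0x1ffff and
 *   rounddown2(0x1ffff, 0x10000) == 0x10000 != 0, so the run crosses the
 *   64KB boundary at 0x10000 and is rejected.
 *
 * A boundary of zero never rejects a run, since rounddown2(x, 0) is 0.
 */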
124011752d88SAlan Cox 
124111752d88SAlan Cox #ifdef DDB
124211752d88SAlan Cox /*
124311752d88SAlan Cox  * Show the number of physical pages in each of the free lists.
124411752d88SAlan Cox  */
124511752d88SAlan Cox DB_SHOW_COMMAND(freepages, db_show_freepages)
124611752d88SAlan Cox {
124711752d88SAlan Cox 	struct vm_freelist *fl;
12487e226537SAttilio Rao 	int flind, oind, pind, dom;
124911752d88SAlan Cox 
12507e226537SAttilio Rao 	for (dom = 0; dom < vm_ndomains; dom++) {
12517e226537SAttilio Rao 		db_printf("DOMAIN: %d\n", dom);
125211752d88SAlan Cox 		for (flind = 0; flind < vm_nfreelists; flind++) {
125311752d88SAlan Cox 			db_printf("FREE LIST %d:\n"
125411752d88SAlan Cox 			    "\n  ORDER (SIZE)  |  NUMBER"
125511752d88SAlan Cox 			    "\n              ", flind);
125611752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
125711752d88SAlan Cox 				db_printf("  |  POOL %d", pind);
125811752d88SAlan Cox 			db_printf("\n--            ");
125911752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
126011752d88SAlan Cox 				db_printf("-- --      ");
126111752d88SAlan Cox 			db_printf("--\n");
126211752d88SAlan Cox 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
126311752d88SAlan Cox 				db_printf("  %2.2d (%6.6dK)", oind,
126411752d88SAlan Cox 				    1 << (PAGE_SHIFT - 10 + oind));
126511752d88SAlan Cox 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
12667e226537SAttilio Rao 					fl = vm_phys_free_queues[dom][flind][pind];
126711752d88SAlan Cox 					db_printf("  |  %6.6d", fl[oind].lcnt);
126811752d88SAlan Cox 				}
126911752d88SAlan Cox 				db_printf("\n");
127011752d88SAlan Cox 			}
127111752d88SAlan Cox 			db_printf("\n");
127211752d88SAlan Cox 		}
12737e226537SAttilio Rao 		db_printf("\n");
12747e226537SAttilio Rao 	}
127511752d88SAlan Cox }
127611752d88SAlan Cox #endif
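
/*
 * Usage note (editorial): the DB_SHOW_COMMAND() above registers the table
 * with the in-kernel debugger as "show freepages", e.g.:
 *
 *   db> show freepages
 *   DOMAIN: 0
 *   FREE LIST 0:
 *   ...
 */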
1277