xref: /freebsd/sys/vm/vm_phys.c (revision ba3c1f5972d7b90feb6e6da47905ff2757e0fe57)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002-2006 Rice University
5  * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
6  * All rights reserved.
7  *
8  * This software was developed for the FreeBSD Project by Alan L. Cox,
9  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
24  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
30  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  *	Physical memory system implementation
36  *
37  * Any external functions defined by this module are only to be used by the
38  * virtual memory system.
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 #include "opt_vm.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/domainset.h>
50 #include <sys/lock.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/queue.h>
56 #include <sys/rwlock.h>
57 #include <sys/sbuf.h>
58 #include <sys/sysctl.h>
59 #include <sys/tree.h>
60 #include <sys/vmmeter.h>
61 
62 #include <ddb/ddb.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 #include <vm/vm_param.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_phys.h>
71 #include <vm/vm_pagequeue.h>
72 
73 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
74     "Too many physsegs.");
75 _Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
76     "vm_paddr_t too big for ffsll, flsll.");
77 
78 #ifdef NUMA
79 struct mem_affinity __read_mostly *mem_affinity;
80 int __read_mostly *mem_locality;
81 #endif
82 
83 int __read_mostly vm_ndomains = 1;
84 domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);
85 
86 struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
87 int __read_mostly vm_phys_nsegs;
88 static struct vm_phys_seg vm_phys_early_segs[8];
89 static int vm_phys_early_nsegs;
90 
91 struct vm_phys_fictitious_seg;
92 static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
93     struct vm_phys_fictitious_seg *);
94 
95 RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
96     RB_INITIALIZER(&vm_phys_fictitious_tree);
97 
98 struct vm_phys_fictitious_seg {
99 	RB_ENTRY(vm_phys_fictitious_seg) node;
100 	/* Memory region data */
101 	vm_paddr_t	start;
102 	vm_paddr_t	end;
103 	vm_page_t	first_page;
104 };
105 
106 RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
107     vm_phys_fictitious_cmp);
108 
109 static struct rwlock_padalign vm_phys_fictitious_reg_lock;
110 MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
111 
112 static struct vm_freelist __aligned(CACHE_LINE_SIZE)
113     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
114     [VM_NFREEORDER_MAX];
115 
116 static int __read_mostly vm_nfreelists;
117 
118 /*
119  * These "avail lists" are globals used to communicate boot-time physical
120  * memory layout to other parts of the kernel.  Each physically contiguous
121  * region of memory is defined by a start address at an even index and an
122  * end address at the following odd index.  Each list is terminated by a
123  * pair of zero entries.
124  *
125  * dump_avail tells the dump code what regions to include in a crash dump, and
126  * phys_avail is all of the remaining physical memory that is available for
127  * the vm system.
128  *
129  * Initially dump_avail and phys_avail are identical.  Boot-time memory
130  * allocations remove extents from phys_avail that may still be included
131  * in dumps.
132  */
133 vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
134 vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
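/*
 * For example, a hypothetical layout describing two usable regions,
 * [0x1000, 0x9f000) and [0x100000, 0x40000000), would be stored as
 *
 *	phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0x40000000, 0, 0 };
 *
 * with each region's start at an even index, its end at the following odd
 * index, and a pair of zeroes terminating the list.
 */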
135 
136 /*
137  * Provides the mapping from VM_FREELIST_* to free list indices (flind).
138  */
139 static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];
140 
141 CTASSERT(VM_FREELIST_DEFAULT == 0);
142 
143 #ifdef VM_FREELIST_DMA32
144 #define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
145 #endif
146 
147 /*
148  * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
149  * the ordering of the free list boundaries.
150  */
151 #if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
152 CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
153 #endif
154 
155 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
156 SYSCTL_OID(_vm, OID_AUTO, phys_free,
157     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
158     sysctl_vm_phys_free, "A",
159     "Phys Free Info");
160 
161 static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
162 SYSCTL_OID(_vm, OID_AUTO, phys_segs,
163     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
164     sysctl_vm_phys_segs, "A",
165     "Phys Seg Info");
166 
167 #ifdef NUMA
168 static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
169 SYSCTL_OID(_vm, OID_AUTO, phys_locality,
170     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
171     sysctl_vm_phys_locality, "A",
172     "Phys Locality Info");
173 #endif
174 
175 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
176     &vm_ndomains, 0, "Number of physical memory domains available.");
177 
178 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
179 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
180 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
181     int order, int tail);
182 
183 /*
184  * Red-black tree helpers for vm fictitious range management.
185  */
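/*
 * A lookup key is encoded as a degenerate segment whose "end" is zero (see
 * vm_phys_fictitious_to_vm_page() and vm_phys_fictitious_unreg_range());
 * the comparison routines below detect this case and perform a
 * point-in-range test instead of an overlap test.
 */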
186 static inline int
187 vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
188     struct vm_phys_fictitious_seg *range)
189 {
190 
191 	KASSERT(range->start != 0 && range->end != 0,
192 	    ("Invalid range passed on search for vm_fictitious page"));
193 	if (p->start >= range->end)
194 		return (1);
195 	if (p->start < range->start)
196 		return (-1);
197 
198 	return (0);
199 }
200 
201 static int
202 vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
203     struct vm_phys_fictitious_seg *p2)
204 {
205 
206 	/* Check if this is a search for a page */
207 	if (p1->end == 0)
208 		return (vm_phys_fictitious_in_range(p1, p2));
209 
210 	KASSERT(p2->end != 0,
211     ("Invalid range passed as second parameter to vm fictitious comparison"));
212 
213 	/* Searching to add a new range */
214 	if (p1->end <= p2->start)
215 		return (-1);
216 	if (p1->start >= p2->end)
217 		return (1);
218 
219 	panic("Trying to add overlapping vm fictitious ranges:\n"
220 	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
221 	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
222 }
223 
224 int
225 vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
226 {
227 #ifdef NUMA
228 	domainset_t mask;
229 	int i;
230 
231 	if (vm_ndomains == 1 || mem_affinity == NULL)
232 		return (0);
233 
234 	DOMAINSET_ZERO(&mask);
235 	/*
236 	 * Check for any memory that overlaps low, high.
237 	 */
238 	for (i = 0; mem_affinity[i].end != 0; i++)
239 		if (mem_affinity[i].start <= high &&
240 		    mem_affinity[i].end >= low)
241 			DOMAINSET_SET(mem_affinity[i].domain, &mask);
242 	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
243 		return (prefer);
244 	if (DOMAINSET_EMPTY(&mask))
245 		panic("vm_phys_domain_match:  Impossible constraint");
246 	return (DOMAINSET_FFS(&mask) - 1);
247 #else
248 	return (0);
249 #endif
250 }
251 
252 /*
253  * Outputs the state of the physical memory allocator, specifically,
254  * the amount of physical memory in each free list.
255  */
256 static int
257 sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
258 {
259 	struct sbuf sbuf;
260 	struct vm_freelist *fl;
261 	int dom, error, flind, oind, pind;
262 
263 	error = sysctl_wire_old_buffer(req, 0);
264 	if (error != 0)
265 		return (error);
266 	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
267 	for (dom = 0; dom < vm_ndomains; dom++) {
268 		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
269 		for (flind = 0; flind < vm_nfreelists; flind++) {
270 			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
271 			    "\n  ORDER (SIZE)  |  NUMBER"
272 			    "\n              ", flind);
273 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
274 				sbuf_printf(&sbuf, "  |  POOL %d", pind);
275 			sbuf_printf(&sbuf, "\n--            ");
276 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
277 				sbuf_printf(&sbuf, "-- --      ");
278 			sbuf_printf(&sbuf, "--\n");
279 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
280 				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
281 				    1 << (PAGE_SHIFT - 10 + oind));
282 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
283 				fl = vm_phys_free_queues[dom][flind][pind];
284 					sbuf_printf(&sbuf, "  |  %6d",
285 					    fl[oind].lcnt);
286 				}
287 				sbuf_printf(&sbuf, "\n");
288 			}
289 		}
290 	}
291 	error = sbuf_finish(&sbuf);
292 	sbuf_delete(&sbuf);
293 	return (error);
294 }
295 
296 /*
297  * Outputs the set of physical memory segments.
298  */
299 static int
300 sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
301 {
302 	struct sbuf sbuf;
303 	struct vm_phys_seg *seg;
304 	int error, segind;
305 
306 	error = sysctl_wire_old_buffer(req, 0);
307 	if (error != 0)
308 		return (error);
309 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
310 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
311 		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
312 		seg = &vm_phys_segs[segind];
313 		sbuf_printf(&sbuf, "start:     %#jx\n",
314 		    (uintmax_t)seg->start);
315 		sbuf_printf(&sbuf, "end:       %#jx\n",
316 		    (uintmax_t)seg->end);
317 		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
318 		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
319 	}
320 	error = sbuf_finish(&sbuf);
321 	sbuf_delete(&sbuf);
322 	return (error);
323 }
324 
325 /*
326  * Return affinity, or -1 if there's no affinity information.
327  */
328 int
329 vm_phys_mem_affinity(int f, int t)
330 {
331 
332 #ifdef NUMA
333 	if (mem_locality == NULL)
334 		return (-1);
335 	if (f >= vm_ndomains || t >= vm_ndomains)
336 		return (-1);
337 	return (mem_locality[f * vm_ndomains + t]);
338 #else
339 	return (-1);
340 #endif
341 }
342 
343 #ifdef NUMA
344 /*
345  * Outputs the VM locality table.
346  */
347 static int
348 sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
349 {
350 	struct sbuf sbuf;
351 	int error, i, j;
352 
353 	error = sysctl_wire_old_buffer(req, 0);
354 	if (error != 0)
355 		return (error);
356 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
357 
358 	sbuf_printf(&sbuf, "\n");
359 
360 	for (i = 0; i < vm_ndomains; i++) {
361 		sbuf_printf(&sbuf, "%d: ", i);
362 		for (j = 0; j < vm_ndomains; j++) {
363 			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
364 		}
365 		sbuf_printf(&sbuf, "\n");
366 	}
367 	error = sbuf_finish(&sbuf);
368 	sbuf_delete(&sbuf);
369 	return (error);
370 }
371 #endif
372 
373 static void
374 vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
375 {
376 
377 	m->order = order;
378 	if (tail)
379 		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
380 	else
381 		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
382 	fl[order].lcnt++;
383 }
384 
385 static void
386 vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
387 {
388 
389 	TAILQ_REMOVE(&fl[order].pl, m, listq);
390 	fl[order].lcnt--;
391 	m->order = VM_NFREEORDER;
392 }
393 
394 /*
395  * Create a physical memory segment.
396  */
397 static void
398 _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
399 {
400 	struct vm_phys_seg *seg;
401 
402 	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
403 	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
404 	KASSERT(domain >= 0 && domain < vm_ndomains,
405 	    ("vm_phys_create_seg: invalid domain provided"));
406 	seg = &vm_phys_segs[vm_phys_nsegs++];
407 	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
408 		*seg = *(seg - 1);
409 		seg--;
410 	}
411 	seg->start = start;
412 	seg->end = end;
413 	seg->domain = domain;
414 }
415 
416 static void
417 vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
418 {
419 #ifdef NUMA
420 	int i;
421 
422 	if (mem_affinity == NULL) {
423 		_vm_phys_create_seg(start, end, 0);
424 		return;
425 	}
426 
427 	for (i = 0;; i++) {
428 		if (mem_affinity[i].end == 0)
429 			panic("Reached end of affinity info");
430 		if (mem_affinity[i].end <= start)
431 			continue;
432 		if (mem_affinity[i].start > start)
433 			panic("No affinity info for start %jx",
434 			    (uintmax_t)start);
435 		if (mem_affinity[i].end >= end) {
436 			_vm_phys_create_seg(start, end,
437 			    mem_affinity[i].domain);
438 			break;
439 		}
440 		_vm_phys_create_seg(start, mem_affinity[i].end,
441 		    mem_affinity[i].domain);
442 		start = mem_affinity[i].end;
443 	}
444 #else
445 	_vm_phys_create_seg(start, end, 0);
446 #endif
447 }
448 
449 /*
450  * Add a physical memory segment.
451  */
452 void
453 vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
454 {
455 	vm_paddr_t paddr;
456 
457 	KASSERT((start & PAGE_MASK) == 0,
458 	    ("vm_phys_add_seg: start is not page aligned"));
459 	KASSERT((end & PAGE_MASK) == 0,
460 	    ("vm_phys_add_seg: end is not page aligned"));
461 
462 	/*
463 	 * Split the physical memory segment if it spans two or more free
464 	 * list boundaries.
465 	 */
466 	paddr = start;
467 #ifdef	VM_FREELIST_LOWMEM
468 	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
469 		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
470 		paddr = VM_LOWMEM_BOUNDARY;
471 	}
472 #endif
473 #ifdef	VM_FREELIST_DMA32
474 	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
475 		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
476 		paddr = VM_DMA32_BOUNDARY;
477 	}
478 #endif
479 	vm_phys_create_seg(paddr, end);
480 }
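/*
 * For illustration, assume a platform that defines both boundaries, with
 * VM_LOWMEM_BOUNDARY at (a hypothetical) 16 MB and VM_DMA32_BOUNDARY at
 * 4 GB: passing [0, 5 GB) to vm_phys_add_seg() records three segments,
 * [0, 16 MB), [16 MB, 4 GB), and [4 GB, 5 GB), so that no segment spans a
 * free list boundary.
 */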
481 
482 /*
483  * Initialize the physical memory allocator.
484  *
485  * Requires that vm_page_array is initialized!
486  */
487 void
488 vm_phys_init(void)
489 {
490 	struct vm_freelist *fl;
491 	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
492 #if defined(VM_DMA32_NPAGES_THRESHOLD) || defined(VM_PHYSSEG_SPARSE)
493 	u_long npages;
494 #endif
495 	int dom, flind, freelist, oind, pind, segind;
496 
497 	/*
498 	 * Compute the number of free lists, and generate the mapping from the
499 	 * manifest constants VM_FREELIST_* to the free list indices.
500 	 *
501 	 * Initially, the entries of vm_freelist_to_flind[] are set to either
502 	 * 0 or 1 to indicate which free lists should be created.
503 	 */
504 #ifdef	VM_DMA32_NPAGES_THRESHOLD
505 	npages = 0;
506 #endif
507 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
508 		seg = &vm_phys_segs[segind];
509 #ifdef	VM_FREELIST_LOWMEM
510 		if (seg->end <= VM_LOWMEM_BOUNDARY)
511 			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
512 		else
513 #endif
514 #ifdef	VM_FREELIST_DMA32
515 		if (
516 #ifdef	VM_DMA32_NPAGES_THRESHOLD
517 		    /*
518 		     * Create the DMA32 free list only if the amount of
519 		     * physical memory above physical address 4G exceeds the
520 		     * given threshold.
521 		     */
522 		    npages > VM_DMA32_NPAGES_THRESHOLD &&
523 #endif
524 		    seg->end <= VM_DMA32_BOUNDARY)
525 			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
526 		else
527 #endif
528 		{
529 #ifdef	VM_DMA32_NPAGES_THRESHOLD
530 			npages += atop(seg->end - seg->start);
531 #endif
532 			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
533 		}
534 	}
535 	/* Change each entry into a running total of the free lists. */
536 	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
537 		vm_freelist_to_flind[freelist] +=
538 		    vm_freelist_to_flind[freelist - 1];
539 	}
540 	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
541 	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
542 	/* Change each entry into a free list index. */
543 	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
544 		vm_freelist_to_flind[freelist]--;
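	/*
	 * For example, when every configured list ends up populated (say
	 * DEFAULT, DMA32, and LOWMEM on a machine with much memory above
	 * 4 GB), the flags {1, 1, 1} become running totals {1, 2, 3} and,
	 * after the decrement above, the flinds {0, 1, 2}, with
	 * vm_nfreelists equal to 3.
	 */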
545 
546 	/*
547 	 * Initialize the first_page and free_queues fields of each physical
548 	 * memory segment.
549 	 */
550 #ifdef VM_PHYSSEG_SPARSE
551 	npages = 0;
552 #endif
553 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
554 		seg = &vm_phys_segs[segind];
555 #ifdef VM_PHYSSEG_SPARSE
556 		seg->first_page = &vm_page_array[npages];
557 		npages += atop(seg->end - seg->start);
558 #else
559 		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
560 #endif
561 #ifdef	VM_FREELIST_LOWMEM
562 		if (seg->end <= VM_LOWMEM_BOUNDARY) {
563 			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
564 			KASSERT(flind >= 0,
565 			    ("vm_phys_init: LOWMEM flind < 0"));
566 		} else
567 #endif
568 #ifdef	VM_FREELIST_DMA32
569 		if (seg->end <= VM_DMA32_BOUNDARY) {
570 			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
571 			KASSERT(flind >= 0,
572 			    ("vm_phys_init: DMA32 flind < 0"));
573 		} else
574 #endif
575 		{
576 			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
577 			KASSERT(flind >= 0,
578 			    ("vm_phys_init: DEFAULT flind < 0"));
579 		}
580 		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
581 	}
582 
583 	/*
584 	 * Coalesce physical memory segments that are contiguous and share the
585 	 * same per-domain free queues.
586 	 */
587 	prev_seg = vm_phys_segs;
588 	seg = &vm_phys_segs[1];
589 	end_seg = &vm_phys_segs[vm_phys_nsegs];
590 	while (seg < end_seg) {
591 		if (prev_seg->end == seg->start &&
592 		    prev_seg->free_queues == seg->free_queues) {
593 			prev_seg->end = seg->end;
594 			KASSERT(prev_seg->domain == seg->domain,
595 			    ("vm_phys_init: free queues cannot span domains"));
596 			vm_phys_nsegs--;
597 			end_seg--;
598 			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
599 				*tmp_seg = *(tmp_seg + 1);
600 		} else {
601 			prev_seg = seg;
602 			seg++;
603 		}
604 	}
605 
606 	/*
607 	 * Initialize the free queues.
608 	 */
609 	for (dom = 0; dom < vm_ndomains; dom++) {
610 		for (flind = 0; flind < vm_nfreelists; flind++) {
611 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
612 				fl = vm_phys_free_queues[dom][flind][pind];
613 				for (oind = 0; oind < VM_NFREEORDER; oind++)
614 					TAILQ_INIT(&fl[oind].pl);
615 			}
616 		}
617 	}
618 
619 	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
620 }
621 
622 /*
623  * Register info about the NUMA topology of the system.
624  *
625  * Invoked by platform-dependent code prior to vm_phys_init().
626  */
627 void
628 vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
629     int *locality)
630 {
631 #ifdef NUMA
632 	int d, i;
633 
634 	/*
635 	 * For now the only override value that we support is 1, which
636 	 * effectively disables NUMA-awareness in the allocators.
637 	 */
638 	d = 0;
639 	TUNABLE_INT_FETCH("vm.numa.disabled", &d);
640 	if (d)
641 		ndomains = 1;
642 
643 	if (ndomains > 1) {
644 		vm_ndomains = ndomains;
645 		mem_affinity = affinity;
646 		mem_locality = locality;
647 	}
648 
649 	for (i = 0; i < vm_ndomains; i++)
650 		DOMAINSET_SET(i, &all_domains);
651 #else
652 	(void)ndomains;
653 	(void)affinity;
654 	(void)locality;
655 #endif
656 }
657 
658 /*
659  * Split a contiguous, power of two-sized set of physical pages.
660  *
661  * When this function is called by a page allocation function, the caller
662  * should request insertion at the head unless the order [order, oind) queues
663  * are known to be empty.  The objective is to reduce the likelihood of
664  * long-term fragmentation by promoting contemporaneous allocation and
665  * (hopefully) deallocation.
666  */
667 static __inline void
668 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
669     int tail)
670 {
671 	vm_page_t m_buddy;
672 
673 	while (oind > order) {
674 		oind--;
675 		m_buddy = &m[1 << oind];
676 		KASSERT(m_buddy->order == VM_NFREEORDER,
677 		    ("vm_phys_split_pages: page %p has unexpected order %d",
678 		    m_buddy, m_buddy->order));
679 		vm_freelist_add(fl, m_buddy, oind, tail);
680         }
681 }
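/*
 * For example, satisfying an order-1 request from an order-3 block splits
 * the block twice: the upper order-2 buddy and then the upper order-1
 * buddy are returned to the free lists, leaving the caller with the
 * order-1 block that starts at the original page.
 */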
682 
683 /*
684  * Add the physical pages [m, m + npages) at the beginning of a power-of-two
685  * aligned and sized set to the specified free list.
686  *
687  * When this function is called by a page allocation function, the caller
688  * should request insertion at the head unless the lower-order queues are
689  * known to be empty.  The objective is to reduce the likelihood of long-
690  * term fragmentation by promoting contemporaneous allocation and (hopefully)
691  * deallocation.
692  *
693  * The physical page m's buddy must not be free.
694  */
695 static void
696 vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
697 {
698         int order;
699 
700 	KASSERT(npages == 0 ||
701 	    (VM_PAGE_TO_PHYS(m) &
702 	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
703 	    ("%s: page %p and npages %u are misaligned",
704 	    __func__, m, npages));
705         while (npages > 0) {
706 		KASSERT(m->order == VM_NFREEORDER,
707 		    ("%s: page %p has unexpected order %d",
708 		    __func__, m, m->order));
709                 order = fls(npages) - 1;
710 		KASSERT(order < VM_NFREEORDER,
711 		    ("%s: order %d is out of range", __func__, order));
712                 vm_freelist_add(fl, m, order, tail);
713 		m += 1 << order;
714                 npages -= 1 << order;
715         }
716 }
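/*
 * For example, vm_phys_enq_beg() breaks an 11-page run into blocks of 8,
 * 2, and 1 pages, working from the front, because fls() always selects
 * the largest power of two that still fits in the remainder.
 */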
717 
718 /*
719  * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
720  * and sized set to the specified free list.
721  *
722  * When this function is called by a page allocation function, the caller
723  * should request insertion at the head unless the lower-order queues are
724  * known to be empty.  The objective is to reduce the likelihood of long-
725  * term fragmentation by promoting contemporaneous allocation and (hopefully)
726  * deallocation.
727  *
728  * If npages is zero, this function does nothing and ignores the physical page
729  * parameter m.  Otherwise, the physical page m's buddy must not be free.
730  */
731 static vm_page_t
732 vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
733 {
734 	int order;
735 
736 	KASSERT(npages == 0 ||
737 	    ((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
738 	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
739 	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
740 	    m, npages));
741 	while (npages > 0) {
742 		KASSERT(m->order == VM_NFREEORDER,
743 		    ("vm_phys_enq_range: page %p has unexpected order %d",
744 		    m, m->order));
745 		order = ffs(npages) - 1;
746 		KASSERT(order < VM_NFREEORDER,
747 		    ("vm_phys_enq_range: order %d is out of range", order));
748 		vm_freelist_add(fl, m, order, tail);
749 		m += 1 << order;
750 		npages -= 1 << order;
751 	}
752 	return (m);
753 }
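/*
 * In contrast to vm_phys_enq_beg(), vm_phys_enq_range() assumes that the
 * end of the run is suitably aligned, so ffs() peels off the smallest
 * blocks first: an 11-page run is queued as blocks of 1, 2, and 8 pages.
 * The return value is the first page past the run, which
 * vm_phys_enqueue_contig() uses to continue freeing from an aligned
 * boundary.
 */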
754 
755 /*
756  * Set the pool for a contiguous, power of two-sized set of physical pages.
757  */
758 static void
759 vm_phys_set_pool(int pool, vm_page_t m, int order)
760 {
761 	vm_page_t m_tmp;
762 
763 	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
764 		m_tmp->pool = pool;
765 }
766 
767 /*
768  * Tries to allocate the specified number of pages from the specified pool
769  * within the specified domain.  Returns the actual number of allocated pages
770  * and a pointer to each page through the array ma[].
771  *
772  * The returned pages may not be physically contiguous.  However, in contrast
773  * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
774  * calling this function once to allocate the desired number of pages will
775  * avoid wasted time in vm_phys_split_pages().
776  *
777  * The free page queues for the specified domain must be locked.
778  */
779 int
780 vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
781 {
782 	struct vm_freelist *alt, *fl;
783 	vm_page_t m;
784 	int avail, end, flind, freelist, i, oind, pind;
785 
786 	KASSERT(domain >= 0 && domain < vm_ndomains,
787 	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
788 	KASSERT(pool < VM_NFREEPOOL,
789 	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
790 	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
791 	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
792 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
793 	i = 0;
794 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
795 		flind = vm_freelist_to_flind[freelist];
796 		if (flind < 0)
797 			continue;
798 		fl = vm_phys_free_queues[domain][flind][pool];
799 		for (oind = 0; oind < VM_NFREEORDER; oind++) {
800 			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
801 				vm_freelist_rem(fl, m, oind);
802 				avail = i + (1 << oind);
803 				end = imin(npages, avail);
804 				while (i < end)
805 					ma[i++] = m++;
806 				if (i == npages) {
807 					/*
808 					 * Return excess pages to fl.  Its order
809 					 * [0, oind) queues are empty.
810 					 */
811 					vm_phys_enq_range(m, avail - i, fl, 1);
812 					return (npages);
813 				}
814 			}
815 		}
816 		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
817 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
818 				alt = vm_phys_free_queues[domain][flind][pind];
819 				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
820 				    NULL) {
821 					vm_freelist_rem(alt, m, oind);
822 					vm_phys_set_pool(pool, m, oind);
823 					avail = i + (1 << oind);
824 					end = imin(npages, avail);
825 					while (i < end)
826 						ma[i++] = m++;
827 					if (i == npages) {
828 						/*
829 						 * Return excess pages to fl.
830 						 * Its order [0, oind) queues
831 						 * are empty.
832 						 */
833 						vm_phys_enq_range(m, avail - i,
834 						    fl, 1);
835 						return (npages);
836 					}
837 				}
838 			}
839 		}
840 	}
841 	return (i);
842 }
843 
844 /*
845  * Allocate a contiguous, power of two-sized set of physical pages
846  * from the free lists.
847  *
848  * The free page queues must be locked.
849  */
850 vm_page_t
851 vm_phys_alloc_pages(int domain, int pool, int order)
852 {
853 	vm_page_t m;
854 	int freelist;
855 
856 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
857 		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
858 		if (m != NULL)
859 			return (m);
860 	}
861 	return (NULL);
862 }
863 
864 /*
865  * Allocate a contiguous, power of two-sized set of physical pages from the
866  * specified free list.  The free list must be specified using one of the
867  * manifest constants VM_FREELIST_*.
868  *
869  * The free page queues must be locked.
870  */
871 vm_page_t
872 vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
873 {
874 	struct vm_freelist *alt, *fl;
875 	vm_page_t m;
876 	int oind, pind, flind;
877 
878 	KASSERT(domain >= 0 && domain < vm_ndomains,
879 	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
880 	    domain));
881 	KASSERT(freelist < VM_NFREELIST,
882 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
883 	    freelist));
884 	KASSERT(pool < VM_NFREEPOOL,
885 	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
886 	KASSERT(order < VM_NFREEORDER,
887 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
888 
889 	flind = vm_freelist_to_flind[freelist];
890 	/* Check if freelist is present */
891 	if (flind < 0)
892 		return (NULL);
893 
894 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
895 	fl = &vm_phys_free_queues[domain][flind][pool][0];
896 	for (oind = order; oind < VM_NFREEORDER; oind++) {
897 		m = TAILQ_FIRST(&fl[oind].pl);
898 		if (m != NULL) {
899 			vm_freelist_rem(fl, m, oind);
900 			/* The order [order, oind) queues are empty. */
901 			vm_phys_split_pages(m, oind, fl, order, 1);
902 			return (m);
903 		}
904 	}
905 
906 	/*
907 	 * The given pool was empty.  Find the largest
908 	 * contiguous, power-of-two-sized set of pages in any
909 	 * pool.  Transfer these pages to the given pool, and
910 	 * use them to satisfy the allocation.
911 	 */
912 	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
913 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
914 			alt = &vm_phys_free_queues[domain][flind][pind][0];
915 			m = TAILQ_FIRST(&alt[oind].pl);
916 			if (m != NULL) {
917 				vm_freelist_rem(alt, m, oind);
918 				vm_phys_set_pool(pool, m, oind);
919 				/* The order [order, oind) queues are empty. */
920 				vm_phys_split_pages(m, oind, fl, order, 1);
921 				return (m);
922 			}
923 		}
924 	}
925 	return (NULL);
926 }
927 
928 /*
929  * Find the vm_page corresponding to the given physical address.
930  */
931 vm_page_t
932 vm_phys_paddr_to_vm_page(vm_paddr_t pa)
933 {
934 	struct vm_phys_seg *seg;
935 
936 	if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
937 		return (&seg->first_page[atop(pa - seg->start)]);
938 	return (NULL);
939 }
940 
941 vm_page_t
942 vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
943 {
944 	struct vm_phys_fictitious_seg tmp, *seg;
945 	vm_page_t m;
946 
947 	m = NULL;
948 	tmp.start = pa;
949 	tmp.end = 0;
950 
951 	rw_rlock(&vm_phys_fictitious_reg_lock);
952 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
953 	rw_runlock(&vm_phys_fictitious_reg_lock);
954 	if (seg == NULL)
955 		return (NULL);
956 
957 	m = &seg->first_page[atop(pa - seg->start)];
958 	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));
959 
960 	return (m);
961 }
962 
963 static inline void
964 vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
965     long page_count, vm_memattr_t memattr)
966 {
967 	long i;
968 
969 	bzero(range, page_count * sizeof(*range));
970 	for (i = 0; i < page_count; i++) {
971 		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
972 		range[i].oflags &= ~VPO_UNMANAGED;
973 		range[i].busy_lock = VPB_UNBUSIED;
974 	}
975 }
976 
977 int
978 vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
979     vm_memattr_t memattr)
980 {
981 	struct vm_phys_fictitious_seg *seg;
982 	vm_page_t fp;
983 	long page_count;
984 #ifdef VM_PHYSSEG_DENSE
985 	long pi, pe;
986 	long dpage_count;
987 #endif
988 
989 	KASSERT(start < end,
990 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
991 	    (uintmax_t)start, (uintmax_t)end));
992 
993 	page_count = (end - start) / PAGE_SIZE;
994 
995 #ifdef VM_PHYSSEG_DENSE
996 	pi = atop(start);
997 	pe = atop(end);
998 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
999 		fp = &vm_page_array[pi - first_page];
1000 		if ((pe - first_page) > vm_page_array_size) {
1001 			/*
1002 			 * We have a segment that starts inside
1003 			 * of vm_page_array, but ends outside of it.
1004 			 *
1005 			 * Use vm_page_array pages for those that are
1006 			 * inside of the vm_page_array range, and
1007 			 * allocate the remaining ones.
1008 			 */
1009 			dpage_count = vm_page_array_size - (pi - first_page);
1010 			vm_phys_fictitious_init_range(fp, start, dpage_count,
1011 			    memattr);
1012 			page_count -= dpage_count;
1013 			start += ptoa(dpage_count);
1014 			goto alloc;
1015 		}
1016 		/*
1017 		 * We can allocate the full range from vm_page_array,
1018 		 * so there's no need to register the range in the tree.
1019 		 */
1020 		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
1021 		return (0);
1022 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
1023 		/*
1024 		 * We have a segment that ends inside of vm_page_array,
1025 		 * but starts outside of it.
1026 		 */
1027 		fp = &vm_page_array[0];
1028 		dpage_count = pe - first_page;
1029 		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
1030 		    memattr);
1031 		end -= ptoa(dpage_count);
1032 		page_count -= dpage_count;
1033 		goto alloc;
1034 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1035 		/*
1036 		 * Trying to register a fictitious range that expands before
1037 		 * and after vm_page_array.
1038 		 */
1039 		return (EINVAL);
1040 	} else {
1041 alloc:
1042 #endif
1043 		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
1044 		    M_WAITOK);
1045 #ifdef VM_PHYSSEG_DENSE
1046 	}
1047 #endif
1048 	vm_phys_fictitious_init_range(fp, start, page_count, memattr);
1049 
1050 	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
1051 	seg->start = start;
1052 	seg->end = end;
1053 	seg->first_page = fp;
1054 
1055 	rw_wlock(&vm_phys_fictitious_reg_lock);
1056 	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
1057 	rw_wunlock(&vm_phys_fictitious_reg_lock);
1058 
1059 	return (0);
1060 }
1061 
1062 void
1063 vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
1064 {
1065 	struct vm_phys_fictitious_seg *seg, tmp;
1066 #ifdef VM_PHYSSEG_DENSE
1067 	long pi, pe;
1068 #endif
1069 
1070 	KASSERT(start < end,
1071 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
1072 	    (uintmax_t)start, (uintmax_t)end));
1073 
1074 #ifdef VM_PHYSSEG_DENSE
1075 	pi = atop(start);
1076 	pe = atop(end);
1077 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1078 		if ((pe - first_page) <= vm_page_array_size) {
1079 			/*
1080 			 * This segment was allocated using vm_page_array
1081 			 * only, there's nothing to do since those pages
1082 			 * were never added to the tree.
1083 			 */
1084 			return;
1085 		}
1086 		/*
1087 		 * We have a segment that starts inside
1088 		 * of vm_page_array, but ends outside of it.
1089 		 *
1090 		 * Calculate how many pages were added to the
1091 		 * tree and free them.
1092 		 */
1093 		start = ptoa(first_page + vm_page_array_size);
1094 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
1095 		/*
1096 		 * We have a segment that ends inside of vm_page_array,
1097 		 * but starts outside of it.
1098 		 */
1099 		end = ptoa(first_page);
1100 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1101 		/* Since it's not possible to register such a range, panic. */
1102 		panic(
1103 		    "Unregistering not registered fictitious range [%#jx:%#jx]",
1104 		    (uintmax_t)start, (uintmax_t)end);
1105 	}
1106 #endif
1107 	tmp.start = start;
1108 	tmp.end = 0;
1109 
1110 	rw_wlock(&vm_phys_fictitious_reg_lock);
1111 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
1112 	if (seg->start != start || seg->end != end) {
1113 		rw_wunlock(&vm_phys_fictitious_reg_lock);
1114 		panic(
1115 		    "Unregistering not registered fictitious range [%#jx:%#jx]",
1116 		    (uintmax_t)start, (uintmax_t)end);
1117 	}
1118 	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
1119 	rw_wunlock(&vm_phys_fictitious_reg_lock);
1120 	free(seg->first_page, M_FICT_PAGES);
1121 	free(seg, M_FICT_PAGES);
1122 }
1123 
1124 /*
1125  * Free a contiguous, power of two-sized set of physical pages.
1126  *
1127  * The free page queues must be locked.
1128  */
1129 void
1130 vm_phys_free_pages(vm_page_t m, int order)
1131 {
1132 	struct vm_freelist *fl;
1133 	struct vm_phys_seg *seg;
1134 	vm_paddr_t pa;
1135 	vm_page_t m_buddy;
1136 
1137 	KASSERT(m->order == VM_NFREEORDER,
1138 	    ("vm_phys_free_pages: page %p has unexpected order %d",
1139 	    m, m->order));
1140 	KASSERT(m->pool < VM_NFREEPOOL,
1141 	    ("vm_phys_free_pages: page %p has unexpected pool %d",
1142 	    m, m->pool));
1143 	KASSERT(order < VM_NFREEORDER,
1144 	    ("vm_phys_free_pages: order %d is out of range", order));
1145 	seg = &vm_phys_segs[m->segind];
1146 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1147 	if (order < VM_NFREEORDER - 1) {
1148 		pa = VM_PAGE_TO_PHYS(m);
1149 		do {
1150 			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
1151 			if (pa < seg->start || pa >= seg->end)
1152 				break;
1153 			m_buddy = &seg->first_page[atop(pa - seg->start)];
1154 			if (m_buddy->order != order)
1155 				break;
1156 			fl = (*seg->free_queues)[m_buddy->pool];
1157 			vm_freelist_rem(fl, m_buddy, order);
1158 			if (m_buddy->pool != m->pool)
1159 				vm_phys_set_pool(m->pool, m_buddy, order);
1160 			order++;
1161 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
1162 			m = &seg->first_page[atop(pa - seg->start)];
1163 		} while (order < VM_NFREEORDER - 1);
1164 	}
1165 	fl = (*seg->free_queues)[m->pool];
1166 	vm_freelist_add(fl, m, order, 1);
1167 }
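/*
 * As an example of the buddy coalescing above: freeing an order-0 page at
 * physical address 0x3000 (with 4 KB pages) examines its buddy at 0x2000;
 * if that page is free at order 0, the pair becomes an order-1 block at
 * 0x2000, whose own buddy at 0x0000 is examined next, and so on until a
 * buddy is not free, lies outside the segment, or order VM_NFREEORDER - 1
 * is reached.
 */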
1168 
1169 /*
1170  * Return the largest possible order of a set of pages starting at m.
1171  */
1172 static int
1173 max_order(vm_page_t m)
1174 {
1175 
1176 	/*
1177 	 * Unsigned "min" is used here so that "order" is assigned
1178 	 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
1179 	 * or the low-order bits of its physical address are zero
1180 	 * because the size of a physical address exceeds the size of
1181 	 * a long.
1182 	 */
1183 	return (min(ffsll(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
1184 	    VM_NFREEORDER - 1));
1185 }
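/*
 * For example, a page at physical address 0x6000 (page frame 6 with 4 KB
 * pages) yields ffsll(6) == 2, so max_order() reports order 1: a two-page
 * block is the largest naturally aligned, power-of-two-sized block that
 * can start at that frame.
 */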
1186 
1187 /*
1188  * Free a contiguous, arbitrarily sized set of physical pages, without
1189  * merging across set boundaries.
1190  *
1191  * The free page queues must be locked.
1192  */
1193 void
1194 vm_phys_enqueue_contig(vm_page_t m, u_long npages)
1195 {
1196 	struct vm_freelist *fl;
1197 	struct vm_phys_seg *seg;
1198 	vm_page_t m_end;
1199 	vm_paddr_t diff, lo;
1200 	int order;
1201 
1202 	/*
1203 	 * Avoid unnecessary coalescing by freeing the pages in the largest
1204 	 * possible power-of-two-sized subsets.
1205 	 */
1206 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1207 	seg = &vm_phys_segs[m->segind];
1208 	fl = (*seg->free_queues)[m->pool];
1209 	m_end = m + npages;
1210 	/* Free blocks of increasing size. */
1211 	lo = VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT;
1212 	if (m < m_end &&
1213 	    (diff = lo ^ (lo + npages - 1)) != 0) {
1214 		order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
1215 		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
1216 	}
1217 
1218 	/* Free blocks of maximum size. */
1219 	order = VM_NFREEORDER - 1;
1220 	while (m + (1 << order) <= m_end) {
1221 		KASSERT(seg == &vm_phys_segs[m->segind],
1222 		    ("%s: page range [%p,%p) spans multiple segments",
1223 		    __func__, m_end - npages, m));
1224 		vm_freelist_add(fl, m, order, 1);
1225 		m += 1 << order;
1226 	}
1227 	/* Free blocks of diminishing size. */
1228 	vm_phys_enq_beg(m, m_end - m, fl, 1);
1229 }
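/*
 * For instance, enqueueing 10 pages starting at page frame 6 first frees
 * an order-1 block at frame 6 (reaching the frame-8 alignment), finds
 * that no maximum-order block fits in so small a range, and finally hands
 * the remaining 8 pages at frame 8 to vm_phys_enq_beg() as one order-3
 * block.
 */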
1230 
1231 /*
1232  * Free a contiguous, arbitrarily sized set of physical pages.
1233  *
1234  * The free page queues must be locked.
1235  */
1236 void
1237 vm_phys_free_contig(vm_page_t m, u_long npages)
1238 {
1239 	int order_start, order_end;
1240 	vm_page_t m_start, m_end;
1241 
1242 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1243 
1244 	m_start = m;
1245 	order_start = max_order(m_start);
1246 	if (order_start < VM_NFREEORDER - 1)
1247 		m_start += 1 << order_start;
1248 	m_end = m + npages;
1249 	order_end = max_order(m_end);
1250 	if (order_end < VM_NFREEORDER - 1)
1251 		m_end -= 1 << order_end;
1252 	/*
1253 	 * Avoid unnecessary coalescing by freeing the pages at the start and
1254 	 * end of the range last.
1255 	 */
1256 	if (m_start < m_end)
1257 		vm_phys_enqueue_contig(m_start, m_end - m_start);
1258 	if (order_start < VM_NFREEORDER - 1)
1259 		vm_phys_free_pages(m, order_start);
1260 	if (order_end < VM_NFREEORDER - 1)
1261 		vm_phys_free_pages(m_end, order_end);
1262 }
1263 
1264 /*
1265  * Identify the first address range within segment segind or greater
1266  * that matches the domain, lies within the low/high range, and has
1267  * enough pages.  Return -1 if there is none.
1268  */
1269 int
1270 vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
1271     u_long npages, vm_paddr_t low, vm_paddr_t high)
1272 {
1273 	vm_paddr_t pa_end, pa_start;
1274 	struct vm_phys_seg *end_seg, *seg;
1275 
1276 	KASSERT(npages > 0, ("npages is zero"));
1277 	KASSERT(domain >= 0 && domain < vm_ndomains, ("domain out of range"));
1278 	end_seg = &vm_phys_segs[vm_phys_nsegs];
1279 	for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) {
1280 		if (seg->domain != domain)
1281 			continue;
1282 		if (seg->start >= high)
1283 			return (-1);
1284 		pa_start = MAX(low, seg->start);
1285 		pa_end = MIN(high, seg->end);
1286 		if (pa_end - pa_start < ptoa(npages))
1287 			continue;
1288 		bounds[0] = &seg->first_page[atop(pa_start - seg->start)];
1289 		bounds[1] = &seg->first_page[atop(pa_end - seg->start)];
1290 		return (seg - vm_phys_segs);
1291 	}
1292 	return (-1);
1293 }
1294 
1295 /*
1296  * Search for the given physical page "m" in the free lists.  If the search
1297  * succeeds, remove "m" from the free lists and return true.  Otherwise, return
1298  * false, indicating that "m" is not in the free lists.
1299  *
1300  * The free page queues must be locked.
1301  */
1302 bool
1303 vm_phys_unfree_page(vm_page_t m)
1304 {
1305 	struct vm_freelist *fl;
1306 	struct vm_phys_seg *seg;
1307 	vm_paddr_t pa, pa_half;
1308 	vm_page_t m_set, m_tmp;
1309 	int order;
1310 
1311 	/*
1312 	 * First, find the contiguous, power of two-sized set of free
1313 	 * physical pages containing the given physical page "m" and
1314 	 * assign it to "m_set".
1315 	 */
1316 	seg = &vm_phys_segs[m->segind];
1317 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1318 	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1319 	    order < VM_NFREEORDER - 1; ) {
1320 		order++;
1321 		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
1322 		if (pa >= seg->start)
1323 			m_set = &seg->first_page[atop(pa - seg->start)];
1324 		else
1325 			return (false);
1326 	}
1327 	if (m_set->order < order)
1328 		return (false);
1329 	if (m_set->order == VM_NFREEORDER)
1330 		return (false);
1331 	KASSERT(m_set->order < VM_NFREEORDER,
1332 	    ("vm_phys_unfree_page: page %p has unexpected order %d",
1333 	    m_set, m_set->order));
1334 
1335 	/*
1336 	 * Next, remove "m_set" from the free lists.  Finally, extract
1337 	 * "m" from "m_set" using an iterative algorithm: While "m_set"
1338 	 * is larger than a page, shrink "m_set" by returning the half
1339 	 * of "m_set" that does not contain "m" to the free lists.
1340 	 */
1341 	fl = (*seg->free_queues)[m_set->pool];
1342 	order = m_set->order;
1343 	vm_freelist_rem(fl, m_set, order);
1344 	while (order > 0) {
1345 		order--;
1346 		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
1347 		if (m->phys_addr < pa_half)
1348 			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
1349 		else {
1350 			m_tmp = m_set;
1351 			m_set = &seg->first_page[atop(pa_half - seg->start)];
1352 		}
1353 		vm_freelist_add(fl, m_tmp, order, 0);
1354 	}
1355 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
1356 	return (true);
1357 }
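/*
 * For example, if "m" is the third page of a free order-2 block, that
 * block is removed from the free lists, its order-1 half that does not
 * contain "m" is freed, and then the order-0 buddy of "m" is freed,
 * leaving only "m" itself removed from the free lists.
 */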
1358 
1359 /*
1360  * Find a run of contiguous physical pages from the specified page list.
1361  */
1362 static vm_page_t
1363 vm_phys_find_freelist_contig(struct vm_freelist *fl, int oind, u_long npages,
1364     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1365 {
1366 	struct vm_phys_seg *seg;
1367 	vm_paddr_t frag, lbound, pa, page_size, pa_end, pa_pre, size;
1368 	vm_page_t m, m_listed, m_ret;
1369 	int order;
1370 
1371 	KASSERT(npages > 0, ("npages is 0"));
1372 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1373 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1374 	/* Search for a run satisfying the specified conditions. */
1375 	page_size = PAGE_SIZE;
1376 	size = npages << PAGE_SHIFT;
1377 	frag = (npages & ~(~0UL << oind)) << PAGE_SHIFT;
1378 	TAILQ_FOREACH(m_listed, &fl[oind].pl, listq) {
1379 		/*
1380 		 * Determine if the address range starting at pa is
1381 		 * too low.
1382 		 */
1383 		pa = VM_PAGE_TO_PHYS(m_listed);
1384 		if (pa < low)
1385 			continue;
1386 
1387 		/*
1388 		 * If this is not the first free oind-block in this range, bail
1389 		 * out. We have seen the first free block already, or will see
1390 		 * it before failing to find an appropriate range.
1391 		 */
1392 		seg = &vm_phys_segs[m_listed->segind];
1393 		lbound = low > seg->start ? low : seg->start;
1394 		pa_pre = pa - (page_size << oind);
1395 		m = &seg->first_page[atop(pa_pre - seg->start)];
1396 		if (pa != 0 && pa_pre >= lbound && m->order == oind)
1397 			continue;
1398 
1399 		if (!vm_addr_align_ok(pa, alignment))
1400 			/* Advance to satisfy alignment condition. */
1401 			pa = roundup2(pa, alignment);
1402 		else if (frag != 0 && lbound + frag <= pa) {
1403 			/*
1404 			 * Back up to the first aligned free block in this
1405 			 * range, without moving below lbound.
1406 			 */
1407 			pa_end = pa;
1408 			for (order = oind - 1; order >= 0; order--) {
1409 				pa_pre = pa_end - (page_size << order);
1410 				if (!vm_addr_align_ok(pa_pre, alignment))
1411 					break;
1412 				m = &seg->first_page[atop(pa_pre - seg->start)];
1413 				if (pa_pre >= lbound && m->order == order)
1414 					pa_end = pa_pre;
1415 			}
1416 			/*
1417 			 * If the extra small blocks are enough to complete the
1418 			 * fragment, use them.  Otherwise, look to allocate the
1419 			 * fragment at the other end.
1420 			 */
1421 			if (pa_end + frag <= pa)
1422 				pa = pa_end;
1423 		}
1424 
1425 		/* Advance as necessary to satisfy boundary conditions. */
1426 		if (!vm_addr_bound_ok(pa, size, boundary))
1427 			pa = roundup2(pa + 1, boundary);
1428 		pa_end = pa + size;
1429 
1430 		/*
1431 		 * Determine if the address range is valid (without overflow in
1432 		 * pa_end calculation), and fits within the segment.
1433 		 */
1434 		if (pa_end < pa || seg->end < pa_end)
1435 			continue;
1436 
1437 		m_ret = &seg->first_page[atop(pa - seg->start)];
1438 
1439 		/*
1440 		 * Determine whether there are enough free oind-blocks here to
1441 		 * satisfy the allocation request.
1442 		 */
1443 		pa = VM_PAGE_TO_PHYS(m_listed);
1444 		do {
1445 			pa += page_size << oind;
1446 			if (pa >= pa_end)
1447 				return (m_ret);
1448 			m = &seg->first_page[atop(pa - seg->start)];
1449 		} while (oind == m->order);
1450 
1451 		/*
1452 		 * Determine if an additional series of free blocks of
1453 		 * diminishing size can help to satisfy the allocation request.
1454 		 */
1455 		while (m->order < oind &&
1456 		    pa + 2 * (page_size << m->order) > pa_end) {
1457 			pa += page_size << m->order;
1458 			if (pa >= pa_end)
1459 				return (m_ret);
1460 			m = &seg->first_page[atop(pa - seg->start)];
1461 		}
1462 	}
1463 	return (NULL);
1464 }
1465 
1466 /*
1467  * Find a run of contiguous physical pages from the specified free list
1468  * table.
1469  */
1470 static vm_page_t
1471 vm_phys_find_queues_contig(
1472     struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
1473     u_long npages, vm_paddr_t low, vm_paddr_t high,
1474     u_long alignment, vm_paddr_t boundary)
1475 {
1476 	struct vm_freelist *fl;
1477 	vm_page_t m_ret;
1478 	vm_paddr_t pa, pa_end, size;
1479 	int oind, order, pind;
1480 
1481 	KASSERT(npages > 0, ("npages is 0"));
1482 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1483 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1484 	/* Compute the queue that is the best fit for npages. */
1485 	order = flsl(npages - 1);
1486 	/* Search for a large enough free block. */
1487 	size = npages << PAGE_SHIFT;
1488 	for (oind = order; oind < VM_NFREEORDER; oind++) {
1489 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1490 			fl = (*queues)[pind];
1491 			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
1492 				/*
1493 				 * Determine if the address range starting at pa
1494 				 * is within the given range, satisfies the
1495 				 * given alignment, and does not cross the given
1496 				 * boundary.
1497 				 */
1498 				pa = VM_PAGE_TO_PHYS(m_ret);
1499 				pa_end = pa + size;
1500 				if (low <= pa && pa_end <= high &&
1501 				    vm_addr_ok(pa, size, alignment, boundary))
1502 					return (m_ret);
1503 			}
1504 		}
1505 	}
1506 	if (order < VM_NFREEORDER)
1507 		return (NULL);
1508 	/* Search for a long-enough sequence of small blocks. */
1509 	oind = VM_NFREEORDER - 1;
1510 	for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1511 		fl = (*queues)[pind];
1512 		m_ret = vm_phys_find_freelist_contig(fl, oind, npages,
1513 		    low, high, alignment, boundary);
1514 		if (m_ret != NULL)
1515 			return (m_ret);
1516 	}
1517 	return (NULL);
1518 }
1519 
1520 /*
1521  * Allocate a contiguous set of physical pages of the given size
1522  * "npages" from the free lists.  All of the physical pages must be at
1523  * or above the given physical address "low" and below the given
1524  * physical address "high".  The given value "alignment" determines the
1525  * alignment of the first physical page in the set.  If the given value
1526  * "boundary" is non-zero, then the set of physical pages cannot cross
1527  * any physical address boundary that is a multiple of that value.  Both
1528  * "alignment" and "boundary" must be a power of two.
1529  */
1530 vm_page_t
1531 vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1532     u_long alignment, vm_paddr_t boundary)
1533 {
1534 	vm_paddr_t pa_end, pa_start;
1535 	struct vm_freelist *fl;
1536 	vm_page_t m, m_run;
1537 	struct vm_phys_seg *seg;
1538 	struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
1539 	int oind, segind;
1540 
1541 	KASSERT(npages > 0, ("npages is 0"));
1542 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1543 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1544 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
1545 	if (low >= high)
1546 		return (NULL);
1547 	queues = NULL;
1548 	m_run = NULL;
1549 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1550 		seg = &vm_phys_segs[segind];
1551 		if (seg->start >= high || seg->domain != domain)
1552 			continue;
1553 		if (low >= seg->end)
1554 			break;
1555 		if (low <= seg->start)
1556 			pa_start = seg->start;
1557 		else
1558 			pa_start = low;
1559 		if (high < seg->end)
1560 			pa_end = high;
1561 		else
1562 			pa_end = seg->end;
1563 		if (pa_end - pa_start < ptoa(npages))
1564 			continue;
1565 		/*
1566 		 * If a previous segment led to a search using
1567 		 * the same free lists as would this segment, then
1568 		 * we have already searched this segment's free
1569 		 * lists too.  Skip it.
1570 		 */
1571 		if (seg->free_queues == queues)
1572 			continue;
1573 		queues = seg->free_queues;
1574 		m_run = vm_phys_find_queues_contig(queues, npages,
1575 		    low, high, alignment, boundary);
1576 		if (m_run != NULL)
1577 			break;
1578 	}
1579 	if (m_run == NULL)
1580 		return (NULL);
1581 
1582 	/* Allocate pages from the page-range found. */
1583 	for (m = m_run; m < &m_run[npages]; m = &m[1 << oind]) {
1584 		fl = (*queues)[m->pool];
1585 		oind = m->order;
1586 		vm_freelist_rem(fl, m, oind);
1587 		if (m->pool != VM_FREEPOOL_DEFAULT)
1588 			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
1589 	}
1590 	/* Return excess pages to the free lists. */
1591 	fl = (*queues)[VM_FREEPOOL_DEFAULT];
1592 	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
1593 	return (m_run);
1594 }
1595 
1596 /*
1597  * Return the index of the first unused slot, which may be the terminating
1598  * entry.
1599  */
1600 static int
1601 vm_phys_avail_count(void)
1602 {
1603 	int i;
1604 
1605 	for (i = 0; phys_avail[i + 1]; i += 2)
1606 		continue;
1607 	if (i > PHYS_AVAIL_ENTRIES)
1608 		panic("Improperly terminated phys_avail %d entries", i);
1609 
1610 	return (i);
1611 }
1612 
1613 /*
1614  * Assert that a phys_avail entry is valid.
1615  */
1616 static void
1617 vm_phys_avail_check(int i)
1618 {
1619 	if (phys_avail[i] & PAGE_MASK)
1620 		panic("Unaligned phys_avail[%d]: %#jx", i,
1621 		    (intmax_t)phys_avail[i]);
1622 	if (phys_avail[i + 1] & PAGE_MASK)
1623 		panic("Unaligned phys_avail[%d + 1]: %#jx", i,
1624 		    (intmax_t)phys_avail[i + 1]);
1625 	if (phys_avail[i + 1] < phys_avail[i])
1626 		panic("phys_avail[%d] end %#jx < start %#jx", i,
1627 		    (intmax_t)phys_avail[i + 1], (intmax_t)phys_avail[i]);
1628 }
1629 
1630 /*
1631  * Return the index of an overlapping phys_avail entry or -1.
1632  */
1633 #ifdef NUMA
1634 static int
1635 vm_phys_avail_find(vm_paddr_t pa)
1636 {
1637 	int i;
1638 
1639 	for (i = 0; phys_avail[i + 1]; i += 2)
1640 		if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
1641 			return (i);
1642 	return (-1);
1643 }
1644 #endif
1645 
1646 /*
1647  * Return the index of the largest entry.
1648  */
1649 int
1650 vm_phys_avail_largest(void)
1651 {
1652 	vm_paddr_t sz, largesz;
1653 	int largest;
1654 	int i;
1655 
1656 	largest = 0;
1657 	largesz = 0;
1658 	for (i = 0; phys_avail[i + 1]; i += 2) {
1659 		sz = vm_phys_avail_size(i);
1660 		if (sz > largesz) {
1661 			largesz = sz;
1662 			largest = i;
1663 		}
1664 	}
1665 
1666 	return (largest);
1667 }
1668 
1669 vm_paddr_t
1670 vm_phys_avail_size(int i)
1671 {
1672 
1673 	return (phys_avail[i + 1] - phys_avail[i]);
1674 }
1675 
1676 /*
1677  * Split an entry at the address 'pa'.  Return zero on success or errno.
1678  */
1679 static int
1680 vm_phys_avail_split(vm_paddr_t pa, int i)
1681 {
1682 	int cnt;
1683 
1684 	vm_phys_avail_check(i);
1685 	if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
1686 		panic("vm_phys_avail_split: invalid address");
1687 	cnt = vm_phys_avail_count();
1688 	if (cnt >= PHYS_AVAIL_ENTRIES)
1689 		return (ENOSPC);
1690 	memmove(&phys_avail[i + 2], &phys_avail[i],
1691 	    (cnt - i) * sizeof(phys_avail[0]));
1692 	phys_avail[i + 1] = pa;
1693 	phys_avail[i + 2] = pa;
1694 	vm_phys_avail_check(i);
1695 	vm_phys_avail_check(i+2);
1696 
1697 	return (0);
1698 }
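/*
 * For example, splitting the entry [0x100000, 0x800000) at 0x400000
 * shifts the later entries up by two slots and leaves two entries,
 * [0x100000, 0x400000) and [0x400000, 0x800000), in its place.
 */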
1699 
1700 /*
1701  * Check if a given physical address can be included as part of a crash dump.
1702  */
1703 bool
1704 vm_phys_is_dumpable(vm_paddr_t pa)
1705 {
1706 	vm_page_t m;
1707 	int i;
1708 
1709 	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
1710 		return ((m->flags & PG_NODUMP) == 0);
1711 
1712 	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
1713 		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
1714 			return (true);
1715 	}
1716 	return (false);
1717 }
1718 
1719 void
1720 vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
1721 {
1722 	struct vm_phys_seg *seg;
1723 
1724 	if (vm_phys_early_nsegs == -1)
1725 		panic("%s: called after initialization", __func__);
1726 	if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
1727 		panic("%s: ran out of early segments", __func__);
1728 
1729 	seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
1730 	seg->start = start;
1731 	seg->end = end;
1732 }
1733 
1734 /*
1735  * This routine allocates NUMA node-specific memory before the page
1736  * allocator is bootstrapped.
1737  */
1738 vm_paddr_t
1739 vm_phys_early_alloc(int domain, size_t alloc_size)
1740 {
1741 #ifdef NUMA
1742 	int mem_index;
1743 #endif
1744 	int i, biggestone;
1745 	vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;
1746 
1747 	KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
1748 	    ("%s: invalid domain index %d", __func__, domain));
1749 
1750 	/*
1751 	 * Search the mem_affinity array for the biggest address
1752 	 * range in the desired domain.  This is used to constrain
1753 	 * the phys_avail selection below.
1754 	 */
1755 	biggestsize = 0;
1756 	mem_start = 0;
1757 	mem_end = -1;
1758 #ifdef NUMA
1759 	mem_index = 0;
1760 	if (mem_affinity != NULL) {
1761 		for (i = 0;; i++) {
1762 			size = mem_affinity[i].end - mem_affinity[i].start;
1763 			if (size == 0)
1764 				break;
1765 			if (domain != -1 && mem_affinity[i].domain != domain)
1766 				continue;
1767 			if (size > biggestsize) {
1768 				mem_index = i;
1769 				biggestsize = size;
1770 			}
1771 		}
1772 		mem_start = mem_affinity[mem_index].start;
1773 		mem_end = mem_affinity[mem_index].end;
1774 	}
1775 #endif
1776 
1777 	/*
1778 	 * Now find the biggest physical segment within the desired
1779 	 * NUMA domain.
1780 	 */
1781 	biggestsize = 0;
1782 	biggestone = 0;
1783 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1784 		/* skip regions that are out of range */
1785 		if (phys_avail[i+1] - alloc_size < mem_start ||
1786 		    phys_avail[i+1] > mem_end)
1787 			continue;
1788 		size = vm_phys_avail_size(i);
1789 		if (size > biggestsize) {
1790 			biggestone = i;
1791 			biggestsize = size;
1792 		}
1793 	}
1794 	alloc_size = round_page(alloc_size);
1795 
1796 	/*
1797 	 * Grab single pages from the front to reduce fragmentation.
1798 	 */
1799 	if (alloc_size == PAGE_SIZE) {
1800 		pa = phys_avail[biggestone];
1801 		phys_avail[biggestone] += PAGE_SIZE;
1802 		vm_phys_avail_check(biggestone);
1803 		return (pa);
1804 	}
1805 
1806 	/*
1807 	 * Naturally align large allocations.
1808 	 */
1809 	align = phys_avail[biggestone + 1] & (alloc_size - 1);
1810 	if (alloc_size + align > biggestsize)
1811 		panic("cannot find a large enough size");
1812 	if (align != 0 &&
1813 	    vm_phys_avail_split(phys_avail[biggestone + 1] - align,
1814 	    biggestone) != 0)
1815 		/* Wasting memory. */
1816 		phys_avail[biggestone + 1] -= align;
1817 
1818 	phys_avail[biggestone + 1] -= alloc_size;
1819 	vm_phys_avail_check(biggestone);
1820 	pa = phys_avail[biggestone + 1];
1821 	return (pa);
1822 }
1823 
1824 void
1825 vm_phys_early_startup(void)
1826 {
1827 	struct vm_phys_seg *seg;
1828 	int i;
1829 
1830 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1831 		phys_avail[i] = round_page(phys_avail[i]);
1832 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
1833 	}
1834 
1835 	for (i = 0; i < vm_phys_early_nsegs; i++) {
1836 		seg = &vm_phys_early_segs[i];
1837 		vm_phys_add_seg(seg->start, seg->end);
1838 	}
1839 	vm_phys_early_nsegs = -1;
1840 
1841 #ifdef NUMA
1842 	/* Force phys_avail to be split by domain. */
1843 	if (mem_affinity != NULL) {
1844 		int idx;
1845 
1846 		for (i = 0; mem_affinity[i].end != 0; i++) {
1847 			idx = vm_phys_avail_find(mem_affinity[i].start);
1848 			if (idx != -1 &&
1849 			    phys_avail[idx] != mem_affinity[i].start)
1850 				vm_phys_avail_split(mem_affinity[i].start, idx);
1851 			idx = vm_phys_avail_find(mem_affinity[i].end);
1852 			if (idx != -1 &&
1853 			    phys_avail[idx] != mem_affinity[i].end)
1854 				vm_phys_avail_split(mem_affinity[i].end, idx);
1855 		}
1856 	}
1857 #endif
1858 }
1859 
1860 #ifdef DDB
1861 /*
1862  * Show the number of physical pages in each of the free lists.
1863  */
1864 DB_SHOW_COMMAND_FLAGS(freepages, db_show_freepages, DB_CMD_MEMSAFE)
1865 {
1866 	struct vm_freelist *fl;
1867 	int flind, oind, pind, dom;
1868 
1869 	for (dom = 0; dom < vm_ndomains; dom++) {
1870 		db_printf("DOMAIN: %d\n", dom);
1871 		for (flind = 0; flind < vm_nfreelists; flind++) {
1872 			db_printf("FREE LIST %d:\n"
1873 			    "\n  ORDER (SIZE)  |  NUMBER"
1874 			    "\n              ", flind);
1875 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
1876 				db_printf("  |  POOL %d", pind);
1877 			db_printf("\n--            ");
1878 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
1879 				db_printf("-- --      ");
1880 			db_printf("--\n");
1881 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
1882 				db_printf("  %2.2d (%6.6dK)", oind,
1883 				    1 << (PAGE_SHIFT - 10 + oind));
1884 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1885 				fl = vm_phys_free_queues[dom][flind][pind];
1886 					db_printf("  |  %6.6d", fl[oind].lcnt);
1887 				}
1888 				db_printf("\n");
1889 			}
1890 			db_printf("\n");
1891 		}
1892 		db_printf("\n");
1893 	}
1894 }
1895 #endif
1896