xref: /freebsd/sys/vm/vm_phys.c (revision 69cbb18746b69cbcdf79f1728d0435a1c86fff58)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002-2006 Rice University
5  * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
6  * All rights reserved.
7  *
8  * This software was developed for the FreeBSD Project by Alan L. Cox,
9  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
24  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
30  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  *	Physical memory system implementation
36  *
37  * Any external functions defined by this module are only to be used by the
38  * virtual memory system.
39  */
40 
41 #include <sys/cdefs.h>
42 #include "opt_ddb.h"
43 #include "opt_vm.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/domainset.h>
48 #include <sys/lock.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/queue.h>
54 #include <sys/rwlock.h>
55 #include <sys/sbuf.h>
56 #include <sys/sysctl.h>
57 #include <sys/tree.h>
58 #include <sys/vmmeter.h>
59 
60 #include <ddb/ddb.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_phys.h>
69 #include <vm/vm_pagequeue.h>
70 
71 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
72     "Too many physsegs.");
73 _Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
74     "vm_paddr_t too big for ffsll, flsll.");
75 
76 #ifdef NUMA
77 struct mem_affinity __read_mostly *mem_affinity;
78 int __read_mostly *mem_locality;
79 
80 static int numa_disabled;
81 static SYSCTL_NODE(_vm, OID_AUTO, numa, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
82     "NUMA options");
83 SYSCTL_INT(_vm_numa, OID_AUTO, disabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
84     &numa_disabled, 0, "NUMA-awareness in the allocators is disabled");
85 #endif
86 
87 int __read_mostly vm_ndomains = 1;
88 domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);
89 
90 struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
91 int __read_mostly vm_phys_nsegs;
92 static struct vm_phys_seg vm_phys_early_segs[8];
93 static int vm_phys_early_nsegs;
94 
95 struct vm_phys_fictitious_seg;
96 static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
97     struct vm_phys_fictitious_seg *);
98 
99 RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
100     RB_INITIALIZER(&vm_phys_fictitious_tree);
101 
102 struct vm_phys_fictitious_seg {
103 	RB_ENTRY(vm_phys_fictitious_seg) node;
104 	/* Memory region data */
105 	vm_paddr_t	start;
106 	vm_paddr_t	end;
107 	vm_page_t	first_page;
108 };
109 
110 RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
111     vm_phys_fictitious_cmp);
112 
113 static struct rwlock_padalign vm_phys_fictitious_reg_lock;
114 MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
115 
116 static struct vm_freelist __aligned(CACHE_LINE_SIZE)
117     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
118     [VM_NFREEORDER_MAX];
119 
120 static int __read_mostly vm_nfreelists;
121 
122 /*
123  * These "avail lists" are globals used to communicate boot-time physical
124  * memory layout to other parts of the kernel.  Each physically contiguous
125  * region of memory is defined by a start address at an even index and an
126  * end address at the following odd index.  Each list is terminated by a
127  * pair of zero entries.
128  *
129  * dump_avail tells the dump code what regions to include in a crash dump, and
130  * phys_avail is all of the remaining physical memory that is available for
131  * the vm system.
132  *
133  * Initially dump_avail and phys_avail are identical.  Boot time memory
134  * allocations remove extents from phys_avail that may still be included
135  * in dumps.
136  */
137 vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
138 vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
139 
140 /*
141  * Provides the mapping from VM_FREELIST_* to free list indices (flind).
142  */
143 static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];
144 
145 CTASSERT(VM_FREELIST_DEFAULT == 0);
146 
147 #ifdef VM_FREELIST_DMA32
148 #define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
149 #endif
150 
151 /*
152  * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
153  * the ordering of the free list boundaries.
154  */
155 #if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
156 CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
157 #endif
158 
159 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
160 SYSCTL_OID(_vm, OID_AUTO, phys_free,
161     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
162     sysctl_vm_phys_free, "A",
163     "Phys Free Info");
164 
165 static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
166 SYSCTL_OID(_vm, OID_AUTO, phys_segs,
167     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
168     sysctl_vm_phys_segs, "A",
169     "Phys Seg Info");
170 
171 #ifdef NUMA
172 static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
173 SYSCTL_OID(_vm, OID_AUTO, phys_locality,
174     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
175     sysctl_vm_phys_locality, "A",
176     "Phys Locality Info");
177 #endif
178 
179 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
180     &vm_ndomains, 0, "Number of physical memory domains available.");
181 
182 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
183 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
184 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
185     int order, int tail);
186 
187 /*
188  * Red-black tree helpers for vm fictitious range management.
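 * A lookup key whose end address is zero denotes a search for a single page;
 * only its start address is compared against each registered range.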
189  */
190 static inline int
191 vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
192     struct vm_phys_fictitious_seg *range)
193 {
194 
195 	KASSERT(range->start != 0 && range->end != 0,
196 	    ("Invalid range passed on search for vm_fictitious page"));
197 	if (p->start >= range->end)
198 		return (1);
199 	if (p->start < range->start)
200 		return (-1);
201 
202 	return (0);
203 }
204 
205 static int
206 vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
207     struct vm_phys_fictitious_seg *p2)
208 {
209 
210 	/* Check if this is a search for a page */
211 	if (p1->end == 0)
212 		return (vm_phys_fictitious_in_range(p1, p2));
213 
214 	KASSERT(p2->end != 0,
215     ("Invalid range passed as second parameter to vm fictitious comparison"));
216 
217 	/* Searching to add a new range */
218 	if (p1->end <= p2->start)
219 		return (-1);
220 	if (p1->start >= p2->end)
221 		return (1);
222 
223 	panic("Trying to add overlapping vm fictitious ranges:\n"
224 	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
225 	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
226 }
227 
228 int
229 vm_phys_domain_match(int prefer __numa_used, vm_paddr_t low __numa_used,
230     vm_paddr_t high __numa_used)
231 {
232 #ifdef NUMA
233 	domainset_t mask;
234 	int i;
235 
236 	if (vm_ndomains == 1 || mem_affinity == NULL)
237 		return (0);
238 
239 	DOMAINSET_ZERO(&mask);
240 	/*
241 	 * Check for any memory that overlaps low, high.
242 	 */
243 	for (i = 0; mem_affinity[i].end != 0; i++)
244 		if (mem_affinity[i].start <= high &&
245 		    mem_affinity[i].end >= low)
246 			DOMAINSET_SET(mem_affinity[i].domain, &mask);
247 	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
248 		return (prefer);
249 	if (DOMAINSET_EMPTY(&mask))
250 		panic("vm_phys_domain_match:  Impossible constraint");
251 	return (DOMAINSET_FFS(&mask) - 1);
252 #else
253 	return (0);
254 #endif
255 }
256 
257 /*
258  * Outputs the state of the physical memory allocator, specifically,
259  * the amount of physical memory in each free list.
260  */
261 static int
262 sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
263 {
264 	struct sbuf sbuf;
265 	struct vm_freelist *fl;
266 	int dom, error, flind, oind, pind;
267 
268 	error = sysctl_wire_old_buffer(req, 0);
269 	if (error != 0)
270 		return (error);
271 	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
272 	for (dom = 0; dom < vm_ndomains; dom++) {
273 		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
274 		for (flind = 0; flind < vm_nfreelists; flind++) {
275 			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
276 			    "\n  ORDER (SIZE)  |  NUMBER"
277 			    "\n              ", flind);
278 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
279 				sbuf_printf(&sbuf, "  |  POOL %d", pind);
280 			sbuf_printf(&sbuf, "\n--            ");
281 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
282 				sbuf_printf(&sbuf, "-- --      ");
283 			sbuf_printf(&sbuf, "--\n");
284 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
285 				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
286 				    1 << (PAGE_SHIFT - 10 + oind));
287 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
288 				fl = vm_phys_free_queues[dom][flind][pind];
289 					sbuf_printf(&sbuf, "  |  %6d",
290 					    fl[oind].lcnt);
291 				}
292 				sbuf_printf(&sbuf, "\n");
293 			}
294 		}
295 	}
296 	error = sbuf_finish(&sbuf);
297 	sbuf_delete(&sbuf);
298 	return (error);
299 }
300 
301 /*
302  * Outputs the set of physical memory segments.
303  */
304 static int
305 sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
306 {
307 	struct sbuf sbuf;
308 	struct vm_phys_seg *seg;
309 	int error, segind;
310 
311 	error = sysctl_wire_old_buffer(req, 0);
312 	if (error != 0)
313 		return (error);
314 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
315 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
316 		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
317 		seg = &vm_phys_segs[segind];
318 		sbuf_printf(&sbuf, "start:     %#jx\n",
319 		    (uintmax_t)seg->start);
320 		sbuf_printf(&sbuf, "end:       %#jx\n",
321 		    (uintmax_t)seg->end);
322 		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
323 		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
324 	}
325 	error = sbuf_finish(&sbuf);
326 	sbuf_delete(&sbuf);
327 	return (error);
328 }
329 
330 /*
331  * Return affinity, or -1 if there's no affinity information.
332  */
333 int
334 vm_phys_mem_affinity(int f __numa_used, int t __numa_used)
335 {
336 
337 #ifdef NUMA
338 	if (mem_locality == NULL)
339 		return (-1);
340 	if (f >= vm_ndomains || t >= vm_ndomains)
341 		return (-1);
342 	return (mem_locality[f * vm_ndomains + t]);
343 #else
344 	return (-1);
345 #endif
346 }
347 
348 #ifdef NUMA
349 /*
350  * Outputs the VM locality table.
351  */
352 static int
353 sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
354 {
355 	struct sbuf sbuf;
356 	int error, i, j;
357 
358 	error = sysctl_wire_old_buffer(req, 0);
359 	if (error != 0)
360 		return (error);
361 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
362 
363 	sbuf_printf(&sbuf, "\n");
364 
365 	for (i = 0; i < vm_ndomains; i++) {
366 		sbuf_printf(&sbuf, "%d: ", i);
367 		for (j = 0; j < vm_ndomains; j++) {
368 			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
369 		}
370 		sbuf_printf(&sbuf, "\n");
371 	}
372 	error = sbuf_finish(&sbuf);
373 	sbuf_delete(&sbuf);
374 	return (error);
375 }
376 #endif
377 
378 static void
379 vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
380 {
381 
382 	m->order = order;
383 	if (tail)
384 		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
385 	else
386 		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
387 	fl[order].lcnt++;
388 }
389 
390 static void
391 vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
392 {
393 
394 	TAILQ_REMOVE(&fl[order].pl, m, listq);
395 	fl[order].lcnt--;
396 	m->order = VM_NFREEORDER;
397 }
398 
399 /*
400  * Create a physical memory segment.
401  */
402 static void
403 _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
404 {
405 	struct vm_phys_seg *seg;
406 
407 	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
408 	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
409 	KASSERT(domain >= 0 && domain < vm_ndomains,
410 	    ("vm_phys_create_seg: invalid domain provided"));
411 	seg = &vm_phys_segs[vm_phys_nsegs++];
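	/*
	 * Keep vm_phys_segs[] sorted by start address: shift any existing
	 * segments that begin at or above the new segment's end up by one
	 * slot, and place the new segment in the hole left behind.
	 */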
412 	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
413 		*seg = *(seg - 1);
414 		seg--;
415 	}
416 	seg->start = start;
417 	seg->end = end;
418 	seg->domain = domain;
419 }
420 
421 static void
422 vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
423 {
424 #ifdef NUMA
425 	int i;
426 
427 	if (mem_affinity == NULL) {
428 		_vm_phys_create_seg(start, end, 0);
429 		return;
430 	}
431 
432 	for (i = 0;; i++) {
433 		if (mem_affinity[i].end == 0)
434 			panic("Reached end of affinity info");
435 		if (mem_affinity[i].end <= start)
436 			continue;
437 		if (mem_affinity[i].start > start)
438 			panic("No affinity info for start %jx",
439 			    (uintmax_t)start);
440 		if (mem_affinity[i].end >= end) {
441 			_vm_phys_create_seg(start, end,
442 			    mem_affinity[i].domain);
443 			break;
444 		}
445 		_vm_phys_create_seg(start, mem_affinity[i].end,
446 		    mem_affinity[i].domain);
447 		start = mem_affinity[i].end;
448 	}
449 #else
450 	_vm_phys_create_seg(start, end, 0);
451 #endif
452 }
453 
454 /*
455  * Add a physical memory segment.
456  */
457 void
458 vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
459 {
460 	vm_paddr_t paddr;
461 
462 	KASSERT((start & PAGE_MASK) == 0,
463 	    ("vm_phys_add_seg: start is not page aligned"));
464 	KASSERT((end & PAGE_MASK) == 0,
465 	    ("vm_phys_add_seg: end is not page aligned"));
466 
467 	/*
468 	 * Split the physical memory segment if it spans two or more free
469 	 * list boundaries.
470 	 */
471 	paddr = start;
472 #ifdef	VM_FREELIST_LOWMEM
473 	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
474 		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
475 		paddr = VM_LOWMEM_BOUNDARY;
476 	}
477 #endif
478 #ifdef	VM_FREELIST_DMA32
479 	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
480 		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
481 		paddr = VM_DMA32_BOUNDARY;
482 	}
483 #endif
484 	vm_phys_create_seg(paddr, end);
485 }
486 
487 /*
488  * Initialize the physical memory allocator.
489  *
490  * Requires that vm_page_array is initialized!
491  */
492 void
493 vm_phys_init(void)
494 {
495 	struct vm_freelist *fl;
496 	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
497 #if defined(VM_DMA32_NPAGES_THRESHOLD) || defined(VM_PHYSSEG_SPARSE)
498 	u_long npages;
499 #endif
500 	int dom, flind, freelist, oind, pind, segind;
501 
502 	/*
503 	 * Compute the number of free lists, and generate the mapping from the
504 	 * manifest constants VM_FREELIST_* to the free list indices.
505 	 *
506 	 * Initially, the entries of vm_freelist_to_flind[] are set to either
507 	 * 0 or 1 to indicate which free lists should be created.
508 	 */
509 #ifdef	VM_DMA32_NPAGES_THRESHOLD
510 	npages = 0;
511 #endif
512 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
513 		seg = &vm_phys_segs[segind];
514 #ifdef	VM_FREELIST_LOWMEM
515 		if (seg->end <= VM_LOWMEM_BOUNDARY)
516 			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
517 		else
518 #endif
519 #ifdef	VM_FREELIST_DMA32
520 		if (
521 #ifdef	VM_DMA32_NPAGES_THRESHOLD
522 		    /*
523 		     * Create the DMA32 free list only if the amount of
524 		     * physical memory above physical address 4G exceeds the
525 		     * given threshold.
526 		     */
527 		    npages > VM_DMA32_NPAGES_THRESHOLD &&
528 #endif
529 		    seg->end <= VM_DMA32_BOUNDARY)
530 			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
531 		else
532 #endif
533 		{
534 #ifdef	VM_DMA32_NPAGES_THRESHOLD
535 			npages += atop(seg->end - seg->start);
536 #endif
537 			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
538 		}
539 	}
540 	/* Change each entry into a running total of the free lists. */
541 	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
542 		vm_freelist_to_flind[freelist] +=
543 		    vm_freelist_to_flind[freelist - 1];
544 	}
545 	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
546 	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
547 	/* Change each entry into a free list index. */
548 	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
549 		vm_freelist_to_flind[freelist]--;
550 
551 	/*
552 	 * Initialize the first_page and free_queues fields of each physical
553 	 * memory segment.
554 	 */
555 #ifdef VM_PHYSSEG_SPARSE
556 	npages = 0;
557 #endif
558 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
559 		seg = &vm_phys_segs[segind];
560 #ifdef VM_PHYSSEG_SPARSE
561 		seg->first_page = &vm_page_array[npages];
562 		npages += atop(seg->end - seg->start);
563 #else
564 		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
565 #endif
566 #ifdef	VM_FREELIST_LOWMEM
567 		if (seg->end <= VM_LOWMEM_BOUNDARY) {
568 			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
569 			KASSERT(flind >= 0,
570 			    ("vm_phys_init: LOWMEM flind < 0"));
571 		} else
572 #endif
573 #ifdef	VM_FREELIST_DMA32
574 		if (seg->end <= VM_DMA32_BOUNDARY) {
575 			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
576 			KASSERT(flind >= 0,
577 			    ("vm_phys_init: DMA32 flind < 0"));
578 		} else
579 #endif
580 		{
581 			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
582 			KASSERT(flind >= 0,
583 			    ("vm_phys_init: DEFAULT flind < 0"));
584 		}
585 		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
586 	}
587 
588 	/*
589 	 * Coalesce physical memory segments that are contiguous and share the
590 	 * same per-domain free queues.
591 	 */
592 	prev_seg = vm_phys_segs;
593 	seg = &vm_phys_segs[1];
594 	end_seg = &vm_phys_segs[vm_phys_nsegs];
595 	while (seg < end_seg) {
596 		if (prev_seg->end == seg->start &&
597 		    prev_seg->free_queues == seg->free_queues) {
598 			prev_seg->end = seg->end;
599 			KASSERT(prev_seg->domain == seg->domain,
600 			    ("vm_phys_init: free queues cannot span domains"));
601 			vm_phys_nsegs--;
602 			end_seg--;
603 			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
604 				*tmp_seg = *(tmp_seg + 1);
605 		} else {
606 			prev_seg = seg;
607 			seg++;
608 		}
609 	}
610 
611 	/*
612 	 * Initialize the free queues.
613 	 */
614 	for (dom = 0; dom < vm_ndomains; dom++) {
615 		for (flind = 0; flind < vm_nfreelists; flind++) {
616 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
617 				fl = vm_phys_free_queues[dom][flind][pind];
618 				for (oind = 0; oind < VM_NFREEORDER; oind++)
619 					TAILQ_INIT(&fl[oind].pl);
620 			}
621 		}
622 	}
623 
624 	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
625 }
626 
627 /*
628  * Register info about the NUMA topology of the system.
629  *
630  * Invoked by platform-dependent code prior to vm_phys_init().
631  */
632 void
633 vm_phys_register_domains(int ndomains __numa_used,
634     struct mem_affinity *affinity __numa_used, int *locality __numa_used)
635 {
636 #ifdef NUMA
637 	int i;
638 
639 	/*
640 	 * For now the only override value that we support is 1, which
641 	 * effectively disables NUMA-awareness in the allocators.
642 	 */
643 	TUNABLE_INT_FETCH("vm.numa.disabled", &numa_disabled);
644 	if (numa_disabled)
645 		ndomains = 1;
646 
647 	if (ndomains > 1) {
648 		vm_ndomains = ndomains;
649 		mem_affinity = affinity;
650 		mem_locality = locality;
651 	}
652 
653 	for (i = 0; i < vm_ndomains; i++)
654 		DOMAINSET_SET(i, &all_domains);
655 #endif
656 }
657 
658 /*
659  * Split a contiguous, power of two-sized set of physical pages.
660  *
661  * When this function is called by a page allocation function, the caller
662  * should request insertion at the head unless the order [order, oind) queues
663  * are known to be empty.  The objective being to reduce the likelihood of
664  * long-term fragmentation by promoting contemporaneous allocation and
665  * (hopefully) deallocation.
666  */
667 static __inline void
668 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
669     int tail)
670 {
671 	vm_page_t m_buddy;
672 
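	/*
	 * Repeatedly halve the block: free the upper half (the buddy) at the
	 * next lower order and continue splitting the lower half until it
	 * reaches the requested order.
	 */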
673 	while (oind > order) {
674 		oind--;
675 		m_buddy = &m[1 << oind];
676 		KASSERT(m_buddy->order == VM_NFREEORDER,
677 		    ("vm_phys_split_pages: page %p has unexpected order %d",
678 		    m_buddy, m_buddy->order));
679 		vm_freelist_add(fl, m_buddy, oind, tail);
680 	}
681 }
682 
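/*
 * Add a single chunk of 1 << order pages to the specified free list, at
 * either the head or the tail as requested.
 */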
683 static void
684 vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int tail)
685 {
686 	KASSERT(order >= 0 && order < VM_NFREEORDER,
687 	    ("%s: invalid order %d", __func__, order));
688 
689 	vm_freelist_add(fl, m, order, tail);
690 }
691 
692 /*
693  * Add the physical pages [m, m + npages) at the beginning of a power-of-two
694  * aligned and sized set to the specified free list.
695  *
696  * When this function is called by a page allocation function, the caller
697  * should request insertion at the head unless the lower-order queues are
698  * known to be empty.  The objective being to reduce the likelihood of long-
699  * term fragmentation by promoting contemporaneous allocation and (hopefully)
700  * deallocation.
701  *
702  * The physical page m's buddy must not be free.
703  */
704 static void
705 vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
706 {
707 	int order;
708 
709 	KASSERT(npages == 0 ||
710 	    (VM_PAGE_TO_PHYS(m) &
711 	    ((PAGE_SIZE << ilog2(npages)) - 1)) == 0,
712 	    ("%s: page %p and npages %u are misaligned",
713 	    __func__, m, npages));
714 	while (npages > 0) {
715 		KASSERT(m->order == VM_NFREEORDER,
716 		    ("%s: page %p has unexpected order %d",
717 		    __func__, m, m->order));
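		/*
		 * m is always aligned to at least 1 << ilog2(npages) pages
		 * here, so the largest chunk that fits is naturally aligned.
		 */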
718 		order = ilog2(npages);
719 		KASSERT(order < VM_NFREEORDER,
720 		    ("%s: order %d is out of range", __func__, order));
721 		vm_phys_enq_chunk(fl, m, order, tail);
722 		m += 1 << order;
723 		npages -= 1 << order;
724 	}
725 }
726 
727 /*
728  * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
729  * and sized set to the specified free list.
730  *
731  * When this function is called by a page allocation function, the caller
732  * should request insertion at the head unless the lower-order queues are
733  * known to be empty.  The objective being to reduce the likelihood of long-
734  * term fragmentation by promoting contemporaneous allocation and (hopefully)
735  * deallocation.
736  *
737  * If npages is zero, this function does nothing and ignores the physical page
738  * parameter m.  Otherwise, the physical page m's buddy must not be free.
739  */
740 static vm_page_t
741 vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
742 {
743 	int order;
744 
745 	KASSERT(npages == 0 ||
746 	    ((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
747 	    ((PAGE_SIZE << ilog2(npages)) - 1)) == 0,
748 	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
749 	    m, npages));
750 	while (npages > 0) {
751 		KASSERT(m->order == VM_NFREEORDER,
752 		    ("vm_phys_enq_range: page %p has unexpected order %d",
753 		    m, m->order));
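		/*
		 * ffs(npages) - 1 is the largest order whose block size still
		 * divides npages; because the end of the set is power-of-two
		 * aligned, the chunk of that size starting at m is naturally
		 * aligned as well.
		 */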
754 		order = ffs(npages) - 1;
755 		vm_phys_enq_chunk(fl, m, order, tail);
756 		m += 1 << order;
757 		npages -= 1 << order;
758 	}
759 	return (m);
760 }
761 
762 /*
763  * Set the pool for a contiguous, power of two-sized set of physical pages.
764  */
765 static void
766 vm_phys_set_pool(int pool, vm_page_t m, int order)
767 {
768 	vm_page_t m_tmp;
769 
770 	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
771 		m_tmp->pool = pool;
772 }
773 
774 /*
775  * Tries to allocate the specified number of pages from the specified pool
776  * within the specified domain.  Returns the actual number of allocated pages
777  * and a pointer to each page through the array ma[].
778  *
779  * The returned pages may not be physically contiguous.  However, in contrast
780  * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
781  * calling this function once to allocate the desired number of pages will
782  * avoid wasted time in vm_phys_split_pages().
783  *
784  * The free page queues for the specified domain must be locked.
785  */
786 int
787 vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
788 {
789 	struct vm_freelist *alt, *fl;
790 	vm_page_t m;
791 	int avail, end, flind, freelist, i, oind, pind;
792 
793 	KASSERT(domain >= 0 && domain < vm_ndomains,
794 	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
795 	KASSERT(pool < VM_NFREEPOOL,
796 	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
797 	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
798 	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
799 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
800 	i = 0;
801 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
802 		flind = vm_freelist_to_flind[freelist];
803 		if (flind < 0)
804 			continue;
805 		fl = vm_phys_free_queues[domain][flind][pool];
806 		for (oind = 0; oind < VM_NFREEORDER; oind++) {
807 			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
808 				vm_freelist_rem(fl, m, oind);
809 				avail = i + (1 << oind);
810 				end = imin(npages, avail);
811 				while (i < end)
812 					ma[i++] = m++;
813 				if (i == npages) {
814 					/*
815 					 * Return excess pages to fl.  Its order
816 					 * [0, oind) queues are empty.
817 					 */
818 					vm_phys_enq_range(m, avail - i, fl, 1);
819 					return (npages);
820 				}
821 			}
822 		}
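		/*
		 * The requested pool's queues in this free list were not
		 * enough.  Scavenge the largest free blocks from the other
		 * pools, reassigning their pages to the requested pool.
		 */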
823 		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
824 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
825 				alt = vm_phys_free_queues[domain][flind][pind];
826 				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
827 				    NULL) {
828 					vm_freelist_rem(alt, m, oind);
829 					vm_phys_set_pool(pool, m, oind);
830 					avail = i + (1 << oind);
831 					end = imin(npages, avail);
832 					while (i < end)
833 						ma[i++] = m++;
834 					if (i == npages) {
835 						/*
836 						 * Return excess pages to fl.
837 						 * Its order [0, oind) queues
838 						 * are empty.
839 						 */
840 						vm_phys_enq_range(m, avail - i,
841 						    fl, 1);
842 						return (npages);
843 					}
844 				}
845 			}
846 		}
847 	}
848 	return (i);
849 }
850 
851 /*
852  * Allocate a contiguous, power of two-sized set of physical pages
853  * from the free lists.
854  *
855  * The free page queues must be locked.
856  */
857 vm_page_t
858 vm_phys_alloc_pages(int domain, int pool, int order)
859 {
860 	vm_page_t m;
861 	int freelist;
862 
863 	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
864 		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
865 		if (m != NULL)
866 			return (m);
867 	}
868 	return (NULL);
869 }
870 
871 /*
872  * Allocate a contiguous, power of two-sized set of physical pages from the
873  * specified free list.  The free list must be specified using one of the
874  * manifest constants VM_FREELIST_*.
875  *
876  * The free page queues must be locked.
877  */
878 vm_page_t
879 vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
880 {
881 	struct vm_freelist *alt, *fl;
882 	vm_page_t m;
883 	int oind, pind, flind;
884 
885 	KASSERT(domain >= 0 && domain < vm_ndomains,
886 	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
887 	    domain));
888 	KASSERT(freelist < VM_NFREELIST,
889 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
890 	    freelist));
891 	KASSERT(pool < VM_NFREEPOOL,
892 	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
893 	KASSERT(order < VM_NFREEORDER,
894 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
895 
896 	flind = vm_freelist_to_flind[freelist];
897 	/* Check if freelist is present */
898 	if (flind < 0)
899 		return (NULL);
900 
901 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
902 	fl = &vm_phys_free_queues[domain][flind][pool][0];
903 	for (oind = order; oind < VM_NFREEORDER; oind++) {
904 		m = TAILQ_FIRST(&fl[oind].pl);
905 		if (m != NULL) {
906 			vm_freelist_rem(fl, m, oind);
907 			/* The order [order, oind) queues are empty. */
908 			vm_phys_split_pages(m, oind, fl, order, 1);
909 			return (m);
910 		}
911 	}
912 
913 	/*
914 	 * The given pool was empty.  Find the largest
915 	 * contiguous, power-of-two-sized set of pages in any
916 	 * pool.  Transfer these pages to the given pool, and
917 	 * use them to satisfy the allocation.
918 	 */
919 	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
920 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
921 			alt = &vm_phys_free_queues[domain][flind][pind][0];
922 			m = TAILQ_FIRST(&alt[oind].pl);
923 			if (m != NULL) {
924 				vm_freelist_rem(alt, m, oind);
925 				vm_phys_set_pool(pool, m, oind);
926 				/* The order [order, oind) queues are empty. */
927 				vm_phys_split_pages(m, oind, fl, order, 1);
928 				return (m);
929 			}
930 		}
931 	}
932 	return (NULL);
933 }
934 
935 /*
936  * Find the vm_page corresponding to the given physical address, which must lie
937  * within the given physical memory segment.
938  */
939 vm_page_t
940 vm_phys_seg_paddr_to_vm_page(struct vm_phys_seg *seg, vm_paddr_t pa)
941 {
942 	KASSERT(pa >= seg->start && pa < seg->end,
943 	    ("%s: pa %#jx is out of range", __func__, (uintmax_t)pa));
944 
945 	return (&seg->first_page[atop(pa - seg->start)]);
946 }
947 
948 /*
949  * Find the vm_page corresponding to the given physical address.
950  */
951 vm_page_t
952 vm_phys_paddr_to_vm_page(vm_paddr_t pa)
953 {
954 	struct vm_phys_seg *seg;
955 
956 	if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
957 		return (vm_phys_seg_paddr_to_vm_page(seg, pa));
958 	return (NULL);
959 }
960 
961 vm_page_t
962 vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
963 {
964 	struct vm_phys_fictitious_seg tmp, *seg;
965 	vm_page_t m;
966 
967 	m = NULL;
968 	tmp.start = pa;
969 	tmp.end = 0;
970 
971 	rw_rlock(&vm_phys_fictitious_reg_lock);
972 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
973 	rw_runlock(&vm_phys_fictitious_reg_lock);
974 	if (seg == NULL)
975 		return (NULL);
976 
977 	m = &seg->first_page[atop(pa - seg->start)];
978 	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));
979 
980 	return (m);
981 }
982 
983 static inline void
984 vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
985     long page_count, vm_memattr_t memattr)
986 {
987 	long i;
988 
989 	bzero(range, page_count * sizeof(*range));
990 	for (i = 0; i < page_count; i++) {
991 		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
992 		range[i].oflags &= ~VPO_UNMANAGED;
993 		range[i].busy_lock = VPB_UNBUSIED;
994 	}
995 }
996 
997 int
998 vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
999     vm_memattr_t memattr)
1000 {
1001 	struct vm_phys_fictitious_seg *seg;
1002 	vm_page_t fp;
1003 	long page_count;
1004 #ifdef VM_PHYSSEG_DENSE
1005 	long pi, pe;
1006 	long dpage_count;
1007 #endif
1008 
1009 	KASSERT(start < end,
1010 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
1011 	    (uintmax_t)start, (uintmax_t)end));
1012 
1013 	page_count = (end - start) / PAGE_SIZE;
1014 
1015 #ifdef VM_PHYSSEG_DENSE
1016 	pi = atop(start);
1017 	pe = atop(end);
1018 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1019 		fp = &vm_page_array[pi - first_page];
1020 		if ((pe - first_page) > vm_page_array_size) {
1021 			/*
1022 			 * We have a segment that starts inside
1023 			 * of vm_page_array, but ends outside of it.
1024 			 *
1025 			 * Use vm_page_array pages for those that are
1026 			 * inside of the vm_page_array range, and
1027 			 * allocate the remaining ones.
1028 			 */
1029 			dpage_count = vm_page_array_size - (pi - first_page);
1030 			vm_phys_fictitious_init_range(fp, start, dpage_count,
1031 			    memattr);
1032 			page_count -= dpage_count;
1033 			start += ptoa(dpage_count);
1034 			goto alloc;
1035 		}
1036 		/*
1037 		 * We can allocate the full range from vm_page_array,
1038 		 * so there's no need to register the range in the tree.
1039 		 */
1040 		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
1041 		return (0);
1042 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
1043 		/*
1044 		 * We have a segment that ends inside of vm_page_array,
1045 		 * but starts outside of it.
1046 		 */
1047 		fp = &vm_page_array[0];
1048 		dpage_count = pe - first_page;
1049 		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
1050 		    memattr);
1051 		end -= ptoa(dpage_count);
1052 		page_count -= dpage_count;
1053 		goto alloc;
1054 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1055 		/*
1056 		 * Trying to register a fictitious range that extends before
1057 		 * and after vm_page_array.
1058 		 */
1059 		return (EINVAL);
1060 	} else {
1061 alloc:
1062 #endif
1063 		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
1064 		    M_WAITOK);
1065 #ifdef VM_PHYSSEG_DENSE
1066 	}
1067 #endif
1068 	vm_phys_fictitious_init_range(fp, start, page_count, memattr);
1069 
1070 	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
1071 	seg->start = start;
1072 	seg->end = end;
1073 	seg->first_page = fp;
1074 
1075 	rw_wlock(&vm_phys_fictitious_reg_lock);
1076 	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
1077 	rw_wunlock(&vm_phys_fictitious_reg_lock);
1078 
1079 	return (0);
1080 }
1081 
1082 void
1083 vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
1084 {
1085 	struct vm_phys_fictitious_seg *seg, tmp;
1086 #ifdef VM_PHYSSEG_DENSE
1087 	long pi, pe;
1088 #endif
1089 
1090 	KASSERT(start < end,
1091 	    ("Start of segment isn't less than end (start: %jx end: %jx)",
1092 	    (uintmax_t)start, (uintmax_t)end));
1093 
1094 #ifdef VM_PHYSSEG_DENSE
1095 	pi = atop(start);
1096 	pe = atop(end);
1097 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1098 		if ((pe - first_page) <= vm_page_array_size) {
1099 			/*
1100 			 * This segment was allocated using vm_page_array
1101 			 * only; there's nothing to do since those pages
1102 			 * were never added to the tree.
1103 			 */
1104 			return;
1105 		}
1106 		/*
1107 		 * We have a segment that starts inside
1108 		 * of vm_page_array, but ends outside of it.
1109 		 *
1110 		 * Calculate how many pages were added to the
1111 		 * tree and free them.
1112 		 */
1113 		start = ptoa(first_page + vm_page_array_size);
1114 	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
1115 		/*
1116 		 * We have a segment that ends inside of vm_page_array,
1117 		 * but starts outside of it.
1118 		 */
1119 		end = ptoa(first_page);
1120 	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1121 		/* Since it's not possible to register such a range, panic. */
1122 		panic(
1123 		    "Unregistering not registered fictitious range [%#jx:%#jx]",
1124 		    (uintmax_t)start, (uintmax_t)end);
1125 	}
1126 #endif
1127 	tmp.start = start;
1128 	tmp.end = 0;
1129 
1130 	rw_wlock(&vm_phys_fictitious_reg_lock);
1131 	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
1132 	if (seg->start != start || seg->end != end) {
1133 		rw_wunlock(&vm_phys_fictitious_reg_lock);
1134 		panic(
1135 		    "Unregistering not registered fictitious range [%#jx:%#jx]",
1136 		    (uintmax_t)start, (uintmax_t)end);
1137 	}
1138 	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
1139 	rw_wunlock(&vm_phys_fictitious_reg_lock);
1140 	free(seg->first_page, M_FICT_PAGES);
1141 	free(seg, M_FICT_PAGES);
1142 }
1143 
1144 /*
1145  * Free a contiguous, power of two-sized set of physical pages.
1146  *
1147  * The free page queues must be locked.
1148  */
1149 void
1150 vm_phys_free_pages(vm_page_t m, int order)
1151 {
1152 	struct vm_freelist *fl;
1153 	struct vm_phys_seg *seg;
1154 	vm_paddr_t pa;
1155 	vm_page_t m_buddy;
1156 
1157 	KASSERT(m->order == VM_NFREEORDER,
1158 	    ("vm_phys_free_pages: page %p has unexpected order %d",
1159 	    m, m->order));
1160 	KASSERT(m->pool < VM_NFREEPOOL,
1161 	    ("vm_phys_free_pages: page %p has unexpected pool %d",
1162 	    m, m->pool));
1163 	KASSERT(order < VM_NFREEORDER,
1164 	    ("vm_phys_free_pages: order %d is out of range", order));
1165 	seg = &vm_phys_segs[m->segind];
1166 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
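	/*
	 * Repeatedly coalesce the block with its buddy while the buddy lies
	 * within the same segment and is itself free at the current order.
	 * The buddy's address is found by flipping the bit corresponding to
	 * the block size in the page's physical address.
	 */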
1167 	if (order < VM_NFREEORDER - 1) {
1168 		pa = VM_PAGE_TO_PHYS(m);
1169 		do {
1170 			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
1171 			if (pa < seg->start || pa >= seg->end)
1172 				break;
1173 			m_buddy = vm_phys_seg_paddr_to_vm_page(seg, pa);
1174 			if (m_buddy->order != order)
1175 				break;
1176 			fl = (*seg->free_queues)[m_buddy->pool];
1177 			vm_freelist_rem(fl, m_buddy, order);
1178 			if (m_buddy->pool != m->pool)
1179 				vm_phys_set_pool(m->pool, m_buddy, order);
1180 			order++;
1181 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
1182 			m = vm_phys_seg_paddr_to_vm_page(seg, pa);
1183 		} while (order < VM_NFREEORDER - 1);
1184 	}
1185 	fl = (*seg->free_queues)[m->pool];
1186 	vm_freelist_add(fl, m, order, 1);
1187 }
1188 
1189 /*
1190  * Free a contiguous, arbitrarily sized set of physical pages, without
1191  * merging across set boundaries.
1192  *
1193  * The free page queues must be locked.
1194  */
1195 void
1196 vm_phys_enqueue_contig(vm_page_t m, u_long npages)
1197 {
1198 	struct vm_freelist *fl;
1199 	struct vm_phys_seg *seg;
1200 	vm_page_t m_end;
1201 	vm_paddr_t diff, lo;
1202 	int order;
1203 
1204 	/*
1205 	 * Avoid unnecessary coalescing by freeing the pages in the largest
1206 	 * possible power-of-two-sized subsets.
1207 	 */
1208 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1209 	seg = &vm_phys_segs[m->segind];
1210 	fl = (*seg->free_queues)[m->pool];
1211 	m_end = m + npages;
1212 	/* Free blocks of increasing size. */
1213 	lo = atop(VM_PAGE_TO_PHYS(m));
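	/*
	 * lo ^ (lo + npages - 1) has its highest set bit at the largest block
	 * order whose boundary the range crosses; free the pages preceding
	 * the first such boundary in blocks of increasing size.
	 */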
1214 	if (m < m_end &&
1215 	    (diff = lo ^ (lo + npages - 1)) != 0) {
1216 		order = min(ilog2(diff), VM_NFREEORDER - 1);
1217 		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
1218 	}
1219 
1220 	/* Free blocks of maximum size. */
1221 	order = VM_NFREEORDER - 1;
1222 	while (m + (1 << order) <= m_end) {
1223 		KASSERT(seg == &vm_phys_segs[m->segind],
1224 		    ("%s: page range [%p,%p) spans multiple segments",
1225 		    __func__, m_end - npages, m));
1226 		vm_phys_enq_chunk(fl, m, order, 1);
1227 		m += 1 << order;
1228 	}
1229 	/* Free blocks of diminishing size. */
1230 	vm_phys_enq_beg(m, m_end - m, fl, 1);
1231 }
1232 
1233 /*
1234  * Free a contiguous, arbitrarily sized set of physical pages.
1235  *
1236  * The free page queues must be locked.
1237  */
1238 void
1239 vm_phys_free_contig(vm_page_t m, u_long npages)
1240 {
1241 	vm_paddr_t lo;
1242 	vm_page_t m_start, m_end;
1243 	unsigned max_order, order_start, order_end;
1244 
1245 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1246 
1247 	lo = atop(VM_PAGE_TO_PHYS(m));
1248 	max_order = min(ilog2(lo ^ (lo + npages)), VM_NFREEORDER - 1);
1249 
1250 	m_start = m;
1251 	order_start = ffsll(lo) - 1;
1252 	if (order_start < max_order)
1253 		m_start += 1 << order_start;
1254 	m_end = m + npages;
1255 	order_end = ffsll(lo + npages) - 1;
1256 	if (order_end < max_order)
1257 		m_end -= 1 << order_end;
1258 	/*
1259 	 * Avoid unnecessary coalescing by freeing the pages at the start and
1260 	 * end of the range last.
1261 	 */
1262 	if (m_start < m_end)
1263 		vm_phys_enqueue_contig(m_start, m_end - m_start);
1264 	if (order_start < max_order)
1265 		vm_phys_free_pages(m, order_start);
1266 	if (order_end < max_order)
1267 		vm_phys_free_pages(m_end, order_end);
1268 }
1269 
1270 /*
1271  * Identify the first address range within segment segind or greater
1272  * that matches the domain, lies within the low/high range, and has
1273  * enough pages.  Return -1 if there is none.
1274  */
1275 int
1276 vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
1277     u_long npages, vm_paddr_t low, vm_paddr_t high)
1278 {
1279 	vm_paddr_t pa_end, pa_start;
1280 	struct vm_phys_seg *end_seg, *seg;
1281 
1282 	KASSERT(npages > 0, ("npages is zero"));
1283 	KASSERT(domain >= 0 && domain < vm_ndomains, ("domain out of range"));
1284 	end_seg = &vm_phys_segs[vm_phys_nsegs];
1285 	for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) {
1286 		if (seg->domain != domain)
1287 			continue;
1288 		if (seg->start >= high)
1289 			return (-1);
1290 		pa_start = MAX(low, seg->start);
1291 		pa_end = MIN(high, seg->end);
1292 		if (pa_end - pa_start < ptoa(npages))
1293 			continue;
1294 		bounds[0] = vm_phys_seg_paddr_to_vm_page(seg, pa_start);
1295 		bounds[1] = vm_phys_seg_paddr_to_vm_page(seg, pa_end);
1296 		return (seg - vm_phys_segs);
1297 	}
1298 	return (-1);
1299 }
1300 
1301 /*
1302  * Search for the given physical page "m" in the free lists.  If the search
1303  * succeeds, remove "m" from the free lists and return true.  Otherwise, return
1304  * false, indicating that "m" is not in the free lists.
1305  *
1306  * The free page queues must be locked.
1307  */
1308 bool
1309 vm_phys_unfree_page(vm_page_t m)
1310 {
1311 	struct vm_freelist *fl;
1312 	struct vm_phys_seg *seg;
1313 	vm_paddr_t pa, pa_half;
1314 	vm_page_t m_set, m_tmp;
1315 	int order;
1316 
1317 	/*
1318 	 * First, find the contiguous, power of two-sized set of free
1319 	 * physical pages containing the given physical page "m" and
1320 	 * assign it to "m_set".
1321 	 */
1322 	seg = &vm_phys_segs[m->segind];
1323 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1324 	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1325 	    order < VM_NFREEORDER - 1; ) {
1326 		order++;
1327 		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
1328 		if (pa >= seg->start)
1329 			m_set = vm_phys_seg_paddr_to_vm_page(seg, pa);
1330 		else
1331 			return (false);
1332 	}
1333 	if (m_set->order < order)
1334 		return (false);
1335 	if (m_set->order == VM_NFREEORDER)
1336 		return (false);
1337 	KASSERT(m_set->order < VM_NFREEORDER,
1338 	    ("vm_phys_unfree_page: page %p has unexpected order %d",
1339 	    m_set, m_set->order));
1340 
1341 	/*
1342 	 * Next, remove "m_set" from the free lists.  Finally, extract
1343 	 * "m" from "m_set" using an iterative algorithm: While "m_set"
1344 	 * is larger than a page, shrink "m_set" by returning the half
1345 	 * of "m_set" that does not contain "m" to the free lists.
1346 	 */
1347 	fl = (*seg->free_queues)[m_set->pool];
1348 	order = m_set->order;
1349 	vm_freelist_rem(fl, m_set, order);
1350 	while (order > 0) {
1351 		order--;
1352 		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
1353 		if (m->phys_addr < pa_half)
1354 			m_tmp = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
1355 		else {
1356 			m_tmp = m_set;
1357 			m_set = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
1358 		}
1359 		vm_freelist_add(fl, m_tmp, order, 0);
1360 	}
1361 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
1362 	return (true);
1363 }
1364 
1365 /*
1366  * Find a run of contiguous physical pages, meeting alignment requirements, from
1367  * a list of max-sized page blocks, where we need at least two consecutive
1368  * blocks to satisfy the (large) page request.
1369  */
1370 static vm_page_t
1371 vm_phys_find_freelist_contig(struct vm_freelist *fl, u_long npages,
1372     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1373 {
1374 	struct vm_phys_seg *seg;
1375 	vm_page_t m, m_iter, m_ret;
1376 	vm_paddr_t max_size, size;
1377 	int max_order;
1378 
1379 	max_order = VM_NFREEORDER - 1;
1380 	size = npages << PAGE_SHIFT;
1381 	max_size = (vm_paddr_t)1 << (PAGE_SHIFT + max_order);
1382 	KASSERT(size > max_size, ("size is too small"));
1383 
1384 	/*
1385 	 * In order to avoid examining any free max-sized page block more than
1386 	 * twice, identify the ones that are first in a physically-contiguous
1387 	 * sequence of such blocks, and only for those walk the sequence to
1388 	 * check if there are enough free blocks starting at a properly aligned
1389 	 * block.  Thus, no block is checked for free-ness more than twice.
1390 	 */
1391 	TAILQ_FOREACH(m, &fl[max_order].pl, listq) {
1392 		/*
1393 		 * Skip m unless it is first in a sequence of free max page
1394 		 * blocks >= low in its segment.
1395 		 */
1396 		seg = &vm_phys_segs[m->segind];
1397 		if (VM_PAGE_TO_PHYS(m) < MAX(low, seg->start))
1398 			continue;
1399 		if (VM_PAGE_TO_PHYS(m) >= max_size &&
1400 		    VM_PAGE_TO_PHYS(m) - max_size >= MAX(low, seg->start) &&
1401 		    max_order == m[-1 << max_order].order)
1402 			continue;
1403 
1404 		/*
1405 		 * Advance m_ret from m to the first of the sequence, if any,
1406 		 * that satisfies alignment conditions and might leave enough
1407 		 * space.
1408 		 */
1409 		m_ret = m;
1410 		while (!vm_addr_ok(VM_PAGE_TO_PHYS(m_ret),
1411 		    size, alignment, boundary) &&
1412 		    VM_PAGE_TO_PHYS(m_ret) + size <= MIN(high, seg->end) &&
1413 		    max_order == m_ret[1 << max_order].order)
1414 			m_ret += 1 << max_order;
1415 
1416 		/*
1417 		 * Skip m unless some block m_ret in the sequence is properly
1418 		 * aligned, and begins a sequence of enough pages less than
1419 		 * high, and in the same segment.
1420 		 */
1421 		if (VM_PAGE_TO_PHYS(m_ret) + size > MIN(high, seg->end))
1422 			continue;
1423 
1424 		/*
1425 		 * Skip m unless the blocks to allocate starting at m_ret are
1426 		 * all free.
1427 		 */
1428 		for (m_iter = m_ret;
1429 		    m_iter < m_ret + npages && max_order == m_iter->order;
1430 		    m_iter += 1 << max_order) {
1431 		}
1432 		if (m_iter < m_ret + npages)
1433 			continue;
1434 		return (m_ret);
1435 	}
1436 	return (NULL);
1437 }
1438 
1439 /*
1440  * Find a run of contiguous physical pages from the specified free list
1441  * table.
1442  */
1443 static vm_page_t
1444 vm_phys_find_queues_contig(
1445     struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
1446     u_long npages, vm_paddr_t low, vm_paddr_t high,
1447     u_long alignment, vm_paddr_t boundary)
1448 {
1449 	struct vm_freelist *fl;
1450 	vm_page_t m_ret;
1451 	vm_paddr_t pa, pa_end, size;
1452 	int oind, order, pind;
1453 
1454 	KASSERT(npages > 0, ("npages is 0"));
1455 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1456 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1457 	/* Compute the queue that is the best fit for npages. */
1458 	order = flsl(npages - 1);
1459 	/* Search for a large enough free block. */
1460 	size = npages << PAGE_SHIFT;
1461 	for (oind = order; oind < VM_NFREEORDER; oind++) {
1462 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1463 			fl = (*queues)[pind];
1464 			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
1465 				/*
1466 				 * Determine if the address range starting at pa
1467 				 * is within the given range, satisfies the
1468 				 * given alignment, and does not cross the given
1469 				 * boundary.
1470 				 */
1471 				pa = VM_PAGE_TO_PHYS(m_ret);
1472 				pa_end = pa + size;
1473 				if (low <= pa && pa_end <= high &&
1474 				    vm_addr_ok(pa, size, alignment, boundary))
1475 					return (m_ret);
1476 			}
1477 		}
1478 	}
1479 	if (order < VM_NFREEORDER)
1480 		return (NULL);
1481 	/* Search for a long-enough sequence of max-order blocks. */
1482 	for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1483 		fl = (*queues)[pind];
1484 		m_ret = vm_phys_find_freelist_contig(fl, npages,
1485 		    low, high, alignment, boundary);
1486 		if (m_ret != NULL)
1487 			return (m_ret);
1488 	}
1489 	return (NULL);
1490 }
1491 
1492 /*
1493  * Allocate a contiguous set of physical pages of the given size
1494  * "npages" from the free lists.  All of the physical pages must be at
1495  * or above the given physical address "low" and below the given
1496  * physical address "high".  The given value "alignment" determines the
1497  * alignment of the first physical page in the set.  If the given value
1498  * "boundary" is non-zero, then the set of physical pages cannot cross
1499  * any physical address boundary that is a multiple of that value.  Both
1500  * "alignment" and "boundary" must be a power of two.
1501  */
1502 vm_page_t
1503 vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1504     u_long alignment, vm_paddr_t boundary)
1505 {
1506 	vm_paddr_t pa_end, pa_start;
1507 	struct vm_freelist *fl;
1508 	vm_page_t m, m_run;
1509 	struct vm_phys_seg *seg;
1510 	struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
1511 	int oind, segind;
1512 
1513 	KASSERT(npages > 0, ("npages is 0"));
1514 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1515 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1516 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
1517 	if (low >= high)
1518 		return (NULL);
1519 	queues = NULL;
1520 	m_run = NULL;
1521 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1522 		seg = &vm_phys_segs[segind];
1523 		if (seg->start >= high || seg->domain != domain)
1524 			continue;
1525 		if (low >= seg->end)
1526 			break;
1527 		if (low <= seg->start)
1528 			pa_start = seg->start;
1529 		else
1530 			pa_start = low;
1531 		if (high < seg->end)
1532 			pa_end = high;
1533 		else
1534 			pa_end = seg->end;
1535 		if (pa_end - pa_start < ptoa(npages))
1536 			continue;
1537 		/*
1538 		 * If a previous segment led to a search using
1539 		 * the same free lists as would this segment, then
1540 		 * we've actually already searched within this
1541 		 * too.  So skip it.
1542 		 */
1543 		if (seg->free_queues == queues)
1544 			continue;
1545 		queues = seg->free_queues;
1546 		m_run = vm_phys_find_queues_contig(queues, npages,
1547 		    low, high, alignment, boundary);
1548 		if (m_run != NULL)
1549 			break;
1550 	}
1551 	if (m_run == NULL)
1552 		return (NULL);
1553 
1554 	/* Allocate pages from the page-range found. */
1555 	for (m = m_run; m < &m_run[npages]; m = &m[1 << oind]) {
1556 		fl = (*queues)[m->pool];
1557 		oind = m->order;
1558 		vm_freelist_rem(fl, m, oind);
1559 		if (m->pool != VM_FREEPOOL_DEFAULT)
1560 			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
1561 	}
1562 	/* Return excess pages to the free lists. */
1563 	fl = (*queues)[VM_FREEPOOL_DEFAULT];
1564 	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
1565 
1566 	/* Return page verified to satisfy conditions of request. */
1567 	pa_start = VM_PAGE_TO_PHYS(m_run);
1568 	KASSERT(low <= pa_start,
1569 	    ("memory allocated below minimum requested range"));
1570 	KASSERT(pa_start + ptoa(npages) <= high,
1571 	    ("memory allocated above maximum requested range"));
1572 	seg = &vm_phys_segs[m_run->segind];
1573 	KASSERT(seg->domain == domain,
1574 	    ("memory not allocated from specified domain"));
1575 	KASSERT(vm_addr_ok(pa_start, ptoa(npages), alignment, boundary),
1576 	    ("memory alignment/boundary constraints not satisfied"));
1577 	return (m_run);
1578 }
1579 
1580 /*
1581  * Return the index of the first unused slot which may be the terminating
1582  * entry.
1583  */
1584 static int
1585 vm_phys_avail_count(void)
1586 {
1587 	int i;
1588 
1589 	for (i = 0; phys_avail[i + 1]; i += 2)
1590 		continue;
1591 	if (i > PHYS_AVAIL_ENTRIES)
1592 		panic("Improperly terminated phys_avail %d entries", i);
1593 
1594 	return (i);
1595 }
1596 
1597 /*
1598  * Assert that a phys_avail entry is valid.
1599  */
1600 static void
1601 vm_phys_avail_check(int i)
1602 {
1603 	if (phys_avail[i] & PAGE_MASK)
1604 		panic("Unaligned phys_avail[%d]: %#jx", i,
1605 		    (intmax_t)phys_avail[i]);
1606 	if (phys_avail[i+1] & PAGE_MASK)
1607 		panic("Unaligned phys_avail[%d + 1]: %#jx", i,
1608 		    (intmax_t)phys_avail[i + 1]);
1609 	if (phys_avail[i + 1] < phys_avail[i])
1610 		panic("phys_avail[%d] start %#jx > end %#jx", i,
1611 		    (intmax_t)phys_avail[i], (intmax_t)phys_avail[i+1]);
1612 }
1613 
1614 /*
1615  * Return the index of an overlapping phys_avail entry or -1.
1616  */
1617 #ifdef NUMA
1618 static int
1619 vm_phys_avail_find(vm_paddr_t pa)
1620 {
1621 	int i;
1622 
1623 	for (i = 0; phys_avail[i + 1]; i += 2)
1624 		if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
1625 			return (i);
1626 	return (-1);
1627 }
1628 #endif
1629 
1630 /*
1631  * Return the index of the largest entry.
1632  */
1633 int
1634 vm_phys_avail_largest(void)
1635 {
1636 	vm_paddr_t sz, largesz;
1637 	int largest;
1638 	int i;
1639 
1640 	largest = 0;
1641 	largesz = 0;
1642 	for (i = 0; phys_avail[i + 1]; i += 2) {
1643 		sz = vm_phys_avail_size(i);
1644 		if (sz > largesz) {
1645 			largesz = sz;
1646 			largest = i;
1647 		}
1648 	}
1649 
1650 	return (largest);
1651 }
1652 
1653 vm_paddr_t
1654 vm_phys_avail_size(int i)
1655 {
1656 
1657 	return (phys_avail[i + 1] - phys_avail[i]);
1658 }
1659 
1660 /*
1661  * Split an entry at the address 'pa'.  Return zero on success or errno.
1662  */
1663 static int
1664 vm_phys_avail_split(vm_paddr_t pa, int i)
1665 {
1666 	int cnt;
1667 
1668 	vm_phys_avail_check(i);
1669 	if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
1670 		panic("vm_phys_avail_split: invalid address");
1671 	cnt = vm_phys_avail_count();
1672 	if (cnt >= PHYS_AVAIL_ENTRIES)
1673 		return (ENOSPC);
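	/*
	 * Shift the entries at and above index i up by one pair to make room,
	 * then end the lower half of the split entry at pa and begin the
	 * upper half there.
	 */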
1674 	memmove(&phys_avail[i + 2], &phys_avail[i],
1675 	    (cnt - i) * sizeof(phys_avail[0]));
1676 	phys_avail[i + 1] = pa;
1677 	phys_avail[i + 2] = pa;
1678 	vm_phys_avail_check(i);
1679 	vm_phys_avail_check(i+2);
1680 
1681 	return (0);
1682 }
1683 
1684 /*
1685  * Check if a given physical address can be included as part of a crash dump.
1686  */
1687 bool
1688 vm_phys_is_dumpable(vm_paddr_t pa)
1689 {
1690 	vm_page_t m;
1691 	int i;
1692 
1693 	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
1694 		return ((m->flags & PG_NODUMP) == 0);
1695 
1696 	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
1697 		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
1698 			return (true);
1699 	}
1700 	return (false);
1701 }
1702 
1703 void
1704 vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
1705 {
1706 	struct vm_phys_seg *seg;
1707 
1708 	if (vm_phys_early_nsegs == -1)
1709 		panic("%s: called after initialization", __func__);
1710 	if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
1711 		panic("%s: ran out of early segments", __func__);
1712 
1713 	seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
1714 	seg->start = start;
1715 	seg->end = end;
1716 }
1717 
1718 /*
1719  * This routine allocates NUMA node specific memory before the page
1720  * allocator is bootstrapped.
1721  */
1722 vm_paddr_t
1723 vm_phys_early_alloc(int domain, size_t alloc_size)
1724 {
1725 #ifdef NUMA
1726 	int mem_index;
1727 #endif
1728 	int i, biggestone;
1729 	vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;
1730 
1731 	KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
1732 	    ("%s: invalid domain index %d", __func__, domain));
1733 
1734 	/*
1735 	 * Search the mem_affinity array for the biggest address
1736 	 * range in the desired domain.  This is used to constrain
1737 	 * the phys_avail selection below.
1738 	 */
1739 	biggestsize = 0;
1740 	mem_start = 0;
1741 	mem_end = -1;
1742 #ifdef NUMA
1743 	mem_index = 0;
1744 	if (mem_affinity != NULL) {
1745 		for (i = 0;; i++) {
1746 			size = mem_affinity[i].end - mem_affinity[i].start;
1747 			if (size == 0)
1748 				break;
1749 			if (domain != -1 && mem_affinity[i].domain != domain)
1750 				continue;
1751 			if (size > biggestsize) {
1752 				mem_index = i;
1753 				biggestsize = size;
1754 			}
1755 		}
1756 		mem_start = mem_affinity[mem_index].start;
1757 		mem_end = mem_affinity[mem_index].end;
1758 	}
1759 #endif
1760 
1761 	/*
1762 	 * Now find the biggest physical segment within the desired
1763 	 * NUMA domain.
1764 	 */
1765 	biggestsize = 0;
1766 	biggestone = 0;
1767 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1768 		/* skip regions that are out of range */
1769 		if (phys_avail[i+1] - alloc_size < mem_start ||
1770 		    phys_avail[i+1] > mem_end)
1771 			continue;
1772 		size = vm_phys_avail_size(i);
1773 		if (size > biggestsize) {
1774 			biggestone = i;
1775 			biggestsize = size;
1776 		}
1777 	}
1778 	alloc_size = round_page(alloc_size);
1779 
1780 	/*
1781 	 * Grab single pages from the front to reduce fragmentation.
1782 	 */
1783 	if (alloc_size == PAGE_SIZE) {
1784 		pa = phys_avail[biggestone];
1785 		phys_avail[biggestone] += PAGE_SIZE;
1786 		vm_phys_avail_check(biggestone);
1787 		return (pa);
1788 	}
1789 
1790 	/*
1791 	 * Naturally align large allocations.
1792 	 */
1793 	align = phys_avail[biggestone + 1] & (alloc_size - 1);
1794 	if (alloc_size + align > biggestsize)
1795 		panic("cannot find a large enough size");
1796 	if (align != 0 &&
1797 	    vm_phys_avail_split(phys_avail[biggestone + 1] - align,
1798 	    biggestone) != 0)
1799 		/* Wasting memory. */
1800 		phys_avail[biggestone + 1] -= align;
1801 
1802 	phys_avail[biggestone + 1] -= alloc_size;
1803 	vm_phys_avail_check(biggestone);
1804 	pa = phys_avail[biggestone + 1];
1805 	return (pa);
1806 }
1807 
1808 void
1809 vm_phys_early_startup(void)
1810 {
1811 	struct vm_phys_seg *seg;
1812 	int i;
1813 
1814 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1815 		phys_avail[i] = round_page(phys_avail[i]);
1816 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
1817 	}
1818 
1819 	for (i = 0; i < vm_phys_early_nsegs; i++) {
1820 		seg = &vm_phys_early_segs[i];
1821 		vm_phys_add_seg(seg->start, seg->end);
1822 	}
1823 	vm_phys_early_nsegs = -1;
1824 
1825 #ifdef NUMA
1826 	/* Force phys_avail to be split by domain. */
1827 	if (mem_affinity != NULL) {
1828 		int idx;
1829 
1830 		for (i = 0; mem_affinity[i].end != 0; i++) {
1831 			idx = vm_phys_avail_find(mem_affinity[i].start);
1832 			if (idx != -1 &&
1833 			    phys_avail[idx] != mem_affinity[i].start)
1834 				vm_phys_avail_split(mem_affinity[i].start, idx);
1835 			idx = vm_phys_avail_find(mem_affinity[i].end);
1836 			if (idx != -1 &&
1837 			    phys_avail[idx] != mem_affinity[i].end)
1838 				vm_phys_avail_split(mem_affinity[i].end, idx);
1839 		}
1840 	}
1841 #endif
1842 }
1843 
1844 #ifdef DDB
1845 /*
1846  * Show the number of physical pages in each of the free lists.
1847  */
1848 DB_SHOW_COMMAND_FLAGS(freepages, db_show_freepages, DB_CMD_MEMSAFE)
1849 {
1850 	struct vm_freelist *fl;
1851 	int flind, oind, pind, dom;
1852 
1853 	for (dom = 0; dom < vm_ndomains; dom++) {
1854 		db_printf("DOMAIN: %d\n", dom);
1855 		for (flind = 0; flind < vm_nfreelists; flind++) {
1856 			db_printf("FREE LIST %d:\n"
1857 			    "\n  ORDER (SIZE)  |  NUMBER"
1858 			    "\n              ", flind);
1859 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
1860 				db_printf("  |  POOL %d", pind);
1861 			db_printf("\n--            ");
1862 			for (pind = 0; pind < VM_NFREEPOOL; pind++)
1863 				db_printf("-- --      ");
1864 			db_printf("--\n");
1865 			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
1866 				db_printf("  %2.2d (%6.6dK)", oind,
1867 				    1 << (PAGE_SHIFT - 10 + oind));
1868 				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1869 				fl = vm_phys_free_queues[dom][flind][pind];
1870 					db_printf("  |  %6.6d", fl[oind].lcnt);
1871 				}
1872 				db_printf("\n");
1873 			}
1874 			db_printf("\n");
1875 		}
1876 		db_printf("\n");
1877 	}
1878 }
1879 #endif
1880