/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;
#endif

int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;
static struct vm_phys_seg vm_phys_early_segs[8];
static int vm_phys_early_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(&vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
    [VM_NFREEORDER_MAX];

static int __read_mostly vm_nfreelists;

/*
 * These "avail lists" are globals used to communicate boot-time physical
 * memory layout to other parts of the kernel.  Each physically contiguous
 * region of memory is defined by a start address at an even index and an
 * end address at the following odd index.  Each list is terminated by a
 * pair of zero entries.
 *
 * dump_avail tells the dump code what regions to include in a crash dump, and
 * phys_avail is all of the remaining physical memory that is available for
 * the vm system.
 *
 * Initially dump_avail and phys_avail are identical.  Boot time memory
 * allocations remove extents from phys_avail that may still be included
 * in dumps.
 */
vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
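
/*
 * As an illustration (values hypothetical): on a machine with two usable
 * RAM ranges, [4K, 636K) and [1M, 1G), such a list would contain
 *
 *	{ 0x1000, 0x9f000, 0x100000, 0x40000000, 0, 0 }
 *
 * with start addresses at even indices, end addresses at odd indices, and
 * a terminating pair of zeroes.
 */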

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_free, "A",
    "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_segs, "A",
    "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_locality, "A",
    "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
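
/*
 * Note the convention used by the comparison above: a lookup key whose
 * "end" is zero is treated as a point query rather than a range.  For
 * example, vm_phys_fictitious_to_vm_page() below builds such a key
 * ("tmp.start = pa; tmp.end = 0;") so that RB_FIND() locates the
 * registered segment containing "pa" instead of requiring an exact
 * range match.
 */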

int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps [low, high].
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match:  Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}
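
/*
 * The while loop above is a single insertion-sort step, keeping
 * vm_phys_segs[] ordered by start address.  For example (addresses
 * hypothetical), if the existing segments are [0, 1M) and [4M, 8M),
 * creating [2M, 3M) shifts [4M, 8M) one slot to the right and places
 * the new segment between the two.
 */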

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;
	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
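
/*
 * As an illustration of the splitting above: with, for example,
 * VM_LOWMEM_BOUNDARY at 16M and VM_DMA32_BOUNDARY at 4G, a segment
 * spanning [8M, 6G) is broken into three segments, [8M, 16M),
 * [16M, 4G), and [4G, 6G), so that no segment straddles a free list
 * boundary.
 */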

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;
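
	/*
	 * For illustration (a hypothetical configuration): with three
	 * possible free lists of which only the last two are created,
	 * the entries evolve from { 0, 1, 1 } (create?) to the running
	 * totals { 0, 1, 2 } and finally to the indices { -1, 0, 1 },
	 * where -1 marks a free list that was never created.
	 */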

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Coalesce physical memory segments that are contiguous and share the
	 * same per-domain free queues.
	 */
	prev_seg = vm_phys_segs;
	seg = &vm_phys_segs[1];
	end_seg = &vm_phys_segs[vm_phys_nsegs];
	while (seg < end_seg) {
		if (prev_seg->end == seg->start &&
		    prev_seg->free_queues == seg->free_queues) {
			prev_seg->end = seg->end;
			KASSERT(prev_seg->domain == seg->domain,
			    ("vm_phys_init: free queues cannot span domains"));
			vm_phys_nsegs--;
			end_seg--;
			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
				*tmp_seg = *(tmp_seg + 1);
		} else {
			prev_seg = seg;
			seg++;
		}
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Register info about the NUMA topology of the system.
 *
 * Invoked by platform-dependent code prior to vm_phys_init().
 */
void
vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
    int *locality)
{
#ifdef NUMA
	int d, i;

	/*
	 * For now the only override value that we support is 1, which
	 * effectively disables NUMA-awareness in the allocators.
	 */
	d = 0;
	TUNABLE_INT_FETCH("vm.numa.disabled", &d);
	if (d)
		ndomains = 1;

	if (ndomains > 1) {
		vm_ndomains = ndomains;
		mem_affinity = affinity;
		mem_locality = locality;
	}

	for (i = 0; i < vm_ndomains; i++)
		DOMAINSET_SET(i, &all_domains);
#else
	(void)ndomains;
	(void)affinity;
	(void)locality;
#endif
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective is to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, tail);
	}
}
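
/*
 * For example, splitting an order-3 block (eight pages) to satisfy an
 * order-0 request peels off and frees the upper halves in decreasing
 * sizes: first m[4] at order 2, then m[2] at order 1, then m[1] at
 * order 0, leaving just the single page at m for the caller.
 */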

/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective is to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	u_int n;
	int order;

	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
	    m, npages));
	do {
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_enq_range: page %p has unexpected order %d",
		    m, m->order));
		order = ffs(npages) - 1;
		KASSERT(order < VM_NFREEORDER,
		    ("vm_phys_enq_range: order %d is out of range", order));
		vm_freelist_add(fl, m, order, tail);
		n = 1 << order;
		m += n;
		npages -= n;
	} while (npages > 0);
}
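
/*
 * For example, enqueueing the trailing 11 pages of an aligned 16-page
 * block frees them as blocks of 1, 2, and 8 pages (orders 0, 1, and 3),
 * following the set bits of npages from least to most significant; each
 * block lands on a boundary that is a multiple of its own size.
 */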

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
static void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, need, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = 1 << oind;
				need = imin(npages - i, avail);
				for (end = i + need; i < end;)
					ma[i++] = m++;
				if (need < avail) {
					/*
					 * Return excess pages to fl.  Its
					 * order [0, oind) queues are empty.
					 */
					vm_phys_enq_range(m, avail - need, fl,
					    1);
					return (npages);
				} else if (i == npages)
					return (npages);
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = 1 << oind;
					need = imin(npages - i, avail);
					for (end = i + need; i < end;)
						ma[i++] = m++;
					if (need < avail) {
						/*
						 * Return excess pages to fl.
						 * Its order [0, oind) queues
						 * are empty.
						 */
						vm_phys_enq_range(m, avail -
						    need, fl, 1);
						return (npages);
					} else if (i == npages)
						return (npages);
				}
			}
		}
	}
	return (i);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			/* The order [order, oind) queues are empty. */
			vm_phys_split_pages(m, oind, fl, order, 1);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				/* The order [order, oind) queues are empty. */
				vm_phys_split_pages(m, oind, fl, order, 1);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}
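
/*
 * The buddy of a block is found above by flipping a single address bit.
 * For example, with 4K pages, an order-0 page at physical address 0x5000
 * has its candidate buddy at 0x5000 ^ 0x1000 = 0x4000.  If that page is
 * also free at order 0, the two merge into an order-1 block at 0x4000
 * and the search continues one order higher.
 */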

/*
 * Return the largest possible order of a set of pages starting at m.
 */
static int
max_order(vm_page_t m)
{

	/*
	 * Unsigned "min" is used here so that "order" is assigned
	 * "VM_NFREEORDER - 1" when ffsl() returns 0.  That happens when
	 * "m"'s physical address is zero or when the low-order bits of
	 * its physical address are zero because a physical address can
	 * be wider than a long; in either case ffsl() - 1 is -1, which
	 * unsigned "min" treats as the largest possible value.
	 */
	return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
	    VM_NFREEORDER - 1));
}
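
/*
 * For example, with 4K pages, a page at physical address 0x6000 has page
 * frame number 6 (binary 110), so ffsl() finds bit 2 and max_order()
 * returns 1: a buddy block starting there can be at most 2 pages.
 */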

/*
 * Free a contiguous, arbitrarily sized set of physical pages, without
 * merging across set boundaries.
 *
 * The free page queues must be locked.
 */
void
vm_phys_enqueue_contig(vm_page_t m, u_long npages)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_page_t m_end;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
	seg = &vm_phys_segs[m->segind];
	fl = (*seg->free_queues)[m->pool];
	m_end = m + npages;
	/* Free blocks of increasing size. */
	while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
	    m + (1 << order) <= m_end) {
		KASSERT(seg == &vm_phys_segs[m->segind],
		    ("%s: page range [%p,%p) spans multiple segments",
		    __func__, m_end - npages, m));
		vm_freelist_add(fl, m, order, 1);
		m += 1 << order;
	}
	/* Free blocks of maximum size. */
	while (m + (1 << order) <= m_end) {
		KASSERT(seg == &vm_phys_segs[m->segind],
		    ("%s: page range [%p,%p) spans multiple segments",
		    __func__, m_end - npages, m));
		vm_freelist_add(fl, m, order, 1);
		m += 1 << order;
	}
	/* Free blocks of diminishing size. */
	while (m < m_end) {
		KASSERT(seg == &vm_phys_segs[m->segind],
		    ("%s: page range [%p,%p) spans multiple segments",
		    __func__, m_end - npages, m));
		order = flsl(m_end - m) - 1;
		vm_freelist_add(fl, m, order, 1);
		m += 1 << order;
	}
}
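
/*
 * For example, freeing 13 pages starting at page frame 6 (orders well
 * below VM_NFREEORDER - 1) enqueues blocks of increasing size, 2 pages
 * at frame 6 and 8 pages at frame 8, until the remainder no longer fits,
 * then blocks of diminishing size, 2 pages at frame 16 and 1 page at
 * frame 18, for a total of 13.
 */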

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	int order_start, order_end;
	vm_page_t m_start, m_end;

	vm_domain_free_assert_locked(vm_pagequeue_domain(m));

	m_start = m;
	order_start = max_order(m_start);
	if (order_start < VM_NFREEORDER - 1)
		m_start += 1 << order_start;
	m_end = m + npages;
	order_end = max_order(m_end);
	if (order_end < VM_NFREEORDER - 1)
		m_end -= 1 << order_end;
	/*
	 * Avoid unnecessary coalescing by freeing the pages at the start and
	 * end of the range last.
	 */
	if (m_start < m_end)
		vm_phys_enqueue_contig(m_start, m_end - m_start);
	if (order_start < VM_NFREEORDER - 1)
		vm_phys_free_pages(m, order_start);
	if (order_end < VM_NFREEORDER - 1)
		vm_phys_free_pages(m_end, order_end);
}

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->domain != domain)
			continue;
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
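
/*
 * For example, extracting page frame 5 from a free order-3 block at
 * frame 0 removes the 8-page block from its queue and returns the halves
 * not containing the page: frames 0-3 at order 2, frames 6-7 at order 1,
 * and frame 4 at order 0, leaving frame 5 alone and no longer free.
 */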

/*
 * Allocate a run of contiguous physical pages from the specified free list
 * table.
 */
static vm_page_t
vm_phys_alloc_queues_contig(
    struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
    u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_phys_seg *seg;
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	order = flsl(npages - 1);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
				/*
				 * Determine if the address range starting at pa
				 * is within the given range, satisfies the
				 * given alignment, and does not cross the given
				 * boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa < low || pa_end > high ||
				    !vm_addr_ok(pa, size, alignment, boundary))
					continue;

				/*
				 * Is the size of this allocation request
				 * no more than the largest block size?
				 */
				if (order < VM_NFREEORDER)
					goto done;

				/*
				 * Determine if the address range is valid
				 * (without overflow in pa_end calculation)
				 * and fits within the segment.
				 */
				seg = &vm_phys_segs[m_ret->segind];
				if (pa_end < pa || seg->end < pa_end)
					continue;

				/*
				 * Determine if a series of free oind-blocks
				 * starting here can satisfy the allocation
				 * request.
				 */
				do {
					pa += 1 <<
					    (PAGE_SHIFT + VM_NFREEORDER - 1);
					if (pa >= pa_end)
						goto done;
				} while (VM_NFREEORDER - 1 == seg->first_page[
				    atop(pa - seg->start)].order);

				/*
				 * Determine if an additional series of free
				 * blocks of diminishing size can help to
				 * satisfy the allocation request.
				 */
				for (;;) {
					m = &seg->first_page[
					    atop(pa - seg->start)];
					if (m->order == VM_NFREEORDER ||
					    pa + (2 << (PAGE_SHIFT + m->order))
					    <= pa_end)
						break;
					pa += 1 << (PAGE_SHIFT + m->order);
					if (pa >= pa_end)
						goto done;
				}
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*queues)[m->pool];
		oind = m->order;
		vm_freelist_rem(fl, m, oind);
		if (m->pool != VM_FREEPOOL_DEFAULT)
			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
	}
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << oind);
	if (npages < npages_end) {
		fl = (*queues)[VM_FREEPOOL_DEFAULT];
		vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
	}
	return (m_ret);
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_phys_seg *seg;
	struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	if (low >= high)
		return (NULL);
	queues = NULL;
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		/*
		 * If a previous segment led to a search using
		 * the same free lists as would this segment, then
		 * we've actually already searched within this
		 * too.  So skip it.
		 */
		if (seg->free_queues == queues)
			continue;
		queues = seg->free_queues;
		m_run = vm_phys_alloc_queues_contig(queues, npages,
		    low, high, alignment, boundary);
		if (m_run != NULL)
			break;
	}
	return (m_run);
}

/*
 * Return the index of the first unused slot, which may be the
 * terminating entry.
 */
static int
vm_phys_avail_count(void)
{
	int i;

	for (i = 0; phys_avail[i + 1]; i += 2)
		continue;
	if (i > PHYS_AVAIL_ENTRIES)
		panic("Improperly terminated phys_avail %d entries", i);

	return (i);
}

/*
 * Assert that a phys_avail entry is valid.
 */
static void
vm_phys_avail_check(int i)
{
	if (phys_avail[i] & PAGE_MASK)
		panic("Unaligned phys_avail[%d]: %#jx", i,
		    (intmax_t)phys_avail[i]);
	if (phys_avail[i + 1] & PAGE_MASK)
		panic("Unaligned phys_avail[%d + 1]: %#jx", i,
		    (intmax_t)phys_avail[i + 1]);
	if (phys_avail[i + 1] < phys_avail[i])
		panic("phys_avail[%d] start %#jx > end %#jx", i,
		    (intmax_t)phys_avail[i], (intmax_t)phys_avail[i + 1]);
}

/*
 * Return the index of an overlapping phys_avail entry or -1.
 */
#ifdef NUMA
static int
vm_phys_avail_find(vm_paddr_t pa)
{
	int i;

	for (i = 0; phys_avail[i + 1]; i += 2)
		if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
			return (i);
	return (-1);
}
#endif

/*
 * Return the index of the largest entry.
 */
int
vm_phys_avail_largest(void)
{
	vm_paddr_t sz, largesz;
	int largest;
	int i;

	largest = 0;
	largesz = 0;
	for (i = 0; phys_avail[i + 1]; i += 2) {
		sz = vm_phys_avail_size(i);
		if (sz > largesz) {
			largesz = sz;
			largest = i;
		}
	}

	return (largest);
}

vm_paddr_t
vm_phys_avail_size(int i)
{

	return (phys_avail[i + 1] - phys_avail[i]);
}

/*
 * Split an entry at the address 'pa'.  Return zero on success or errno.
 */
static int
vm_phys_avail_split(vm_paddr_t pa, int i)
{
	int cnt;

	vm_phys_avail_check(i);
	if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
		panic("vm_phys_avail_split: invalid address");
	cnt = vm_phys_avail_count();
	if (cnt >= PHYS_AVAIL_ENTRIES)
		return (ENOSPC);
	memmove(&phys_avail[i + 2], &phys_avail[i],
	    (cnt - i) * sizeof(phys_avail[0]));
	phys_avail[i + 1] = pa;
	phys_avail[i + 2] = pa;
	vm_phys_avail_check(i);
	vm_phys_avail_check(i + 2);

	return (0);
}
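
/*
 * For example (values hypothetical), splitting the entry
 * { 0x100000, 0x40000000 } at 0x200000 shifts the tail of the array
 * right by one pair and yields the two entries { 0x100000, 0x200000 }
 * and { 0x200000, 0x40000000 }.
 */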

/*
 * Check if a given physical address can be included as part of a crash dump.
 */
bool
vm_phys_is_dumpable(vm_paddr_t pa)
{
	vm_page_t m;
	int i;

	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
		return ((m->flags & PG_NODUMP) == 0);

	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (true);
	}
	return (false);
}

void
vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_seg *seg;

	if (vm_phys_early_nsegs == -1)
		panic("%s: called after initialization", __func__);
	if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
		panic("%s: ran out of early segments", __func__);

	seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
	seg->start = start;
	seg->end = end;
}

/*
 * This routine allocates NUMA node specific memory before the page
 * allocator is bootstrapped.
 */
vm_paddr_t
vm_phys_early_alloc(int domain, size_t alloc_size)
{
	int i, mem_index, biggestone;
	vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;

	KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
	    ("%s: invalid domain index %d", __func__, domain));

	/*
	 * Search the mem_affinity array for the biggest address
	 * range in the desired domain.  This is used to constrain
	 * the phys_avail selection below.
	 */
	biggestsize = 0;
	mem_index = 0;
	mem_start = 0;
	mem_end = -1;
#ifdef NUMA
	if (mem_affinity != NULL) {
		for (i = 0;; i++) {
			size = mem_affinity[i].end - mem_affinity[i].start;
			if (size == 0)
				break;
			if (domain != -1 && mem_affinity[i].domain != domain)
				continue;
			if (size > biggestsize) {
				mem_index = i;
				biggestsize = size;
			}
		}
		mem_start = mem_affinity[mem_index].start;
		mem_end = mem_affinity[mem_index].end;
	}
#endif

	/*
	 * Now find the biggest physical segment within the desired
	 * NUMA domain.
	 */
	biggestsize = 0;
	biggestone = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/* skip regions that are out of range */
		if (phys_avail[i + 1] - alloc_size < mem_start ||
		    phys_avail[i + 1] > mem_end)
			continue;
		size = vm_phys_avail_size(i);
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
	}
	alloc_size = round_page(alloc_size);

	/*
	 * Grab single pages from the front to reduce fragmentation.
	 */
	if (alloc_size == PAGE_SIZE) {
		pa = phys_avail[biggestone];
		phys_avail[biggestone] += PAGE_SIZE;
		vm_phys_avail_check(biggestone);
		return (pa);
	}

	/*
	 * Naturally align large allocations.
	 */
	align = phys_avail[biggestone + 1] & (alloc_size - 1);
	if (alloc_size + align > biggestsize)
		panic("cannot find a large enough size\n");
	if (align != 0 &&
	    vm_phys_avail_split(phys_avail[biggestone + 1] - align,
	    biggestone) != 0)
		/* Wasting memory. */
		phys_avail[biggestone + 1] -= align;

	phys_avail[biggestone + 1] -= alloc_size;
	vm_phys_avail_check(biggestone);
	pa = phys_avail[biggestone + 1];
	return (pa);
}

void
vm_phys_early_startup(void)
{
	struct vm_phys_seg *seg;
	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; i < vm_phys_early_nsegs; i++) {
		seg = &vm_phys_early_segs[i];
		vm_phys_add_seg(seg->start, seg->end);
	}
	vm_phys_early_nsegs = -1;

#ifdef NUMA
	/* Force phys_avail to be split by domain. */
	if (mem_affinity != NULL) {
		int idx;

		for (i = 0; mem_affinity[i].end != 0; i++) {
			idx = vm_phys_avail_find(mem_affinity[i].start);
			if (idx != -1 &&
			    phys_avail[idx] != mem_affinity[i].start)
				vm_phys_avail_split(mem_affinity[i].start, idx);
			idx = vm_phys_avail_find(mem_affinity[i].end);
			if (idx != -1 &&
			    phys_avail[idx] != mem_affinity[i].end)
				vm_phys_avail_split(mem_affinity[i].end, idx);
		}
	}
#endif
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif