/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef NUMA
struct mem_affinity *mem_affinity;
int *mem_locality;
#endif

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
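
/*
 * Illustrative note: a lookup probes the tree with a degenerate segment
 * whose "end" is zero, as vm_phys_fictitious_to_vm_page() does below:
 *
 *	tmp.start = pa;
 *	tmp.end = 0;
 *	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
 *
 * For such a probe the comparator answers containment within a registered
 * range rather than the strict ordering used when inserting ranges.
 */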

int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}
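
/*
 * Illustrative example: a caller that wants memory below 4G, preferably
 * from domain 1 but from any domain with such memory otherwise, could use
 *
 *	dom = vm_phys_domain_match(1, 0, (vm_paddr_t)1 << 32);
 *
 * A "prefer" of -1 means no preference; the lowest-numbered matching
 * domain is returned.
 */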

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Return the memory locality (affinity) between domains "f" and "t", or
 * -1 if no locality information is available.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}
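
/*
 * Illustrative note: the while loop above performs one step of insertion
 * sort, so vm_phys_segs[] stays ordered by starting address.  For example,
 * adding [2G, 3G) when the array holds [0, 1G) and [3G, 4G) shifts the
 * latter up one slot before the new segment is written.
 */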

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
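
/*
 * Illustrative example, assuming a configuration with free list boundaries
 * at 16M (VM_LOWMEM_BOUNDARY) and 4G (VM_DMA32_BOUNDARY): registering
 * physical memory [1M, 6G) creates the three segments [1M, 16M),
 * [16M, 4G), and [4G, 6G), so that every segment lies entirely within a
 * single free list.
 */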

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}
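
/*
 * Illustrative example: splitting an order-3 block (pages 0-7) to satisfy
 * an order-0 request returns the buddies of orders 2, 1, and 0 (pages 4-7,
 * pages 2-3, and page 1) to "fl", leaving the order-0 page 0 for the
 * caller.
 */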

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}
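
/*
 * Illustrative usage sketch (hypothetical caller): allocate an order-4 run
 * (64KB with 4KB pages) from domain 0's default pool while holding the
 * free page queue lock:
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	m = vm_phys_alloc_pages(0, VM_FREEPOOL_DEFAULT, 4);
 *	mtx_unlock(&vm_page_queue_free_mtx);
 *
 * A NULL return means that no free list in the domain holds a block of
 * order 4 or larger.
 */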

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}
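
/*
 * Illustrative usage sketch (hypothetical driver, hypothetical addresses):
 * register a 16MB device aperture so that managed fictitious pages back
 * it:
 *
 *	error = vm_phys_fictitious_reg_range(0xd0000000, 0xd1000000,
 *	    VM_MEMATTR_WRITE_COMBINING);
 *
 * On success, vm_phys_fictitious_to_vm_page() resolves any physical
 * address in [0xd0000000, 0xd1000000) to its fictitious vm_page.
 */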

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg == NULL || seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}
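
/*
 * Illustrative example of the buddy computation above (PAGE_SHIFT == 12):
 * freeing page 5 (pa 0x5000) at order 0 probes its order-0 buddy at
 * 0x5000 ^ 0x1000 = 0x4000.  If that page is free at order 0, the pair
 * merges into an order-1 block at 0x4000, whose order-1 buddy is
 * 0x4000 ^ 0x2000 = 0x6000, and so on until a buddy is absent or the
 * maximum order is reached.
 */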

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}
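
/*
 * Illustrative example: freeing 13 pages starting at page 3 frees page 3
 * at order 0, pages 4-7 at order 2, and pages 8-15 at order 3; each chunk
 * is the largest power-of-two-sized block that is naturally aligned at
 * its starting address and fits in the remaining run.
 */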

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->domain != domain)
			continue;
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
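
/*
 * Illustrative example: if "m" is page 5 inside a free order-3 block
 * beginning at page 0, the block is removed and the halves not containing
 * "m" are returned: pages 0-3 at order 2, pages 6-7 at order 1, and page 4
 * at order 0, leaving page 5 alone and no longer free.
 */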

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (low >= high)
		return (NULL);
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	return (m_run);
}

/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
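				 * Since "boundary" is a power of two, the
				 * run crosses a multiple of "boundary" iff
				 * "pa" and "pa_end - 1" differ in some bit
				 * at or above log2(boundary), i.e., iff
				 * rounddown2(pa ^ (pa_end - 1), boundary)
				 * is non-zero; a zero "boundary" masks off
				 * every bit and never rejects a run.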
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif