/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef VM_NUMA_ALLOC
struct mem_affinity *mem_affinity;
int *mem_locality;
#endif

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef VM_NUMA_ALLOC
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

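/*
 * Keys passed to RB_FIND() against this tree encode a single-page lookup as
 * a degenerate range with end == 0; vm_phys_fictitious_cmp() dispatches such
 * keys to the range check above.  For example (illustrative values), looking
 * up pa 0x5000 against a registered segment [0x4000, 0x8000) compares equal
 * and returns that segment.
 */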
static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

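/*
 * Walk the set bits of "mask", treating each bit index as an index into
 * vm_phys_segs[], and return TRUE if any selected segment overlaps the
 * address range [low, high).  ffsl() returns the lowest set bit (counting
 * from 1), so each iteration clears one bit; e.g., a mask of 0x5 visits
 * segments 0 and 2 and then terminates.
 */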
boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef VM_NUMA_ALLOC
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef VM_NUMA_ALLOC
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
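 *
 * Segments are kept in vm_phys_segs[] sorted by ascending start address;
 * the loop below shifts existing entries toward higher indices until the
 * new segment's slot is found, insertion-sort style.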
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef VM_NUMA_ALLOC
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
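 *
 * If the segment spans a free list boundary, it is split so that each
 * resulting segment lies entirely within one free list.  For example
 * (illustrative addresses), with VM_LOWMEM_BOUNDARY at 16M and
 * VM_DMA32_BOUNDARY at 4G, a segment [8M, 5G) would be recorded as three
 * segments: [8M, 16M), [16M, 4G), and [4G, 5G).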
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;
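
	/*
	 * For example (illustrative): if only lists 0 and 2 were flagged
	 * above, the flags {1, 0, 1} become running totals {1, 1, 2} and,
	 * after the decrement, {0, 0, 1}: the populated lists receive the
	 * dense indices 0 and 1, while a list with no flagged predecessor
	 * would map to -1, which vm_phys_alloc_freelist_pages() treats as
	 * "not present".
	 */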

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
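 *
 * Successively halve the block of order "oind" until order "order" is
 * reached, freeing the upper buddy at each step.  For example, splitting an
 * order-3 block (8 pages) down to order 0 frees its order-2, order-1, and
 * order-0 upper halves and leaves only the first page allocated.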
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

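/*
 * Register a range of "fictitious" physical addresses, e.g. device memory
 * that is not managed by the page allocator, so that it can later be looked
 * up with vm_phys_fictitious_to_vm_page().  An illustrative (hypothetical)
 * use by a driver exposing a 2 MB MMIO window:
 *
 *	error = vm_phys_fictitious_reg_range(0xd0000000, 0xd0200000,
 *	    VM_MEMATTR_UNCACHEABLE);
 *
 * When possible the backing vm_page structures are taken from
 * vm_page_array; otherwise they are allocated and the range is entered into
 * the fictitious-range tree.
 */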
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg == NULL || seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
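 *
 * Repeatedly coalesce the freed block with its buddy while the buddy is
 * also free, doubling the order each time.  A block's buddy differs from it
 * only in the bit selected by the XOR below; e.g., with 4K pages the
 * order-0 buddy of the page at 0x2000 is at 0x3000, and merging them yields
 * an order-1 block at 0x2000.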
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
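 *
 * The run is carved into power-of-two chunks whose size is limited by the
 * alignment of the current physical address.  For example (illustrative),
 * freeing 13 pages starting at page frame 6 frees an order-1 block at frame
 * 6, an order-3 block at frame 8, and then an order-1 and an order-0 block
 * at frames 16 and 18.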
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->domain != domain)
			continue;
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
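	 * For example (illustrative), extracting the third page of a free
	 * order-2 block returns one order-1 half and one order-0 page to
	 * the free lists and leaves only "m" removed.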
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (low >= high)
		return (NULL);
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	return (m_run);
}

/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.  The
				 * boundary test works because pa ^ (pa_end -
				 * 1) has a bit set at or above
				 * log2(boundary) if and only if the first
				 * and last addresses of the run fall in
				 * different boundary-sized blocks;
				 * rounddown2() masks off the bits below that
				 * position.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/*
	 * Return excess pages to the free lists.  For example (illustrative),
	 * a request for 3 pages satisfied from an order-2 block rounds
	 * npages_end up to 4, so the single trailing page is freed again.
	 */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif