xref: /freebsd/sys/vm/vm_phys.c (revision 596596fec79f04e1f413850b44159224ff1fb8dc)
/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#if MAXMEMDOM > 1
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
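/*
 * The tree stores non-overlapping [start, end) ranges.  A lookup for a
 * single page is encoded as a degenerate key whose "end" is zero;
 * vm_phys_fictitious_cmp() detects this and falls back to the
 * point-in-range test below.
 */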
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

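/*
 * Select a memory domain in round-robin fashion using a per-thread index,
 * spreading successive allocations by the same thread across the available
 * domains.  With MAXMEMDOM == 1 this always returns domain 0.
 */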
static __inline int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	sbuf_putc(&sbuf, 0); /* nullterm */
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	sbuf_putc(&sbuf, 0); /* nullterm */
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
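/*
 * vm_phys_segs[] is kept sorted by ascending start address: the new segment
 * is appended and then shifted toward the front past any existing segments
 * that start at or beyond its end.
 */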
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}

/*
 * Add a physical memory segment.
 */
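/*
 * For example, on a configuration defining both VM_FREELIST_LOWMEM and
 * VM_FREELIST_DMA32, a segment spanning both boundaries is split into three:
 * [start, VM_LOWMEM_BOUNDARY), [VM_LOWMEM_BOUNDARY, VM_DMA32_BOUNDARY), and
 * [VM_DMA32_BOUNDARY, end), so that no segment straddles a free list
 * boundary.
 */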
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
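/*
 * To illustrate the flind computation below: if segments populate only the
 * VM_FREELIST_DEFAULT and VM_FREELIST_LOWMEM categories, those two entries
 * of vm_freelist_to_flind[] are first set to 1, the running total turns the
 * marks into cumulative counts, and the final decrement leaves the populated
 * lists with consecutive indices starting at 0.  The value left in an
 * unpopulated list's entry is not a valid index for that list.
 */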
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
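/*
 * For example, splitting an order 3 block down to order 0 returns the upper
 * halves to the free lists at successively smaller orders: m[4] at order 2,
 * m[2] at order 1, and m[1] at order 0, leaving the order 0 page "m" itself
 * for the caller.
 */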
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vm_domain *vmd;

	vm_cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	vmd = vm_phys_domain(m);
	vmd->vmd_page_count++;
	vmd->vmd_segs |= 1UL << m->segind;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	vm_phys_freecnt_adj(m, 1);
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
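/*
 * The search visits each memory domain in round-robin order and, within a
 * domain, each free list in increasing flind order; the default free list,
 * when populated, has flind 0 and is therefore tried first.
 */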
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int dom, domain, flind;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	int dom, domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

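/*
 * Register a range [start, end) of fictitious pages.  Under VM_PHYSSEG_DENSE
 * the portion of the range covered by vm_page_array is initialized in place;
 * any remainder (or, in the sparse case, the whole range) is backed by pages
 * allocated from M_FICT_PAGES and recorded in the fictitious range tree.  A
 * range extending both below and above vm_page_array cannot be registered
 * and fails with EINVAL.
 */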
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

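/*
 * Unregister a range [start, end) of fictitious pages, undoing
 * vm_phys_fictitious_reg_range(): pages served from vm_page_array are left
 * alone, while separately allocated pages and the tree node describing them
 * are freed.  Panics if the range was never registered.
 */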
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg == NULL || seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
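/*
 * Coalescing walks up the buddy structure: at each order, the buddy's
 * address is found by flipping a single address bit,
 * pa ^ (1 << (PAGE_SHIFT + order)).  For example, with 4KB pages, the
 * order 0 buddy of the page at 0x5000 is at 0x4000; if that buddy is also
 * free, the pair merges into an order 1 block at 0x4000 and the search
 * continues one order higher.
 */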
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
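/*
 * For example, freeing 13 pages starting at page frame 40 (a multiple of 8)
 * first frees an order 3 block of 8 pages; the next frame, 48, permits order
 * 4, which exceeds the 5 pages that remain, so the second loop finishes with
 * an order 2 block and a final order 0 page.
 */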
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
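/*
 * For example, if "m" is the third page of a free order 2 block, the whole
 * block is removed from the free lists, the order 1 half not containing "m"
 * is freed, then the order 0 buddy of "m" is freed, leaving "m" alone and
 * unfree.
 */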
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
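/*
 * The scan position (flind, pind, oind) is kept in static variables so that
 * successive calls resume where the previous call left off instead of
 * rescanning from the beginning.
 */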
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					vm_phys_freecnt_adj(m, -1);
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					vm_phys_freecnt_adj(m, 1);
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
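/*
 * The boundary test used below, ((pa ^ (pa + size - 1)) & ~(boundary - 1))
 * == 0, holds exactly when the first and last bytes of a candidate range
 * agree in every address bit at or above log2(boundary), i.e., when the
 * range lies within a single boundary-aligned window.  For example, with a
 * 64KB boundary, a 16KB range at 0x1c000 passes, while the same range at
 * 0x1e000 fails because it crosses 0x20000.
 */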
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int dom, domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	dom = 0;
restartdom:
	domain = vm_rr_selectdomain();
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = &vm_phys_free_queues[domain][flind][pind][0];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	if (++dom < vm_ndomains)
		goto restartdom;
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif
1254