xref: /freebsd/sys/vm/vm_phys.c (revision 718cf2ccb9956613756ab15d7a0e28f2c8e91cab)
/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <vm/vm_domain.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef VM_NUMA_ALLOC
struct mem_affinity *mem_affinity;
int *mem_locality;
#endif

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef VM_NUMA_ALLOC
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

/*
 * Default to first-touch + round-robin.
 */
static struct mtx vm_default_policy_mtx;
MTX_SYSINIT(vm_default_policy, &vm_default_policy_mtx, "default policy mutex",
    MTX_DEF);
#ifdef VM_NUMA_ALLOC
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
#else
/* Use round-robin so the domain policy code will only try once per allocation */
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_ROUND_ROBIN, 0);
#endif

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

static int
sysctl_vm_default_policy(SYSCTL_HANDLER_ARGS)
{
	char policy_name[32];
	int error;

	mtx_lock(&vm_default_policy_mtx);

	/* Map policy to output string */
	switch (vm_default_policy.p.policy) {
	case VM_POLICY_FIRST_TOUCH:
		strcpy(policy_name, "first-touch");
		break;
	case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
		strcpy(policy_name, "first-touch-rr");
		break;
	case VM_POLICY_ROUND_ROBIN:
	default:
		strcpy(policy_name, "rr");
		break;
	}
	mtx_unlock(&vm_default_policy_mtx);

	error = sysctl_handle_string(oidp, &policy_name[0],
	    sizeof(policy_name), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vm_default_policy_mtx);
	/* Set: match on the subset of policies that make sense as a default */
	if (strcmp("first-touch-rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
	} else if (strcmp("first-touch", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH, 0);
	} else if (strcmp("rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_ROUND_ROBIN, 0);
	} else {
		error = EINVAL;
		goto finish;
	}

	error = 0;
finish:
	mtx_unlock(&vm_default_policy_mtx);
	return (error);
}

SYSCTL_PROC(_vm, OID_AUTO, default_policy, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_vm_default_policy, "A",
    "Default policy (rr, first-touch, first-touch-rr)");
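
/*
 * Example usage (from userland); e.g., to select first-touch with a
 * round-robin fallback as the system-wide default:
 *
 *	# sysctl vm.default_policy=first-touch-rr
 */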

/*
 * Red-black tree helpers for vm fictitious range management.
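 *
 * A lookup key is encoded as a degenerate segment with end == 0; the
 * comparison routine below treats such a key as a request to find the
 * registered segment that contains the key's start address.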
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

#ifdef notyet
static __inline int
vm_rr_selectdomain(void)
{
#ifdef VM_NUMA_ALLOC
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}
#endif /* notyet */

/*
 * Initialise a VM domain iterator.
 *
 * Check the thread policy, then the proc policy,
 * then default to the system policy.
 *
 * Later on the various layers will have this logic
 * plumbed into them and the phys code will be explicitly
 * handed a VM domain policy to use.
 */
static void
vm_policy_iterator_init(struct vm_domain_iterator *vi)
{
#ifdef VM_NUMA_ALLOC
	struct vm_domain_policy lcl;
#endif

	vm_domain_iterator_init(vi);

#ifdef VM_NUMA_ALLOC
	/* Copy out the thread policy */
	vm_domain_policy_localcopy(&lcl, &curthread->td_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Thread policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}

	vm_domain_policy_localcopy(&lcl,
	    &curthread->td_proc->p_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Process policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}
#endif
	/* Use system default policy */
	vm_domain_iterator_set_policy(vi, &vm_default_policy);
}

static void
vm_policy_iterator_finish(struct vm_domain_iterator *vi)
{

	vm_domain_iterator_cleanup(vi);
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef VM_NUMA_ALLOC
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef VM_NUMA_ALLOC
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
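	/*
	 * vm_phys_segs[] is kept sorted by start address, so slide any
	 * existing segments that start at or beyond "end" one slot up,
	 * insertion-sort style, to make room for the new segment.
	 */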
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef VM_NUMA_ALLOC
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
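	 *
	 * For example, assuming a 16 MB low-memory boundary and a 4 GB
	 * DMA32 boundary, a segment [8 MB, 5 GB) is registered as the three
	 * segments [8 MB, 16 MB), [16 MB, 4 GB), and [4 GB, 5 GB).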
	 */
	paddr = start;
#ifdef	VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
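	 *
	 * For example, assuming amd64-style constants (DEFAULT == 0,
	 * DMA32 == 1, LOWMEM == 2) and segments populating only the DEFAULT
	 * and LOWMEM lists, the array goes from { 1, 0, 1 } to the running
	 * totals { 1, 1, 2 } and finally to the indices { 0, 0, 1 }; the
	 * unused DMA32 entry aliases an existing index but is never
	 * consulted, and vm_nfreelists becomes 2.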
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
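 *
 * For example, splitting an order-3 block to satisfy an order-0 request
 * returns the upper order-2, order-1, and order-0 buddies to the free
 * lists and leaves "m" as the requested order-0 block.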
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int domain, flind;
	struct vm_domain_iterator vi;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	struct vm_domain_iterator vi;
	int domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that extends both
		 * before and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Find the segment containing the given physical address.
 */
int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
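			/*
			 * The buddy of a 2^order-page block differs from it
			 * only in bit (PAGE_SHIFT + order) of its physical
			 * address, so flipping that bit yields the buddy's
			 * address; e.g., assuming 4 KB pages, the order-0
			 * buddy of the page at 0x5000 lies at 0x4000.
			 */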
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
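	 *
	 * For example, freeing 11 pages whose first page is four-page
	 * aligned (but not eight-page aligned) releases runs of 4, 4, 2,
	 * and 1 pages: the first loop frees blocks as large as the current
	 * address alignment permits, and the second loop frees the residue
	 * in decreasing power-of-two sizes.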
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
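	 *
	 * For example, assuming "m" lies in the lowest quarter of an
	 * order-2 free block, the upper order-1 half is returned first,
	 * then the upper order-0 half of what remains, leaving "m" by
	 * itself as an order-0 block.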
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_domain_iterator vi;
	struct vm_phys_seg *seg;
	int domain, segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (low >= high)
		return (NULL);
	vm_policy_iterator_init(&vi);
restartdom:
	if (vm_domain_iterator_run(&vi, &domain) != 0) {
		vm_policy_iterator_finish(&vi);
		return (NULL);
	}
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	if (m_run == NULL && !vm_domain_iterator_isdone(&vi))
		goto restartdom;
	vm_policy_iterator_finish(&vi);
	return (m_run);
}

/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
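				 *
				 * A run [pa, pa_end) stays within a single
				 * boundary-aligned window exactly when pa
				 * and pa_end - 1 agree in every bit at or
				 * above log2(boundary), i.e., when rounding
				 * their XOR down to a multiple of "boundary"
				 * yields zero.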
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif
1453