xref: /freebsd/sys/vm/vm_phys.c (revision bc8794a12ac54e575378ad829f5479bbfc55d16c)
111752d88SAlan Cox /*-
211752d88SAlan Cox  * Copyright (c) 2002-2006 Rice University
311752d88SAlan Cox  * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
411752d88SAlan Cox  * All rights reserved.
511752d88SAlan Cox  *
611752d88SAlan Cox  * This software was developed for the FreeBSD Project by Alan L. Cox,
711752d88SAlan Cox  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
811752d88SAlan Cox  *
911752d88SAlan Cox  * Redistribution and use in source and binary forms, with or without
1011752d88SAlan Cox  * modification, are permitted provided that the following conditions
1111752d88SAlan Cox  * are met:
1211752d88SAlan Cox  * 1. Redistributions of source code must retain the above copyright
1311752d88SAlan Cox  *    notice, this list of conditions and the following disclaimer.
1411752d88SAlan Cox  * 2. Redistributions in binary form must reproduce the above copyright
1511752d88SAlan Cox  *    notice, this list of conditions and the following disclaimer in the
1611752d88SAlan Cox  *    documentation and/or other materials provided with the distribution.
1711752d88SAlan Cox  *
1811752d88SAlan Cox  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1911752d88SAlan Cox  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2011752d88SAlan Cox  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2111752d88SAlan Cox  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
2211752d88SAlan Cox  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
2311752d88SAlan Cox  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
2411752d88SAlan Cox  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
2511752d88SAlan Cox  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
2611752d88SAlan Cox  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2711752d88SAlan Cox  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
2811752d88SAlan Cox  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2911752d88SAlan Cox  * POSSIBILITY OF SUCH DAMAGE.
3011752d88SAlan Cox  */
3111752d88SAlan Cox 
3211752d88SAlan Cox #include <sys/cdefs.h>
3311752d88SAlan Cox __FBSDID("$FreeBSD$");
3411752d88SAlan Cox 
3511752d88SAlan Cox #include "opt_ddb.h"
3611752d88SAlan Cox 
3711752d88SAlan Cox #include <sys/param.h>
3811752d88SAlan Cox #include <sys/systm.h>
3911752d88SAlan Cox #include <sys/lock.h>
4011752d88SAlan Cox #include <sys/kernel.h>
4111752d88SAlan Cox #include <sys/malloc.h>
4211752d88SAlan Cox #include <sys/mutex.h>
4311752d88SAlan Cox #include <sys/queue.h>
4411752d88SAlan Cox #include <sys/sbuf.h>
4511752d88SAlan Cox #include <sys/sysctl.h>
4611752d88SAlan Cox #include <sys/vmmeter.h>
477bfda801SAlan Cox #include <sys/vnode.h>
4811752d88SAlan Cox 
4911752d88SAlan Cox #include <ddb/ddb.h>
5011752d88SAlan Cox 
5111752d88SAlan Cox #include <vm/vm.h>
5211752d88SAlan Cox #include <vm/vm_param.h>
5311752d88SAlan Cox #include <vm/vm_kern.h>
5411752d88SAlan Cox #include <vm/vm_object.h>
5511752d88SAlan Cox #include <vm/vm_page.h>
5611752d88SAlan Cox #include <vm/vm_phys.h>
5711752d88SAlan Cox 
5811752d88SAlan Cox struct vm_freelist {
5911752d88SAlan Cox 	struct pglist pl;
6011752d88SAlan Cox 	int lcnt;
6111752d88SAlan Cox };
6211752d88SAlan Cox 
6311752d88SAlan Cox struct vm_phys_seg {
6411752d88SAlan Cox 	vm_paddr_t	start;
6511752d88SAlan Cox 	vm_paddr_t	end;
6611752d88SAlan Cox 	vm_page_t	first_page;
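	/* Per-pool, per-order free queues of this segment's free list. */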
6711752d88SAlan Cox 	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
6811752d88SAlan Cox };
6911752d88SAlan Cox 
7011752d88SAlan Cox static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
7111752d88SAlan Cox 
7211752d88SAlan Cox static int vm_phys_nsegs;
7311752d88SAlan Cox 
7411752d88SAlan Cox static struct vm_freelist
7511752d88SAlan Cox     vm_phys_free_queues[VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
7611752d88SAlan Cox 
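/* Number of free lists in use; vm_phys_init() raises this as needed. */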
7711752d88SAlan Cox static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;
7811752d88SAlan Cox 
7911752d88SAlan Cox static int cnt_prezero;
8011752d88SAlan Cox SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
8111752d88SAlan Cox     &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");
8211752d88SAlan Cox 
8311752d88SAlan Cox static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
8411752d88SAlan Cox SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
8511752d88SAlan Cox     NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");
8611752d88SAlan Cox 
8711752d88SAlan Cox static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
8811752d88SAlan Cox SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
8911752d88SAlan Cox     NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");
9011752d88SAlan Cox 
9111752d88SAlan Cox static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
9211752d88SAlan Cox static int vm_phys_paddr_to_segind(vm_paddr_t pa);
9311752d88SAlan Cox static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
9411752d88SAlan Cox     int order);
9511752d88SAlan Cox 
9611752d88SAlan Cox /*
9711752d88SAlan Cox  * Outputs the state of the physical memory allocator, specifically,
9811752d88SAlan Cox  * the amount of physical memory in each free list.
9911752d88SAlan Cox  */
10011752d88SAlan Cox static int
10111752d88SAlan Cox sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
10211752d88SAlan Cox {
10311752d88SAlan Cox 	struct sbuf sbuf;
10411752d88SAlan Cox 	struct vm_freelist *fl;
10511752d88SAlan Cox 	char *cbuf;
10611752d88SAlan Cox 	const int cbufsize = vm_nfreelists*(VM_NFREEORDER + 1)*81;
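	/* Budget roughly one 81-byte line per order, plus a header, per list. */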
10711752d88SAlan Cox 	int error, flind, oind, pind;
10811752d88SAlan Cox 
10911752d88SAlan Cox 	cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
11011752d88SAlan Cox 	sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
11111752d88SAlan Cox 	for (flind = 0; flind < vm_nfreelists; flind++) {
11211752d88SAlan Cox 		sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
11311752d88SAlan Cox 		    "\n  ORDER (SIZE)  |  NUMBER"
11411752d88SAlan Cox 		    "\n              ", flind);
11511752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++)
11611752d88SAlan Cox 			sbuf_printf(&sbuf, "  |  POOL %d", pind);
11711752d88SAlan Cox 		sbuf_printf(&sbuf, "\n--            ");
11811752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++)
11911752d88SAlan Cox 			sbuf_printf(&sbuf, "-- --      ");
12011752d88SAlan Cox 		sbuf_printf(&sbuf, "--\n");
12111752d88SAlan Cox 		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
12211752d88SAlan Cox 			sbuf_printf(&sbuf, "  %2.2d (%6.6dK)", oind,
12311752d88SAlan Cox 			    1 << (PAGE_SHIFT - 10 + oind));
12411752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
12511752d88SAlan Cox 				fl = vm_phys_free_queues[flind][pind];
12611752d88SAlan Cox 				sbuf_printf(&sbuf, "  |  %6.6d", fl[oind].lcnt);
12711752d88SAlan Cox 			}
12811752d88SAlan Cox 			sbuf_printf(&sbuf, "\n");
12911752d88SAlan Cox 		}
13011752d88SAlan Cox 	}
13111752d88SAlan Cox 	sbuf_finish(&sbuf);
13211752d88SAlan Cox 	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
13311752d88SAlan Cox 	sbuf_delete(&sbuf);
13411752d88SAlan Cox 	free(cbuf, M_TEMP);
13511752d88SAlan Cox 	return (error);
13611752d88SAlan Cox }
13711752d88SAlan Cox 
13811752d88SAlan Cox /*
13911752d88SAlan Cox  * Outputs the set of physical memory segments.
14011752d88SAlan Cox  */
14111752d88SAlan Cox static int
14211752d88SAlan Cox sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
14311752d88SAlan Cox {
14411752d88SAlan Cox 	struct sbuf sbuf;
14511752d88SAlan Cox 	struct vm_phys_seg *seg;
14611752d88SAlan Cox 	char *cbuf;
14711752d88SAlan Cox 	const int cbufsize = VM_PHYSSEG_MAX*(VM_NFREEORDER + 1)*81;
14811752d88SAlan Cox 	int error, segind;
14911752d88SAlan Cox 
15011752d88SAlan Cox 	cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
15111752d88SAlan Cox 	sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
15211752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
15311752d88SAlan Cox 		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
15411752d88SAlan Cox 		seg = &vm_phys_segs[segind];
15511752d88SAlan Cox 		sbuf_printf(&sbuf, "start:     %#jx\n",
15611752d88SAlan Cox 		    (uintmax_t)seg->start);
15711752d88SAlan Cox 		sbuf_printf(&sbuf, "end:       %#jx\n",
15811752d88SAlan Cox 		    (uintmax_t)seg->end);
15911752d88SAlan Cox 		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
16011752d88SAlan Cox 	}
16111752d88SAlan Cox 	sbuf_finish(&sbuf);
16211752d88SAlan Cox 	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
16311752d88SAlan Cox 	sbuf_delete(&sbuf);
16411752d88SAlan Cox 	free(cbuf, M_TEMP);
16511752d88SAlan Cox 	return (error);
16611752d88SAlan Cox }
16711752d88SAlan Cox 
16811752d88SAlan Cox /*
16911752d88SAlan Cox  * Create a physical memory segment.
17011752d88SAlan Cox  */
17111752d88SAlan Cox static void
17211752d88SAlan Cox vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
17311752d88SAlan Cox {
17411752d88SAlan Cox 	struct vm_phys_seg *seg;
17511752d88SAlan Cox #ifdef VM_PHYSSEG_SPARSE
17611752d88SAlan Cox 	long pages;
17711752d88SAlan Cox 	int segind;
17811752d88SAlan Cox 
17911752d88SAlan Cox 	pages = 0;
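	/*
	 * With a sparse vm_page_array, the new segment's pages are
	 * placed after those of the existing segments, so count the
	 * pages already assigned in order to locate first_page.
	 */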
18011752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
18111752d88SAlan Cox 		seg = &vm_phys_segs[segind];
18211752d88SAlan Cox 		pages += atop(seg->end - seg->start);
18311752d88SAlan Cox 	}
18411752d88SAlan Cox #endif
18511752d88SAlan Cox 	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
18611752d88SAlan Cox 	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
18711752d88SAlan Cox 	seg = &vm_phys_segs[vm_phys_nsegs++];
18811752d88SAlan Cox 	seg->start = start;
18911752d88SAlan Cox 	seg->end = end;
19011752d88SAlan Cox #ifdef VM_PHYSSEG_SPARSE
19111752d88SAlan Cox 	seg->first_page = &vm_page_array[pages];
19211752d88SAlan Cox #else
19311752d88SAlan Cox 	seg->first_page = PHYS_TO_VM_PAGE(start);
19411752d88SAlan Cox #endif
19511752d88SAlan Cox 	seg->free_queues = &vm_phys_free_queues[flind];
19611752d88SAlan Cox }
19711752d88SAlan Cox 
19811752d88SAlan Cox /*
19911752d88SAlan Cox  * Initialize the physical memory allocator.
20011752d88SAlan Cox  */
20111752d88SAlan Cox void
20211752d88SAlan Cox vm_phys_init(void)
20311752d88SAlan Cox {
20411752d88SAlan Cox 	struct vm_freelist *fl;
20511752d88SAlan Cox 	int flind, i, oind, pind;
20611752d88SAlan Cox 
20711752d88SAlan Cox 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
20811752d88SAlan Cox #ifdef	VM_FREELIST_ISADMA
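		/*
		 * 16777216 (16 MB) is the limit of ISA DMA addressing;
		 * memory below it goes on a separate free list so that
		 * it remains available for ISA devices.
		 */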
20911752d88SAlan Cox 		if (phys_avail[i] < 16777216) {
21011752d88SAlan Cox 			if (phys_avail[i + 1] > 16777216) {
21111752d88SAlan Cox 				vm_phys_create_seg(phys_avail[i], 16777216,
21211752d88SAlan Cox 				    VM_FREELIST_ISADMA);
21311752d88SAlan Cox 				vm_phys_create_seg(16777216, phys_avail[i + 1],
21411752d88SAlan Cox 				    VM_FREELIST_DEFAULT);
21511752d88SAlan Cox 			} else {
21611752d88SAlan Cox 				vm_phys_create_seg(phys_avail[i],
21711752d88SAlan Cox 				    phys_avail[i + 1], VM_FREELIST_ISADMA);
21811752d88SAlan Cox 			}
21911752d88SAlan Cox 			if (VM_FREELIST_ISADMA >= vm_nfreelists)
22011752d88SAlan Cox 				vm_nfreelists = VM_FREELIST_ISADMA + 1;
22111752d88SAlan Cox 		} else
22211752d88SAlan Cox #endif
22311752d88SAlan Cox #ifdef	VM_FREELIST_HIGHMEM
22411752d88SAlan Cox 		if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
22511752d88SAlan Cox 			if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
22611752d88SAlan Cox 				vm_phys_create_seg(phys_avail[i],
22711752d88SAlan Cox 				    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
22811752d88SAlan Cox 				vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
22911752d88SAlan Cox 				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
23011752d88SAlan Cox 			} else {
23111752d88SAlan Cox 				vm_phys_create_seg(phys_avail[i],
23211752d88SAlan Cox 				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
23311752d88SAlan Cox 			}
23411752d88SAlan Cox 			if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
23511752d88SAlan Cox 				vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
23611752d88SAlan Cox 		} else
23711752d88SAlan Cox #endif
23811752d88SAlan Cox 		vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
23911752d88SAlan Cox 		    VM_FREELIST_DEFAULT);
24011752d88SAlan Cox 	}
24111752d88SAlan Cox 	for (flind = 0; flind < vm_nfreelists; flind++) {
24211752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
24311752d88SAlan Cox 			fl = vm_phys_free_queues[flind][pind];
24411752d88SAlan Cox 			for (oind = 0; oind < VM_NFREEORDER; oind++)
24511752d88SAlan Cox 				TAILQ_INIT(&fl[oind].pl);
24611752d88SAlan Cox 		}
24711752d88SAlan Cox 	}
24811752d88SAlan Cox }
24911752d88SAlan Cox 
25011752d88SAlan Cox /*
25111752d88SAlan Cox  * Split a contiguous, power of two-sized set of physical pages.
25211752d88SAlan Cox  */
25311752d88SAlan Cox static __inline void
25411752d88SAlan Cox vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
25511752d88SAlan Cox {
25611752d88SAlan Cox 	vm_page_t m_buddy;
25711752d88SAlan Cox 
25811752d88SAlan Cox 	while (oind > order) {
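	/*
	 * Repeatedly halve the block until it is of the requested
	 * order, returning the upper buddy (1 << oind pages above "m")
	 * to the free list "fl" at each step.
	 */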
25911752d88SAlan Cox 		oind--;
26011752d88SAlan Cox 		m_buddy = &m[1 << oind];
26111752d88SAlan Cox 		KASSERT(m_buddy->order == VM_NFREEORDER,
26211752d88SAlan Cox 		    ("vm_phys_split_pages: page %p has unexpected order %d",
26311752d88SAlan Cox 		    m_buddy, m_buddy->order));
26411752d88SAlan Cox 		m_buddy->order = oind;
26511752d88SAlan Cox 		TAILQ_INSERT_HEAD(&fl[oind].pl, m_buddy, pageq);
26611752d88SAlan Cox 		fl[oind].lcnt++;
26711752d88SAlan Cox         }
26711752d88SAlan Cox 	}
26911752d88SAlan Cox 
27011752d88SAlan Cox /*
27111752d88SAlan Cox  * Initialize a physical page and add it to the free lists.
27211752d88SAlan Cox  */
27311752d88SAlan Cox void
27411752d88SAlan Cox vm_phys_add_page(vm_paddr_t pa)
27511752d88SAlan Cox {
27611752d88SAlan Cox 	vm_page_t m;
27711752d88SAlan Cox 
27811752d88SAlan Cox 	cnt.v_page_count++;
27911752d88SAlan Cox 	m = vm_phys_paddr_to_vm_page(pa);
28011752d88SAlan Cox 	m->phys_addr = pa;
28111752d88SAlan Cox 	m->segind = vm_phys_paddr_to_segind(pa);
28211752d88SAlan Cox 	m->flags = PG_FREE;
28311752d88SAlan Cox 	KASSERT(m->order == VM_NFREEORDER,
28411752d88SAlan Cox 	    ("vm_phys_add_page: page %p has unexpected order %d",
28511752d88SAlan Cox 	    m, m->order));
28611752d88SAlan Cox 	m->pool = VM_FREEPOOL_DEFAULT;
28711752d88SAlan Cox 	pmap_page_init(m);
2888941dc44SAlan Cox 	mtx_lock(&vm_page_queue_free_mtx);
2897bfda801SAlan Cox 	cnt.v_free_count++;
29011752d88SAlan Cox 	vm_phys_free_pages(m, 0);
2918941dc44SAlan Cox 	mtx_unlock(&vm_page_queue_free_mtx);
29211752d88SAlan Cox }
29311752d88SAlan Cox 
29411752d88SAlan Cox /*
29511752d88SAlan Cox  * Allocate a contiguous, power of two-sized set of physical pages
29611752d88SAlan Cox  * from the free lists.
2978941dc44SAlan Cox  *
2988941dc44SAlan Cox  * The free page queues must be locked.
29911752d88SAlan Cox  */
30011752d88SAlan Cox vm_page_t
30111752d88SAlan Cox vm_phys_alloc_pages(int pool, int order)
30211752d88SAlan Cox {
30311752d88SAlan Cox 	struct vm_freelist *fl;
30411752d88SAlan Cox 	struct vm_freelist *alt;
30511752d88SAlan Cox 	int flind, oind, pind;
30611752d88SAlan Cox 	vm_page_t m;
30711752d88SAlan Cox 
30811752d88SAlan Cox 	KASSERT(pool < VM_NFREEPOOL,
3098941dc44SAlan Cox 	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
31011752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
3118941dc44SAlan Cox 	    ("vm_phys_alloc_pages: order %d is out of range", order));
31211752d88SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
31311752d88SAlan Cox 	for (flind = 0; flind < vm_nfreelists; flind++) {
31411752d88SAlan Cox 		fl = vm_phys_free_queues[flind][pool];
31511752d88SAlan Cox 		for (oind = order; oind < VM_NFREEORDER; oind++) {
31611752d88SAlan Cox 			m = TAILQ_FIRST(&fl[oind].pl);
31711752d88SAlan Cox 			if (m != NULL) {
31811752d88SAlan Cox 				TAILQ_REMOVE(&fl[oind].pl, m, pageq);
31911752d88SAlan Cox 				fl[oind].lcnt--;
32011752d88SAlan Cox 				m->order = VM_NFREEORDER;
32111752d88SAlan Cox 				vm_phys_split_pages(m, oind, fl, order);
32211752d88SAlan Cox 				return (m);
32311752d88SAlan Cox 			}
32411752d88SAlan Cox 		}
32511752d88SAlan Cox 
32611752d88SAlan Cox 		/*
32711752d88SAlan Cox 		 * The given pool was empty.  Find the largest
32811752d88SAlan Cox 		 * contiguous, power-of-two-sized set of pages in any
32911752d88SAlan Cox 		 * pool.  Transfer these pages to the given pool, and
33011752d88SAlan Cox 		 * use them to satisfy the allocation.
33111752d88SAlan Cox 		 */
33211752d88SAlan Cox 		for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
33311752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
33411752d88SAlan Cox 				alt = vm_phys_free_queues[flind][pind];
33511752d88SAlan Cox 				m = TAILQ_FIRST(&alt[oind].pl);
33611752d88SAlan Cox 				if (m != NULL) {
33711752d88SAlan Cox 					TAILQ_REMOVE(&alt[oind].pl, m, pageq);
33811752d88SAlan Cox 					alt[oind].lcnt--;
33911752d88SAlan Cox 					m->order = VM_NFREEORDER;
34011752d88SAlan Cox 					vm_phys_set_pool(pool, m, oind);
34111752d88SAlan Cox 					vm_phys_split_pages(m, oind, fl, order);
34211752d88SAlan Cox 					return (m);
34311752d88SAlan Cox 				}
34411752d88SAlan Cox 			}
34511752d88SAlan Cox 		}
34611752d88SAlan Cox 	}
34711752d88SAlan Cox 	return (NULL);
34811752d88SAlan Cox }
34911752d88SAlan Cox 
35011752d88SAlan Cox /*
35111752d88SAlan Cox  * Allocate physical memory from phys_avail[].
35211752d88SAlan Cox  */
35311752d88SAlan Cox vm_paddr_t
35411752d88SAlan Cox vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment)
35511752d88SAlan Cox {
35611752d88SAlan Cox 	vm_paddr_t pa;
35711752d88SAlan Cox 	int i;
35811752d88SAlan Cox 
35911752d88SAlan Cox 	size = round_page(size);
36011752d88SAlan Cox 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
36111752d88SAlan Cox 		if (phys_avail[i + 1] - phys_avail[i] < size)
36211752d88SAlan Cox 			continue;
36311752d88SAlan Cox 		pa = phys_avail[i];
36411752d88SAlan Cox 		phys_avail[i] += size;
36511752d88SAlan Cox 		return (pa);
36611752d88SAlan Cox 	}
36711752d88SAlan Cox 	panic("vm_phys_bootstrap_alloc");
36811752d88SAlan Cox }
36911752d88SAlan Cox 
37011752d88SAlan Cox /*
37111752d88SAlan Cox  * Find the vm_page corresponding to the given physical address.
37211752d88SAlan Cox  */
37311752d88SAlan Cox vm_page_t
37411752d88SAlan Cox vm_phys_paddr_to_vm_page(vm_paddr_t pa)
37511752d88SAlan Cox {
37611752d88SAlan Cox 	struct vm_phys_seg *seg;
37711752d88SAlan Cox 	int segind;
37811752d88SAlan Cox 
37911752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
38011752d88SAlan Cox 		seg = &vm_phys_segs[segind];
38111752d88SAlan Cox 		if (pa >= seg->start && pa < seg->end)
38211752d88SAlan Cox 			return (&seg->first_page[atop(pa - seg->start)]);
38311752d88SAlan Cox 	}
38411752d88SAlan Cox 	panic("vm_phys_paddr_to_vm_page: paddr %#jx is not in any segment",
38511752d88SAlan Cox 	    (uintmax_t)pa);
38611752d88SAlan Cox }
38711752d88SAlan Cox 
38811752d88SAlan Cox /*
38911752d88SAlan Cox  * Find the segment containing the given physical address.
39011752d88SAlan Cox  */
39111752d88SAlan Cox static int
39211752d88SAlan Cox vm_phys_paddr_to_segind(vm_paddr_t pa)
39311752d88SAlan Cox {
39411752d88SAlan Cox 	struct vm_phys_seg *seg;
39511752d88SAlan Cox 	int segind;
39611752d88SAlan Cox 
39711752d88SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
39811752d88SAlan Cox 		seg = &vm_phys_segs[segind];
39911752d88SAlan Cox 		if (pa >= seg->start && pa < seg->end)
40011752d88SAlan Cox 			return (segind);
40111752d88SAlan Cox 	}
40211752d88SAlan Cox 	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment" ,
40311752d88SAlan Cox 	    (uintmax_t)pa);
40411752d88SAlan Cox }
40511752d88SAlan Cox 
40611752d88SAlan Cox /*
40711752d88SAlan Cox  * Free a contiguous, power of two-sized set of physical pages.
4088941dc44SAlan Cox  *
4098941dc44SAlan Cox  * The free page queues must be locked.
41011752d88SAlan Cox  */
41111752d88SAlan Cox void
41211752d88SAlan Cox vm_phys_free_pages(vm_page_t m, int order)
41311752d88SAlan Cox {
41411752d88SAlan Cox 	struct vm_freelist *fl;
41511752d88SAlan Cox 	struct vm_phys_seg *seg;
41611752d88SAlan Cox 	vm_paddr_t pa, pa_buddy;
41711752d88SAlan Cox 	vm_page_t m_buddy;
41811752d88SAlan Cox 
41911752d88SAlan Cox 	KASSERT(m->order == VM_NFREEORDER,
4208941dc44SAlan Cox 	    ("vm_phys_free_pages: page %p has unexpected order %d",
42111752d88SAlan Cox 	    m, m->order));
42211752d88SAlan Cox 	KASSERT(m->pool < VM_NFREEPOOL,
4238941dc44SAlan Cox 	    ("vm_phys_free_pages: page %p has unexpected pool %d",
42411752d88SAlan Cox 	    m, m->pool));
42511752d88SAlan Cox 	KASSERT(order < VM_NFREEORDER,
4268941dc44SAlan Cox 	    ("vm_phys_free_pages: order %d is out of range", order));
42711752d88SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
42811752d88SAlan Cox 	pa = VM_PAGE_TO_PHYS(m);
42911752d88SAlan Cox 	seg = &vm_phys_segs[m->segind];
43011752d88SAlan Cox 	while (order < VM_NFREEORDER - 1) {
43111752d88SAlan Cox 		pa_buddy = pa ^ (1 << (PAGE_SHIFT + order));
43211752d88SAlan Cox 		if (pa_buddy < seg->start ||
43311752d88SAlan Cox 		    pa_buddy >= seg->end)
43411752d88SAlan Cox 			break;
43511752d88SAlan Cox 		m_buddy = &seg->first_page[atop(pa_buddy - seg->start)];
43611752d88SAlan Cox 		if (m_buddy->order != order)
43711752d88SAlan Cox 			break;
43811752d88SAlan Cox 		fl = (*seg->free_queues)[m_buddy->pool];
43911752d88SAlan Cox 		TAILQ_REMOVE(&fl[m_buddy->order].pl, m_buddy, pageq);
44011752d88SAlan Cox 		fl[m_buddy->order].lcnt--;
44111752d88SAlan Cox 		m_buddy->order = VM_NFREEORDER;
44211752d88SAlan Cox 		if (m_buddy->pool != m->pool)
44311752d88SAlan Cox 			vm_phys_set_pool(m->pool, m_buddy, order);
44411752d88SAlan Cox 		order++;
44511752d88SAlan Cox 		pa &= ~((1 << (PAGE_SHIFT + order)) - 1);
44611752d88SAlan Cox 		m = &seg->first_page[atop(pa - seg->start)];
44711752d88SAlan Cox 	}
44811752d88SAlan Cox 	m->order = order;
44911752d88SAlan Cox 	fl = (*seg->free_queues)[m->pool];
45011752d88SAlan Cox 	TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
45111752d88SAlan Cox 	fl[order].lcnt++;
45211752d88SAlan Cox }
45311752d88SAlan Cox 
45411752d88SAlan Cox /*
45511752d88SAlan Cox  * Set the pool for a contiguous, power of two-sized set of physical pages.
45611752d88SAlan Cox  */
4577bfda801SAlan Cox void
45811752d88SAlan Cox vm_phys_set_pool(int pool, vm_page_t m, int order)
45911752d88SAlan Cox {
46011752d88SAlan Cox 	vm_page_t m_tmp;
46111752d88SAlan Cox 
46211752d88SAlan Cox 	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
46311752d88SAlan Cox 		m_tmp->pool = pool;
46411752d88SAlan Cox }
46511752d88SAlan Cox 
46611752d88SAlan Cox /*
4677bfda801SAlan Cox  * Remove the given physical page "m" from the free lists.
4687bfda801SAlan Cox  *
4697bfda801SAlan Cox  * The free page queues must be locked.
4707bfda801SAlan Cox  */
4717bfda801SAlan Cox void
4727bfda801SAlan Cox vm_phys_unfree_page(vm_page_t m)
4737bfda801SAlan Cox {
4747bfda801SAlan Cox 	struct vm_freelist *fl;
4757bfda801SAlan Cox 	struct vm_phys_seg *seg;
4767bfda801SAlan Cox 	vm_paddr_t pa, pa_half;
4777bfda801SAlan Cox 	vm_page_t m_set, m_tmp;
4787bfda801SAlan Cox 	int order;
4797bfda801SAlan Cox 
4807bfda801SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
4817bfda801SAlan Cox 
4827bfda801SAlan Cox 	/*
4837bfda801SAlan Cox 	 * First, find the contiguous, power of two-sized set of free
4847bfda801SAlan Cox 	 * physical pages containing the given physical page "m" and
4857bfda801SAlan Cox 	 * assign it to "m_set".
4867bfda801SAlan Cox 	 */
4877bfda801SAlan Cox 	seg = &vm_phys_segs[m->segind];
4887bfda801SAlan Cox 	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
489bc8794a1SAlan Cox 	    order < VM_NFREEORDER - 1; ) {
4907bfda801SAlan Cox 		order++;
4917bfda801SAlan Cox 		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
4927bfda801SAlan Cox 		KASSERT(pa >= seg->start && pa < seg->end,
4937bfda801SAlan Cox 		    ("vm_phys_unfree_page: paddr %#jx is not within segment %p",
4947bfda801SAlan Cox 		    (uintmax_t)pa, seg));
4957bfda801SAlan Cox 		m_set = &seg->first_page[atop(pa - seg->start)];
4967bfda801SAlan Cox 	}
4977bfda801SAlan Cox 	KASSERT(m_set->order >= order, ("vm_phys_unfree_page: page %p's order"
4987bfda801SAlan Cox 	    " (%d) is less than expected (%d)", m_set, m_set->order, order));
4997bfda801SAlan Cox 	KASSERT(m_set->order < VM_NFREEORDER,
5007bfda801SAlan Cox 	    ("vm_phys_unfree_page: page %p has unexpected order %d",
5017bfda801SAlan Cox 	    m_set, m_set->order));
5027bfda801SAlan Cox 
5037bfda801SAlan Cox 	/*
5047bfda801SAlan Cox 	 * Next, remove "m_set" from the free lists.  Finally, extract
5057bfda801SAlan Cox 	 * "m" from "m_set" using an iterative algorithm: While "m_set"
5067bfda801SAlan Cox 	 * is larger than a page, shrink "m_set" by returning the half
5077bfda801SAlan Cox 	 * of "m_set" that does not contain "m" to the free lists.
5087bfda801SAlan Cox 	 */
5097bfda801SAlan Cox 	fl = (*seg->free_queues)[m_set->pool];
5107bfda801SAlan Cox 	order = m_set->order;
5117bfda801SAlan Cox 	TAILQ_REMOVE(&fl[order].pl, m_set, pageq);
5127bfda801SAlan Cox 	fl[order].lcnt--;
5137bfda801SAlan Cox 	m_set->order = VM_NFREEORDER;
5147bfda801SAlan Cox 	while (order > 0) {
5157bfda801SAlan Cox 		order--;
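		/*
		 * "pa_half" is the upper half of the current block at
		 * the reduced order; whichever half does not contain
		 * "m" is returned to the free list.
		 */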
5167bfda801SAlan Cox 		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
5177bfda801SAlan Cox 		if (m->phys_addr < pa_half)
5187bfda801SAlan Cox 			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
5197bfda801SAlan Cox 		else {
5207bfda801SAlan Cox 			m_tmp = m_set;
5217bfda801SAlan Cox 			m_set = &seg->first_page[atop(pa_half - seg->start)];
5227bfda801SAlan Cox 		}
5237bfda801SAlan Cox 		m_tmp->order = order;
5247bfda801SAlan Cox 		TAILQ_INSERT_HEAD(&fl[order].pl, m_tmp, pageq);
5257bfda801SAlan Cox 		fl[order].lcnt++;
5267bfda801SAlan Cox 	}
5277bfda801SAlan Cox 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
5287bfda801SAlan Cox }
5297bfda801SAlan Cox 
5307bfda801SAlan Cox /*
5317bfda801SAlan Cox  * Try to zero one physical page.  Used by an idle priority thread.
53211752d88SAlan Cox  */
53311752d88SAlan Cox boolean_t
53411752d88SAlan Cox vm_phys_zero_pages_idle(void)
53511752d88SAlan Cox {
5367bfda801SAlan Cox 	static struct vm_freelist *fl = vm_phys_free_queues[0][0];
5377bfda801SAlan Cox 	static int flind, oind, pind;
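	/*
	 * The static variables above form a cursor, so that each call
	 * resumes scanning the free queues where the previous call
	 * stopped.
	 */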
53811752d88SAlan Cox 	vm_page_t m, m_tmp;
53911752d88SAlan Cox 
54011752d88SAlan Cox 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
5417bfda801SAlan Cox 	for (;;) {
5427bfda801SAlan Cox 		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
5437bfda801SAlan Cox 			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
5447bfda801SAlan Cox 				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
5457bfda801SAlan Cox 					vm_phys_unfree_page(m_tmp);
5467bfda801SAlan Cox 					cnt.v_free_count--;
54711752d88SAlan Cox 					mtx_unlock(&vm_page_queue_free_mtx);
54811752d88SAlan Cox 					pmap_zero_page_idle(m_tmp);
54911752d88SAlan Cox 					m_tmp->flags |= PG_ZERO;
55011752d88SAlan Cox 					mtx_lock(&vm_page_queue_free_mtx);
5517bfda801SAlan Cox 					cnt.v_free_count++;
5527bfda801SAlan Cox 					vm_phys_free_pages(m_tmp, 0);
5537bfda801SAlan Cox 					vm_page_zero_count++;
5547bfda801SAlan Cox 					cnt_prezero++;
55511752d88SAlan Cox 					return (TRUE);
55611752d88SAlan Cox 				}
55711752d88SAlan Cox 			}
55811752d88SAlan Cox 		}
5597bfda801SAlan Cox 		oind++;
5607bfda801SAlan Cox 		if (oind == VM_NFREEORDER) {
5617bfda801SAlan Cox 			oind = 0;
5627bfda801SAlan Cox 			pind++;
5637bfda801SAlan Cox 			if (pind == VM_NFREEPOOL) {
5647bfda801SAlan Cox 				pind = 0;
5657bfda801SAlan Cox 				flind++;
5667bfda801SAlan Cox 				if (flind == vm_nfreelists)
5677bfda801SAlan Cox 					flind = 0;
5687bfda801SAlan Cox 			}
5697bfda801SAlan Cox 			fl = vm_phys_free_queues[flind][pind];
5707bfda801SAlan Cox 		}
5717bfda801SAlan Cox 	}
57211752d88SAlan Cox }
57311752d88SAlan Cox 
57411752d88SAlan Cox /*
5752f9f48d6SAlan Cox  * Allocate a contiguous set of physical pages of the given size
5762f9f48d6SAlan Cox  * "npages" from the free lists.  All of the physical pages must be at
5772f9f48d6SAlan Cox  * or above the given physical address "low" and below the given
5782f9f48d6SAlan Cox  * physical address "high".  The given value "alignment" determines the
5792f9f48d6SAlan Cox  * alignment of the first physical page in the set.  If the given value
5802f9f48d6SAlan Cox  * "boundary" is non-zero, then the set of physical pages cannot cross
5812f9f48d6SAlan Cox  * any physical address boundary that is a multiple of that value.  Both
58211752d88SAlan Cox  * "alignment" and "boundary" must be a power of two.
58311752d88SAlan Cox  * "alignment" and "boundary" must be powers of two.
58411752d88SAlan Cox vm_page_t
58511752d88SAlan Cox vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
58611752d88SAlan Cox     unsigned long alignment, unsigned long boundary)
58711752d88SAlan Cox {
58811752d88SAlan Cox 	struct vm_freelist *fl;
58911752d88SAlan Cox 	struct vm_phys_seg *seg;
5907bfda801SAlan Cox 	vm_object_t m_object;
59111752d88SAlan Cox 	vm_paddr_t pa, pa_last, size;
59211752d88SAlan Cox 	vm_page_t m, m_ret;
59311752d88SAlan Cox 	int flind, i, oind, order, pind;
59411752d88SAlan Cox 
59511752d88SAlan Cox 	size = npages << PAGE_SHIFT;
59611752d88SAlan Cox 	KASSERT(size != 0,
59711752d88SAlan Cox 	    ("vm_phys_alloc_contig: size must not be 0"));
59811752d88SAlan Cox 	KASSERT((alignment & (alignment - 1)) == 0,
59911752d88SAlan Cox 	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
60011752d88SAlan Cox 	KASSERT((boundary & (boundary - 1)) == 0,
60111752d88SAlan Cox 	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
60211752d88SAlan Cox 	/* Compute the queue that is the best fit for npages. */
60311752d88SAlan Cox 	for (order = 0; (1 << order) < npages; order++);
60411752d88SAlan Cox 	mtx_lock(&vm_page_queue_free_mtx);
60511752d88SAlan Cox 	for (flind = 0; flind < vm_nfreelists; flind++) {
60611752d88SAlan Cox 		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
60711752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
60811752d88SAlan Cox 				fl = vm_phys_free_queues[flind][pind];
60911752d88SAlan Cox 				TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
61011752d88SAlan Cox 					/*
61111752d88SAlan Cox 					 * A free list may contain physical pages
61211752d88SAlan Cox 					 * from one or more segments.
61311752d88SAlan Cox 					 */
61411752d88SAlan Cox 					seg = &vm_phys_segs[m_ret->segind];
61511752d88SAlan Cox 					if (seg->start > high ||
61611752d88SAlan Cox 					    low >= seg->end)
61711752d88SAlan Cox 						continue;
61811752d88SAlan Cox 
61911752d88SAlan Cox 					/*
62011752d88SAlan Cox 					 * Is the size of this allocation request
62111752d88SAlan Cox 					 * larger than the largest block size?
62211752d88SAlan Cox 					 */
62311752d88SAlan Cox 					if (order >= VM_NFREEORDER) {
62411752d88SAlan Cox 						/*
62511752d88SAlan Cox 						 * Determine if a sufficient number
62611752d88SAlan Cox 						 * of subsequent blocks to satisfy
62711752d88SAlan Cox 						 * the allocation request are free.
62811752d88SAlan Cox 						 */
62911752d88SAlan Cox 						pa = VM_PAGE_TO_PHYS(m_ret);
63011752d88SAlan Cox 						pa_last = pa + size;
63111752d88SAlan Cox 						for (;;) {
63211752d88SAlan Cox 							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
63311752d88SAlan Cox 							if (pa >= pa_last)
63411752d88SAlan Cox 								break;
63511752d88SAlan Cox 							if (pa < seg->start ||
63611752d88SAlan Cox 							    pa >= seg->end)
63711752d88SAlan Cox 								break;
63811752d88SAlan Cox 							m = &seg->first_page[atop(pa - seg->start)];
63911752d88SAlan Cox 							if (m->order != VM_NFREEORDER - 1)
64011752d88SAlan Cox 								break;
64111752d88SAlan Cox 						}
64211752d88SAlan Cox 						/* If not, continue to the next block. */
64311752d88SAlan Cox 						if (pa < pa_last)
64411752d88SAlan Cox 							continue;
64511752d88SAlan Cox 					}
64611752d88SAlan Cox 
64711752d88SAlan Cox 					/*
64811752d88SAlan Cox 					 * Determine if the blocks are within the given range,
64911752d88SAlan Cox 					 * satisfy the given alignment, and do not cross the
65011752d88SAlan Cox 					 * given boundary.
65111752d88SAlan Cox 					 */
65211752d88SAlan Cox 					pa = VM_PAGE_TO_PHYS(m_ret);
65311752d88SAlan Cox 					if (pa >= low &&
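					/*
					 * For a non-zero "boundary", the XOR
					 * test checks that the first and last
					 * byte of the run fall in the same
					 * boundary-aligned block, e.g., with
					 * a 1 MB boundary, "pa" and
					 * "pa + size - 1" must agree in every
					 * bit above bit 19.
					 */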
65411752d88SAlan Cox 					    pa + size <= high &&
65511752d88SAlan Cox 					    (pa & (alignment - 1)) == 0 &&
65611752d88SAlan Cox 					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
65711752d88SAlan Cox 						goto done;
65811752d88SAlan Cox 				}
65911752d88SAlan Cox 			}
66011752d88SAlan Cox 		}
66111752d88SAlan Cox 	}
66211752d88SAlan Cox 	mtx_unlock(&vm_page_queue_free_mtx);
66311752d88SAlan Cox 	return (NULL);
66411752d88SAlan Cox done:
66511752d88SAlan Cox 	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
66611752d88SAlan Cox 		fl = (*seg->free_queues)[m->pool];
66711752d88SAlan Cox 		TAILQ_REMOVE(&fl[m->order].pl, m, pageq);
66811752d88SAlan Cox 		fl[m->order].lcnt--;
66911752d88SAlan Cox 		m->order = VM_NFREEORDER;
67011752d88SAlan Cox 	}
67111752d88SAlan Cox 	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
67211752d88SAlan Cox 		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
67311752d88SAlan Cox 	fl = (*seg->free_queues)[m_ret->pool];
67411752d88SAlan Cox 	vm_phys_split_pages(m_ret, oind, fl, order);
67511752d88SAlan Cox 	for (i = 0; i < npages; i++) {
67611752d88SAlan Cox 		m = &m_ret[i];
67711752d88SAlan Cox 		KASSERT(m->queue == PQ_NONE,
67811752d88SAlan Cox 		    ("vm_phys_alloc_contig: page %p has unexpected queue %d",
67911752d88SAlan Cox 		    m, m->queue));
6807bfda801SAlan Cox 		m_object = m->object;
6817bfda801SAlan Cox 		if ((m->flags & PG_CACHED) != 0)
6827bfda801SAlan Cox 			vm_page_cache_remove(m);
6837bfda801SAlan Cox 		else {
6847bfda801SAlan Cox 			KASSERT(VM_PAGE_IS_FREE(m),
6857bfda801SAlan Cox 			    ("vm_phys_alloc_contig: page %p is not free", m));
6867bfda801SAlan Cox 			cnt.v_free_count--;
6877bfda801SAlan Cox 		}
68811752d88SAlan Cox 		m->valid = VM_PAGE_BITS_ALL;
68911752d88SAlan Cox 		if (m->flags & PG_ZERO)
69011752d88SAlan Cox 			vm_page_zero_count--;
69111752d88SAlan Cox 		/* Don't clear the PG_ZERO flag; we'll need it later. */
69211752d88SAlan Cox 		m->flags = PG_UNMANAGED | (m->flags & PG_ZERO);
69311752d88SAlan Cox 		m->oflags = 0;
69411752d88SAlan Cox 		KASSERT(m->dirty == 0,
69511752d88SAlan Cox 		    ("vm_phys_alloc_contig: page %p was dirty", m));
69611752d88SAlan Cox 		m->wire_count = 0;
69711752d88SAlan Cox 		m->busy = 0;
6987bfda801SAlan Cox 		if (m_object != NULL &&
6997bfda801SAlan Cox 		    m_object->type == OBJT_VNODE &&
7007bfda801SAlan Cox 		    m_object->cache == NULL) {
7017bfda801SAlan Cox 			mtx_unlock(&vm_page_queue_free_mtx);
7027bfda801SAlan Cox 			vdrop(m_object->handle);
7037bfda801SAlan Cox 			mtx_lock(&vm_page_queue_free_mtx);
7047bfda801SAlan Cox 		}
70511752d88SAlan Cox 	}
70611752d88SAlan Cox 	for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
70711752d88SAlan Cox 		m = &m_ret[i];
70811752d88SAlan Cox 		KASSERT(m->order == VM_NFREEORDER,
70911752d88SAlan Cox 		    ("vm_phys_alloc_contig: page %p has unexpected order %d",
71011752d88SAlan Cox 		    m, m->order));
7118941dc44SAlan Cox 		vm_phys_free_pages(m, 0);
71211752d88SAlan Cox 	}
71311752d88SAlan Cox 	mtx_unlock(&vm_page_queue_free_mtx);
71411752d88SAlan Cox 	return (m_ret);
71511752d88SAlan Cox }
71611752d88SAlan Cox 
71711752d88SAlan Cox #ifdef DDB
71811752d88SAlan Cox /*
71911752d88SAlan Cox  * Show the number of physical pages in each of the free lists.
72011752d88SAlan Cox  */
72111752d88SAlan Cox DB_SHOW_COMMAND(freepages, db_show_freepages)
72211752d88SAlan Cox {
72311752d88SAlan Cox 	struct vm_freelist *fl;
72411752d88SAlan Cox 	int flind, oind, pind;
72511752d88SAlan Cox 
72611752d88SAlan Cox 	for (flind = 0; flind < vm_nfreelists; flind++) {
72711752d88SAlan Cox 		db_printf("FREE LIST %d:\n"
72811752d88SAlan Cox 		    "\n  ORDER (SIZE)  |  NUMBER"
72911752d88SAlan Cox 		    "\n              ", flind);
73011752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++)
73111752d88SAlan Cox 			db_printf("  |  POOL %d", pind);
73211752d88SAlan Cox 		db_printf("\n--            ");
73311752d88SAlan Cox 		for (pind = 0; pind < VM_NFREEPOOL; pind++)
73411752d88SAlan Cox 			db_printf("-- --      ");
73511752d88SAlan Cox 		db_printf("--\n");
73611752d88SAlan Cox 		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
73711752d88SAlan Cox 			db_printf("  %2.2d (%6.6dK)", oind,
73811752d88SAlan Cox 			    1 << (PAGE_SHIFT - 10 + oind));
73911752d88SAlan Cox 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
74011752d88SAlan Cox 				fl = vm_phys_free_queues[flind][pind];
74111752d88SAlan Cox 				db_printf("  |  %6.6d", fl[oind].lcnt);
74211752d88SAlan Cox 			}
74311752d88SAlan Cox 			db_printf("\n");
74411752d88SAlan Cox 		}
74511752d88SAlan Cox 		db_printf("\n");
74611752d88SAlan Cox 	}
74711752d88SAlan Cox }
74811752d88SAlan Cox #endif
749