xref: /freebsd/sys/vm/vm_reserv.c (revision 84e2ae64c597000a0152c6772b2c8925773c6f6c)
1f8a47341SAlan Cox /*-
2fe267a55SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3fe267a55SPedro F. Giffuni  *
4f8a47341SAlan Cox  * Copyright (c) 2002-2006 Rice University
5ec179322SAlan Cox  * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
6f8a47341SAlan Cox  * All rights reserved.
7f8a47341SAlan Cox  *
8f8a47341SAlan Cox  * This software was developed for the FreeBSD Project by Alan L. Cox,
9f8a47341SAlan Cox  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
10f8a47341SAlan Cox  *
11f8a47341SAlan Cox  * Redistribution and use in source and binary forms, with or without
12f8a47341SAlan Cox  * modification, are permitted provided that the following conditions
13f8a47341SAlan Cox  * are met:
14f8a47341SAlan Cox  * 1. Redistributions of source code must retain the above copyright
15f8a47341SAlan Cox  *    notice, this list of conditions and the following disclaimer.
16f8a47341SAlan Cox  * 2. Redistributions in binary form must reproduce the above copyright
17f8a47341SAlan Cox  *    notice, this list of conditions and the following disclaimer in the
18f8a47341SAlan Cox  *    documentation and/or other materials provided with the distribution.
19f8a47341SAlan Cox  *
20f8a47341SAlan Cox  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21f8a47341SAlan Cox  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22f8a47341SAlan Cox  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23f8a47341SAlan Cox  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
24f8a47341SAlan Cox  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25f8a47341SAlan Cox  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26f8a47341SAlan Cox  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27f8a47341SAlan Cox  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28f8a47341SAlan Cox  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29f8a47341SAlan Cox  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
30f8a47341SAlan Cox  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31f8a47341SAlan Cox  * POSSIBILITY OF SUCH DAMAGE.
32f8a47341SAlan Cox  */
33f8a47341SAlan Cox 
34f8a47341SAlan Cox /*
35f8a47341SAlan Cox  *	Superpage reservation management module
36c68c3537SAlan Cox  *
37c68c3537SAlan Cox  * Any external functions defined by this module are only to be used by the
38c68c3537SAlan Cox  * virtual memory system.
39f8a47341SAlan Cox  */
40f8a47341SAlan Cox 
41f8a47341SAlan Cox #include <sys/cdefs.h>
42f8a47341SAlan Cox __FBSDID("$FreeBSD$");
43f8a47341SAlan Cox 
44f8a47341SAlan Cox #include "opt_vm.h"
45f8a47341SAlan Cox 
46f8a47341SAlan Cox #include <sys/param.h>
47f8a47341SAlan Cox #include <sys/kernel.h>
48f8a47341SAlan Cox #include <sys/lock.h>
49f8a47341SAlan Cox #include <sys/malloc.h>
50f8a47341SAlan Cox #include <sys/mutex.h>
51f8a47341SAlan Cox #include <sys/queue.h>
5289f6b863SAttilio Rao #include <sys/rwlock.h>
53f8a47341SAlan Cox #include <sys/sbuf.h>
54f8a47341SAlan Cox #include <sys/sysctl.h>
55f8a47341SAlan Cox #include <sys/systm.h>
56*84e2ae64SDoug Moore #include <sys/bitstring.h>
5772346b22SCy Schubert #include <sys/counter.h>
5872346b22SCy Schubert #include <sys/ktr.h>
599ed01c32SGleb Smirnoff #include <sys/vmmeter.h>
605c930c89SJeff Roberson #include <sys/smp.h>
61f8a47341SAlan Cox 
62f8a47341SAlan Cox #include <vm/vm.h>
63f76916c0SDoug Moore #include <vm/vm_extern.h>
64f8a47341SAlan Cox #include <vm/vm_param.h>
65f8a47341SAlan Cox #include <vm/vm_object.h>
66f8a47341SAlan Cox #include <vm/vm_page.h>
67e2068d0bSJeff Roberson #include <vm/vm_pageout.h>
68e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
69431fb8abSMark Johnston #include <vm/vm_phys.h>
70774d251dSAttilio Rao #include <vm/vm_radix.h>
71f8a47341SAlan Cox #include <vm/vm_reserv.h>
72f8a47341SAlan Cox 
73f8a47341SAlan Cox /*
74f8a47341SAlan Cox  * The reservation system supports the speculative allocation of large physical
753453bca8SAlan Cox  * pages ("superpages").  Speculative allocation enables the fully automatic
76f8a47341SAlan Cox  * utilization of superpages by the virtual memory system.  In other words, no
77f8a47341SAlan Cox  * programmatic directives are required to use superpages.
78f8a47341SAlan Cox  */
79f8a47341SAlan Cox 
80f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
81f8a47341SAlan Cox 
82f2a496d6SKonstantin Belousov #ifndef VM_LEVEL_0_ORDER_MAX
83f2a496d6SKonstantin Belousov #define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
84f2a496d6SKonstantin Belousov #endif
85f2a496d6SKonstantin Belousov 
86f8a47341SAlan Cox /*
87f8a47341SAlan Cox  * The number of small pages that are contained in a level 0 reservation
88f8a47341SAlan Cox  */
89f8a47341SAlan Cox #define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
90f2a496d6SKonstantin Belousov #define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)
91f8a47341SAlan Cox 
92f8a47341SAlan Cox /*
93f8a47341SAlan Cox  * The number of bits by which a physical address is shifted to obtain the
94f8a47341SAlan Cox  * reservation number
95f8a47341SAlan Cox  */
96f8a47341SAlan Cox #define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
97f8a47341SAlan Cox 
98f8a47341SAlan Cox /*
99f8a47341SAlan Cox  * The size of a level 0 reservation in bytes
100f8a47341SAlan Cox  */
101f8a47341SAlan Cox #define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
102f8a47341SAlan Cox 
103f8a47341SAlan Cox /*
104f8a47341SAlan Cox  * Computes the index of the small page underlying the given (object, pindex)
105f8a47341SAlan Cox  * within the reservation's array of small pages.
106f8a47341SAlan Cox  */
107f8a47341SAlan Cox #define	VM_RESERV_INDEX(object, pindex)	\
108f8a47341SAlan Cox     (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
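
/*
 * An illustrative example, assuming VM_LEVEL_0_ORDER == 9 and PAGE_SHIFT == 12
 * (the values used on amd64): VM_LEVEL_0_NPAGES is 512, VM_LEVEL_0_SHIFT is
 * 21, and VM_LEVEL_0_SIZE is 2 MB.  Under those assumptions, an object with
 * pg_color == 3 and an offset pindex == 1030 give
 * VM_RESERV_INDEX(object, pindex) == (3 + 1030) & 511 == 9.
 */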
109f8a47341SAlan Cox 
110f8a47341SAlan Cox /*
1112ef6727eSJeff Roberson  * Number of elapsed ticks before we update the LRU queue position.  Used
1122ef6727eSJeff Roberson  * to reduce contention and churn on the list.
1132ef6727eSJeff Roberson  */
1142ef6727eSJeff Roberson #define	PARTPOPSLOP	1
1152ef6727eSJeff Roberson 
1162ef6727eSJeff Roberson /*
117f8a47341SAlan Cox  * The reservation structure
118f8a47341SAlan Cox  *
119f8a47341SAlan Cox  * A reservation structure is constructed whenever a large physical page is
120f8a47341SAlan Cox  * speculatively allocated to an object.  The reservation provides the small
121f8a47341SAlan Cox  * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
122f8a47341SAlan Cox  * within that object.  The reservation's "popcnt" tracks the number of these
123f8a47341SAlan Cox  * small physical pages that are in use at any given time.  When and if the
1243453bca8SAlan Cox  * reservation is not fully utilized, it appears in the queue of partially
125f8a47341SAlan Cox  * populated reservations.  The reservation always appears on the containing
126f8a47341SAlan Cox  * object's list of reservations.
127f8a47341SAlan Cox  *
1283453bca8SAlan Cox  * A partially populated reservation can be broken and reclaimed at any time.
129e2068d0bSJeff Roberson  *
130b378d296SMark Johnston  * c - constant after boot
1315c930c89SJeff Roberson  * d - vm_reserv_domain_lock
132e2068d0bSJeff Roberson  * o - vm_reserv_object_lock
133b378d296SMark Johnston  * r - vm_reserv_lock
134b378d296SMark Johnston  * s - vm_reserv_domain_scan_lock
135f8a47341SAlan Cox  */
136f8a47341SAlan Cox struct vm_reserv {
1375c930c89SJeff Roberson 	struct mtx	lock;			/* reservation lock. */
138fe6d5344SMark Johnston 	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
1395c930c89SJeff Roberson 	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
1405c930c89SJeff Roberson 	vm_object_t	object;			/* (o, r) containing object */
1415c930c89SJeff Roberson 	vm_pindex_t	pindex;			/* (o, r) offset in object */
142e2068d0bSJeff Roberson 	vm_page_t	pages;			/* (c) first page  */
1435c930c89SJeff Roberson 	uint16_t	popcnt;			/* (r) # of pages in use */
144fe6d5344SMark Johnston 	uint8_t		domain;			/* (c) NUMA domain. */
145fe6d5344SMark Johnston 	char		inpartpopq;		/* (d, r) */
1462ef6727eSJeff Roberson 	int		lasttick;		/* (r) last pop update tick. */
147*84e2ae64SDoug Moore 	bitstr_t	bit_decl(popmap, VM_LEVEL_0_NPAGES_MAX);
148*84e2ae64SDoug Moore 						/* (r) bit vector, used pages */
149f8a47341SAlan Cox };
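
/*
 * Note on the popmap: it is a bitstring(9) bit vector with one bit per small
 * page in the reservation.  Elsewhere in this file, vm_reserv_populate() marks
 * a page in use with bit_set(rv->popmap, index), vm_reserv_depopulate() clears
 * that bit with bit_clear(rv->popmap, index), and range queries such as
 * bit_ntest(rv->popmap, index, index + npages - 1, 0) check that an entire run
 * of pages is still free.
 */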
150f8a47341SAlan Cox 
151b378d296SMark Johnston TAILQ_HEAD(vm_reserv_queue, vm_reserv);
152b378d296SMark Johnston 
1535c930c89SJeff Roberson #define	vm_reserv_lockptr(rv)		(&(rv)->lock)
1545c930c89SJeff Roberson #define	vm_reserv_assert_locked(rv)					\
1555c930c89SJeff Roberson 	    mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
1565c930c89SJeff Roberson #define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
1575c930c89SJeff Roberson #define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
1585c930c89SJeff Roberson #define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))
1595c930c89SJeff Roberson 
160f8a47341SAlan Cox /*
161f8a47341SAlan Cox  * The reservation array
162f8a47341SAlan Cox  *
163f8a47341SAlan Cox  * This array is analogous in function to vm_page_array.  It differs in the
164f8a47341SAlan Cox  * respect that it may contain a greater number of useful reservation
165f8a47341SAlan Cox  * structures than there are (physical) superpages.  These "invalid"
166f8a47341SAlan Cox  * reservation structures exist to trade off space for time in the
167f8a47341SAlan Cox  * implementation of vm_reserv_from_page().  Invalid reservation structures are
168f8a47341SAlan Cox  * distinguishable from "valid" reservation structures by inspecting the
169f8a47341SAlan Cox  * reservation's "pages" field.  Invalid reservation structures have a NULL
170f8a47341SAlan Cox  * "pages" field.
171f8a47341SAlan Cox  *
172f8a47341SAlan Cox  * vm_reserv_from_page() maps a small (physical) page to an element of this
173f8a47341SAlan Cox  * array by computing a physical reservation number from the page's physical
174f8a47341SAlan Cox  * address.  The physical reservation number is used as the array index.
175f8a47341SAlan Cox  *
176f8a47341SAlan Cox  * An "active" reservation is a valid reservation structure that has a non-NULL
177f8a47341SAlan Cox  * "object" field and a non-zero "popcnt" field.  In other words, every active
178f8a47341SAlan Cox  * reservation belongs to a particular object.  Moreover, every active
179f8a47341SAlan Cox  * reservation has an entry in the containing object's list of reservations.
180f8a47341SAlan Cox  */
181f8a47341SAlan Cox static vm_reserv_t vm_reserv_array;
182f8a47341SAlan Cox 
183f8a47341SAlan Cox /*
184fe6d5344SMark Johnston  * The per-domain partially populated reservation queues
185f8a47341SAlan Cox  *
186fe6d5344SMark Johnston  * These queues enable the fast recovery of an unused free small page from a
187fe6d5344SMark Johnston  * partially populated reservation.  The reservation at the head of a queue
1883453bca8SAlan Cox  * is the least recently changed, partially populated reservation.
189f8a47341SAlan Cox  *
190fe6d5344SMark Johnston  * Access to this queue is synchronized by the per-domain reservation lock.
191b378d296SMark Johnston  * Threads reclaiming free pages from the queue must hold the per-domain scan
192b378d296SMark Johnston  * lock.
193f8a47341SAlan Cox  */
194fe6d5344SMark Johnston struct vm_reserv_domain {
195fe6d5344SMark Johnston 	struct mtx 		lock;
196b378d296SMark Johnston 	struct vm_reserv_queue	partpop;	/* (d) */
197b378d296SMark Johnston 	struct vm_reserv	marker;		/* (d, s) scan marker/lock */
198fe6d5344SMark Johnston } __aligned(CACHE_LINE_SIZE);
199fe6d5344SMark Johnston 
200fe6d5344SMark Johnston static struct vm_reserv_domain vm_rvd[MAXMEMDOM];
201fe6d5344SMark Johnston 
202fe6d5344SMark Johnston #define	vm_reserv_domain_lockptr(d)	(&vm_rvd[(d)].lock)
203b378d296SMark Johnston #define	vm_reserv_domain_assert_locked(d)	\
204b378d296SMark Johnston 	mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
205fe6d5344SMark Johnston #define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
206fe6d5344SMark Johnston #define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))
207f8a47341SAlan Cox 
208b378d296SMark Johnston #define	vm_reserv_domain_scan_lock(d)	mtx_lock(&vm_rvd[(d)].marker.lock)
209b378d296SMark Johnston #define	vm_reserv_domain_scan_unlock(d)	mtx_unlock(&vm_rvd[(d)].marker.lock)
210b378d296SMark Johnston 
2117029da5cSPawel Biernacki static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2127029da5cSPawel Biernacki     "Reservation Info");
213f8a47341SAlan Cox 
214d869a17eSMark Johnston static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken);
2155c930c89SJeff Roberson SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
2165c930c89SJeff Roberson     &vm_reserv_broken, "Cumulative number of broken reservations");
217f8a47341SAlan Cox 
218d869a17eSMark Johnston static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed);
2195c930c89SJeff Roberson SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
2205c930c89SJeff Roberson     &vm_reserv_freed, "Cumulative number of freed reservations");
221f8a47341SAlan Cox 
222e0a63baaSAlan Cox static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);
223e0a63baaSAlan Cox 
224a314aba8SMateusz Guzik SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD,
225a314aba8SMateusz Guzik     NULL, 0, sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");
226e0a63baaSAlan Cox 
227f8a47341SAlan Cox static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);
228f8a47341SAlan Cox 
2297029da5cSPawel Biernacki SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq,
230114484b7SMark Johnston     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
2317029da5cSPawel Biernacki     sysctl_vm_reserv_partpopq, "A",
2327029da5cSPawel Biernacki     "Partially populated reservation queues");
233f8a47341SAlan Cox 
234d869a17eSMark Johnston static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed);
2355c930c89SJeff Roberson SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
2365c930c89SJeff Roberson     &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");
237f8a47341SAlan Cox 
238e2068d0bSJeff Roberson /*
239e2068d0bSJeff Roberson  * The object lock pool is used to synchronize the rvq.  We cannot use a
240e2068d0bSJeff Roberson  * pool mutex because it is required before malloc works.
241e2068d0bSJeff Roberson  *
242e2068d0bSJeff Roberson  * The "hash" function could be made faster without divide and modulo.
243e2068d0bSJeff Roberson  */
244e2068d0bSJeff Roberson #define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU
245e2068d0bSJeff Roberson 
246e2068d0bSJeff Roberson struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];
247e2068d0bSJeff Roberson 
248e2068d0bSJeff Roberson #define	vm_reserv_object_lock_idx(object)			\
249e2068d0bSJeff Roberson 	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
250e2068d0bSJeff Roberson #define	vm_reserv_object_lock_ptr(object)			\
251e2068d0bSJeff Roberson 	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
252e2068d0bSJeff Roberson #define	vm_reserv_object_lock(object)				\
253e2068d0bSJeff Roberson 	    mtx_lock(vm_reserv_object_lock_ptr((object)))
254e2068d0bSJeff Roberson #define	vm_reserv_object_unlock(object)				\
255e2068d0bSJeff Roberson 	    mtx_unlock(vm_reserv_object_lock_ptr((object)))
256e2068d0bSJeff Roberson 
257ada27a3bSKonstantin Belousov static void		vm_reserv_break(vm_reserv_t rv);
258ec179322SAlan Cox static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
259f8a47341SAlan Cox static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
260f8a47341SAlan Cox static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
261f8a47341SAlan Cox 			    vm_pindex_t pindex);
262ec179322SAlan Cox static void		vm_reserv_populate(vm_reserv_t rv, int index);
26344aab2c3SAlan Cox static void		vm_reserv_reclaim(vm_reserv_t rv);
264f8a47341SAlan Cox 
265f8a47341SAlan Cox /*
266e0a63baaSAlan Cox  * Returns the current number of full reservations.
267e0a63baaSAlan Cox  *
268fe6d5344SMark Johnston  * Since the number of full reservations is computed without acquiring any
269fe6d5344SMark Johnston  * locks, the returned value is inexact.
270e0a63baaSAlan Cox  */
271e0a63baaSAlan Cox static int
272e0a63baaSAlan Cox sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
273e0a63baaSAlan Cox {
274e0a63baaSAlan Cox 	vm_paddr_t paddr;
275e0a63baaSAlan Cox 	struct vm_phys_seg *seg;
276e0a63baaSAlan Cox 	vm_reserv_t rv;
277e0a63baaSAlan Cox 	int fullpop, segind;
278e0a63baaSAlan Cox 
279e0a63baaSAlan Cox 	fullpop = 0;
280e0a63baaSAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
281e0a63baaSAlan Cox 		seg = &vm_phys_segs[segind];
282e0a63baaSAlan Cox 		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
2837988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE
2847988971aSD Scott Phillips 		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
2857988971aSD Scott Phillips 		    (seg->start >> VM_LEVEL_0_SHIFT);
2867988971aSD Scott Phillips #else
2877988971aSD Scott Phillips 		rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
2887988971aSD Scott Phillips #endif
2896b821a74SAleksandr Rybalko 		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
2906b821a74SAleksandr Rybalko 		    VM_LEVEL_0_SIZE <= seg->end) {
291e0a63baaSAlan Cox 			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
292e0a63baaSAlan Cox 			paddr += VM_LEVEL_0_SIZE;
2937988971aSD Scott Phillips 			rv++;
294e0a63baaSAlan Cox 		}
295e0a63baaSAlan Cox 	}
296e0a63baaSAlan Cox 	return (sysctl_handle_int(oidp, &fullpop, 0, req));
297e0a63baaSAlan Cox }
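
/*
 * Example usage (the value shown is illustrative): the handler above backs the
 * read-only sysctl vm.reserv.fullpop, so
 *
 *	# sysctl vm.reserv.fullpop
 *	vm.reserv.fullpop: 1234
 *
 * reports an approximate count of fully populated reservations.
 */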
298e0a63baaSAlan Cox 
299e0a63baaSAlan Cox /*
3003453bca8SAlan Cox  * Describes the current state of the partially populated reservation queues.
301f8a47341SAlan Cox  */
302f8a47341SAlan Cox static int
303f8a47341SAlan Cox sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
304f8a47341SAlan Cox {
305f8a47341SAlan Cox 	struct sbuf sbuf;
306f8a47341SAlan Cox 	vm_reserv_t rv;
307ef435ae7SJeff Roberson 	int counter, error, domain, level, unused_pages;
308f8a47341SAlan Cox 
30900f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
31000f0e671SMatthew D Fleming 	if (error != 0)
31100f0e671SMatthew D Fleming 		return (error);
3124e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
313ef435ae7SJeff Roberson 	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
314ef435ae7SJeff Roberson 	for (domain = 0; domain < vm_ndomains; domain++) {
315f8a47341SAlan Cox 		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
316f8a47341SAlan Cox 			counter = 0;
317f8a47341SAlan Cox 			unused_pages = 0;
3185c930c89SJeff Roberson 			vm_reserv_domain_lock(domain);
319fe6d5344SMark Johnston 			TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
320b378d296SMark Johnston 				if (rv == &vm_rvd[domain].marker)
321b378d296SMark Johnston 					continue;
322f8a47341SAlan Cox 				counter++;
323f8a47341SAlan Cox 				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
324f8a47341SAlan Cox 			}
3255c930c89SJeff Roberson 			vm_reserv_domain_unlock(domain);
326ef435ae7SJeff Roberson 			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
327ef435ae7SJeff Roberson 			    domain, level,
3282cf36c8fSAlan Cox 			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
329f8a47341SAlan Cox 		}
330ef435ae7SJeff Roberson 	}
3314e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
332f8a47341SAlan Cox 	sbuf_delete(&sbuf);
333f8a47341SAlan Cox 	return (error);
334f8a47341SAlan Cox }
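
/*
 * Example usage (the numbers shown are illustrative): sysctl vm.reserv.partpopq
 * prints one row per (domain, level) pair in the format built above, e.g.
 *
 *	DOMAIN    LEVEL     SIZE  NUMBER
 *
 *	     0,      -1,  40960K,     17
 */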
335f8a47341SAlan Cox 
336f8a47341SAlan Cox /*
337e2068d0bSJeff Roberson  * Remove a reservation from the object's objq.
338e2068d0bSJeff Roberson  */
339e2068d0bSJeff Roberson static void
340e2068d0bSJeff Roberson vm_reserv_remove(vm_reserv_t rv)
341e2068d0bSJeff Roberson {
342e2068d0bSJeff Roberson 	vm_object_t object;
343e2068d0bSJeff Roberson 
3445c930c89SJeff Roberson 	vm_reserv_assert_locked(rv);
3455c930c89SJeff Roberson 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
3465c930c89SJeff Roberson 	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
347e2068d0bSJeff Roberson 	KASSERT(rv->object != NULL,
348e2068d0bSJeff Roberson 	    ("vm_reserv_remove: reserv %p is free", rv));
349e2068d0bSJeff Roberson 	KASSERT(!rv->inpartpopq,
350e2068d0bSJeff Roberson 	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
351e2068d0bSJeff Roberson 	object = rv->object;
352e2068d0bSJeff Roberson 	vm_reserv_object_lock(object);
353e2068d0bSJeff Roberson 	LIST_REMOVE(rv, objq);
354e2068d0bSJeff Roberson 	rv->object = NULL;
355e2068d0bSJeff Roberson 	vm_reserv_object_unlock(object);
356e2068d0bSJeff Roberson }
357e2068d0bSJeff Roberson 
358e2068d0bSJeff Roberson /*
359e2068d0bSJeff Roberson  * Insert a new reservation into the object's objq.
360e2068d0bSJeff Roberson  */
361e2068d0bSJeff Roberson static void
362e2068d0bSJeff Roberson vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
363e2068d0bSJeff Roberson {
364e2068d0bSJeff Roberson 
3655c930c89SJeff Roberson 	vm_reserv_assert_locked(rv);
3665c930c89SJeff Roberson 	CTR6(KTR_VM,
3675c930c89SJeff Roberson 	    "%s: rv %p(%p) object %p new %p popcnt %d",
3685c930c89SJeff Roberson 	    __FUNCTION__, rv, rv->pages, rv->object, object,
3695c930c89SJeff Roberson 	   rv->popcnt);
370e2068d0bSJeff Roberson 	KASSERT(rv->object == NULL,
371e2068d0bSJeff Roberson 	    ("vm_reserv_insert: reserv %p isn't free", rv));
372e2068d0bSJeff Roberson 	KASSERT(rv->popcnt == 0,
373e2068d0bSJeff Roberson 	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
374e2068d0bSJeff Roberson 	KASSERT(!rv->inpartpopq,
375e2068d0bSJeff Roberson 	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
376*84e2ae64SDoug Moore 	KASSERT(bit_ntest(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1, 0),
377e2068d0bSJeff Roberson 	    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
378e2068d0bSJeff Roberson 	vm_reserv_object_lock(object);
379e2068d0bSJeff Roberson 	rv->pindex = pindex;
380e2068d0bSJeff Roberson 	rv->object = object;
3812ef6727eSJeff Roberson 	rv->lasttick = ticks;
382e2068d0bSJeff Roberson 	LIST_INSERT_HEAD(&object->rvq, rv, objq);
383e2068d0bSJeff Roberson 	vm_reserv_object_unlock(object);
384e2068d0bSJeff Roberson }
385e2068d0bSJeff Roberson 
386e2068d0bSJeff Roberson /*
387f8a47341SAlan Cox  * Reduces the given reservation's population count.  If the population count
388f8a47341SAlan Cox  * becomes zero, the reservation is destroyed.  Additionally, moves the
3893453bca8SAlan Cox  * reservation to the tail of the partially populated reservation queue if the
390f8a47341SAlan Cox  * population count is non-zero.
391f8a47341SAlan Cox  */
392f8a47341SAlan Cox static void
393ec179322SAlan Cox vm_reserv_depopulate(vm_reserv_t rv, int index)
394f8a47341SAlan Cox {
3955c930c89SJeff Roberson 	struct vm_domain *vmd;
396f8a47341SAlan Cox 
3975c930c89SJeff Roberson 	vm_reserv_assert_locked(rv);
3985c930c89SJeff Roberson 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
3995c930c89SJeff Roberson 	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
400f8a47341SAlan Cox 	KASSERT(rv->object != NULL,
401f8a47341SAlan Cox 	    ("vm_reserv_depopulate: reserv %p is free", rv));
402*84e2ae64SDoug Moore 	KASSERT(bit_test(rv->popmap, index),
403a08c1515SAlan Cox 	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
404a08c1515SAlan Cox 	    index));
405f8a47341SAlan Cox 	KASSERT(rv->popcnt > 0,
406f8a47341SAlan Cox 	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
4072d3f4181SJeff Roberson 	KASSERT(rv->domain < vm_ndomains,
408ef435ae7SJeff Roberson 	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
409ef435ae7SJeff Roberson 	    rv, rv->domain));
4105c930c89SJeff Roberson 	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
411dd05fa19SAlan Cox 		KASSERT(rv->pages->psind == 1,
412dd05fa19SAlan Cox 		    ("vm_reserv_depopulate: reserv %p is already demoted",
413dd05fa19SAlan Cox 		    rv));
414dd05fa19SAlan Cox 		rv->pages->psind = 0;
415f8a47341SAlan Cox 	}
416*84e2ae64SDoug Moore 	bit_clear(rv->popmap, index);
417f8a47341SAlan Cox 	rv->popcnt--;
4182ef6727eSJeff Roberson 	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
4192ef6727eSJeff Roberson 	    rv->popcnt == 0) {
4205c930c89SJeff Roberson 		vm_reserv_domain_lock(rv->domain);
4215c930c89SJeff Roberson 		if (rv->inpartpopq) {
422fe6d5344SMark Johnston 			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
4235c930c89SJeff Roberson 			rv->inpartpopq = FALSE;
4245c930c89SJeff Roberson 		}
4255c930c89SJeff Roberson 		if (rv->popcnt != 0) {
426f8a47341SAlan Cox 			rv->inpartpopq = TRUE;
427fe6d5344SMark Johnston 			TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
428fe6d5344SMark Johnston 			    partpopq);
429f8a47341SAlan Cox 		}
4305c930c89SJeff Roberson 		vm_reserv_domain_unlock(rv->domain);
4312ef6727eSJeff Roberson 		rv->lasttick = ticks;
4322ef6727eSJeff Roberson 	}
4335c930c89SJeff Roberson 	vmd = VM_DOMAIN(rv->domain);
4345c930c89SJeff Roberson 	if (rv->popcnt == 0) {
4355c930c89SJeff Roberson 		vm_reserv_remove(rv);
4365c930c89SJeff Roberson 		vm_domain_free_lock(vmd);
4375c930c89SJeff Roberson 		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
4385c930c89SJeff Roberson 		vm_domain_free_unlock(vmd);
4395c930c89SJeff Roberson 		counter_u64_add(vm_reserv_freed, 1);
4405c930c89SJeff Roberson 	}
4415c930c89SJeff Roberson 	vm_domain_freecnt_inc(vmd, 1);
442f8a47341SAlan Cox }
443f8a47341SAlan Cox 
444f8a47341SAlan Cox /*
445f8a47341SAlan Cox  * Returns the reservation to which the given page might belong.
446f8a47341SAlan Cox  */
447f8a47341SAlan Cox static __inline vm_reserv_t
448f8a47341SAlan Cox vm_reserv_from_page(vm_page_t m)
449f8a47341SAlan Cox {
4507988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE
4517988971aSD Scott Phillips 	struct vm_phys_seg *seg;
452f8a47341SAlan Cox 
4537988971aSD Scott Phillips 	seg = &vm_phys_segs[m->segind];
4547988971aSD Scott Phillips 	return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
4557988971aSD Scott Phillips 	    (seg->start >> VM_LEVEL_0_SHIFT));
4567988971aSD Scott Phillips #else
457f8a47341SAlan Cox 	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
4587988971aSD Scott Phillips #endif
459f8a47341SAlan Cox }
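
/*
 * A worked example, assuming the dense (non-VM_PHYSSEG_SPARSE) case and a
 * 2 MB level 0 reservation (VM_LEVEL_0_SHIFT == 21, as on amd64): a page at
 * physical address 0x12345000 has reservation number 0x12345000 >> 21 == 145,
 * so vm_reserv_from_page() returns &vm_reserv_array[145].
 */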
460f8a47341SAlan Cox 
461f8a47341SAlan Cox /*
462e2068d0bSJeff Roberson  * Returns an existing reservation or NULL, and initializes the successor pointer.
463e2068d0bSJeff Roberson  */
464e2068d0bSJeff Roberson static vm_reserv_t
465e2068d0bSJeff Roberson vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
466e2068d0bSJeff Roberson     vm_page_t mpred, vm_page_t *msuccp)
467e2068d0bSJeff Roberson {
468e2068d0bSJeff Roberson 	vm_reserv_t rv;
469e2068d0bSJeff Roberson 	vm_page_t msucc;
470e2068d0bSJeff Roberson 
471e2068d0bSJeff Roberson 	msucc = NULL;
472e2068d0bSJeff Roberson 	if (mpred != NULL) {
473e2068d0bSJeff Roberson 		KASSERT(mpred->object == object,
474e2068d0bSJeff Roberson 		    ("vm_reserv_from_object: object doesn't contain mpred"));
475e2068d0bSJeff Roberson 		KASSERT(mpred->pindex < pindex,
476e2068d0bSJeff Roberson 		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
477e2068d0bSJeff Roberson 		rv = vm_reserv_from_page(mpred);
478e2068d0bSJeff Roberson 		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
479e2068d0bSJeff Roberson 			goto found;
480e2068d0bSJeff Roberson 		msucc = TAILQ_NEXT(mpred, listq);
481e2068d0bSJeff Roberson 	} else
482e2068d0bSJeff Roberson 		msucc = TAILQ_FIRST(&object->memq);
483e2068d0bSJeff Roberson 	if (msucc != NULL) {
484e2068d0bSJeff Roberson 		KASSERT(msucc->pindex > pindex,
485e2068d0bSJeff Roberson 		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
486e2068d0bSJeff Roberson 		rv = vm_reserv_from_page(msucc);
487e2068d0bSJeff Roberson 		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
488e2068d0bSJeff Roberson 			goto found;
489e2068d0bSJeff Roberson 	}
490e2068d0bSJeff Roberson 	rv = NULL;
491e2068d0bSJeff Roberson 
492e2068d0bSJeff Roberson found:
493e2068d0bSJeff Roberson 	*msuccp = msucc;
494e2068d0bSJeff Roberson 
495e2068d0bSJeff Roberson 	return (rv);
496e2068d0bSJeff Roberson }
497e2068d0bSJeff Roberson 
498e2068d0bSJeff Roberson /*
499f8a47341SAlan Cox  * Returns TRUE if the given reservation contains the given page index and
500f8a47341SAlan Cox  * FALSE otherwise.
501f8a47341SAlan Cox  */
502f8a47341SAlan Cox static __inline boolean_t
503f8a47341SAlan Cox vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
504f8a47341SAlan Cox {
505f8a47341SAlan Cox 
506f8a47341SAlan Cox 	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
507f8a47341SAlan Cox }
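
/*
 * A worked example, assuming VM_LEVEL_0_NPAGES == 512: for a reservation with
 * rv->pindex == 1024, pindex 1300 gives ((1300 - 1024) & ~511) == 0, so the
 * function returns TRUE; pindex 1600 gives ((1600 - 1024) & ~511) == 512 and
 * the function returns FALSE.
 */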
508f8a47341SAlan Cox 
509f8a47341SAlan Cox /*
510f8a47341SAlan Cox  * Increases the given reservation's population count.  Moves the reservation
5113453bca8SAlan Cox  * to the tail of the partially populated reservation queue.
512f8a47341SAlan Cox  */
513f8a47341SAlan Cox static void
514ec179322SAlan Cox vm_reserv_populate(vm_reserv_t rv, int index)
515f8a47341SAlan Cox {
516f8a47341SAlan Cox 
5175c930c89SJeff Roberson 	vm_reserv_assert_locked(rv);
5185c930c89SJeff Roberson 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
5195c930c89SJeff Roberson 	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
520f8a47341SAlan Cox 	KASSERT(rv->object != NULL,
521f8a47341SAlan Cox 	    ("vm_reserv_populate: reserv %p is free", rv));
522*84e2ae64SDoug Moore 	KASSERT(!bit_test(rv->popmap, index),
523a08c1515SAlan Cox 	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
524a08c1515SAlan Cox 	    index));
525f8a47341SAlan Cox 	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
526f8a47341SAlan Cox 	    ("vm_reserv_populate: reserv %p is already full", rv));
527dd05fa19SAlan Cox 	KASSERT(rv->pages->psind == 0,
528dd05fa19SAlan Cox 	    ("vm_reserv_populate: reserv %p is already promoted", rv));
5292d3f4181SJeff Roberson 	KASSERT(rv->domain < vm_ndomains,
530ef435ae7SJeff Roberson 	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
531ef435ae7SJeff Roberson 	    rv, rv->domain));
532*84e2ae64SDoug Moore 	bit_set(rv->popmap, index);
5335c930c89SJeff Roberson 	rv->popcnt++;
5342ef6727eSJeff Roberson 	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
5352ef6727eSJeff Roberson 	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
5362ef6727eSJeff Roberson 		return;
5372ef6727eSJeff Roberson 	rv->lasttick = ticks;
5385c930c89SJeff Roberson 	vm_reserv_domain_lock(rv->domain);
539f8a47341SAlan Cox 	if (rv->inpartpopq) {
540fe6d5344SMark Johnston 		TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
541f8a47341SAlan Cox 		rv->inpartpopq = FALSE;
542f8a47341SAlan Cox 	}
543f8a47341SAlan Cox 	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
544f8a47341SAlan Cox 		rv->inpartpopq = TRUE;
545fe6d5344SMark Johnston 		TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
5465c930c89SJeff Roberson 	} else {
5475c930c89SJeff Roberson 		KASSERT(rv->pages->psind == 0,
5485c930c89SJeff Roberson 		    ("vm_reserv_populate: reserv %p is already promoted",
5495c930c89SJeff Roberson 		    rv));
550dd05fa19SAlan Cox 		rv->pages->psind = 1;
551f8a47341SAlan Cox 	}
5525c930c89SJeff Roberson 	vm_reserv_domain_unlock(rv->domain);
5535c930c89SJeff Roberson }
554f8a47341SAlan Cox 
555f8a47341SAlan Cox /*
556e2068d0bSJeff Roberson  * Allocates a contiguous set of physical pages of the given size "npages"
5572d5039dbSAlan Cox  * from existing or newly created reservations.  All of the physical pages
558e2068d0bSJeff Roberson  * must be at or above the given physical address "low" and below the given
559e2068d0bSJeff Roberson  * physical address "high".  The given value "alignment" determines the
560e2068d0bSJeff Roberson  * alignment of the first physical page in the set.  If the given value
561e2068d0bSJeff Roberson  * "boundary" is non-zero, then the set of physical pages cannot cross any
562e2068d0bSJeff Roberson  * physical address boundary that is a multiple of that value.  Both
563e2068d0bSJeff Roberson  * "alignment" and "boundary" must be a power of two.
564e2068d0bSJeff Roberson  *
565e2068d0bSJeff Roberson  * The page "mpred" must immediately precede the offset "pindex" within the
566e2068d0bSJeff Roberson  * specified object.
567e2068d0bSJeff Roberson  *
5682d5039dbSAlan Cox  * The object must be locked.
569e2068d0bSJeff Roberson  */
570e2068d0bSJeff Roberson vm_page_t
5712d5039dbSAlan Cox vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
5722d5039dbSAlan Cox     int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
5732d5039dbSAlan Cox     u_long alignment, vm_paddr_t boundary)
574c68c3537SAlan Cox {
5755c930c89SJeff Roberson 	struct vm_domain *vmd;
576c68c3537SAlan Cox 	vm_paddr_t pa, size;
577920da7e4SAlan Cox 	vm_page_t m, m_ret, msucc;
578c68c3537SAlan Cox 	vm_pindex_t first, leftcap, rightcap;
579c68c3537SAlan Cox 	vm_reserv_t rv;
580c68c3537SAlan Cox 	u_long allocpages, maxpages, minpages;
581c68c3537SAlan Cox 	int i, index, n;
582c68c3537SAlan Cox 
58389f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
584c68c3537SAlan Cox 	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
585c68c3537SAlan Cox 
586c68c3537SAlan Cox 	/*
587c68c3537SAlan Cox 	 * Is a reservation fundamentally impossible?
588c68c3537SAlan Cox 	 */
589c68c3537SAlan Cox 	if (pindex < VM_RESERV_INDEX(object, pindex) ||
590c68c3537SAlan Cox 	    pindex + npages > object->size)
591c68c3537SAlan Cox 		return (NULL);
592c68c3537SAlan Cox 
593c68c3537SAlan Cox 	/*
594c68c3537SAlan Cox 	 * All reservations of a particular size have the same alignment.
595c68c3537SAlan Cox 	 * Assuming that the first page is allocated from a reservation, the
596c68c3537SAlan Cox 	 * least significant bits of its physical address can be determined
597c68c3537SAlan Cox 	 * from its offset from the beginning of the reservation and the size
598c68c3537SAlan Cox 	 * of the reservation.
599c68c3537SAlan Cox 	 *
600c68c3537SAlan Cox 	 * Could the specified index within a reservation of the smallest
601c68c3537SAlan Cox 	 * possible size satisfy the alignment and boundary requirements?
602c68c3537SAlan Cox 	 */
603c68c3537SAlan Cox 	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
604c68c3537SAlan Cox 	size = npages << PAGE_SHIFT;
605c606ab59SDoug Moore 	if (!vm_addr_ok(pa, size, alignment, boundary))
606c68c3537SAlan Cox 		return (NULL);
607c68c3537SAlan Cox 
608c68c3537SAlan Cox 	/*
6092d5039dbSAlan Cox 	 * Look for an existing reservation.
610c68c3537SAlan Cox 	 */
611e2068d0bSJeff Roberson 	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
6122d5039dbSAlan Cox 	if (rv != NULL) {
6132d5039dbSAlan Cox 		KASSERT(object != kernel_object || rv->domain == domain,
6142d5039dbSAlan Cox 		    ("vm_reserv_alloc_contig: domain mismatch"));
6152d5039dbSAlan Cox 		index = VM_RESERV_INDEX(object, pindex);
6162d5039dbSAlan Cox 		/* Does the allocation fit within the reservation? */
6172d5039dbSAlan Cox 		if (index + npages > VM_LEVEL_0_NPAGES)
618e2068d0bSJeff Roberson 			return (NULL);
6192d5039dbSAlan Cox 		domain = rv->domain;
6202d5039dbSAlan Cox 		vmd = VM_DOMAIN(domain);
6212d5039dbSAlan Cox 		vm_reserv_lock(rv);
6222d5039dbSAlan Cox 		/* Handle reclaim race. */
6232d5039dbSAlan Cox 		if (rv->object != object)
6242d5039dbSAlan Cox 			goto out;
6252d5039dbSAlan Cox 		m = &rv->pages[index];
6262d5039dbSAlan Cox 		pa = VM_PAGE_TO_PHYS(m);
6272d5039dbSAlan Cox 		if (pa < low || pa + size > high ||
628c606ab59SDoug Moore 		    !vm_addr_ok(pa, size, alignment, boundary))
6292d5039dbSAlan Cox 			goto out;
6302d5039dbSAlan Cox 		/* Handle vm_page_rename(m, new_object, ...). */
631*84e2ae64SDoug Moore 		if (!bit_ntest(rv->popmap, index, index + npages - 1, 0))
6322d5039dbSAlan Cox 			goto out;
6332d5039dbSAlan Cox 		if (!vm_domain_allocate(vmd, req, npages))
6342d5039dbSAlan Cox 			goto out;
6352d5039dbSAlan Cox 		for (i = 0; i < npages; i++)
6362d5039dbSAlan Cox 			vm_reserv_populate(rv, index + i);
6372d5039dbSAlan Cox 		vm_reserv_unlock(rv);
6382d5039dbSAlan Cox 		return (m);
6392d5039dbSAlan Cox out:
6402d5039dbSAlan Cox 		vm_reserv_unlock(rv);
6412d5039dbSAlan Cox 		return (NULL);
6422d5039dbSAlan Cox 	}
643c68c3537SAlan Cox 
644c68c3537SAlan Cox 	/*
645c68c3537SAlan Cox 	 * Could at least one reservation fit between the first index to the
64664f096eeSAlan Cox 	 * left that can be used ("leftcap") and the first index to the right
64764f096eeSAlan Cox 	 * that cannot be used ("rightcap")?
648e2068d0bSJeff Roberson 	 *
649e2068d0bSJeff Roberson 	 * We must synchronize with the reserv object lock to protect the
650e2068d0bSJeff Roberson 	 * pindex/object of the resulting reservations against rename while
651e2068d0bSJeff Roberson 	 * we are inspecting.
652c68c3537SAlan Cox 	 */
653c68c3537SAlan Cox 	first = pindex - VM_RESERV_INDEX(object, pindex);
654e2068d0bSJeff Roberson 	minpages = VM_RESERV_INDEX(object, pindex) + npages;
655e2068d0bSJeff Roberson 	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
656e2068d0bSJeff Roberson 	allocpages = maxpages;
657e2068d0bSJeff Roberson 	vm_reserv_object_lock(object);
658c68c3537SAlan Cox 	if (mpred != NULL) {
659c68c3537SAlan Cox 		if ((rv = vm_reserv_from_page(mpred))->object != object)
660c68c3537SAlan Cox 			leftcap = mpred->pindex + 1;
661c68c3537SAlan Cox 		else
662c68c3537SAlan Cox 			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
663e2068d0bSJeff Roberson 		if (leftcap > first) {
664e2068d0bSJeff Roberson 			vm_reserv_object_unlock(object);
665c68c3537SAlan Cox 			return (NULL);
666c68c3537SAlan Cox 		}
667e2068d0bSJeff Roberson 	}
668c68c3537SAlan Cox 	if (msucc != NULL) {
669c68c3537SAlan Cox 		if ((rv = vm_reserv_from_page(msucc))->object != object)
670c68c3537SAlan Cox 			rightcap = msucc->pindex;
671c68c3537SAlan Cox 		else
672c68c3537SAlan Cox 			rightcap = rv->pindex;
673c68c3537SAlan Cox 		if (first + maxpages > rightcap) {
674e2068d0bSJeff Roberson 			if (maxpages == VM_LEVEL_0_NPAGES) {
675e2068d0bSJeff Roberson 				vm_reserv_object_unlock(object);
676c68c3537SAlan Cox 				return (NULL);
677e2068d0bSJeff Roberson 			}
67864f096eeSAlan Cox 
67964f096eeSAlan Cox 			/*
68064f096eeSAlan Cox 			 * At least one reservation will fit between "leftcap"
68164f096eeSAlan Cox 			 * and "rightcap".  However, a reservation for the
68264f096eeSAlan Cox 			 * last of the requested pages will not fit.  Reduce
68364f096eeSAlan Cox 			 * the size of the upcoming allocation accordingly.
68464f096eeSAlan Cox 			 */
685c68c3537SAlan Cox 			allocpages = minpages;
686c68c3537SAlan Cox 		}
687c68c3537SAlan Cox 	}
688e2068d0bSJeff Roberson 	vm_reserv_object_unlock(object);
689c68c3537SAlan Cox 
690c68c3537SAlan Cox 	/*
691c68c3537SAlan Cox 	 * Would the last new reservation extend past the end of the object?
69263967687SJeff Roberson 	 *
69363967687SJeff Roberson 	 * If the object is unlikely to grow don't allocate a reservation for
69463967687SJeff Roberson 	 * the tail.
695c68c3537SAlan Cox 	 */
69663967687SJeff Roberson 	if ((object->flags & OBJ_ANON) == 0 &&
69763967687SJeff Roberson 	    first + maxpages > object->size) {
698c68c3537SAlan Cox 		if (maxpages == VM_LEVEL_0_NPAGES)
699c68c3537SAlan Cox 			return (NULL);
700c68c3537SAlan Cox 		allocpages = minpages;
701c68c3537SAlan Cox 	}
702c68c3537SAlan Cox 
703c68c3537SAlan Cox 	/*
70464f096eeSAlan Cox 	 * Allocate the physical pages.  The alignment and boundary specified
70564f096eeSAlan Cox 	 * for this allocation may be different from the alignment and
70664f096eeSAlan Cox 	 * boundary specified for the requested pages.  For instance, the
70764f096eeSAlan Cox 	 * specified index may not be the first page within the first new
70864f096eeSAlan Cox 	 * reservation.
709c68c3537SAlan Cox 	 */
7105c930c89SJeff Roberson 	m = NULL;
7115c930c89SJeff Roberson 	vmd = VM_DOMAIN(domain);
7125c930c89SJeff Roberson 	if (vm_domain_allocate(vmd, req, npages)) {
7135c930c89SJeff Roberson 		vm_domain_free_lock(vmd);
7145c930c89SJeff Roberson 		m = vm_phys_alloc_contig(domain, allocpages, low, high,
7155c930c89SJeff Roberson 		    ulmax(alignment, VM_LEVEL_0_SIZE),
7165c930c89SJeff Roberson 		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
7175c930c89SJeff Roberson 		vm_domain_free_unlock(vmd);
7185c930c89SJeff Roberson 		if (m == NULL) {
7195c930c89SJeff Roberson 			vm_domain_freecnt_inc(vmd, npages);
7205c930c89SJeff Roberson 			return (NULL);
7215c930c89SJeff Roberson 		}
7225c930c89SJeff Roberson 	} else
723c68c3537SAlan Cox 		return (NULL);
724431fb8abSMark Johnston 	KASSERT(vm_page_domain(m) == domain,
7257a469c8eSJeff Roberson 	    ("vm_reserv_alloc_contig: Page domain does not match requested."));
72664f096eeSAlan Cox 
72764f096eeSAlan Cox 	/*
72864f096eeSAlan Cox 	 * The allocated physical pages always begin at a reservation
72964f096eeSAlan Cox 	 * boundary, but they do not always end at a reservation boundary.
73064f096eeSAlan Cox 	 * Initialize every reservation that is completely covered by the
73164f096eeSAlan Cox 	 * allocated physical pages.
73264f096eeSAlan Cox 	 */
733c68c3537SAlan Cox 	m_ret = NULL;
734c68c3537SAlan Cox 	index = VM_RESERV_INDEX(object, pindex);
735c68c3537SAlan Cox 	do {
736c68c3537SAlan Cox 		rv = vm_reserv_from_page(m);
737c68c3537SAlan Cox 		KASSERT(rv->pages == m,
738c68c3537SAlan Cox 		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
739c68c3537SAlan Cox 		    rv));
7405c930c89SJeff Roberson 		vm_reserv_lock(rv);
741e2068d0bSJeff Roberson 		vm_reserv_insert(rv, object, first);
742c68c3537SAlan Cox 		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
743c68c3537SAlan Cox 		for (i = 0; i < n; i++)
744ec179322SAlan Cox 			vm_reserv_populate(rv, index + i);
745c68c3537SAlan Cox 		npages -= n;
746c68c3537SAlan Cox 		if (m_ret == NULL) {
747c68c3537SAlan Cox 			m_ret = &rv->pages[index];
748c68c3537SAlan Cox 			index = 0;
749c68c3537SAlan Cox 		}
7505c930c89SJeff Roberson 		vm_reserv_unlock(rv);
751c68c3537SAlan Cox 		m += VM_LEVEL_0_NPAGES;
752c68c3537SAlan Cox 		first += VM_LEVEL_0_NPAGES;
753c68c3537SAlan Cox 		allocpages -= VM_LEVEL_0_NPAGES;
75464f096eeSAlan Cox 	} while (allocpages >= VM_LEVEL_0_NPAGES);
755c68c3537SAlan Cox 	return (m_ret);
756e2068d0bSJeff Roberson }
757c68c3537SAlan Cox 
758c68c3537SAlan Cox /*
7592d5039dbSAlan Cox  * Allocate a physical page from an existing or newly created reservation.
760e2068d0bSJeff Roberson  *
761e2068d0bSJeff Roberson  * The page "mpred" must immediately precede the offset "pindex" within the
762e2068d0bSJeff Roberson  * specified object.
763e2068d0bSJeff Roberson  *
764e2068d0bSJeff Roberson  * The object must be locked.
765c68c3537SAlan Cox  */
766e2068d0bSJeff Roberson vm_page_t
7672d5039dbSAlan Cox vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
7682d5039dbSAlan Cox     int req, vm_page_t mpred)
769e2068d0bSJeff Roberson {
770e2068d0bSJeff Roberson 	struct vm_domain *vmd;
771e2068d0bSJeff Roberson 	vm_page_t m, msucc;
7722d5039dbSAlan Cox 	vm_pindex_t first, leftcap, rightcap;
773e2068d0bSJeff Roberson 	vm_reserv_t rv;
77430fbfddaSJeff Roberson 	int index;
775e2068d0bSJeff Roberson 
776e2068d0bSJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
777e2068d0bSJeff Roberson 
778e2068d0bSJeff Roberson 	/*
7792d5039dbSAlan Cox 	 * Is a reservation fundamentally impossible?
780e2068d0bSJeff Roberson 	 */
781e2068d0bSJeff Roberson 	if (pindex < VM_RESERV_INDEX(object, pindex) ||
7822d5039dbSAlan Cox 	    pindex >= object->size)
783e2068d0bSJeff Roberson 		return (NULL);
784e2068d0bSJeff Roberson 
785e2068d0bSJeff Roberson 	/*
786e2068d0bSJeff Roberson 	 * Look for an existing reservation.
787e2068d0bSJeff Roberson 	 */
788e2068d0bSJeff Roberson 	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
7892d5039dbSAlan Cox 	if (rv != NULL) {
790e2068d0bSJeff Roberson 		KASSERT(object != kernel_object || rv->domain == domain,
7912d5039dbSAlan Cox 		    ("vm_reserv_alloc_page: domain mismatch"));
792e2068d0bSJeff Roberson 		domain = rv->domain;
793e2068d0bSJeff Roberson 		vmd = VM_DOMAIN(domain);
794c68c3537SAlan Cox 		index = VM_RESERV_INDEX(object, pindex);
795c68c3537SAlan Cox 		m = &rv->pages[index];
7965c930c89SJeff Roberson 		vm_reserv_lock(rv);
797e2068d0bSJeff Roberson 		/* Handle reclaim race. */
7985c930c89SJeff Roberson 		if (rv->object != object ||
799c68c3537SAlan Cox 		    /* Handle vm_page_rename(m, new_object, ...). */
800*84e2ae64SDoug Moore 		    bit_test(rv->popmap, index)) {
801e2068d0bSJeff Roberson 			m = NULL;
8025c930c89SJeff Roberson 			goto out;
80330fbfddaSJeff Roberson 		}
8045c930c89SJeff Roberson 		if (vm_domain_allocate(vmd, req, 1) == 0)
8055c930c89SJeff Roberson 			m = NULL;
8065c930c89SJeff Roberson 		else
8075c930c89SJeff Roberson 			vm_reserv_populate(rv, index);
8085c930c89SJeff Roberson out:
8095c930c89SJeff Roberson 		vm_reserv_unlock(rv);
810c68c3537SAlan Cox 		return (m);
811c68c3537SAlan Cox 	}
812c68c3537SAlan Cox 
813c68c3537SAlan Cox 	/*
814c68c3537SAlan Cox 	 * Could a reservation fit between the first index to the left that
815c68c3537SAlan Cox 	 * can be used and the first index to the right that cannot be used?
816e2068d0bSJeff Roberson 	 *
817e2068d0bSJeff Roberson 	 * We must synchronize with the reserv object lock to protect the
818e2068d0bSJeff Roberson 	 * pindex/object of the resulting reservations against rename while
819e2068d0bSJeff Roberson 	 * we are inspecting.
820f8a47341SAlan Cox 	 */
821c68c3537SAlan Cox 	first = pindex - VM_RESERV_INDEX(object, pindex);
822e2068d0bSJeff Roberson 	vm_reserv_object_lock(object);
823c68c3537SAlan Cox 	if (mpred != NULL) {
824c68c3537SAlan Cox 		if ((rv = vm_reserv_from_page(mpred))->object != object)
825f8a47341SAlan Cox 			leftcap = mpred->pindex + 1;
826f8a47341SAlan Cox 		else
827f8a47341SAlan Cox 			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
828e2068d0bSJeff Roberson 		if (leftcap > first) {
829e2068d0bSJeff Roberson 			vm_reserv_object_unlock(object);
830c68c3537SAlan Cox 			return (NULL);
831c68c3537SAlan Cox 		}
832e2068d0bSJeff Roberson 	}
833c68c3537SAlan Cox 	if (msucc != NULL) {
834c68c3537SAlan Cox 		if ((rv = vm_reserv_from_page(msucc))->object != object)
835f8a47341SAlan Cox 			rightcap = msucc->pindex;
836f8a47341SAlan Cox 		else
837f8a47341SAlan Cox 			rightcap = rv->pindex;
838e2068d0bSJeff Roberson 		if (first + VM_LEVEL_0_NPAGES > rightcap) {
839e2068d0bSJeff Roberson 			vm_reserv_object_unlock(object);
840f8a47341SAlan Cox 			return (NULL);
841c68c3537SAlan Cox 		}
842e2068d0bSJeff Roberson 	}
843e2068d0bSJeff Roberson 	vm_reserv_object_unlock(object);
844f8a47341SAlan Cox 
845f8a47341SAlan Cox 	/*
84663967687SJeff Roberson 	 * Would the last new reservation extend past the end of the object?
84763967687SJeff Roberson 	 *
84863967687SJeff Roberson 	 * If the object is unlikely to grow don't allocate a reservation for
84963967687SJeff Roberson 	 * the tail.
850f8a47341SAlan Cox 	 */
85163967687SJeff Roberson 	if ((object->flags & OBJ_ANON) == 0 &&
85263967687SJeff Roberson 	    first + VM_LEVEL_0_NPAGES > object->size)
853f8a47341SAlan Cox 		return (NULL);
854f8a47341SAlan Cox 
855f8a47341SAlan Cox 	/*
856c68c3537SAlan Cox 	 * Allocate and populate the new reservation.
857f8a47341SAlan Cox 	 */
8585c930c89SJeff Roberson 	m = NULL;
8595c930c89SJeff Roberson 	vmd = VM_DOMAIN(domain);
8605c930c89SJeff Roberson 	if (vm_domain_allocate(vmd, req, 1)) {
8615c930c89SJeff Roberson 		vm_domain_free_lock(vmd);
8625c930c89SJeff Roberson 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
8635c930c89SJeff Roberson 		    VM_LEVEL_0_ORDER);
8645c930c89SJeff Roberson 		vm_domain_free_unlock(vmd);
8655c930c89SJeff Roberson 		if (m == NULL) {
8665c930c89SJeff Roberson 			vm_domain_freecnt_inc(vmd, 1);
8675c930c89SJeff Roberson 			return (NULL);
8685c930c89SJeff Roberson 		}
8695c930c89SJeff Roberson 	} else
870c68c3537SAlan Cox 		return (NULL);
871f8a47341SAlan Cox 	rv = vm_reserv_from_page(m);
8725c930c89SJeff Roberson 	vm_reserv_lock(rv);
873f8a47341SAlan Cox 	KASSERT(rv->pages == m,
874c68c3537SAlan Cox 	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
875e2068d0bSJeff Roberson 	vm_reserv_insert(rv, object, first);
876ec179322SAlan Cox 	index = VM_RESERV_INDEX(object, pindex);
877ec179322SAlan Cox 	vm_reserv_populate(rv, index);
8785c930c89SJeff Roberson 	vm_reserv_unlock(rv);
8795c930c89SJeff Roberson 
880ec179322SAlan Cox 	return (&rv->pages[index]);
881f8a47341SAlan Cox }
882f8a47341SAlan Cox 
883f8a47341SAlan Cox /*
884ada27a3bSKonstantin Belousov  * Breaks the given reservation.  All free pages in the reservation
885ada27a3bSKonstantin Belousov  * are returned to the physical memory allocator.  The reservation's
886ada27a3bSKonstantin Belousov  * population count and map are reset to their initial state.
887ec179322SAlan Cox  *
8883453bca8SAlan Cox  * The given reservation must not be in the partially populated reservation
889fe6d5344SMark Johnston  * queue.
890ec179322SAlan Cox  */
891ec179322SAlan Cox static void
892ada27a3bSKonstantin Belousov vm_reserv_break(vm_reserv_t rv)
893ec179322SAlan Cox {
894*84e2ae64SDoug Moore 	int hi, lo, pos;
895ec179322SAlan Cox 
8965c930c89SJeff Roberson 	vm_reserv_assert_locked(rv);
8975c930c89SJeff Roberson 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
8985c930c89SJeff Roberson 	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
899e2068d0bSJeff Roberson 	vm_reserv_remove(rv);
900c4be9169SKonstantin Belousov 	rv->pages->psind = 0;
901e67a5068SDoug Moore 	hi = lo = -1;
902*84e2ae64SDoug Moore 	pos = 0;
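	/*
	 * The loop below scans the popmap for maximal runs of clear bits,
	 * i.e., free pages.  While lo == hi, bit_ff_at() searches for the
	 * next clear bit, which starts a run at "lo"; otherwise it searches
	 * for the next set bit, which ends the run at "hi", and the free
	 * pages [lo, hi) are returned to the physical memory allocator.
	 */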
903*84e2ae64SDoug Moore 	for (;;) {
904*84e2ae64SDoug Moore 		bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
905*84e2ae64SDoug Moore 		if (lo == hi) {
906*84e2ae64SDoug Moore 			if (pos == -1)
907*84e2ae64SDoug Moore 				break;
908*84e2ae64SDoug Moore 			lo = pos;
909*84e2ae64SDoug Moore 			continue;
910ec179322SAlan Cox 		}
911*84e2ae64SDoug Moore 		if (pos == -1)
912*84e2ae64SDoug Moore 			pos = VM_LEVEL_0_NPAGES;
913*84e2ae64SDoug Moore 		hi = pos;
9145c930c89SJeff Roberson 		vm_domain_free_lock(VM_DOMAIN(rv->domain));
915b8590daeSDoug Moore 		vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
9165c930c89SJeff Roberson 		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
917e67a5068SDoug Moore 		lo = hi;
918e67a5068SDoug Moore 	}
919*84e2ae64SDoug Moore 	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
920e67a5068SDoug Moore 	rv->popcnt = 0;
9215c930c89SJeff Roberson 	counter_u64_add(vm_reserv_broken, 1);
922ec179322SAlan Cox }
923ec179322SAlan Cox 
924ec179322SAlan Cox /*
925f8a47341SAlan Cox  * Breaks all reservations belonging to the given object.
926f8a47341SAlan Cox  */
927f8a47341SAlan Cox void
928f8a47341SAlan Cox vm_reserv_break_all(vm_object_t object)
929f8a47341SAlan Cox {
930f8a47341SAlan Cox 	vm_reserv_t rv;
931f8a47341SAlan Cox 
932e2068d0bSJeff Roberson 	/*
933e2068d0bSJeff Roberson 	 * This access of object->rvq is unsynchronized so that the
934e2068d0bSJeff Roberson 	 * object rvq lock can nest after the domain_free lock.  We
935e2068d0bSJeff Roberson 	 * must check for races in the results.  However, the object
936e2068d0bSJeff Roberson 	 * lock prevents new additions, so we are guaranteed that when
937e2068d0bSJeff Roberson 	 * LIST_FIRST() returns NULL the object is properly empty.
938e2068d0bSJeff Roberson 	 */
939f8a47341SAlan Cox 	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
9405c930c89SJeff Roberson 		vm_reserv_lock(rv);
941e2068d0bSJeff Roberson 		/* Reclaim race. */
9425c930c89SJeff Roberson 		if (rv->object != object) {
9435c930c89SJeff Roberson 			vm_reserv_unlock(rv);
944e2068d0bSJeff Roberson 			continue;
9455c930c89SJeff Roberson 		}
9465c930c89SJeff Roberson 		vm_reserv_domain_lock(rv->domain);
947f8a47341SAlan Cox 		if (rv->inpartpopq) {
948fe6d5344SMark Johnston 			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
949f8a47341SAlan Cox 			rv->inpartpopq = FALSE;
950f8a47341SAlan Cox 		}
9515c930c89SJeff Roberson 		vm_reserv_domain_unlock(rv->domain);
952ada27a3bSKonstantin Belousov 		vm_reserv_break(rv);
9535c930c89SJeff Roberson 		vm_reserv_unlock(rv);
954f8a47341SAlan Cox 	}
955f8a47341SAlan Cox }
956f8a47341SAlan Cox 
957f8a47341SAlan Cox /*
958f8a47341SAlan Cox  * Frees the given page if it belongs to a reservation.  Returns TRUE if the
959f8a47341SAlan Cox  * page is freed and FALSE otherwise.
960f8a47341SAlan Cox  */
961f8a47341SAlan Cox boolean_t
962f8a47341SAlan Cox vm_reserv_free_page(vm_page_t m)
963f8a47341SAlan Cox {
964f8a47341SAlan Cox 	vm_reserv_t rv;
9655c930c89SJeff Roberson 	boolean_t ret;
966f8a47341SAlan Cox 
967f8a47341SAlan Cox 	rv = vm_reserv_from_page(m);
968908e3da1SAlan Cox 	if (rv->object == NULL)
969908e3da1SAlan Cox 		return (FALSE);
9705c930c89SJeff Roberson 	vm_reserv_lock(rv);
9715c930c89SJeff Roberson 	/* Re-validate after lock. */
9725c930c89SJeff Roberson 	if (rv->object != NULL) {
973ec179322SAlan Cox 		vm_reserv_depopulate(rv, m - rv->pages);
9745c930c89SJeff Roberson 		ret = TRUE;
9755c930c89SJeff Roberson 	} else
9765c930c89SJeff Roberson 		ret = FALSE;
9775c930c89SJeff Roberson 	vm_reserv_unlock(rv);
9785c930c89SJeff Roberson 
9795c930c89SJeff Roberson 	return (ret);
980f8a47341SAlan Cox }
981f8a47341SAlan Cox 
982f8a47341SAlan Cox /*
983f8a47341SAlan Cox  * Initializes the reservation management system.  Specifically, initializes
984f8a47341SAlan Cox  * the reservation array.
985f8a47341SAlan Cox  *
986f8a47341SAlan Cox  * Requires that vm_page_array and first_page are initialized!
987f8a47341SAlan Cox  */
988f8a47341SAlan Cox void
989f8a47341SAlan Cox vm_reserv_init(void)
990f8a47341SAlan Cox {
991f8a47341SAlan Cox 	vm_paddr_t paddr;
99209e5f3c4SAlan Cox 	struct vm_phys_seg *seg;
9935c930c89SJeff Roberson 	struct vm_reserv *rv;
994b378d296SMark Johnston 	struct vm_reserv_domain *rvd;
9957988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE
9967988971aSD Scott Phillips 	vm_pindex_t used;
9977988971aSD Scott Phillips #endif
998*84e2ae64SDoug Moore 	int i, segind;
999f8a47341SAlan Cox 
1000f8a47341SAlan Cox 	/*
1001f8a47341SAlan Cox 	 * Initialize the reservation array.  Specifically, initialize the
1002f8a47341SAlan Cox 	 * "pages" field for every element that has an underlying superpage.
1003f8a47341SAlan Cox 	 */
10047988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE
10057988971aSD Scott Phillips 	used = 0;
10067988971aSD Scott Phillips #endif
100709e5f3c4SAlan Cox 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
100809e5f3c4SAlan Cox 		seg = &vm_phys_segs[segind];
10097988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE
10107988971aSD Scott Phillips 		seg->first_reserv = &vm_reserv_array[used];
10117988971aSD Scott Phillips 		used += howmany(seg->end, VM_LEVEL_0_SIZE) -
10127988971aSD Scott Phillips 		    seg->start / VM_LEVEL_0_SIZE;
10137988971aSD Scott Phillips #else
10147988971aSD Scott Phillips 		seg->first_reserv =
10157988971aSD Scott Phillips 		    &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT];
10167988971aSD Scott Phillips #endif
101709e5f3c4SAlan Cox 		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
10187988971aSD Scott Phillips 		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
10197988971aSD Scott Phillips 		    (seg->start >> VM_LEVEL_0_SHIFT);
10206b821a74SAleksandr Rybalko 		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
10216b821a74SAleksandr Rybalko 		    VM_LEVEL_0_SIZE <= seg->end) {
10225c930c89SJeff Roberson 			rv->pages = PHYS_TO_VM_PAGE(paddr);
10235c930c89SJeff Roberson 			rv->domain = seg->domain;
10245c930c89SJeff Roberson 			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
1025f8a47341SAlan Cox 			paddr += VM_LEVEL_0_SIZE;
10267988971aSD Scott Phillips 			rv++;
1027f8a47341SAlan Cox 		}
1028f8a47341SAlan Cox 	}
10295c930c89SJeff Roberson 	for (i = 0; i < MAXMEMDOM; i++) {
1030b378d296SMark Johnston 		rvd = &vm_rvd[i];
1031b378d296SMark Johnston 		mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
1032b378d296SMark Johnston 		TAILQ_INIT(&rvd->partpop);
1033b378d296SMark Johnston 		mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);
1034b378d296SMark Johnston 
1035b378d296SMark Johnston 		/*
1036b378d296SMark Johnston 		 * Fully populated reservations should never be present in the
1037b378d296SMark Johnston 		 * partially populated reservation queues.
1038b378d296SMark Johnston 		 */
1039b378d296SMark Johnston 		rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
1040*84e2ae64SDoug Moore 		bit_nset(rvd->marker.popmap, 0, VM_LEVEL_0_NPAGES - 1);
1041f8a47341SAlan Cox 	}
1042f8a47341SAlan Cox 
10435c930c89SJeff Roberson 	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
10445c930c89SJeff Roberson 		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
10455c930c89SJeff Roberson 		    MTX_DEF);
10465c930c89SJeff Roberson }
10475c930c89SJeff Roberson 
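/*
 * Editor's note: the dense (!VM_PHYSSEG_SPARSE) layout above locates the
 * reservation covering a physical address purely by shifting that address by
 * VM_LEVEL_0_SHIFT.  The standalone sketch below illustrates the same
 * arithmetic; the 2 MB, 512-page level-0 geometry is an assumption for the
 * example, not taken from any particular architecture header, and the block
 * is excluded from the build.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

#define	EX_LEVEL_0_SHIFT	21		/* assumed: 2 MB reservations */
#define	EX_LEVEL_0_SIZE		(1UL << EX_LEVEL_0_SHIFT)

int
main(void)
{
	unsigned long paddr = 0x40321000UL;	/* arbitrary physical address */

	/* Index into a dense reservation array, as in vm_reserv_init(). */
	printf("reservation index %lu, base %#lx\n",
	    paddr >> EX_LEVEL_0_SHIFT, paddr & ~(EX_LEVEL_0_SIZE - 1));
	return (0);
}
#endif
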
1048f8a47341SAlan Cox /*
1049c869e672SAlan Cox  * Returns true if the given page belongs to a reservation and that page is
1050c869e672SAlan Cox  * free.  Otherwise, returns false.
1051c869e672SAlan Cox  */
1052c869e672SAlan Cox bool
1053c869e672SAlan Cox vm_reserv_is_page_free(vm_page_t m)
1054c869e672SAlan Cox {
1055c869e672SAlan Cox 	vm_reserv_t rv;
1056c869e672SAlan Cox 
1057c869e672SAlan Cox 	rv = vm_reserv_from_page(m);
1058c869e672SAlan Cox 	if (rv->object == NULL)
1059c869e672SAlan Cox 		return (false);
1060*84e2ae64SDoug Moore 	return (!bit_test(rv->popmap, m - rv->pages));
1061c869e672SAlan Cox }
1062c869e672SAlan Cox 
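/*
 * Editor's note: popmap uses one bit per small page, with a set bit meaning
 * "populated"; a page is therefore free when its bit is clear.  A minimal
 * userland sketch of that convention follows, assuming FreeBSD's
 * <bitstring.h> macros (bit_decl, bit_nclear, bit_nset, bit_test); the
 * 512-page reservation size is an assumed value and the block is not built.
 */
#if 0	/* illustrative sketch only */
#include <bitstring.h>
#include <stdio.h>

#define	EX_NPAGES	512

int
main(void)
{
	bitstr_t bit_decl(popmap, EX_NPAGES);

	bit_nclear(popmap, 0, EX_NPAGES - 1);	/* all pages start out free */
	bit_nset(popmap, 0, 63);		/* populate the first 64 pages */
	printf("page 10 free: %d\n", !bit_test(popmap, 10));	/* 0 */
	printf("page 100 free: %d\n", !bit_test(popmap, 100));	/* 1 */
	return (0);
}
#endif
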
1063c869e672SAlan Cox /*
1064c869e672SAlan Cox  * If the given page belongs to a reservation, returns the level of that
1065c869e672SAlan Cox  * reservation.  Otherwise, returns -1.
1066c869e672SAlan Cox  */
1067c869e672SAlan Cox int
1068c869e672SAlan Cox vm_reserv_level(vm_page_t m)
1069c869e672SAlan Cox {
1070c869e672SAlan Cox 	vm_reserv_t rv;
1071c869e672SAlan Cox 
1072c869e672SAlan Cox 	rv = vm_reserv_from_page(m);
1073c869e672SAlan Cox 	return (rv->object != NULL ? 0 : -1);
1074c869e672SAlan Cox }
1075c869e672SAlan Cox 
1076c869e672SAlan Cox /*
10773453bca8SAlan Cox  * Returns a reservation level if the given page belongs to a fully populated
1078f8a47341SAlan Cox  * reservation and -1 otherwise.
1079f8a47341SAlan Cox  */
1080f8a47341SAlan Cox int
1081f8a47341SAlan Cox vm_reserv_level_iffullpop(vm_page_t m)
1082f8a47341SAlan Cox {
1083f8a47341SAlan Cox 	vm_reserv_t rv;
1084f8a47341SAlan Cox 
1085f8a47341SAlan Cox 	rv = vm_reserv_from_page(m);
1086f8a47341SAlan Cox 	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
1087f8a47341SAlan Cox }
1088f8a47341SAlan Cox 
1089f8a47341SAlan Cox /*
1090b378d296SMark Johnston  * Remove a partially populated reservation from the queue.
1091b378d296SMark Johnston  */
1092b378d296SMark Johnston static void
1093b378d296SMark Johnston vm_reserv_dequeue(vm_reserv_t rv)
1094b378d296SMark Johnston {
1095b378d296SMark Johnston 
1096b378d296SMark Johnston 	vm_reserv_domain_assert_locked(rv->domain);
1097b378d296SMark Johnston 	vm_reserv_assert_locked(rv);
1098b378d296SMark Johnston 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
1099b378d296SMark Johnston 	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
1100b378d296SMark Johnston 	KASSERT(rv->inpartpopq,
1101b378d296SMark Johnston 	    ("vm_reserv_dequeue: reserv %p's inpartpopq is FALSE", rv));
1102b378d296SMark Johnston 
1103b378d296SMark Johnston 	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
1104b378d296SMark Johnston 	rv->inpartpopq = FALSE;
1105b378d296SMark Johnston }
1106b378d296SMark Johnston 
1107b378d296SMark Johnston /*
11083453bca8SAlan Cox  * Breaks the given partially populated reservation, releasing its free pages
11093453bca8SAlan Cox  * to the physical memory allocator.
1110f8a47341SAlan Cox  */
111144aab2c3SAlan Cox static void
111244aab2c3SAlan Cox vm_reserv_reclaim(vm_reserv_t rv)
1113f8a47341SAlan Cox {
1114f8a47341SAlan Cox 
11155c930c89SJeff Roberson 	vm_reserv_assert_locked(rv);
11165c930c89SJeff Roberson 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
11175c930c89SJeff Roberson 	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
1118b378d296SMark Johnston 	if (rv->inpartpopq) {
11195c930c89SJeff Roberson 		vm_reserv_domain_lock(rv->domain);
1120b378d296SMark Johnston 		vm_reserv_dequeue(rv);
11215c930c89SJeff Roberson 		vm_reserv_domain_unlock(rv->domain);
1122b378d296SMark Johnston 	}
1123ada27a3bSKonstantin Belousov 	vm_reserv_break(rv);
11245c930c89SJeff Roberson 	counter_u64_add(vm_reserv_reclaimed, 1);
112544aab2c3SAlan Cox }
112644aab2c3SAlan Cox 
112744aab2c3SAlan Cox /*
1128b378d296SMark Johnston  * Breaks a reservation near the head of the partially populated reservation
11293453bca8SAlan Cox  * queue, releasing its free pages to the physical memory allocator.  Returns
11303453bca8SAlan Cox  * true if a reservation is broken and false otherwise.
113144aab2c3SAlan Cox  */
1132b378d296SMark Johnston bool
1133ef435ae7SJeff Roberson vm_reserv_reclaim_inactive(int domain)
113444aab2c3SAlan Cox {
113544aab2c3SAlan Cox 	vm_reserv_t rv;
113644aab2c3SAlan Cox 
1137b378d296SMark Johnston 	vm_reserv_domain_lock(domain);
1138b378d296SMark Johnston 	TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
1139b378d296SMark Johnston 		/*
1140b378d296SMark Johnston 		 * A locked reservation is likely being updated or reclaimed,
1141b378d296SMark Johnston 		 * so just skip ahead.
1142b378d296SMark Johnston 		 */
1143b378d296SMark Johnston 		if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
1144b378d296SMark Johnston 			vm_reserv_dequeue(rv);
1145b378d296SMark Johnston 			break;
11465c930c89SJeff Roberson 		}
1147b378d296SMark Johnston 	}
1148b378d296SMark Johnston 	vm_reserv_domain_unlock(domain);
1149b378d296SMark Johnston 	if (rv != NULL) {
115044aab2c3SAlan Cox 		vm_reserv_reclaim(rv);
11515c930c89SJeff Roberson 		vm_reserv_unlock(rv);
1152b378d296SMark Johnston 		return (true);
1153f8a47341SAlan Cox 	}
1154b378d296SMark Johnston 	return (false);
1155f8a47341SAlan Cox }
1156f8a47341SAlan Cox 
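/*
 * Editor's note: the scan above walks the partially populated queue under
 * the domain lock and takes the first reservation whose lock can be acquired
 * without sleeping, skipping busy entries and the scan marker.  The sketch
 * below mimics that trylock-and-skip pattern with a plain "busy" flag in
 * place of the reservation mutex; it is an assumption-laden illustration
 * (not kernel code) and is excluded from the build.
 */
#if 0	/* illustrative sketch only */
#include <sys/queue.h>
#include <stdio.h>

struct ex_reserv {
	int busy;				/* stands in for vm_reserv_trylock() */
	int id;
	TAILQ_ENTRY(ex_reserv) link;
};
static TAILQ_HEAD(, ex_reserv) ex_partpop = TAILQ_HEAD_INITIALIZER(ex_partpop);

int
main(void)
{
	struct ex_reserv r[3] = { { 1, 0 }, { 1, 1 }, { 0, 2 } }, *rv;
	int i;

	for (i = 0; i < 3; i++)
		TAILQ_INSERT_TAIL(&ex_partpop, &r[i], link);
	TAILQ_FOREACH(rv, &ex_partpop, link) {
		if (!rv->busy) {		/* "trylock" succeeded */
			TAILQ_REMOVE(&ex_partpop, rv, link);	/* dequeue */
			break;
		}
	}
	printf("reclaimed %d\n", rv != NULL ? rv->id : -1);	/* prints 2 */
	return (0);
}
#endif
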
1157f8a47341SAlan Cox /*
1158f96e8a0bSDoug Moore  * Determine whether this reservation has free pages that satisfy the given
1159f96e8a0bSDoug Moore  * request for contiguous physical memory.  Start searching from the lower
11606f1c8908SDoug Moore  * bound, defined by lo, and stop at the upper bound, hi.  Return the index
11616f1c8908SDoug Moore  * of the first satisfactory free page, or -1 if none is found.
1162f96e8a0bSDoug Moore  */
11636f1c8908SDoug Moore static int
11646f1c8908SDoug Moore vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
11656f1c8908SDoug Moore     int hi, int ppn_align, int ppn_bound)
1166f96e8a0bSDoug Moore {
1167f96e8a0bSDoug Moore 
1168f96e8a0bSDoug Moore 	vm_reserv_assert_locked(rv);
11696f1c8908SDoug Moore 	KASSERT(npages <= VM_LEVEL_0_NPAGES - 1,
11706f1c8908SDoug Moore 	    ("%s: Too many pages", __func__));
11716f1c8908SDoug Moore 	KASSERT(ppn_bound <= VM_LEVEL_0_NPAGES,
11726f1c8908SDoug Moore 	    ("%s: Too big a boundary for reservation size", __func__));
11736f1c8908SDoug Moore 	KASSERT(npages <= ppn_bound,
11746f1c8908SDoug Moore 	    ("%s: Too many pages for given boundary", __func__));
11756f1c8908SDoug Moore 	KASSERT(ppn_align != 0 && powerof2(ppn_align),
11766f1c8908SDoug Moore 	    ("ppn_align is not a positive power of 2"));
11776f1c8908SDoug Moore 	KASSERT(ppn_bound != 0 && powerof2(ppn_bound),
11786f1c8908SDoug Moore 	    ("ppn_bound is not a positive power of 2"));
1179*84e2ae64SDoug Moore 	while (bit_ffc_area_at(rv->popmap, lo, hi, npages, &lo), lo != -1) {
11806f1c8908SDoug Moore 		if (lo < roundup2(lo, ppn_align)) {
1181f96e8a0bSDoug Moore 			/* Skip to next aligned page. */
11826f1c8908SDoug Moore 			lo = roundup2(lo, ppn_align);
1183*84e2ae64SDoug Moore 		} else if (roundup2(lo + 1, ppn_bound) >= lo + npages)
11846f1c8908SDoug Moore 			return (lo);
1185*84e2ae64SDoug Moore 		if (roundup2(lo + 1, ppn_bound) < lo + npages) {
1186*84e2ae64SDoug Moore 			/* Skip to next boundary-matching page. */
1187*84e2ae64SDoug Moore 			lo = roundup2(lo + 1, ppn_bound);
1188f96e8a0bSDoug Moore 		}
1189*84e2ae64SDoug Moore 	}
11906f1c8908SDoug Moore 	return (-1);
1191f96e8a0bSDoug Moore }
1192f96e8a0bSDoug Moore 
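/*
 * Editor's worked example of the alignment/boundary skipping above, with
 * assumed values (npages = 3, ppn_align = 4, ppn_bound = 8) rather than any
 * real reservation geometry: a candidate run starting at page 5 is first
 * rounded up to the aligned page 8, and a run starting at page 6 would cross
 * the boundary at page 8 (roundup2(7, 8) = 8 < 6 + 3), so it is skipped to
 * page 8 as well.  The block is excluded from the build.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

#define	ex_roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))	/* y: power of 2 */

int
main(void)
{
	int npages = 3, ppn_align = 4, ppn_bound = 8, lo = 5;

	if (lo < ex_roundup2(lo, ppn_align))
		lo = ex_roundup2(lo, ppn_align);	/* 5 -> 8 */
	printf("aligned start: %d\n", lo);
	printf("fits within one boundary: %d\n",
	    ex_roundup2(lo + 1, ppn_bound) >= lo + npages);	/* 16 >= 11: yes */
	return (0);
}
#endif
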
1193f96e8a0bSDoug Moore /*
11943453bca8SAlan Cox  * Searches the partially populated reservation queue for the least recently
11953453bca8SAlan Cox  * changed reservation with free pages that satisfy the given request for
11963453bca8SAlan Cox  * contiguous physical memory.  If a satisfactory reservation is found, it is
1197f96e8a0bSDoug Moore  * broken.  Returns the first page of the satisfied request, or NULL otherwise.
119844aab2c3SAlan Cox  */
11990d5fac28SDoug Moore vm_page_t
1200ef435ae7SJeff Roberson vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
1201ef435ae7SJeff Roberson     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
120244aab2c3SAlan Cox {
1203b378d296SMark Johnston 	struct vm_reserv_queue *queue;
1204ec179322SAlan Cox 	vm_paddr_t pa, size;
12050d5fac28SDoug Moore 	vm_page_t m_ret;
1206b378d296SMark Johnston 	vm_reserv_t marker, rv, rvn;
12076f1c8908SDoug Moore 	int hi, lo, posn, ppn_align, ppn_bound;
120844aab2c3SAlan Cox 
12096f1c8908SDoug Moore 	KASSERT(npages > 0, ("npages is 0"));
12106f1c8908SDoug Moore 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
12116f1c8908SDoug Moore 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1212c68c3537SAlan Cox 	if (npages > VM_LEVEL_0_NPAGES - 1)
1213f96e8a0bSDoug Moore 		return (NULL);
12146f1c8908SDoug Moore 	size = npages << PAGE_SHIFT;
12156f1c8908SDoug Moore 	/*
12166f1c8908SDoug Moore 	 * Ensure that a free range starting at a boundary-multiple
12176f1c8908SDoug Moore 	 * doesn't include a boundary-multiple within it.  Otherwise,
12186f1c8908SDoug Moore 	 * no boundary-constrained allocation is possible.
12196f1c8908SDoug Moore 	 */
1220c606ab59SDoug Moore 	if (!vm_addr_bound_ok(0, size, boundary))
12210d5fac28SDoug Moore 		return (NULL);
1222b378d296SMark Johnston 	marker = &vm_rvd[domain].marker;
1223b378d296SMark Johnston 	queue = &vm_rvd[domain].partpop;
12246f1c8908SDoug Moore 	/*
12256f1c8908SDoug Moore 	 * Compute shifted alignment, boundary values for page-based
12266f1c8908SDoug Moore 	 * calculations.  Constrain to range [1, VM_LEVEL_0_NPAGES] to
12276f1c8908SDoug Moore 	 * avoid overflow.
12286f1c8908SDoug Moore 	 */
12296f1c8908SDoug Moore 	ppn_align = (int)(ulmin(ulmax(PAGE_SIZE, alignment),
12306f1c8908SDoug Moore 	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);
123149fd2d51SDoug Moore 	ppn_bound = boundary == 0 ? VM_LEVEL_0_NPAGES :
123249fd2d51SDoug Moore 	    (int)(MIN(MAX(PAGE_SIZE, boundary),
12336f1c8908SDoug Moore 	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);
1234b378d296SMark Johnston 
1235b378d296SMark Johnston 	vm_reserv_domain_scan_lock(domain);
12365c930c89SJeff Roberson 	vm_reserv_domain_lock(domain);
1237b378d296SMark Johnston 	TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
1238f96e8a0bSDoug Moore 		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
1239f96e8a0bSDoug Moore 		if (pa + VM_LEVEL_0_SIZE - size < low) {
1240ec179322SAlan Cox 			/* This entire reservation is too low; go to next. */
124144aab2c3SAlan Cox 			continue;
124244aab2c3SAlan Cox 		}
124344aab2c3SAlan Cox 		if (pa + size > high) {
1244ec179322SAlan Cox 			/* This entire reservation is too high; go to next. */
1245ec179322SAlan Cox 			continue;
124685f2a0c9SMax Laier 		}
1247c606ab59SDoug Moore 		if (!vm_addr_align_ok(pa, alignment)) {
12486f1c8908SDoug Moore 			/* This entire reservation is unaligned; go to next. */
12496f1c8908SDoug Moore 			continue;
12506f1c8908SDoug Moore 		}
1251b378d296SMark Johnston 
12525c930c89SJeff Roberson 		if (vm_reserv_trylock(rv) == 0) {
1253b378d296SMark Johnston 			TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
12545c930c89SJeff Roberson 			vm_reserv_domain_unlock(domain);
12555c930c89SJeff Roberson 			vm_reserv_lock(rv);
1256968079f2SMark Johnston 			if (TAILQ_PREV(marker, vm_reserv_queue, partpopq) !=
1257968079f2SMark Johnston 			    rv) {
1258b378d296SMark Johnston 				vm_reserv_unlock(rv);
12595c930c89SJeff Roberson 				vm_reserv_domain_lock(domain);
1260b378d296SMark Johnston 				rvn = TAILQ_NEXT(marker, partpopq);
1261b378d296SMark Johnston 				TAILQ_REMOVE(queue, marker, partpopq);
12625c930c89SJeff Roberson 				continue;
12635c930c89SJeff Roberson 			}
1264b378d296SMark Johnston 			vm_reserv_domain_lock(domain);
1265b378d296SMark Johnston 			TAILQ_REMOVE(queue, marker, partpopq);
1266b378d296SMark Johnston 		}
12675c930c89SJeff Roberson 		vm_reserv_domain_unlock(domain);
12686f1c8908SDoug Moore 		lo = (pa >= low) ? 0 :
12696f1c8908SDoug Moore 		    (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT);
12706f1c8908SDoug Moore 		hi = (pa + VM_LEVEL_0_SIZE <= high) ? VM_LEVEL_0_NPAGES :
12716f1c8908SDoug Moore 		    (int)((high - pa) >> PAGE_SHIFT);
12726f1c8908SDoug Moore 		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
12736f1c8908SDoug Moore 		    ppn_align, ppn_bound);
12746f1c8908SDoug Moore 		if (posn >= 0) {
12750d5fac28SDoug Moore 			vm_reserv_domain_scan_unlock(domain);
12760d5fac28SDoug Moore 			/* Allocate requested space */
12770d5fac28SDoug Moore 			rv->popcnt += npages;
1278*84e2ae64SDoug Moore 			bit_nset(rv->popmap, posn, posn + npages - 1);
12790d5fac28SDoug Moore 			vm_reserv_reclaim(rv);
12800d5fac28SDoug Moore 			vm_reserv_unlock(rv);
12810d5fac28SDoug Moore 			m_ret = &rv->pages[posn];
12820d5fac28SDoug Moore 			pa = VM_PAGE_TO_PHYS(m_ret);
1283c606ab59SDoug Moore 			KASSERT(vm_addr_ok(pa, size, alignment, boundary),
1284c606ab59SDoug Moore 			    ("%s: adjusted address not aligned/bounded to "
1285c606ab59SDoug Moore 			     "%lx/%jx",
1286c606ab59SDoug Moore 			     __func__, alignment, (uintmax_t)boundary));
12870d5fac28SDoug Moore 			return (m_ret);
128844aab2c3SAlan Cox 		}
12895c930c89SJeff Roberson 		vm_reserv_domain_lock(domain);
1290968079f2SMark Johnston 		rvn = TAILQ_NEXT(rv, partpopq);
1291968079f2SMark Johnston 		vm_reserv_unlock(rv);
129244aab2c3SAlan Cox 	}
12935c930c89SJeff Roberson 	vm_reserv_domain_unlock(domain);
1294b378d296SMark Johnston 	vm_reserv_domain_scan_unlock(domain);
12950d5fac28SDoug Moore 	return (NULL);
129644aab2c3SAlan Cox }
129744aab2c3SAlan Cox 
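/*
 * Editor's note on the ppn_align/ppn_bound computation above: byte-granular
 * alignment and boundary requests are clamped into page counts in
 * [1, VM_LEVEL_0_NPAGES] before the per-reservation search runs.  The sketch
 * below redoes that clamping with assumed constants (4 KB pages, 512-page
 * reservations); a 64 KB alignment becomes 16 pages and a boundary of 0
 * imposes no boundary at all.  The block is excluded from the build.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

#define	EX_PAGE_SIZE	4096UL
#define	EX_PAGE_SHIFT	12
#define	EX_NPAGES	512UL
#define	EX_LEVEL_0_SIZE	(EX_NPAGES * EX_PAGE_SIZE)
#define	EX_MIN(a, b)	((a) < (b) ? (a) : (b))
#define	EX_MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	unsigned long alignment = 64 * 1024, boundary = 0;
	int ppn_align, ppn_bound;

	ppn_align = (int)(EX_MIN(EX_MAX(EX_PAGE_SIZE, alignment),
	    EX_LEVEL_0_SIZE) >> EX_PAGE_SHIFT);
	ppn_bound = boundary == 0 ? (int)EX_NPAGES :
	    (int)(EX_MIN(EX_MAX(EX_PAGE_SIZE, boundary),
	    EX_LEVEL_0_SIZE) >> EX_PAGE_SHIFT);
	printf("ppn_align %d, ppn_bound %d\n", ppn_align, ppn_bound);	/* 16, 512 */
	return (0);
}
#endif
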
129844aab2c3SAlan Cox /*
1299f8a47341SAlan Cox  * Transfers the reservation underlying the given page to a new object.
1300f8a47341SAlan Cox  *
1301f8a47341SAlan Cox  * The object must be locked.
1302f8a47341SAlan Cox  */
1303f8a47341SAlan Cox void
1304f8a47341SAlan Cox vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
1305f8a47341SAlan Cox     vm_pindex_t old_object_offset)
1306f8a47341SAlan Cox {
1307f8a47341SAlan Cox 	vm_reserv_t rv;
1308f8a47341SAlan Cox 
130989f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(new_object);
1310f8a47341SAlan Cox 	rv = vm_reserv_from_page(m);
1311f8a47341SAlan Cox 	if (rv->object == old_object) {
13125c930c89SJeff Roberson 		vm_reserv_lock(rv);
13135c930c89SJeff Roberson 		CTR6(KTR_VM,
13145c930c89SJeff Roberson 		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
13155c930c89SJeff Roberson 		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
13165c930c89SJeff Roberson 		    rv->inpartpopq);
1317f8a47341SAlan Cox 		if (rv->object == old_object) {
1318e2068d0bSJeff Roberson 			vm_reserv_object_lock(old_object);
1319e2068d0bSJeff Roberson 			rv->object = NULL;
1320f8a47341SAlan Cox 			LIST_REMOVE(rv, objq);
1321e2068d0bSJeff Roberson 			vm_reserv_object_unlock(old_object);
1322e2068d0bSJeff Roberson 			vm_reserv_object_lock(new_object);
1323f8a47341SAlan Cox 			rv->object = new_object;
1324f8a47341SAlan Cox 			rv->pindex -= old_object_offset;
1325e2068d0bSJeff Roberson 			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
1326e2068d0bSJeff Roberson 			vm_reserv_object_unlock(new_object);
1327f8a47341SAlan Cox 		}
13285c930c89SJeff Roberson 		vm_reserv_unlock(rv);
1329f8a47341SAlan Cox 	}
1330f8a47341SAlan Cox }
1331f8a47341SAlan Cox 
1332f8a47341SAlan Cox /*
1333c869e672SAlan Cox  * Returns the size (in bytes) of a reservation of the specified level.
1334c869e672SAlan Cox  */
1335c869e672SAlan Cox int
1336c869e672SAlan Cox vm_reserv_size(int level)
1337c869e672SAlan Cox {
1338c869e672SAlan Cox 
1339c869e672SAlan Cox 	switch (level) {
1340c869e672SAlan Cox 	case 0:
1341c869e672SAlan Cox 		return (VM_LEVEL_0_SIZE);
1342c869e672SAlan Cox 	case -1:
1343c869e672SAlan Cox 		return (PAGE_SIZE);
1344c869e672SAlan Cox 	default:
1345c869e672SAlan Cox 		return (0);
1346c869e672SAlan Cox 	}
1347c869e672SAlan Cox }
1348c869e672SAlan Cox 
1349c869e672SAlan Cox /*
1350f8a47341SAlan Cox  * Allocates the virtual and physical memory required by the reservation
1351f8a47341SAlan Cox  * management system's data structures, in particular, the reservation array.
1352f8a47341SAlan Cox  */
1353f8a47341SAlan Cox vm_paddr_t
13543e5e1b51SJeff Roberson vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
1355f8a47341SAlan Cox {
13567988971aSD Scott Phillips 	vm_paddr_t new_end;
13577988971aSD Scott Phillips 	vm_pindex_t count;
1358f8a47341SAlan Cox 	size_t size;
13593e5e1b51SJeff Roberson 	int i;
13603e5e1b51SJeff Roberson 
13617988971aSD Scott Phillips 	count = 0;
13623e5e1b51SJeff Roberson 	for (i = 0; i < vm_phys_nsegs; i++) {
13637988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE
13647988971aSD Scott Phillips 		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
13657988971aSD Scott Phillips 		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
13667988971aSD Scott Phillips #else
13677988971aSD Scott Phillips 		count = MAX(count,
13687988971aSD Scott Phillips 		    howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE));
13697988971aSD Scott Phillips #endif
13703e5e1b51SJeff Roberson 	}
13713e5e1b51SJeff Roberson 
13727988971aSD Scott Phillips 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
13737988971aSD Scott Phillips #ifdef VM_PHYSSEG_SPARSE
13747988971aSD Scott Phillips 		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
13757988971aSD Scott Phillips 		    phys_avail[i] / VM_LEVEL_0_SIZE;
13767988971aSD Scott Phillips #else
13777988971aSD Scott Phillips 		count = MAX(count,
13787988971aSD Scott Phillips 		    howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE));
13797988971aSD Scott Phillips #endif
13803e5e1b51SJeff Roberson 	}
1381f8a47341SAlan Cox 
1382f8a47341SAlan Cox 	/*
13837988971aSD Scott Phillips 	 * Calculate the size (in bytes) of the reservation array.  Round up
13847988971aSD Scott Phillips 	 * for partial superpages at boundaries, because every small page is mapped
13857988971aSD Scott Phillips 	 * to an element in the reservation array based on its physical address.
13867988971aSD Scott Phillips 	 * Thus, the number of elements in the reservation array can be greater
13877988971aSD Scott Phillips 	 * than the number of superpages.
1388f8a47341SAlan Cox 	 */
13897988971aSD Scott Phillips 	size = count * sizeof(struct vm_reserv);
1390f8a47341SAlan Cox 
1391f8a47341SAlan Cox 	/*
1392f8a47341SAlan Cox 	 * Allocate and map the physical memory for the reservation array.  The
1393f8a47341SAlan Cox 	 * next available virtual address is returned by reference.
1394f8a47341SAlan Cox 	 */
1395f8a47341SAlan Cox 	new_end = end - round_page(size);
1396f8a47341SAlan Cox 	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
1397f8a47341SAlan Cox 	    VM_PROT_READ | VM_PROT_WRITE);
1398f8a47341SAlan Cox 	bzero(vm_reserv_array, size);
1399f8a47341SAlan Cox 
1400f8a47341SAlan Cox 	/*
1401f8a47341SAlan Cox 	 * Return the next available physical address.
1402f8a47341SAlan Cox 	 */
1403f8a47341SAlan Cox 	return (new_end);
1404f8a47341SAlan Cox }
1405f8a47341SAlan Cox 
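/*
 * Editor's worked example of the sizing logic above, using an assumed
 * two-segment layout ([0, 512 MB) and [3 GB, 4 GB)) and a 2 MB level-0
 * reservation: a VM_PHYSSEG_SPARSE array needs one entry per reservation
 * actually covered by a segment, while the dense layout must reach the
 * highest reservation index and so can be much larger.  The block is
 * excluded from the build.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

#define	ex_howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define	EX_LEVEL_0_SIZE		(2ULL * 1024 * 1024)

int
main(void)
{
	unsigned long long start[] = { 0, 3ULL << 30 };
	unsigned long long end[] = { 512ULL << 20, 4ULL << 30 };
	unsigned long long dense = 0, sparse = 0;
	int i;

	for (i = 0; i < 2; i++) {
		sparse += ex_howmany(end[i], EX_LEVEL_0_SIZE) -
		    start[i] / EX_LEVEL_0_SIZE;
		if (dense < ex_howmany(end[i], EX_LEVEL_0_SIZE))
			dense = ex_howmany(end[i], EX_LEVEL_0_SIZE);
	}
	printf("sparse: %llu entries, dense: %llu entries\n", sparse, dense);
	return (0);
}
#endif
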
14068b5e1472SAlan Cox /*
14078b5e1472SAlan Cox  * Returns the superpage containing the given page.
14088b5e1472SAlan Cox  */
14098b5e1472SAlan Cox vm_page_t
14108b5e1472SAlan Cox vm_reserv_to_superpage(vm_page_t m)
14118b5e1472SAlan Cox {
14128b5e1472SAlan Cox 	vm_reserv_t rv;
14138b5e1472SAlan Cox 
14148b5e1472SAlan Cox 	VM_OBJECT_ASSERT_LOCKED(m->object);
14158b5e1472SAlan Cox 	rv = vm_reserv_from_page(m);
14165c930c89SJeff Roberson 	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
14175c930c89SJeff Roberson 		m = rv->pages;
14185c930c89SJeff Roberson 	else
14195c930c89SJeff Roberson 		m = NULL;
14205c930c89SJeff Roberson 
14215c930c89SJeff Roberson 	return (m);
14228b5e1472SAlan Cox }
14238b5e1472SAlan Cox 
1424f8a47341SAlan Cox #endif	/* VM_NRESERVLEVEL > 0 */
1425