/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
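
/*
 * For a concrete sense of scale (numbers taken from amd64, for
 * illustration only): VM_LEVEL_0_ORDER is 9 and PAGE_SHIFT is 12, so a
 * level 0 reservation spans VM_LEVEL_0_NPAGES = 1 << 9 = 512 small
 * pages, VM_LEVEL_0_SHIFT is 21, and VM_LEVEL_0_SIZE = 1 << 21 = 2MB,
 * matching the amd64 superpage size.
 */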

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
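
/*
 * Worked example (hypothetical values): with VM_LEVEL_0_NPAGES == 512,
 * an object whose pg_color is 0, and pindex == 1000,
 * VM_RESERV_INDEX(object, pindex) == 1000 & 511 == 488.  The pg_color
 * bias keeps a page's offset within its reservation consistent with the
 * object's preferred superpage alignment.
 */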

/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define	NPOPMAP_MAX	howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)
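
/*
 * Continuing the amd64 illustration: with 512 pages per reservation and
 * 64-bit u_longs, NPOPMAP == howmany(512, 64) == 8, so each
 * reservation's population map occupies eight words.
 */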

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}
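
/*
 * Usage sketch for the helpers above (hypothetical index): on a 64-bit
 * system, popmap_set(popmap, 70) sets bit 70 % 64 == 6 of
 * popmap[70 / 64 == 1]; popmap_is_set(popmap, 70) then reports that
 * small page 70 of the reservation is in use.
 */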

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * c - constant after boot
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * r - vm_reserv_lock
 * s - vm_reserv_domain_scan_lock
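 *
 * A field annotated with more than one of these locks must be modified
 * with all of the listed locks held; holding any one of them suffices to
 * read it.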
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	popcnt;			/* (r) # of pages in use */
	uint8_t		domain;			/* (c) NUMA domain. */
	char		inpartpopq;		/* (d, r) */
	int		lasttick;		/* (r) last pop update tick. */
	popmap_t	popmap[NPOPMAP_MAX];	/* (r) bit vector, used pages */
};

TAILQ_HEAD(vm_reserv_queue, vm_reserv);

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	    mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of reservation structures
 * than there are (physical) superpages.  These "invalid" reservation
 * structures exist to trade off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's "pages"
 * field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of a queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
struct vm_reserv_domain {
	struct mtx 		lock;
	struct vm_reserv_queue	partpop;	/* (d) */
	struct vm_reserv	marker;		/* (d, s) scan marker/lock */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	(&vm_rvd[(d)].lock)
#define	vm_reserv_domain_assert_locked(d)	\
	mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

#define	vm_reserv_domain_scan_lock(d)	mtx_lock(&vm_rvd[(d)].marker.lock)
#define	vm_reserv_domain_scan_unlock(d)	mtx_unlock(&vm_rvd[(d)].marker.lock)

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Reservation Info");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD,
    NULL, 0, sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_reserv_partpopq, "A",
    "Partially populated reservation queues");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because these locks are required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	    mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	    mtx_unlock(vm_reserv_object_lock_ptr((object)))
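
/*
 * One possible divide-free form of the "hash" above (an untested sketch,
 * not what this file uses): discard the pointer's low-order bits and
 * mask, e.g.
 *
 *	(((uintptr_t)(object) >> 8) & (VM_RESERV_OBJ_LOCK_COUNT - 1))
 *
 * This only distributes well when VM_RESERV_OBJ_LOCK_COUNT is a power of
 * two, which MAXCPU need not be, hence the modulo form is kept.
 */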

static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring any
 * locks, the returned value is inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
#ifdef VM_PHYSSEG_SPARSE
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
#else
		rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
#endif
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queues.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
				if (rv == &vm_rvd[domain].marker)
					continue;
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
	int i;

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
			    partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{
#ifdef VM_PHYSSEG_SPARSE
	struct vm_phys_seg *seg;

	seg = &vm_phys_segs[m->segind];
	return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
	    (seg->start >> VM_LEVEL_0_SHIFT));
#else
	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
#endif
}
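
/*
 * For example (hypothetical address, amd64 constants): a page at
 * physical address 0x40201000 lies within the 2MB reservation numbered
 * 0x40201000 >> 21 == 0x201, so vm_reserv_from_page() returns the
 * element of vm_reserv_array (or of the per-segment table in the
 * VM_PHYSSEG_SPARSE case) for that reservation number.
 */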

/*
 * Returns the reservation containing the given (object, pindex), or NULL if
 * none exists, and initializes "*msuccp" to the successor page.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
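
/*
 * Worked example (hypothetical values): with VM_LEVEL_0_NPAGES == 512
 * and rv->pindex == 1024, only pindexes 1024 through 1535 make the
 * masked difference zero, so exactly those offsets are contained in the
 * reservation.
 */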

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == 0,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = 1;
	}
	vm_reserv_domain_unlock(rv->domain);
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	size = npages << PAGE_SHIFT;
	if (!vm_addr_ok(pa, size, alignment, boundary))
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_contig: domain mismatch"));
		index = VM_RESERV_INDEX(object, pindex);
		/* Does the allocation fit within the reservation? */
		if (index + npages > VM_LEVEL_0_NPAGES)
			return (NULL);
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object)
			goto out;
		m = &rv->pages[index];
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + size > high ||
		    !vm_addr_ok(pa, size, alignment, boundary))
			goto out;
		/* Handle vm_page_rename(m, new_object, ...). */
		for (i = 0; i < npages; i++)
			if (popmap_is_set(rv->popmap, index + i))
				goto out;
		if (!vm_domain_allocate(vmd, req, npages))
			goto out;
		for (i = 0; i < npages; i++)
			vm_reserv_populate(rv, index + i);
		vm_reserv_unlock(rv);
		return (m);
out:
		vm_reserv_unlock(rv);
		return (NULL);
	}

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + maxpages > object->size) {
		if (maxpages == VM_LEVEL_0_NPAGES)
			return (NULL);
		allocpages = minpages;
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_page_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}

/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_page: domain mismatch"));
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		index = VM_RESERV_INDEX(object, pindex);
		m = &rv->pages[index];
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object ||
		    /* Handle vm_page_rename(m, new_object, ...). */
		    popmap_is_set(rv->popmap, index)) {
			m = NULL;
			goto out;
		}
		if (vm_domain_allocate(vmd, req, 1) == 0)
			m = NULL;
		else
			vm_reserv_populate(rv, index);
out:
		vm_reserv_unlock(rv);
		return (m);
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + VM_LEVEL_0_NPAGES > object->size)
		return (NULL);

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	u_long changes;
	int bitpos, hi, i, lo;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	rv->pages->psind = 0;
	hi = lo = -1;
	for (i = 0; i <= NPOPMAP; i++) {
		/*
		 * "changes" is a bitmask that marks where a new sequence of
		 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
		 * considered to be 1 if and only if lo == hi.  The bits of
		 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
		 */
		if (i == NPOPMAP)
			changes = lo != hi;
		else {
			changes = rv->popmap[i];
			changes ^= (changes << 1) | (lo == hi);
			rv->popmap[i] = 0;
		}
		while (changes != 0) {
			/*
			 * If the next change marked begins a run of 0s, set
			 * lo to mark that position.  Otherwise set hi and
			 * free pages from lo up to hi.
			 */
			bitpos = ffsl(changes) - 1;
			changes ^= 1UL << bitpos;
			if (lo == hi)
				lo = NBPOPMAP * i + bitpos;
			else {
				hi = NBPOPMAP * i + bitpos;
				vm_domain_free_lock(VM_DOMAIN(rv->domain));
				vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
				vm_domain_free_unlock(VM_DOMAIN(rv->domain));
				lo = hi;
			}
		}
	}
	rv->popcnt = 0;
	counter_u64_add(vm_reserv_broken, 1);
}
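
/*
 * Worked example of the run-finding trick above, using an 8-bit popmap
 * word for brevity (the real popmap_t is wider): for popmap[0] ==
 * 0b00111100 (pages 2-5 in use), changes == 0b01000101.  Bit 0 starts
 * the run of 0s at page 0; bit 2 ends it, so pages [0, 2) are freed;
 * bit 6 starts the run [6, 8), which the final iteration (i == NPOPMAP)
 * closes, freeing pages [6, 8).
 */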

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * LIST_FIRST() returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	struct vm_reserv_domain *rvd;
#ifdef VM_PHYSSEG_SPARSE
	vm_pindex_t used;
#endif
	int i, j, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
#ifdef VM_PHYSSEG_SPARSE
	used = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_reserv = &vm_reserv_array[used];
		used += howmany(seg->end, VM_LEVEL_0_SIZE) -
		    seg->start / VM_LEVEL_0_SIZE;
#else
		seg->first_reserv =
		    &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT];
#endif
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		rvd = &vm_rvd[i];
		mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
		TAILQ_INIT(&rvd->partpop);
		mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);

		/*
		 * Fully populated reservations should never be present in the
		 * partially populated reservation queues.
		 */
		rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
		for (j = 0; j < VM_LEVEL_0_NPAGES; j++)
			popmap_set(rvd->marker.popmap, j);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Remove a partially populated reservation from the queue.
 */
static void
vm_reserv_dequeue(vm_reserv_t rv)
{

	vm_reserv_domain_assert_locked(rv->domain);
	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_dequeue: reserv %p's inpartpopq is FALSE", rv));

	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	if (rv->inpartpopq) {
		vm_reserv_domain_lock(rv->domain);
		vm_reserv_dequeue(rv);
		vm_reserv_domain_unlock(rv->domain);
	}
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks a reservation near the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 */
bool
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
		/*
		 * A locked reservation is likely being updated or reclaimed,
		 * so just skip ahead.
		 */
		if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
			vm_reserv_dequeue(rv);
			break;
		}
	}
	vm_reserv_domain_unlock(domain);
	if (rv != NULL) {
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (true);
	}
	return (false);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by lo, and stop at the upper bound, hi.  Return the index
 * of the first satisfactory free page, or -1 if none is found.
 */
static int
vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
    int hi, int ppn_align, int ppn_bound)
{
	u_long changes;
	int bitpos, bits_left, i, n;

	vm_reserv_assert_locked(rv);
	KASSERT(npages <= VM_LEVEL_0_NPAGES - 1,
	    ("%s: Too many pages", __func__));
	KASSERT(ppn_bound <= VM_LEVEL_0_NPAGES,
	    ("%s: Too big a boundary for reservation size", __func__));
	KASSERT(npages <= ppn_bound,
	    ("%s: Too many pages for given boundary", __func__));
	KASSERT(ppn_align != 0 && powerof2(ppn_align),
	    ("ppn_align is not a positive power of 2"));
	KASSERT(ppn_bound != 0 && powerof2(ppn_bound),
	    ("ppn_bound is not a positive power of 2"));
	i = lo / NBPOPMAP;
	changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1);
	n = hi / NBPOPMAP;
	bits_left = hi % NBPOPMAP;
	hi = lo = -1;
	for (;;) {
		/*
		 * "changes" is a bitmask that marks where a new sequence of
		 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
		 * considered to be 1 if and only if lo == hi.  The bits of
		 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
		 */
		changes ^= (changes << 1) | (lo == hi);
		while (changes != 0) {
			/*
			 * If the next change marked begins a run of 0s, set
			 * lo to mark that position.  Otherwise set hi and
			 * look for a satisfactory first page from lo up to hi.
			 */
			bitpos = ffsl(changes) - 1;
			changes ^= 1UL << bitpos;
			if (lo == hi) {
				lo = NBPOPMAP * i + bitpos;
				continue;
			}
			hi = NBPOPMAP * i + bitpos;
			if (lo < roundup2(lo, ppn_align)) {
				/* Skip to next aligned page. */
				lo = roundup2(lo, ppn_align);
				if (lo >= VM_LEVEL_0_NPAGES)
					return (-1);
			}
			if (lo + npages > roundup2(lo, ppn_bound)) {
				/* Skip to next boundary-matching page. */
				lo = roundup2(lo, ppn_bound);
				if (lo >= VM_LEVEL_0_NPAGES)
					return (-1);
			}
			if (lo + npages <= hi)
				return (lo);
			lo = hi;
		}
		if (++i < n)
			changes = rv->popmap[i];
		else if (i == n)
			changes = bits_left == 0 ? -1UL :
			    (rv->popmap[n] | (-1UL << bits_left));
		else
			return (-1);
	}
}
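
/*
 * Example of the skipping logic above (hypothetical values): searching
 * for npages == 3 with ppn_align == 4 and ppn_bound == 8 in a free run
 * [5, 12), lo is first advanced from 5 to roundup2(5, 4) == 8 for
 * alignment; the candidate run [8, 11) then stays within the 8-page
 * boundary and below hi == 12, so 8 is returned.
 */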

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns the first page of a satisfactory run if one is found and
 * NULL otherwise.
 */
vm_page_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_reserv_queue *queue;
	vm_paddr_t pa, size;
	vm_page_t m_ret;
	vm_reserv_t marker, rv, rvn;
	int hi, lo, posn, ppn_align, ppn_bound;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (NULL);
	size = npages << PAGE_SHIFT;
	/*
	 * Ensure that a free range starting at a boundary-multiple
	 * doesn't include a boundary-multiple within it.  Otherwise,
	 * no boundary-constrained allocation is possible.
	 */
	if (!vm_addr_bound_ok(0, size, boundary))
		return (NULL);
	marker = &vm_rvd[domain].marker;
	queue = &vm_rvd[domain].partpop;
	/*
	 * Compute shifted alignment, boundary values for page-based
	 * calculations.  Constrain to range [1, VM_LEVEL_0_NPAGES] to
	 * avoid overflow.
	 */
	ppn_align = (int)(ulmin(ulmax(PAGE_SIZE, alignment),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);
	ppn_bound = boundary == 0 ? VM_LEVEL_0_NPAGES :
	    (int)(MIN(MAX(PAGE_SIZE, boundary),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);

	vm_reserv_domain_scan_lock(domain);
	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + VM_LEVEL_0_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (!vm_addr_align_ok(pa, alignment)) {
			/* This entire reservation is unaligned; go to next. */
			continue;
		}

		if (vm_reserv_trylock(rv) == 0) {
			TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (TAILQ_PREV(marker, vm_reserv_queue, partpopq) !=
			    rv) {
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				rvn = TAILQ_NEXT(marker, partpopq);
				TAILQ_REMOVE(queue, marker, partpopq);
				continue;
			}
			vm_reserv_domain_lock(domain);
			TAILQ_REMOVE(queue, marker, partpopq);
		}
		vm_reserv_domain_unlock(domain);
		lo = (pa >= low) ? 0 :
		    (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT);
		hi = (pa + VM_LEVEL_0_SIZE <= high) ? VM_LEVEL_0_NPAGES :
		    (int)((high - pa) >> PAGE_SHIFT);
		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
		    ppn_align, ppn_bound);
		if (posn >= 0) {
			vm_reserv_domain_scan_unlock(domain);
			/* Allocate requested space */
			rv->popcnt += npages;
			while (npages-- > 0)
				popmap_set(rv->popmap, posn + npages);
			vm_reserv_reclaim(rv);
			vm_reserv_unlock(rv);
			m_ret = &rv->pages[posn];
			pa = VM_PAGE_TO_PHYS(m_ret);
			KASSERT(vm_addr_ok(pa, size, alignment, boundary),
			    ("%s: adjusted address not aligned/bounded to "
			     "%lx/%jx",
			     __func__, alignment, (uintmax_t)boundary));
			return (m_ret);
		}
		vm_reserv_domain_lock(domain);
		rvn = TAILQ_NEXT(rv, partpopq);
		vm_reserv_unlock(rv);
	}
	vm_reserv_domain_unlock(domain);
	vm_reserv_domain_scan_unlock(domain);
	return (NULL);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
{
	vm_paddr_t new_end;
	vm_pindex_t count;
	size_t size;
	int i;

	count = 0;
	for (i = 0; i < vm_phys_nsegs; i++) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE));
#endif
	}

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
		    phys_avail[i] / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE));
#endif
	}

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * for partial superpages at boundaries, as every small page is mapped
	 * to an element in the reservation array based on its physical address.
	 * Thus, the number of elements in the reservation array can be greater
	 * than the number of superpages.
	 */
	size = count * sizeof(struct vm_reserv);
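
	/*
	 * Illustrative cost estimate (assumed numbers, not computed by
	 * this code): on amd64, where VM_LEVEL_0_SIZE is 2MB, a machine
	 * with 16GB of RAM yields roughly 8192 array elements, so at on
	 * the order of a hundred bytes per struct vm_reserv the array
	 * consumes at most a few megabytes of boot-time memory.
	 */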

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
		m = rv->pages;
	else
		m = NULL;

	return (m);
}

#endif	/* VM_NRESERVLEVEL > 0 */