xref: /freebsd/sys/vm/vm_reserv.c (revision 730cecb05aaf016ac52ef7cfc691ccec3a0408cd)
/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2008 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
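
/*
 * For example, on a configuration where PAGE_SHIFT is 12 and
 * VM_LEVEL_0_ORDER is 9 (the values used on amd64), these definitions
 * work out to:
 *
 *	VM_LEVEL_0_NPAGES == 1 << 9 == 512 small pages per reservation
 *	VM_LEVEL_0_SHIFT  == 9 + 12 == 21
 *	VM_LEVEL_0_SIZE   == 1 << 21 == 2MB
 *
 * and VM_RESERV_INDEX() with, say, pg_color 5 and pindex 600 yields
 * (5 + 600) & 511 == 93, i.e., the 94th small page of its reservation.
 */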

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially-
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially-populated reservation can be broken and reclaimed at any time.
 */
struct vm_reserv {
	TAILQ_ENTRY(vm_reserv) partpopq;	/* partially-populated queue */
	LIST_ENTRY(vm_reserv) objq;		/* object's reservation list */
	vm_object_t	object;			/* containing object */
	vm_pindex_t	pindex;			/* offset within object */
	vm_page_t	pages;			/* first page of a superpage */
	int		popcnt;			/* # of pages in use */
	char		inpartpopq;		/* on vm_rvq_partpop? */
};

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in that
 * it may contain a greater number of useful reservation structures than
 * there are (physical) superpages.  These "invalid" reservation structures
 * exist to trade off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's "pages"
 * field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially-populated reservation queue
 *
 * This queue enables the fast recovery of an unused cached or free small page
 * from a partially-populated reservation.  The reservation at the head of
 * this queue is the least-recently-changed, partially-populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
			    TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static long vm_reserv_broken;
SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, 0, "Cumulative number of broken reservations");

static long vm_reserv_freed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, 0, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");

static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");

static void		vm_reserv_depopulate(vm_reserv_t rv);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Describes the current state of the partially-populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
	for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
		counter = 0;
		unused_pages = 0;
		mtx_lock(&vm_page_queue_free_mtx);
		TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
			counter++;
			unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
		sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
		    unused_pages * ((int)PAGE_SIZE / 1024), counter);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially-populated reservation queue if the
 * population count is non-zero.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	rv->popcnt--;
	if (rv->popcnt == 0) {
		LIST_REMOVE(rv, objq);
		rv->object = NULL;
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_reserv_freed++;
	} else {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	}
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
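
/*
 * To illustrate the mapping (assuming the amd64-style values above,
 * i.e., VM_LEVEL_0_SHIFT == 21): a page at physical address 0x40301000
 * has reservation number 0x40301000 >> 21 == 0x201, so
 * vm_reserv_from_page() returns &vm_reserv_array[0x201].  Whether that
 * element is valid or active must still be checked via its "pages" and
 * "object" fields.
 */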

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
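
/*
 * To illustrate the mask test (assuming VM_LEVEL_0_NPAGES == 512 for
 * concreteness): with rv->pindex == 1024, pindex 1300 gives
 * (1300 - 1024) & ~511 == 0, so TRUE; pindex 1600 gives 576 & ~511 ==
 * 512, so FALSE.  Because vm_pindex_t is unsigned, a pindex below
 * rv->pindex wraps to a huge difference and also fails, making the test
 * equivalent to rv->pindex <= pindex < rv->pindex + VM_LEVEL_0_NPAGES.
 */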

/*
 * Increases the given reservation's population count.  If the reservation is
 * still partially populated, it is moved to the tail of the partially-
 * populated reservation queue; a reservation that becomes fully populated is
 * removed from that queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	rv->popcnt++;
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	}
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from an existing or newly-created reservation.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be powers of two.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, mpred, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
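
	/*
	 * For illustration (values assumed): pa == 0x1ff000 and size ==
	 * 0x4000 span the bytes [0x1ff000, 0x202fff].  With boundary ==
	 * 0x200000, (0x1ff000 ^ 0x202fff) & ~0x1fffff == 0x200000 != 0:
	 * the first and last bytes lie on opposite sides of a 2MB
	 * boundary, so the check above returns NULL.
	 */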

	/*
	 * Look for an existing reservation.
	 */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	if (mpred != NULL) {
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used and the first index to the right that cannot
	 * be used?
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
	}
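
	/*
	 * For example, with VM_LEVEL_0_NPAGES == 512 (assumed),
	 * VM_RESERV_INDEX() == 100, and npages == 700: minpages is 800
	 * and maxpages is roundup2(800, 512) == 1024, i.e., two full
	 * reservations.  If the successor page caps the range below
	 * first + 1024, the code above falls back to allocating only
	 * the minimal 800 pages instead of failing outright.
	 */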

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservations.  The alignment and
	 * boundary specified for this allocation may be different from the
	 * alignment and boundary specified for the requested pages.  For
	 * instance, the specified index may not be the first page within the
	 * first new reservation.
	 */
	m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
	    VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
	if (m == NULL)
		return (NULL);
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		KASSERT(rv->object == NULL,
		    ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
		LIST_INSERT_HEAD(&object->rvq, rv, objq);
		rv->object = object;
		rv->pindex = first;
		KASSERT(rv->popcnt == 0,
		    ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
		    rv));
		KASSERT(!rv->inpartpopq,
		    ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
		    rv));
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages > 0);
	return (m_ret);

	/*
	 * Found a matching reservation.
	 */
found:
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++)
		if ((rv->pages[index + i].flags & (PG_CACHED | PG_FREE)) == 0)
			return (NULL);
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv);
	return (m);
}

/*
 * Allocates a page from an existing or newly-created reservation.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m, mpred, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	if (mpred != NULL) {
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_page: pindex already allocated"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_page: pindex already allocated"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap)
			return (NULL);
	}

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
	if (m == NULL)
		return (NULL);
	rv = vm_reserv_from_page(m);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	KASSERT(rv->object == NULL,
	    ("vm_reserv_alloc_page: reserv %p isn't free", rv));
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	rv->object = object;
	rv->pindex = first;
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
	vm_reserv_populate(rv);
	return (&rv->pages[VM_RESERV_INDEX(object, pindex)]);

	/*
	 * Found a matching reservation.
	 */
found:
	m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
	/* Handle vm_page_rename(m, new_object, ...). */
	if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
		return (NULL);
	vm_reserv_populate(rv);
	return (m);
}
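
/*
 * A simplified sketch of the expected calling pattern (the real caller,
 * vm_page_alloc(), adds flag, cache, and wiring handling around it):
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 * #if VM_NRESERVLEVEL > 0
 *	if (object != NULL &&
 *	    (m = vm_reserv_alloc_page(object, pindex)) != NULL)
 *		goto done;	// small page carved from a reservation
 * #endif
 *	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);	// fallback
 * done:
 *	...
 *	mtx_unlock(&vm_page_queue_free_mtx);
 */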

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;
	int i;

	mtx_lock(&vm_page_queue_free_mtx);
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		KASSERT(rv->object == object,
		    ("vm_reserv_break_all: reserv %p is corrupted", rv));
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		LIST_REMOVE(rv, objq);
		rv->object = NULL;
		for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
				vm_phys_free_pages(&rv->pages[i], 0);
			else
				rv->popcnt--;
		}
		KASSERT(rv->popcnt == 0,
		    ("vm_reserv_break_all: reserv %p's popcnt is corrupted",
		    rv));
		vm_reserv_broken++;
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
		vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
		    VM_LEVEL_0_ORDER);
	vm_reserv_depopulate(rv);
	return (TRUE);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	int i;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		paddr = roundup2(phys_avail[i], VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= phys_avail[i + 1]) {
			vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
			    PHYS_TO_VM_PAGE(paddr);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
}
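
/*
 * For instance (values assumed for illustration): given a phys_avail
 * segment [0x0009f000, 0x3fe00000) and VM_LEVEL_0_SIZE == 2MB, the loop
 * above starts at paddr == 0x200000, the first 2MB boundary at or above
 * 0x0009f000, and sets the "pages" field of reservations 0x1 through
 * 0x1fe.  Partial superpages at either end of a segment are left as
 * invalid (NULL "pages") entries.
 */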

/*
 * Returns a reservation level if the given page belongs to a fully-populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Prepares for the reactivation of a cached page.
 *
 * First, suppose that the given page "m" was allocated individually, i.e., not
 * as part of a reservation, and cached.  Then, suppose a reservation
 * containing "m" is allocated by the same object.  Although "m" and the
 * reservation belong to the same object, "m"'s pindex may not match the
 * reservation's.
 *
 * The free page queue must be locked.
 */
boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
	vm_reserv_t rv;
	int i, m_index;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_reserv_reactivate_page: page %p is not cached", m));
	if (m->object == rv->object &&
	    m->pindex - rv->pindex == VM_RESERV_INDEX(m->object, m->pindex))
		vm_reserv_populate(rv);
	else {
		KASSERT(rv->inpartpopq,
		    ("vm_reserv_reactivate_page: reserv %p's inpartpopq is "
		    "FALSE", rv));
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
		LIST_REMOVE(rv, objq);
		rv->object = NULL;
		/* Don't vm_phys_free_pages(m, 0). */
		m_index = m - rv->pages;
		for (i = 0; i < m_index; i++) {
			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
				vm_phys_free_pages(&rv->pages[i], 0);
			else
				rv->popcnt--;
		}
		for (i++; i < VM_LEVEL_0_NPAGES; i++) {
			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
				vm_phys_free_pages(&rv->pages[i], 0);
			else
				rv->popcnt--;
		}
		KASSERT(rv->popcnt == 0,
		    ("vm_reserv_reactivate_page: reserv %p's popcnt is "
		    "corrupted", rv));
		vm_reserv_broken++;
	}
	return (TRUE);
}

/*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{
	int i;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is corrupted", rv));
	TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
	KASSERT(rv->object != NULL,
	    ("vm_reserv_reclaim: reserv %p is free", rv));
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
		if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
			vm_phys_free_pages(&rv->pages[i], 0);
		else
			rv->popcnt--;
	}
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_reclaim: reserv %p's popcnt is corrupted", rv));
	vm_reserv_reclaimed++;
}

/*
 * Breaks the reservation at the head of the partially-populated reservation
 * queue, releasing its cached and free pages to the physical memory
 * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(void)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
		vm_reserv_reclaim(rv);
		return (TRUE);
	}
	return (FALSE);
}

/*
 * Searches the partially-populated reservation queue for the least recently
 * active reservation with unused pages, i.e., cached or free, that satisfy the
 * given request for contiguous physical memory.  If a satisfactory reservation
 * is found, it is broken.  Returns TRUE if a reservation is broken and FALSE
 * otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, pa_length, size;
	vm_reserv_t rv;
	int i;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
	TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa_length = 0;
		for (i = 0; i < VM_LEVEL_0_NPAGES; i++)
			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) {
				pa_length += PAGE_SIZE;
				if (pa_length == PAGE_SIZE) {
					pa = VM_PAGE_TO_PHYS(&rv->pages[i]);
					if (pa + size > high) {
						/* Skip to next reservation. */
						break;
					} else if (pa < low ||
					    (pa & (alignment - 1)) != 0 ||
					    ((pa ^ (pa + size - 1)) &
					    ~(boundary - 1)) != 0)
						pa_length = 0;
				}
				if (pa_length >= size) {
					vm_reserv_reclaim(rv);
					return (TRUE);
				}
			} else
				pa_length = 0;
	}
	return (FALSE);
}
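
/*
 * To make the scan above concrete (values assumed): a request for
 * npages == 4 (size == 0x4000) walks each queued reservation's pages in
 * order, growing pa_length across consecutive cached/free pages and
 * resetting it whenever an in-use page or a failed low/alignment/
 * boundary test is encountered.  Once pa_length reaches "size", the
 * reservation is broken via vm_reserv_reclaim() and TRUE is returned;
 * the caller is then expected to retry its contiguous allocation
 * against vm_phys.
 */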

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		mtx_lock(&vm_page_queue_free_mtx);
		if (rv->object == old_object) {
			LIST_REMOVE(rv, objq);
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}
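
/*
 * For example (values assumed): with high_water == 4GB and
 * VM_LEVEL_0_SIZE == 2MB, howmany() yields 2048 elements, so the array
 * occupies 2048 * sizeof(struct vm_reserv) bytes, rounded up to a whole
 * page and carved off the end of physical memory just below "end".
 */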

#endif	/* VM_NRESERVLEVEL > 0 */