/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * Temporarily simulate two-level reservations.  Effectively, VM_LEVEL_0_* is
 * level 1, and VM_SUBLEVEL_0_* is level 0.
 */
#if VM_NRESERVLEVEL == 2
#undef VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#if VM_LEVEL_0_ORDER == 4
#undef VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	(4 + VM_LEVEL_1_ORDER)
#define	VM_SUBLEVEL_0_NPAGES	(1 << 4)
#elif VM_LEVEL_0_ORDER == 7
#undef VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	(7 + VM_LEVEL_1_ORDER)
#define	VM_SUBLEVEL_0_NPAGES	(1 << 7)
#else
#error "Unsupported level 0 reservation size"
#endif
#define	VM_LEVEL_0_PSIND	2
#else
#define	VM_LEVEL_0_PSIND	1
#endif

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
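
/*
 * As a worked example (an illustration, not a definition): on amd64,
 * PAGE_SHIFT is 12 and VM_LEVEL_0_ORDER is 9, so VM_LEVEL_0_SHIFT is 21 and
 * VM_LEVEL_0_SIZE is 1 << 21 bytes, i.e., the 2MB superpage size.
 */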

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
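
/*
 * For example (illustrative arithmetic only): with VM_LEVEL_0_NPAGES == 512,
 * an object whose pg_color is 13 maps pindex 1000 to index
 * (13 + 1000) & 511 == 501 within its reservation.
 */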

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * c - constant after boot
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * r - vm_reserv_lock
 * s - vm_reserv_domain_scan_lock
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	popcnt;			/* (r) # of pages in use */
	uint8_t		domain;			/* (c) NUMA domain. */
	char		inpartpopq;		/* (d, r) */
	int		lasttick;		/* (r) last pop update tick. */
	bitstr_t	bit_decl(popmap, VM_LEVEL_0_NPAGES_MAX);
						/* (r) bit vector, used pages */
};

TAILQ_HEAD(vm_reserv_queue, vm_reserv);

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in that
 * it may contain a greater number of reservation structures than there are
 * (physical) superpages.  These "invalid" reservation structures exist to
 * trade-off space for time in the implementation of vm_reserv_from_page().
 * Invalid reservation structures are distinguishable from "valid" reservation
 * structures by inspecting the reservation's "pages" field.  Invalid
 * reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
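 *
 * For example, assuming the dense (non-VM_PHYSSEG_SPARSE) layout and a 2MB
 * reservation size (VM_LEVEL_0_SHIFT == 21), a page at physical address
 * 0x40301000 maps to physical reservation number 0x40301000 >> 21 == 513,
 * i.e., to &vm_reserv_array[513].  (Illustrative arithmetic only.)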
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of a queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
struct vm_reserv_domain {
	struct mtx	lock;
	struct vm_reserv_queue partpop;	/* (d) */
	struct vm_reserv marker;	/* (d, s) scan marker/lock */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	(&vm_rvd[(d)].lock)
#define	vm_reserv_domain_assert_locked(d)	\
	mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

#define	vm_reserv_domain_scan_lock(d)	mtx_lock(&vm_rvd[(d)].marker.lock)
#define	vm_reserv_domain_scan_unlock(d)	mtx_unlock(&vm_rvd[(d)].marker.lock)

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Reservation Info");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD,
    NULL, 0, sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_reserv_partpopq, "A",
    "Partially populated reservation queues");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	(((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	&vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	mtx_unlock(vm_reserv_object_lock_ptr((object)))

static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring any
 * locks, the returned value is inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
#ifdef VM_PHYSSEG_SPARSE
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
#else
		rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
#endif
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
				if (rv == &vm_rvd[domain].marker)
					continue;
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	KASSERT(bit_ntest(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1, 0),
	    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

#ifdef VM_SUBLEVEL_0_NPAGES
static inline bool
vm_reserv_is_sublevel_full(vm_reserv_t rv, int index)
{
	_Static_assert(VM_SUBLEVEL_0_NPAGES == 16 ||
	    VM_SUBLEVEL_0_NPAGES == 128,
	    "vm_reserv_is_sublevel_full: unsupported VM_SUBLEVEL_0_NPAGES");
	/* An equivalent bit_ntest() compiles to more instructions. */
	switch (VM_SUBLEVEL_0_NPAGES) {
	case 16:
		return (((uint16_t *)rv->popmap)[index / 16] == UINT16_MAX);
	case 128:
		index = rounddown2(index, 128) / 64;
		return (((uint64_t *)rv->popmap)[index] == UINT64_MAX &&
		    ((uint64_t *)rv->popmap)[index + 1] == UINT64_MAX);
	default:
		__unreachable();
	}
}
#endif

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(bit_test(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = VM_LEVEL_0_PSIND - 1;
	}
#ifdef VM_SUBLEVEL_0_NPAGES
	if (vm_reserv_is_sublevel_full(rv, index))
		rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 0;
#endif
	bit_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
			    partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{
#ifdef VM_PHYSSEG_SPARSE
	struct vm_phys_seg *seg;

	seg = &vm_phys_segs[m->segind];
	return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
	    (seg->start >> VM_LEVEL_0_SHIFT));
#else
	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
#endif
}

/*
 * Returns an existing reservation containing the given page index, or NULL.
 * If NULL is returned, then "*msuccp" is initialized to the page that
 * succeeds "pindex" in the object, if any.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
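
/*
 * For example (illustrative arithmetic only): with VM_LEVEL_0_NPAGES == 512
 * and rv->pindex == 1024, a pindex of 1100 gives (1100 - 1024) & ~511 == 0,
 * so the index lies within the reservation, while 1600 gives
 * 576 & ~511 != 0, so it does not.
 */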

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(!bit_test(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind >= 0 &&
	    rv->pages->psind < VM_LEVEL_0_PSIND,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	bit_set(rv->popmap, index);
#ifdef VM_SUBLEVEL_0_NPAGES
	if (vm_reserv_is_sublevel_full(rv, index))
		rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 1;
#endif
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND - 1,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = VM_LEVEL_0_PSIND;
	}
	vm_reserv_domain_unlock(rv->domain);
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	size = npages << PAGE_SHIFT;
	if (!vm_addr_ok(pa, size, alignment, boundary))
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_contig: domain mismatch"));
		index = VM_RESERV_INDEX(object, pindex);
		/* Does the allocation fit within the reservation? */
		if (index + npages > VM_LEVEL_0_NPAGES)
			return (NULL);
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object)
			goto out;
		m = &rv->pages[index];
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + size > high ||
		    !vm_addr_ok(pa, size, alignment, boundary))
			goto out;
		/* Handle vm_page_iter_rename(..., m, new_object, ...). */
		if (!bit_ntest(rv->popmap, index, index + npages - 1, 0))
			goto out;
		if (!vm_domain_allocate(vmd, req, npages))
			goto out;
		for (i = 0; i < npages; i++)
			vm_reserv_populate(rv, index + i);
		vm_reserv_unlock(rv);
		return (m);
out:
		vm_reserv_unlock(rv);
		return (NULL);
	}

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + maxpages > object->size) {
		if (maxpages == VM_LEVEL_0_NPAGES)
			return (NULL);
		allocpages = minpages;
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_page_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}

/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_page: domain mismatch"));
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		index = VM_RESERV_INDEX(object, pindex);
		m = &rv->pages[index];
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object ||
		    /* Handle vm_page_iter_rename(..., m, new_object, ...). */
		    bit_test(rv->popmap, index)) {
			m = NULL;
			goto out;
		}
		if (vm_domain_allocate(vmd, req, 1) == 0)
			m = NULL;
		else
			vm_reserv_populate(rv, index);
out:
		vm_reserv_unlock(rv);
		return (m);
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + VM_LEVEL_0_NPAGES > object->size)
		return (NULL);

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	vm_page_t m;
	int pos, pos0, pos1;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	m = rv->pages;
#ifdef VM_SUBLEVEL_0_NPAGES
	for (; m < rv->pages + VM_LEVEL_0_NPAGES; m += VM_SUBLEVEL_0_NPAGES)
#endif
		m->psind = 0;
	pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
	pos1 = -1 - pos0;
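	/*
	 * Scan the popmap as alternating runs of clear (free) and set
	 * (allocated) bits: pos0 > pos1 means a run of clear bits begins at
	 * pos0; pos1 > pos0 means a run of set bits begins at pos1.
	 */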
	for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
		/* Find the first different bit after pos. */
		bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
		    pos1 < pos0, &pos);
		if (pos == -1)
			pos = VM_LEVEL_0_NPAGES;
		if (pos0 < pos1) {
			pos0 = pos;
			continue;
		}
		/* Free unused pages from pos0 to pos. */
		pos1 = pos;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_enqueue_contig(&rv->pages[pos0], VM_FREEPOOL_DEFAULT,
		    pos1 - pos0);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
	}
	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
	rv->popcnt = 0;
	counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	struct vm_reserv_domain *rvd;
#ifdef VM_PHYSSEG_SPARSE
	vm_pindex_t used;
#endif
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
#ifdef VM_PHYSSEG_SPARSE
	used = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_reserv = &vm_reserv_array[used];
		used += howmany(seg->end, VM_LEVEL_0_SIZE) -
		    seg->start / VM_LEVEL_0_SIZE;
#else
		seg->first_reserv =
		    &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT];
#endif
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		rvd = &vm_rvd[i];
		mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
		TAILQ_INIT(&rvd->partpop);
		mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);

		/*
		 * Fully populated reservations should never be present in the
		 * partially populated reservation queues.
		 */
		rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
		bit_nset(rvd->marker.popmap, 0, VM_LEVEL_0_NPAGES - 1);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (!bit_test(rv->popmap, m - rv->pages));
}

/*
 * Returns true if the given page is part of a block of npages, starting at a
 * multiple of npages, that are all allocated.  Otherwise, returns false.
 */
bool
vm_reserv_is_populated(vm_page_t m, int npages)
{
	vm_reserv_t rv;
	int index;

	KASSERT(npages <= VM_LEVEL_0_NPAGES,
	    ("%s: npages %d exceeds VM_LEVEL_0_NPAGES", __func__, npages));
	KASSERT(powerof2(npages),
	    ("%s: npages %d is not a power of 2", __func__, npages));
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	index = rounddown2(m - rv->pages, npages);
	return (bit_ntest(rv->popmap, index, index + npages - 1, 1));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
#ifdef VM_SUBLEVEL_0_NPAGES
	return (rv->object != NULL ? 1 : -1);
#else
	return (rv->object != NULL ? 0 : -1);
#endif
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
#ifdef VM_SUBLEVEL_0_NPAGES
		return (1);
	} else if (rv->pages != NULL &&
	    vm_reserv_is_sublevel_full(rv, m - rv->pages)) {
#endif
		return (0);
	}
	return (-1);
}

/*
 * Remove a partially populated reservation from the queue.
 */
static void
vm_reserv_dequeue(vm_reserv_t rv)
{

	vm_reserv_domain_assert_locked(rv->domain);
	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));

	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	if (rv->inpartpopq) {
		vm_reserv_domain_lock(rv->domain);
		vm_reserv_dequeue(rv);
		vm_reserv_domain_unlock(rv->domain);
	}
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks a reservation near the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 */
bool
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
		/*
		 * A locked reservation is likely being updated or reclaimed,
		 * so just skip ahead.
		 */
		if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
			vm_reserv_dequeue(rv);
			break;
		}
	}
	vm_reserv_domain_unlock(domain);
	if (rv != NULL) {
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (true);
	}
	return (false);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by lo, and stop at the upper bound, hi.  Return the index
 * of the first satisfactory free page, or -1 if none is found.
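 *
 * For example, with npages == 8 and ppn_bound == 16, a free run beginning at
 * index 12 would cross the boundary at index 16, so the search skips ahead
 * to index 16.  (Illustrative values only.)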
 */
static int
vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
    int hi, int ppn_align, int ppn_bound)
{

	vm_reserv_assert_locked(rv);
	KASSERT(npages <= VM_LEVEL_0_NPAGES - 1,
	    ("%s: Too many pages", __func__));
	KASSERT(ppn_bound <= VM_LEVEL_0_NPAGES,
	    ("%s: Too big a boundary for reservation size", __func__));
	KASSERT(npages <= ppn_bound,
	    ("%s: Too many pages for given boundary", __func__));
	KASSERT(ppn_align != 0 && powerof2(ppn_align),
	    ("ppn_align is not a positive power of 2"));
	KASSERT(ppn_bound != 0 && powerof2(ppn_bound),
	    ("ppn_bound is not a positive power of 2"));
	while (bit_ffc_area_at(rv->popmap, lo, hi, npages, &lo), lo != -1) {
		if (lo < roundup2(lo, ppn_align)) {
			/* Skip to next aligned page. */
			lo = roundup2(lo, ppn_align);
		} else if (roundup2(lo + 1, ppn_bound) >= lo + npages)
			return (lo);
		if (roundup2(lo + 1, ppn_bound) < lo + npages) {
			/* Skip to next boundary-matching page. */
			lo = roundup2(lo + 1, ppn_bound);
		}
	}
	return (-1);
}

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns a page if a reservation is broken and NULL otherwise.
 */
vm_page_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_reserv_queue *queue;
	vm_paddr_t pa, size;
	vm_page_t m_ret;
	vm_reserv_t marker, rv, rvn;
	int hi, lo, posn, ppn_align, ppn_bound;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (NULL);
	size = npages << PAGE_SHIFT;
	/*
	 * Ensure that a free range starting at a boundary-multiple
	 * doesn't include a boundary-multiple within it.  Otherwise,
	 * no boundary-constrained allocation is possible.
	 */
	if (!vm_addr_bound_ok(0, size, boundary))
		return (NULL);
	marker = &vm_rvd[domain].marker;
	queue = &vm_rvd[domain].partpop;
	/*
	 * Compute shifted alignment, boundary values for page-based
	 * calculations.  Constrain to range [1, VM_LEVEL_0_NPAGES] to
	 * avoid overflow.
	 */
	ppn_align = (int)(ulmin(ulmax(PAGE_SIZE, alignment),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);
	ppn_bound = boundary == 0 ? VM_LEVEL_0_NPAGES :
	    (int)(MIN(MAX(PAGE_SIZE, boundary),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);

	vm_reserv_domain_scan_lock(domain);
	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + VM_LEVEL_0_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (!vm_addr_align_ok(pa, alignment)) {
			/* This entire reservation is unaligned; go to next. */
			continue;
		}

		if (vm_reserv_trylock(rv) == 0) {
			TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (TAILQ_PREV(marker, vm_reserv_queue, partpopq) !=
			    rv) {
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				rvn = TAILQ_NEXT(marker, partpopq);
				TAILQ_REMOVE(queue, marker, partpopq);
				continue;
			}
			vm_reserv_domain_lock(domain);
			TAILQ_REMOVE(queue, marker, partpopq);
		}
		vm_reserv_domain_unlock(domain);
		lo = (pa >= low) ? 0 :
		    (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT);
		hi = (pa + VM_LEVEL_0_SIZE <= high) ? VM_LEVEL_0_NPAGES :
		    (int)((high - pa) >> PAGE_SHIFT);
		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
		    ppn_align, ppn_bound);
		if (posn >= 0) {
			vm_reserv_domain_scan_unlock(domain);
			/* Allocate requested space */
			rv->popcnt += npages;
			bit_nset(rv->popmap, posn, posn + npages - 1);
			vm_reserv_reclaim(rv);
			vm_reserv_unlock(rv);
			m_ret = &rv->pages[posn];
			pa = VM_PAGE_TO_PHYS(m_ret);
			KASSERT(vm_addr_ok(pa, size, alignment, boundary),
			    ("%s: adjusted address not aligned/bounded to "
			    "%lx/%jx",
			    __func__, alignment, (uintmax_t)boundary));
			return (m_ret);
		}
		vm_reserv_domain_lock(domain);
		rvn = TAILQ_NEXT(rv, partpopq);
		vm_reserv_unlock(rv);
	}
	vm_reserv_domain_unlock(domain);
	vm_reserv_domain_scan_unlock(domain);
	return (NULL);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
#ifdef VM_SUBLEVEL_0_NPAGES
		return (VM_SUBLEVEL_0_NPAGES * PAGE_SIZE);
	case 1:
#endif
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
{
	vm_paddr_t new_end;
	vm_pindex_t count;
	size_t size;
	int i;

	count = 0;
	for (i = 0; i < vm_phys_nsegs; i++) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE));
#endif
	}

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
		    phys_avail[i] / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE));
#endif
	}

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * for partial superpages at boundaries, as every small page is mapped
	 * to an element in the reservation array based on its physical
	 * address.  Thus, the number of elements in the reservation array can
	 * be greater than the number of superpages.
	 */
	size = count * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object) {
		if (rv->popcnt == VM_LEVEL_0_NPAGES)
			return (rv->pages);
#ifdef VM_SUBLEVEL_0_NPAGES
		if (vm_reserv_is_sublevel_full(rv, m - rv->pages))
			return (rv->pages + rounddown2(m - rv->pages,
			    VM_SUBLEVEL_0_NPAGES));
#endif
	}
	return (NULL);
}

#endif	/* VM_NRESERVLEVEL > 0 */