/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * Temporarily simulate two-level reservations.  Effectively, VM_LEVEL_0_* is
 * level 1, and VM_SUBLEVEL_0_* is level 0.
 */
#if VM_NRESERVLEVEL == 2
#undef VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#if VM_LEVEL_0_ORDER == 4
#undef VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	(4 + VM_LEVEL_1_ORDER)
#define	VM_SUBLEVEL_0_NPAGES	(1 << 4)
#elif VM_LEVEL_0_ORDER == 7
#undef VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	(7 + VM_LEVEL_1_ORDER)
#define	VM_SUBLEVEL_0_NPAGES	(1 << 7)
#else
#error "Unsupported level 0 reservation size"
#endif
#define	VM_LEVEL_0_PSIND	2
#else
#define	VM_LEVEL_0_PSIND	1
#endif
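
/*
 * For instance (an illustrative combination of orders, not tied to any
 * particular architecture): with VM_LEVEL_0_ORDER == 4 and
 * VM_LEVEL_1_ORDER == 5, the merged level 0 order becomes 4 + 5 == 9,
 * i.e., a 512-page reservation managed as 32 sublevels of
 * VM_SUBLEVEL_0_NPAGES == 16 pages each.
 */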

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
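
/*
 * Concretely, with 4KB base pages (PAGE_SHIFT == 12) and
 * VM_LEVEL_0_ORDER == 9, as on amd64, a reservation covers
 * 1 << 9 == 512 small pages, physical addresses are shifted by
 * 9 + 12 == 21 bits, and a reservation spans 1 << 21 bytes (2MB).
 */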

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
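
/*
 * Worked example (assuming VM_LEVEL_0_NPAGES == 512): an object with
 * pg_color 5 maps pindex 510 to index (5 + 510) & 511 == 3, so the page's
 * color-adjusted offset within the object determines its slot within the
 * reservation.
 */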

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * c - constant after boot
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * r - vm_reserv_lock
 * s - vm_reserv_domain_scan_lock
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	popcnt;			/* (r) # of pages in use */
	uint8_t		domain;			/* (c) NUMA domain. */
	char		inpartpopq;		/* (d, r) */
	int		lasttick;		/* (r) last pop update tick. */
	bitstr_t	bit_decl(popmap, VM_LEVEL_0_NPAGES_MAX);
						/* (r) bit vector, used pages */
};

TAILQ_HEAD(vm_reserv_queue, vm_reserv);

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of reservation structures
 * than there are (physical) superpages.  These "invalid" reservation
 * structures exist to trade off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's "pages"
 * field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
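
/*
 * Illustrative mapping (assuming a 2MB reservation size, i.e.,
 * VM_LEVEL_0_SHIFT == 21, and the dense !VM_PHYSSEG_SPARSE layout): a page
 * at physical address 0x40201000 belongs to reservation number
 * 0x40201000 >> 21 == 513, i.e., &vm_reserv_array[513].
 */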

/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of a queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
struct vm_reserv_domain {
	struct mtx	lock;
	struct vm_reserv_queue partpop;	/* (d) */
	struct vm_reserv marker;	/* (d, s) scan marker/lock */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	(&vm_rvd[(d)].lock)
#define	vm_reserv_domain_assert_locked(d)	\
	mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

#define	vm_reserv_domain_scan_lock(d)	mtx_lock(&vm_rvd[(d)].marker.lock)
#define	vm_reserv_domain_scan_unlock(d)	mtx_unlock(&vm_rvd[(d)].marker.lock)

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Reservation Info");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE |
    CTLFLAG_RD, NULL, 0, sysctl_vm_reserv_fullpop, "I",
    "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_reserv_partpopq, "A",
    "Partially populated reservation queues");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	    mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	    mtx_unlock(vm_reserv_object_lock_ptr((object)))
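
/*
 * Striping example (illustrative numbers, not the real struct size): with
 * sizeof(*object) == 264 and VM_RESERV_OBJ_LOCK_COUNT == 64, an object at
 * address A hashes to vm_reserv_object_mtx[(A / 264) % 64], so unrelated
 * objects tend to take different padded mutexes rather than contending on
 * one global lock.
 */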

static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring any
 * locks, the returned value is inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
#ifdef VM_PHYSSEG_SPARSE
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
#else
		rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
#endif
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
				if (rv == &vm_rvd[domain].marker)
					continue;
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	KASSERT(bit_ntest(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1, 0),
	    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

#ifdef VM_SUBLEVEL_0_NPAGES
static inline bool
vm_reserv_is_sublevel_full(vm_reserv_t rv, int index)
{
	_Static_assert(VM_SUBLEVEL_0_NPAGES == 16 ||
	    VM_SUBLEVEL_0_NPAGES == 128,
	    "vm_reserv_is_sublevel_full: unsupported VM_SUBLEVEL_0_NPAGES");
	/* An equivalent bit_ntest() compiles to more instructions. */
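	/*
	 * Illustrative reading of the checks below: with 16-page sublevels,
	 * index 37 falls in 16-bit popmap word 37 / 16 == 2, which covers
	 * pages 32..47; the sublevel is full iff that word is all ones.
	 */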
	switch (VM_SUBLEVEL_0_NPAGES) {
	case 16:
		return (((uint16_t *)rv->popmap)[index / 16] == UINT16_MAX);
	case 128:
		index = rounddown2(index, 128) / 64;
		return (((uint64_t *)rv->popmap)[index] == UINT64_MAX &&
		    ((uint64_t *)rv->popmap)[index + 1] == UINT64_MAX);
	default:
		__unreachable();
	}
}
#endif

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(bit_test(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = VM_LEVEL_0_PSIND - 1;
	}
#ifdef VM_SUBLEVEL_0_NPAGES
	if (vm_reserv_is_sublevel_full(rv, index))
		rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 0;
#endif
	bit_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
			    partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{
#ifdef VM_PHYSSEG_SPARSE
	struct vm_phys_seg *seg;

	seg = &vm_phys_segs[m->segind];
	return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
	    (seg->start >> VM_LEVEL_0_SHIFT));
#else
	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
#endif
}

/*
 * Either returns an existing reservation or returns NULL and initializes
 * *mpredp and *msuccp to the pages that bracket pindex in the object.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t *mpredp, vm_page_t *msuccp, struct pctrie_iter *pages)
{
	vm_reserv_t rv;
	vm_page_t mpred, msucc;

	mpred = vm_radix_iter_lookup_lt(pages, pindex);
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			return (rv);
	}

	msucc = vm_radix_iter_lookup_ge(pages, pindex);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			return (rv);
	}
	*mpredp = mpred;
	*msuccp = msucc;
	return (NULL);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

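	/*
	 * The unsigned difference below leaves no bits above the mask
	 * exactly when rv->pindex <= pindex < rv->pindex +
	 * VM_LEVEL_0_NPAGES; if pindex precedes rv->pindex, the
	 * subtraction wraps and sets high bits, failing the test.
	 */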
	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}

/*
 * How many pages should be in a new allocation that starts at the first page
 * of the reservation superpage that contains 'first', fits between the
 * allocations that include 'mpred' and 'msucc', fits within 'object', includes
 * at least 'minpages' pages, and tries to include every allocated page in a
 * superpage?
 *
 * We must synchronize with the reserv object lock to protect the pindex/object
 * of the resulting reservations against rename while we are inspecting.
 */
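/*
 * Worked example (assuming VM_LEVEL_0_NPAGES == 512): for minpages == 100,
 * allocpages starts at roundup2(100, 512) == 512; if a neighboring page or
 * reservation caps the range so that a whole trailing reservation cannot
 * fit, the function falls back to exactly minpages.
 */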
static u_long
vm_reserv_num_alloc_pages(vm_object_t object, vm_pindex_t first,
    u_long minpages, vm_page_t mpred, vm_page_t msucc)
{
	vm_pindex_t leftcap, rightcap;
	vm_reserv_t rv;
	u_int allocpages;

	allocpages = roundup2(minpages, VM_LEVEL_0_NPAGES);

	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			allocpages = 0;
	}
	if (minpages < allocpages) {
		if (msucc == NULL) {
			/*
			 * Would the last new reservation extend past the end
			 * of the object?
			 *
			 * If the object is unlikely to grow, don't allocate a
			 * reservation for the tail.
			 */
			if ((object->flags & OBJ_ANON) == 0)
				rightcap = object->size;
			else
				rightcap = OBJ_MAX_SIZE;
		} else {
			/*
			 * Would the last new reservation extend past the start
			 * of another page or reservation?
			 *
			 * If it would, don't allocate a reservation for the
			 * tail.
			 */
			if ((rv = vm_reserv_from_page(msucc))->object != object)
				rightcap = msucc->pindex;
			else
				rightcap = rv->pindex;
		}
		if (first + allocpages > rightcap) {
			/*
			 * A reservation for the last of the requested pages
			 * will not fit.  Reduce the size of the upcoming
			 * allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);
	return (allocpages);
}

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(!bit_test(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind >= 0 &&
	    rv->pages->psind < VM_LEVEL_0_PSIND,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	bit_set(rv->popmap, index);
#ifdef VM_SUBLEVEL_0_NPAGES
	if (vm_reserv_is_sublevel_full(rv, index))
		rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 1;
#endif
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND - 1,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = VM_LEVEL_0_PSIND;
	}
	vm_reserv_domain_unlock(rv->domain);
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, struct pctrie_iter *pages)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, mpred, msucc;
	vm_pindex_t first;
	vm_reserv_t rv;
	u_long allocpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	size = npages << PAGE_SHIFT;
	if (!vm_addr_ok(pa, size, alignment, boundary))
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc, pages);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_contig: domain mismatch"));
		index = VM_RESERV_INDEX(object, pindex);
		/* Does the allocation fit within the reservation? */
		if (index + npages > VM_LEVEL_0_NPAGES)
			return (NULL);
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object)
			goto out;
		m = &rv->pages[index];
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + size > high ||
		    !vm_addr_ok(pa, size, alignment, boundary))
			goto out;
		/* Handle vm_page_iter_rename(..., m, new_object, ...). */
		if (!bit_ntest(rv->popmap, index, index + npages - 1, 0))
			goto out;
		if (!vm_domain_allocate(vmd, req, npages))
			goto out;
		for (i = 0; i < npages; i++)
			vm_reserv_populate(rv, index + i);
		vm_reserv_unlock(rv);
		return (m);
out:
		vm_reserv_unlock(rv);
		return (NULL);
	}

	/*
	 * Check whether an allocation including at least one reservation can
	 * fit between mpred and msucc.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	allocpages = vm_reserv_num_alloc_pages(object, first,
	    VM_RESERV_INDEX(object, pindex) + npages, mpred, msucc);
	if (allocpages < VM_LEVEL_0_NPAGES)
		return (NULL);

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_page_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}

/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, struct pctrie_iter *pages)
{
	struct vm_domain *vmd;
	vm_page_t m, mpred, msucc;
	vm_pindex_t first;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc, pages);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_page: domain mismatch"));
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		index = VM_RESERV_INDEX(object, pindex);
		m = &rv->pages[index];
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object ||
		    /* Handle vm_page_iter_rename(..., m, new_object, ...). */
		    bit_test(rv->popmap, index)) {
			m = NULL;
			goto out;
		}
		if (vm_domain_allocate(vmd, req, 1) == 0)
			m = NULL;
		else
			vm_reserv_populate(rv, index);
out:
		vm_reserv_unlock(rv);
		return (m);
	}

	/*
	 * Check whether an allocation including reservations can fit
	 * between mpred and msucc.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (vm_reserv_num_alloc_pages(object, first, 1, mpred, msucc) <
	    VM_LEVEL_0_NPAGES)
		return (NULL);

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	vm_page_t m;
	int pos, pos0, pos1;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	m = rv->pages;
#ifdef VM_SUBLEVEL_0_NPAGES
	for (; m < rv->pages + VM_LEVEL_0_NPAGES; m += VM_SUBLEVEL_0_NPAGES)
#endif
		m->psind = 0;
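	/*
	 * Scan the popmap and free each maximal run of clear (unused) bits.
	 * Descriptive note on the invariant below: the larger of pos0 and
	 * pos1 is the start of the current run; the run holds clear bits
	 * when pos0 > pos1 and set bits when pos1 > pos0, so "pos1 < pos0"
	 * selects which bit value terminates the current run.
	 */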
	pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
	pos1 = -1 - pos0;
	for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
		/* Find the first different bit after pos. */
		bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
		    pos1 < pos0, &pos);
		if (pos == -1)
			pos = VM_LEVEL_0_NPAGES;
		if (pos0 < pos1) {
			pos0 = pos;
			continue;
		}
		/* Free unused pages from pos0 to pos. */
		pos1 = pos;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_enqueue_contig(&rv->pages[pos0], VM_FREEPOOL_DEFAULT,
		    pos1 - pos0);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
	}
	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
	rv->popcnt = 0;
	counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	struct vm_reserv_domain *rvd;
#ifdef VM_PHYSSEG_SPARSE
	vm_pindex_t used;
#endif
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
#ifdef VM_PHYSSEG_SPARSE
	used = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_reserv = &vm_reserv_array[used];
		used += howmany(seg->end, VM_LEVEL_0_SIZE) -
		    seg->start / VM_LEVEL_0_SIZE;
#else
		seg->first_reserv =
		    &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT];
#endif
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		rvd = &vm_rvd[i];
		mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
		TAILQ_INIT(&rvd->partpop);
		mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);

		/*
		 * Fully populated reservations should never be present in the
		 * partially populated reservation queues.
		 */
		rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
		bit_nset(rvd->marker.popmap, 0, VM_LEVEL_0_NPAGES - 1);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (!bit_test(rv->popmap, m - rv->pages));
}

/*
 * Returns true if the given page is part of a block of npages, starting at a
 * multiple of npages, that are all allocated.  Otherwise, returns false.
 */
bool
vm_reserv_is_populated(vm_page_t m, int npages)
{
	vm_reserv_t rv;
	int index;

	KASSERT(npages <= VM_LEVEL_0_NPAGES,
	    ("%s: npages %d exceeds VM_LEVEL_0_NPAGES", __func__, npages));
	KASSERT(powerof2(npages),
	    ("%s: npages %d is not a power of 2", __func__, npages));
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	index = rounddown2(m - rv->pages, npages);
	return (bit_ntest(rv->popmap, index, index + npages - 1, 1));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
#ifdef VM_SUBLEVEL_0_NPAGES
	return (rv->object != NULL ? 1 : -1);
#else
	return (rv->object != NULL ? 0 : -1);
#endif
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
#ifdef VM_SUBLEVEL_0_NPAGES
		return (1);
	} else if (rv->pages != NULL &&
	    vm_reserv_is_sublevel_full(rv, m - rv->pages)) {
#endif
		return (0);
	}
	return (-1);
}

/*
 * Remove a partially populated reservation from the queue.
 */
static void
vm_reserv_dequeue(vm_reserv_t rv)
{

	vm_reserv_domain_assert_locked(rv->domain);
	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_dequeue: reserv %p's inpartpopq is FALSE", rv));

	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	if (rv->inpartpopq) {
		vm_reserv_domain_lock(rv->domain);
		vm_reserv_dequeue(rv);
		vm_reserv_domain_unlock(rv->domain);
	}
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks a reservation near the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 */
bool
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
		/*
		 * A locked reservation is likely being updated or reclaimed,
		 * so just skip ahead.
		 */
		if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
			vm_reserv_dequeue(rv);
			break;
		}
	}
	vm_reserv_domain_unlock(domain);
	if (rv != NULL) {
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (true);
	}
	return (false);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by lo, and stop at the upper bound, hi.  Return the index
 * of the first satisfactory free page, or -1 if none is found.
 */
static int
vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
    int hi, int ppn_align, int ppn_bound)
{

	vm_reserv_assert_locked(rv);
	KASSERT(npages <= VM_LEVEL_0_NPAGES - 1,
	    ("%s: Too many pages", __func__));
	KASSERT(ppn_bound <= VM_LEVEL_0_NPAGES,
	    ("%s: Too big a boundary for reservation size", __func__));
	KASSERT(npages <= ppn_bound,
	    ("%s: Too many pages for given boundary", __func__));
	KASSERT(ppn_align != 0 && powerof2(ppn_align),
	    ("ppn_align is not a positive power of 2"));
	KASSERT(ppn_bound != 0 && powerof2(ppn_bound),
	    ("ppn_bound is not a positive power of 2"));
	while (bit_ffc_area_at(rv->popmap, lo, hi, npages, &lo), lo != -1) {
		if (lo < roundup2(lo, ppn_align)) {
			/* Skip to next aligned page. */
			lo = roundup2(lo, ppn_align);
		} else if (roundup2(lo + 1, ppn_bound) >= lo + npages)
			return (lo);
		if (roundup2(lo + 1, ppn_bound) < lo + npages) {
			/* Skip to next boundary-matching page. */
			lo = roundup2(lo + 1, ppn_bound);
		}
	}
	return (-1);
}

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns a page if a reservation is broken and NULL otherwise.
 */
vm_page_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_reserv_queue *queue;
	vm_paddr_t pa, size;
	vm_page_t m_ret;
	vm_reserv_t marker, rv, rvn;
	int hi, lo, posn, ppn_align, ppn_bound;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (NULL);
	size = npages << PAGE_SHIFT;
	/*
	 * Ensure that a free range starting at a boundary-multiple
	 * doesn't include a boundary-multiple within it.  Otherwise,
	 * no boundary-constrained allocation is possible.
	 */
	if (!vm_addr_bound_ok(0, size, boundary))
		return (NULL);
	marker = &vm_rvd[domain].marker;
	queue = &vm_rvd[domain].partpop;
	/*
	 * Compute shifted alignment, boundary values for page-based
	 * calculations.  Constrain to range [1, VM_LEVEL_0_NPAGES] to
	 * avoid overflow.
	 */
	ppn_align = (int)(ulmin(ulmax(PAGE_SIZE, alignment),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);
	ppn_bound = boundary == 0 ? VM_LEVEL_0_NPAGES :
	    (int)(MIN(MAX(PAGE_SIZE, boundary),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);

	vm_reserv_domain_scan_lock(domain);
	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + VM_LEVEL_0_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (!vm_addr_align_ok(pa, alignment)) {
			/* This entire reservation is unaligned; go to next. */
			continue;
		}

		if (vm_reserv_trylock(rv) == 0) {
			TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (TAILQ_PREV(marker, vm_reserv_queue, partpopq) !=
			    rv) {
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				rvn = TAILQ_NEXT(marker, partpopq);
				TAILQ_REMOVE(queue, marker, partpopq);
				continue;
			}
			vm_reserv_domain_lock(domain);
			TAILQ_REMOVE(queue, marker, partpopq);
		}
		vm_reserv_domain_unlock(domain);
		lo = (pa >= low) ? 0 :
		    (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT);
		hi = (pa + VM_LEVEL_0_SIZE <= high) ? VM_LEVEL_0_NPAGES :
		    (int)((high - pa) >> PAGE_SHIFT);
		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
		    ppn_align, ppn_bound);
		if (posn >= 0) {
			vm_reserv_domain_scan_unlock(domain);
			/* Allocate requested space */
			rv->popcnt += npages;
			bit_nset(rv->popmap, posn, posn + npages - 1);
			vm_reserv_reclaim(rv);
			vm_reserv_unlock(rv);
			m_ret = &rv->pages[posn];
			pa = VM_PAGE_TO_PHYS(m_ret);
			KASSERT(vm_addr_ok(pa, size, alignment, boundary),
			    ("%s: adjusted address not aligned/bounded to "
			    "%lx/%jx",
			    __func__, alignment, (uintmax_t)boundary));
			return (m_ret);
		}
		vm_reserv_domain_lock(domain);
		rvn = TAILQ_NEXT(rv, partpopq);
		vm_reserv_unlock(rv);
	}
	vm_reserv_domain_unlock(domain);
	vm_reserv_domain_scan_unlock(domain);
	return (NULL);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
#ifdef VM_SUBLEVEL_0_NPAGES
		return (VM_SUBLEVEL_0_NPAGES * PAGE_SIZE);
	case 1:
#endif
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
{
	vm_paddr_t new_end;
	vm_pindex_t count;
	size_t size;
	int i;

	count = 0;
	for (i = 0; i < vm_phys_nsegs; i++) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE));
#endif
	}

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
		    phys_avail[i] / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE));
#endif
	}

	/*
	 * Calculate the size (in bytes) of the reservation array.  Rounding up
	 * for partial superpages at boundaries, as every small page is mapped
	 * to an element in the reservation array based on its physical address.
	 * Thus, the number of elements in the reservation array can be greater
	 * than the number of superpages.
	 */
	size = count * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object) {
		if (rv->popcnt == VM_LEVEL_0_NPAGES)
			return (rv->pages);
#ifdef VM_SUBLEVEL_0_NPAGES
		if (vm_reserv_is_sublevel_full(rv, m - rv->pages))
			return (rv->pages + rounddown2(m - rv->pages,
			    VM_SUBLEVEL_0_NPAGES));
#endif
	}
	return (NULL);
}

#endif	/* VM_NRESERVLEVEL > 0 */