/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * Temporarily simulate two-level reservations.  Effectively, VM_LEVEL_0_* is
 * level 1, and VM_SUBLEVEL_0_* is level 0.
 */
#if VM_NRESERVLEVEL == 2
#undef VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#if VM_LEVEL_0_ORDER == 4
#undef VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	(4 + VM_LEVEL_1_ORDER)
#define	VM_SUBLEVEL_0_NPAGES	(1 << 4)
#elif VM_LEVEL_0_ORDER == 7
#undef VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	(7 + VM_LEVEL_1_ORDER)
#define	VM_SUBLEVEL_0_NPAGES	(1 << 7)
#else
#error "Unsupported level 0 reservation size"
#endif
#define	VM_LEVEL_0_PSIND	2
#else
#define	VM_LEVEL_0_PSIND	1
#endif

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
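
/*
 * Worked example of the index computation (illustrative; assumes
 * VM_LEVEL_0_NPAGES == 512): an object whose pg_color is 96 maps pindex 500
 * to index (96 + 500) & 511 == 84.  The pg_color term keeps an object's
 * pages at the same offsets within a reservation as within the expected
 * virtual mapping, which is what later makes promotion to a superpage
 * mapping possible.
 */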

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * c - constant after boot
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * r - vm_reserv_lock
 * s - vm_reserv_domain_scan_lock
 */
struct vm_reserv {
	struct mtx lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t object;			/* (o, r) containing object */
	vm_pindex_t pindex;			/* (o, r) offset in object */
	vm_page_t pages;			/* (c) first page */
	uint16_t popcnt;			/* (r) # of pages in use */
	uint8_t domain;				/* (c) NUMA domain. */
	char inpartpopq;			/* (d, r) */
	int lasttick;				/* (r) last pop update tick. */
	bitstr_t bit_decl(popmap, VM_LEVEL_0_NPAGES_MAX);
						/* (r) bit vector, used pages */
};

TAILQ_HEAD(vm_reserv_queue, vm_reserv);

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade-off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
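
/*
 * Worked example of the mapping (illustrative; assumes a 2MB, 512-page
 * reservation size and the non-VM_PHYSSEG_SPARSE layout): a page at
 * physical address 0x345000 has reservation number 0x345000 >> 21 == 1, so
 * vm_reserv_from_page() resolves it to vm_reserv_array[1], the element
 * covering physical addresses [0x200000, 0x400000).
 */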

/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of a queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
struct vm_reserv_domain {
	struct mtx lock;
	struct vm_reserv_queue partpop;	/* (d) */
	struct vm_reserv marker;	/* (d, s) scan marker/lock */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	(&vm_rvd[(d)].lock)
#define	vm_reserv_domain_assert_locked(d)	\
	mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

#define	vm_reserv_domain_scan_lock(d)	mtx_lock(&vm_rvd[(d)].marker.lock)
#define	vm_reserv_domain_scan_unlock(d)	mtx_unlock(&vm_rvd[(d)].marker.lock)

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Reservation Info");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_reserv_partpopq, "A",
    "Partially populated reservation queues");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	(((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	&vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	mtx_unlock(vm_reserv_object_lock_ptr((object)))

static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring any
 * locks, the returned value is inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
#ifdef VM_PHYSSEG_SPARSE
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
#else
		rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
#endif
		/*
		 * The first comparison guards against "paddr" wrapping
		 * around at the top of the physical address space.
		 */
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queues.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
				if (rv == &vm_rvd[domain].marker)
					continue;
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	KASSERT(bit_ntest(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1, 0),
	    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

#ifdef VM_SUBLEVEL_0_NPAGES
static inline bool
vm_reserv_is_sublevel_full(vm_reserv_t rv, int index)
{
	_Static_assert(VM_SUBLEVEL_0_NPAGES == 16 ||
	    VM_SUBLEVEL_0_NPAGES == 128,
	    "vm_reserv_is_sublevel_full: unsupported VM_SUBLEVEL_0_NPAGES");
	/* An equivalent bit_ntest() compiles to more instructions. */
	switch (VM_SUBLEVEL_0_NPAGES) {
	case 16:
		return (((uint16_t *)rv->popmap)[index / 16] == UINT16_MAX);
	case 128:
		index = rounddown2(index, 128) / 64;
		return (((uint64_t *)rv->popmap)[index] == UINT64_MAX &&
		    ((uint64_t *)rv->popmap)[index + 1] == UINT64_MAX);
	default:
		__unreachable();
	}
}
#endif
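
/*
 * Note on promotion state (summarizing the logic below): the "psind" of a
 * reservation's first page records whether the whole reservation is fully
 * populated.  vm_reserv_populate() raises it to VM_LEVEL_0_PSIND when the
 * last free page is allocated, and vm_reserv_depopulate() lowers it on the
 * first depopulation.  With simulated sublevels, the first page of each
 * sublevel tracks that sublevel's full/not-full state in the same way.
 */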

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(bit_test(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = VM_LEVEL_0_PSIND - 1;
	}
#ifdef VM_SUBLEVEL_0_NPAGES
	if (vm_reserv_is_sublevel_full(rv, index))
		rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 0;
#endif
	bit_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
			    partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, rv->pages->pool,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{
#ifdef VM_PHYSSEG_SPARSE
	struct vm_phys_seg *seg;

	seg = &vm_phys_segs[m->segind];
	return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
	    (seg->start >> VM_LEVEL_0_SHIFT));
#else
	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
#endif
}

/*
 * Returns an existing reservation containing the given page index, or NULL.
 * In either case, initializes "*msuccp" to the resident page, if any, that
 * succeeds the given page index.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}
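
/*
 * Why probing the two neighbors suffices (explanatory note): resident pages
 * within an active reservation's pindex range always belong to that
 * reservation (creation is refused otherwise; see the "leftcap"/"rightcap"
 * checks below), and an active reservation has at least one populated page.
 * Thus, if a reservation contains "pindex", the nearest resident page on at
 * least one side of "pindex" also belongs to it, and probing "mpred" or
 * "msucc" finds it without any additional lookup structure.
 */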

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	/*
	 * In unsigned arithmetic, the difference has no bits set above
	 * VM_LEVEL_0_NPAGES - 1 iff 0 <= pindex - rv->pindex <
	 * VM_LEVEL_0_NPAGES, i.e., iff the reservation contains "pindex".
	 */
	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(!bit_test(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind >= 0 &&
	    rv->pages->psind < VM_LEVEL_0_PSIND,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	bit_set(rv->popmap, index);
#ifdef VM_SUBLEVEL_0_NPAGES
	if (vm_reserv_is_sublevel_full(rv, index))
		rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 1;
#endif
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND - 1,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = VM_LEVEL_0_PSIND;
	}
	vm_reserv_domain_unlock(rv->domain);
}
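
/*
 * Queue-position batching (note): both vm_reserv_populate() and
 * vm_reserv_depopulate() refresh the reservation's LRU position in the
 * partially populated queue only when at least PARTPOPSLOP ticks have
 * elapsed since "lasttick", or when the reservation just became empty or
 * full.  This bounds the rate of queue churn, and of domain-lock
 * acquisitions, for a reservation whose population changes rapidly.
 */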

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	size = npages << PAGE_SHIFT;
	if (!vm_addr_ok(pa, size, alignment, boundary))
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_contig: domain mismatch"));
		index = VM_RESERV_INDEX(object, pindex);
		/* Does the allocation fit within the reservation? */
		if (index + npages > VM_LEVEL_0_NPAGES)
			return (NULL);
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object)
			goto out;
		m = &rv->pages[index];
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + size > high ||
		    !vm_addr_ok(pa, size, alignment, boundary))
			goto out;
		/* Handle vm_page_iter_rename(..., m, new_object, ...). */
		if (!bit_ntest(rv->popmap, index, index + npages - 1, 0))
			goto out;
		if (!vm_domain_allocate(vmd, req, npages))
			goto out;
		for (i = 0; i < npages; i++)
			vm_reserv_populate(rv, index + i);
		vm_reserv_unlock(rv);
		return (m);
out:
		vm_reserv_unlock(rv);
		return (NULL);
	}

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);
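
	/*
	 * Example of the "leftcap"/"rightcap" checks above (illustrative;
	 * assumes VM_LEVEL_0_NPAGES == 512): with first == 1024, a resident
	 * predecessor at pindex 1040 that belongs to no reservation yields
	 * leftcap == 1041 > first, so a new reservation covering
	 * [1024, 1536) would overlap an existing page and is not created.
	 */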

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + maxpages > object->size) {
		if (maxpages == VM_LEVEL_0_NPAGES)
			return (NULL);
		allocpages = minpages;
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_page_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}
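
/*
 * Worked example of the trimming and initialization above (illustrative;
 * assumes VM_LEVEL_0_NPAGES == 512): a request for npages == 700 at
 * reservation-relative index 300 gives minpages == 1000 and
 * maxpages == 1024.  If the final reservation's tail cannot be used,
 * allocpages is trimmed to 1000; the loop then initializes only the first
 * reservation, which is fully covered, populating indices 300-511, and the
 * remaining 488 contiguous pages complete the request without being tracked
 * by a reservation.
 */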

/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_page: domain mismatch"));
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		index = VM_RESERV_INDEX(object, pindex);
		m = &rv->pages[index];
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object ||
		    /* Handle vm_page_iter_rename(..., m, new_object, ...). */
		    bit_test(rv->popmap, index)) {
			m = NULL;
			goto out;
		}
		if (vm_domain_allocate(vmd, req, 1) == 0)
			m = NULL;
		else
			vm_reserv_populate(rv, index);
out:
		vm_reserv_unlock(rv);
		return (m);
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + VM_LEVEL_0_NPAGES > object->size)
		return (NULL);

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	vm_page_t m;
	int pos, pos0, pos1;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	m = rv->pages;
#ifdef VM_SUBLEVEL_0_NPAGES
	for (; m < rv->pages + VM_LEVEL_0_NPAGES; m += VM_SUBLEVEL_0_NPAGES)
#endif
		m->psind = 0;
	/*
	 * Walk the popmap, alternating between runs of clear bits (free
	 * pages) and runs of set bits (used pages).  "pos0" is the start of
	 * the latest run of clear bits and "pos1" the start of the latest
	 * run of set bits; the larger of the two identifies the run
	 * currently being scanned.
	 */
	pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
	pos1 = -1 - pos0;
	for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
		/* Find the first different bit after pos. */
		bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
		    pos1 < pos0, &pos);
		if (pos == -1)
			pos = VM_LEVEL_0_NPAGES;
		if (pos0 < pos1) {
			/* A run of set bits ended; a clear run begins. */
			pos0 = pos;
			continue;
		}
		/* Free unused pages from pos0 to pos. */
		pos1 = pos;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_enqueue_contig(&rv->pages[pos0], VM_FREEPOOL_DEFAULT,
		    pos1 - pos0);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
	}
	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
	rv->popcnt = 0;
	counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}
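
/*
 * Note: vm_reserv_break_all() and vm_reserv_free_page() share a pattern:
 * a candidate reservation is selected by an unsynchronized read, and its
 * "object" field is then re-validated under the reservation lock, because a
 * concurrent reclaim may have freed or reassigned the reservation in the
 * meantime.
 */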

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	struct vm_reserv_domain *rvd;
#ifdef VM_PHYSSEG_SPARSE
	vm_pindex_t used;
#endif
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
#ifdef VM_PHYSSEG_SPARSE
	used = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_reserv = &vm_reserv_array[used];
		used += howmany(seg->end, VM_LEVEL_0_SIZE) -
		    seg->start / VM_LEVEL_0_SIZE;
#else
		seg->first_reserv =
		    &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT];
#endif
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		rvd = &vm_rvd[i];
		mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
		TAILQ_INIT(&rvd->partpop);
		mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);

		/*
		 * Fully populated reservations should never be present in the
		 * partially populated reservation queues.
		 */
		rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
		bit_nset(rvd->marker.popmap, 0, VM_LEVEL_0_NPAGES - 1);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (!bit_test(rv->popmap, m - rv->pages));
}

/*
 * Returns true if the given page is part of a block of npages, starting at a
 * multiple of npages, that are all allocated.  Otherwise, returns false.
 */
bool
vm_reserv_is_populated(vm_page_t m, int npages)
{
	vm_reserv_t rv;
	int index;

	KASSERT(npages <= VM_LEVEL_0_NPAGES,
	    ("%s: npages %d exceeds VM_LEVEL_0_NPAGES", __func__, npages));
	KASSERT(powerof2(npages),
	    ("%s: npages %d is not a power of 2", __func__, npages));
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	index = rounddown2(m - rv->pages, npages);
	return (bit_ntest(rv->popmap, index, index + npages - 1, 1));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
#ifdef VM_SUBLEVEL_0_NPAGES
	return (rv->object != NULL ? 1 : -1);
#else
	return (rv->object != NULL ? 0 : -1);
#endif
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
#ifdef VM_SUBLEVEL_0_NPAGES
		return (1);
	} else if (rv->pages != NULL &&
	    vm_reserv_is_sublevel_full(rv, m - rv->pages)) {
#endif
		return (0);
	}
	return (-1);
}
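
/*
 * Note on level numbering: when VM_SUBLEVEL_0_NPAGES is defined (the
 * simulated two-level configuration above), a whole reservation is reported
 * as level 1 and a fully populated sublevel as level 0; otherwise the only
 * reservation level is 0.  vm_reserv_size() below follows the same
 * convention, with -1 denoting an ordinary small page.
 */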

/*
 * Remove a partially populated reservation from the queue.
 */
static void
vm_reserv_dequeue(vm_reserv_t rv)
{

	vm_reserv_domain_assert_locked(rv->domain);
	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_dequeue: reserv %p's inpartpopq is FALSE", rv));

	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	if (rv->inpartpopq) {
		vm_reserv_domain_lock(rv->domain);
		vm_reserv_dequeue(rv);
		vm_reserv_domain_unlock(rv->domain);
	}
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks a reservation near the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 */
bool
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
		/*
		 * A locked reservation is likely being updated or reclaimed,
		 * so just skip ahead.
		 */
		if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
			vm_reserv_dequeue(rv);
			break;
		}
	}
	vm_reserv_domain_unlock(domain);
	if (rv != NULL) {
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (true);
	}
	return (false);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by lo, and stop at the upper bound, hi.  Return the index
 * of the first satisfactory free page, or -1 if none is found.
 */
static int
vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
    int hi, int ppn_align, int ppn_bound)
{

	vm_reserv_assert_locked(rv);
	KASSERT(npages <= VM_LEVEL_0_NPAGES - 1,
	    ("%s: Too many pages", __func__));
	KASSERT(ppn_bound <= VM_LEVEL_0_NPAGES,
	    ("%s: Too big a boundary for reservation size", __func__));
	KASSERT(npages <= ppn_bound,
	    ("%s: Too many pages for given boundary", __func__));
	KASSERT(ppn_align != 0 && powerof2(ppn_align),
	    ("ppn_align is not a positive power of 2"));
	KASSERT(ppn_bound != 0 && powerof2(ppn_bound),
	    ("ppn_bound is not a positive power of 2"));
	while (bit_ffc_area_at(rv->popmap, lo, hi, npages, &lo), lo != -1) {
		if (lo < roundup2(lo, ppn_align)) {
			/* Skip to next aligned page. */
			lo = roundup2(lo, ppn_align);
		} else if (roundup2(lo + 1, ppn_bound) >= lo + npages)
			return (lo);
		if (roundup2(lo + 1, ppn_bound) < lo + npages) {
			/* Skip to next boundary-matching page. */
			lo = roundup2(lo + 1, ppn_bound);
		}
	}
	return (-1);
}
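
/*
 * Worked example of the search above (illustrative): with npages == 3,
 * ppn_align == 4, and ppn_bound == 8, a free run found at index 6 is
 * skipped because it is not 4-aligned and the search resumes at index 8,
 * while a free run at index 8 is returned because roundup2(9, 8) == 16 >=
 * 8 + 3, i.e., the run does not cross an 8-page boundary.
 */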

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns a page if a reservation is broken and NULL otherwise.
 */
vm_page_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_reserv_queue *queue;
	vm_paddr_t pa, size;
	vm_page_t m_ret;
	vm_reserv_t marker, rv, rvn;
	int hi, lo, posn, ppn_align, ppn_bound;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (NULL);
	size = npages << PAGE_SHIFT;
	/*
	 * Ensure that a free range starting at a boundary-multiple
	 * doesn't include a boundary-multiple within it.  Otherwise,
	 * no boundary-constrained allocation is possible.
	 */
	if (!vm_addr_bound_ok(0, size, boundary))
		return (NULL);
	marker = &vm_rvd[domain].marker;
	queue = &vm_rvd[domain].partpop;
	/*
	 * Compute shifted alignment, boundary values for page-based
	 * calculations.  Constrain to range [1, VM_LEVEL_0_NPAGES] to
	 * avoid overflow.
	 */
	ppn_align = (int)(ulmin(ulmax(PAGE_SIZE, alignment),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);
	ppn_bound = boundary == 0 ? VM_LEVEL_0_NPAGES :
	    (int)(MIN(MAX(PAGE_SIZE, boundary),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);

	vm_reserv_domain_scan_lock(domain);
	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + VM_LEVEL_0_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (!vm_addr_align_ok(pa, alignment)) {
			/* This entire reservation is unaligned; go to next. */
			continue;
		}

		if (vm_reserv_trylock(rv) == 0) {
			/*
			 * The reservation is busy.  Insert the marker after
			 * "rv" so that the scan position survives dropping
			 * the domain lock while we block on the reservation
			 * lock.  If "rv" is no longer immediately before the
			 * marker afterwards, it was requeued or dequeued in
			 * the interim, so skip it.
			 */
			TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (TAILQ_PREV(marker, vm_reserv_queue, partpopq) !=
			    rv) {
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				rvn = TAILQ_NEXT(marker, partpopq);
				TAILQ_REMOVE(queue, marker, partpopq);
				continue;
			}
			vm_reserv_domain_lock(domain);
			TAILQ_REMOVE(queue, marker, partpopq);
		}
		vm_reserv_domain_unlock(domain);
		lo = (pa >= low) ? 0 :
		    (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT);
		hi = (pa + VM_LEVEL_0_SIZE <= high) ? VM_LEVEL_0_NPAGES :
		    (int)((high - pa) >> PAGE_SHIFT);
		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
		    ppn_align, ppn_bound);
		if (posn >= 0) {
			vm_reserv_domain_scan_unlock(domain);
			/* Allocate requested space */
			rv->popcnt += npages;
			bit_nset(rv->popmap, posn, posn + npages - 1);
			vm_reserv_reclaim(rv);
			vm_reserv_unlock(rv);
			m_ret = &rv->pages[posn];
			pa = VM_PAGE_TO_PHYS(m_ret);
			KASSERT(vm_addr_ok(pa, size, alignment, boundary),
			    ("%s: adjusted address not aligned/bounded to "
			    "%lx/%jx",
			    __func__, alignment, (uintmax_t)boundary));
			return (m_ret);
		}
		vm_reserv_domain_lock(domain);
		rvn = TAILQ_NEXT(rv, partpopq);
		vm_reserv_unlock(rv);
	}
	vm_reserv_domain_unlock(domain);
	vm_reserv_domain_scan_unlock(domain);
	return (NULL);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
#ifdef VM_SUBLEVEL_0_NPAGES
		return (VM_SUBLEVEL_0_NPAGES * PAGE_SIZE);
	case 1:
#endif
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
{
	vm_paddr_t new_end;
	vm_pindex_t count;
	size_t size;
	int i;

	count = 0;
	for (i = 0; i < vm_phys_nsegs; i++) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE));
#endif
	}

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
		    phys_avail[i] / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE));
#endif
	}

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * for partial superpages at segment boundaries: every small page is
	 * mapped to an element of the reservation array by its physical
	 * address, so the array may contain more elements than there are
	 * superpages.
	 */
	size = count * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}
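
/*
 * Sizing example (illustrative; assumes a 2MB reservation size): a machine
 * with 16GB of physical memory in a single segment needs 16GB / 2MB == 8192
 * reservation array elements, plus any extra elements required by partial
 * superpages at segment boundaries.
 */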

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object) {
		if (rv->popcnt == VM_LEVEL_0_NPAGES)
			return (rv->pages);
#ifdef VM_SUBLEVEL_0_NPAGES
		if (vm_reserv_is_sublevel_full(rv, m - rv->pages))
			return (rv->pages + rounddown2(m - rv->pages,
			    VM_SUBLEVEL_0_NPAGES));
#endif
	}
	return (NULL);
}

#endif	/* VM_NRESERVLEVEL > 0 */