/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */
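/*
 * For a sense of scale: on amd64, for example, a level 0 reservation
 * corresponds to a 2MB superpage, i.e., VM_LEVEL_0_ORDER is 9 there, so a
 * reservation spans 512 contiguous 4KB base pages.
 */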
#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))

/*
 * The size of a population map entry
 */
typedef	u_long	popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define	NPOPMAP_MAX	howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * r - vm_reserv_lock
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * c - constant after boot
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	domain;			/* (c) NUMA domain. */
	uint16_t	popcnt;			/* (r) # of pages in use */
	int		lasttick;		/* (r) last pop update tick. */
	char		inpartpopq;		/* (d) */
	popmap_t	popmap[NPOPMAP_MAX];	/* (r) bit vector, used pages */
};
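/*
 * Popmap layout, concretely: with 64-bit popmap_t entries (NBPOPMAP == 64),
 * page index i within a reservation maps to bit (i % 64) of popmap[i / 64].
 * On amd64, for example, a 512-page reservation needs NPOPMAP == 8 entries.
 */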
#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	&vm_reserv_domain_locks[(d)]
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in that
 * it may contain a greater number of reservation structures than there are
 * (physical) superpages.  These "invalid" reservation structures exist to
 * trade off space for time in the implementation of vm_reserv_from_page().
 * Invalid reservation structures are distinguishable from "valid" reservation
 * structures by inspecting the reservation's "pages" field.  Invalid
 * reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a
 * non-NULL "object" field and a non-zero "popcnt" field.  In other words,
 * every active reservation belongs to a particular object.  Moreover, every
 * active reservation has an entry in the containing object's list of
 * reservations.
 */
static vm_reserv_t vm_reserv_array;
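/*
 * For example, assuming amd64 parameters (VM_LEVEL_0_SHIFT == 21), the page
 * at physical address 0x40001000 has reservation number
 * 0x40001000 >> 21 == 512, so it is mapped to vm_reserv_array[512].
 */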
/*
 * The partially populated reservation queue
 *
 * This queue enables the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of this queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because this lock is needed before malloc(9) is available.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	    mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	    mtx_unlock(vm_reserv_object_lock_ptr((object)))
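/*
 * One possible faster "hash", assuming a power-of-two lock count (which
 * MAXCPU need not be), would replace the divide and modulo above with a
 * shift and mask, e.g.:
 *
 *	(((uintptr_t)(object) >> 8) & (VM_RESERV_OBJ_LOCK_COUNT - 1))
 */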
static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
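/*
 * Purely illustrative output of "sysctl vm.reserv.partpopq" on a
 * hypothetical single-domain machine with 4KB pages and 14 partially
 * populated reservations holding 2860 free pages in total:
 *
 *	DOMAIN    LEVEL     SIZE  NUMBER
 *
 *	     0,      -1,  11440K,     14
 */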
/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
	int i;

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv,
			    partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}

/*
 * Returns an existing reservation or NULL, and initializes "*msuccp" to the
 * page, if any, that succeeds the offset "pindex" within the object.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
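/*
 * The test above relies on unsigned arithmetic: (pindex - rv->pindex) masks
 * to zero exactly when 0 <= pindex - rv->pindex < VM_LEVEL_0_NPAGES, since a
 * "negative" difference wraps to a huge value whose high bits survive the
 * mask.
 */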
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == 0,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = 1;
	}
	vm_reserv_domain_unlock(rv->domain);
}

/*
 * Attempts to allocate a contiguous set of physical pages from existing
 * reservations.  See vm_reserv_alloc_contig() for a description of the
 * function's parameters.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int i, index;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_extend_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);
	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend_contig: Domain mismatch from reservation."));
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	domain = rv->domain;
	vmd = VM_DOMAIN(domain);
	vm_reserv_lock(rv);
	if (rv->object != object)
		goto out;
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		goto out;
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++) {
		if (popmap_is_set(rv->popmap, index + i))
			goto out;
	}
	if (!vm_domain_allocate(vmd, req, npages))
		goto out;
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	vm_reserv_unlock(rv);
	return (m);

out:
	vm_reserv_unlock(rv);
	return (NULL);
}
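/*
 * Sketch of the intended calling pattern (per the comments above and below;
 * the actual caller is the page allocator): try to satisfy the request from
 * an existing reservation first, then fall back to creating new ones:
 *
 *	m = vm_reserv_extend_contig(req, object, pindex, domain, npages,
 *	    low, high, alignment, boundary, mpred);
 *	if (m == NULL)
 *		m = vm_reserv_alloc_contig(req, object, pindex, domain,
 *		    npages, low, high, alignment, boundary, mpred);
 */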
/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from newly created reservations.  All of the physical pages must be at
 * or above the given physical address "low" and below the given physical
 * address "high".  The given value "alignment" determines the alignment of
 * the first physical page in the set.  If the given value "boundary" is
 * non-zero, then the set of physical pages cannot cross any physical address
 * boundary that is a multiple of that value.  Both "alignment" and
 * "boundary" must be a power of two.
 *
 * Callers should first invoke vm_reserv_extend_contig() to attempt an
 * allocation from existing reservations.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_phys_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}

/*
 * Attempts to extend an existing reservation and allocate the page to the
 * object.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
    vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Could a reservation currently exist?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);

	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend: Domain mismatch from reservation."));
	domain = rv->domain;
	vmd = VM_DOMAIN(domain);
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	vm_reserv_lock(rv);
	/* Handle reclaim race. */
	if (rv->object != object ||
	    /* Handle vm_page_rename(m, new_object, ...). */
	    popmap_is_set(rv->popmap, index)) {
		m = NULL;
		goto out;
	}
	if (vm_domain_allocate(vmd, req, 1) == 0)
		m = NULL;
	else
		vm_reserv_populate(rv, index);
out:
	vm_reserv_unlock(rv);

	return (m);
}
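/*
 * Note the revalidation pattern above: the reservation was found by a
 * lock-free lookup, so once vm_reserv_lock() is held, the "object" field
 * and the popmap bit must be rechecked to detect a racing reclaim or
 * vm_page_rename().
 */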
/*
 * Attempts to allocate a new reservation for the object, and allocates a
 * page from that reservation.  Callers should first invoke
 * vm_reserv_extend() to attempt an allocation from an existing reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode
		 * or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}
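/*
 * Worked example of the popmap scan performed by vm_reserv_break() below,
 * using 8-bit popmap entries for brevity (NBPOPMAP is really 64 on LP64):
 * for popmap[0] == 0b00110000, the scan finds the run of clear bits [0, 3],
 * frees those four pages with one call to vm_phys_free_contig(), skips the
 * populated bits [4, 5], and then frees the run [6, 7].
 */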
/*
 * Breaks the given reservation.  All free pages in the reservation are
 * returned to the physical memory allocator.  The reservation's population
 * count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	int begin_zeroes, hi, i, lo;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	rv->pages->psind = 0;
	i = hi = 0;
	do {
		/* Find the next 0 bit.  Any previous 0 bits are < "hi". */
		lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
		if (lo == 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] = 0;
			rv->popcnt -= NBPOPMAP - hi;
			while (++i < NPOPMAP) {
				lo = ffsl(~rv->popmap[i]);
				if (lo == 0) {
					rv->popmap[i] = 0;
					rv->popcnt -= NBPOPMAP;
				} else
					break;
			}
			if (i == NPOPMAP)
				break;
			hi = 0;
		}
		KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
		/* Convert from ffsl() to ordinary bit numbering. */
		lo--;
		if (lo > 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] &= ~((1UL << lo) - 1);
			rv->popcnt -= lo - hi;
		}
		begin_zeroes = NBPOPMAP * i + lo;
		/* Find the next 1 bit. */
		do
			hi = ffsl(rv->popmap[i]);
		while (hi == 0 && ++i < NPOPMAP);
		if (i != NPOPMAP)
			/* Convert from ffsl() to ordinary bit numbering. */
			hi--;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
		    hi - begin_zeroes);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
	} while (i < NPOPMAP);
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
	counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}
/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		mtx_init(&vm_reserv_domain_locks[i], "VM reserv domain", NULL,
		    MTX_DEF);
		TAILQ_INIT(&vm_rvq_partpop[i]);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_domain_lock(rv->domain);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_domain_unlock(rv->domain);
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks the reservation at the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	while ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
		vm_reserv_lock(rv);
		if (rv != TAILQ_FIRST(&vm_rvq_partpop[domain])) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (TRUE);
	}
	return (FALSE);
}
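/*
 * Illustrative fallback (a sketch; "low_on_pages" is a hypothetical
 * predicate, not a kernel function): an allocator that has run out of free
 * pages can break the least recently changed partially populated
 * reservation and retry, each success releasing that reservation's unused
 * pages:
 *
 *	while (low_on_pages(domain) && vm_reserv_reclaim_inactive(domain))
 *		;
 */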
/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_reserv_t rv, rvn;
	int hi, i, lo, low_index, next_free;

	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
	vm_reserv_domain_lock(domain);
again:
	for (rv = TAILQ_FIRST(&vm_rvq_partpop[domain]); rv != NULL; rv = rvn) {
		rvn = TAILQ_NEXT(rv, partpopq);
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (vm_reserv_trylock(rv) == 0) {
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (!rv->inpartpopq) {
				/*
				 * The reservation dropped out of the queue
				 * while we slept on its lock; drop it and
				 * rescan from a stable starting point.
				 */
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				if (rvn == NULL || !rvn->inpartpopq)
					goto again;
				continue;
			}
		} else
			vm_reserv_domain_unlock(domain);
		if (pa < low) {
			/* Start the search for free pages at "low". */
			low_index = (low + PAGE_MASK - pa) >> PAGE_SHIFT;
			i = low_index / NBPOPMAP;
			hi = low_index % NBPOPMAP;
		} else
			i = hi = 0;
		do {
			/* Find the next free page. */
			lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
			while (lo == 0 && ++i < NPOPMAP)
				lo = ffsl(~rv->popmap[i]);
			if (i == NPOPMAP)
				break;
			/* Convert from ffsl() to ordinary bit numbering. */
			lo--;
			next_free = NBPOPMAP * i + lo;
			pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
			KASSERT(pa >= low,
			    ("vm_reserv_reclaim_contig: pa is too low"));
			if (pa + size > high) {
				/* The rest of this reservation is too high. */
				break;
			} else if ((pa & (alignment - 1)) != 0 ||
			    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/*
				 * The current page doesn't meet the alignment
				 * and/or boundary requirements.  Continue
				 * searching this reservation until the rest
				 * of its free pages are either excluded or
				 * exhausted.
				 */
				hi = lo + 1;
				if (hi >= NBPOPMAP) {
					hi = 0;
					i++;
				}
				continue;
			}
			/* Find the next used page. */
			hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
			while (hi == 0 && ++i < NPOPMAP) {
				if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
				    size) {
					vm_reserv_reclaim(rv);
					vm_reserv_unlock(rv);
					return (TRUE);
				}
				hi = ffsl(rv->popmap[i]);
			}
			/* Convert from ffsl() to ordinary bit numbering. */
			if (i != NPOPMAP)
				hi--;
			if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
			    size) {
				vm_reserv_reclaim(rv);
				vm_reserv_unlock(rv);
				return (TRUE);
			}
		} while (i < NPOPMAP);
		vm_reserv_unlock(rv);
		vm_reserv_domain_lock(domain);
		if (rvn != NULL && !rvn->inpartpopq)
			goto again;
	}
	vm_reserv_domain_unlock(domain);
	return (FALSE);
}
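/*
 * Parameter semantics by example (assuming 4KB pages): npages == 16,
 * low == 0, high == ~0, alignment == 65536, and boundary == 1048576
 * requests 64KB of contiguous memory that starts at a 64KB-aligned
 * physical address and does not straddle any 1MB boundary.  A boundary
 * of zero imposes no boundary restriction at all.
 */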
/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.
	 * The next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
 */
static void
vm_reserv_counter_init(void *unused)
{

	vm_reserv_freed = counter_u64_alloc(M_WAITOK);
	vm_reserv_broken = counter_u64_alloc(M_WAITOK);
	vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
		m = rv->pages;
	else
		m = NULL;

	return (m);
}

#endif	/* VM_NRESERVLEVEL > 0 */