/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
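/*
 * Worked example (editor's note, assuming amd64, where VM_LEVEL_0_ORDER is
 * 9 and PAGE_SHIFT is 12): VM_LEVEL_0_NPAGES is 1 << 9 = 512 small pages,
 * VM_LEVEL_0_SHIFT is 9 + 12 = 21, and VM_LEVEL_0_SIZE is 1 << 21 bytes,
 * i.e., one 2MB superpage.
 */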
/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
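/*
 * Illustration (editor's note): with VM_LEVEL_0_NPAGES == 512, an object
 * with pg_color 7 and pindex 1000 yields (7 + 1000) & 511 == 495.  The
 * "pg_color" term biases the mapping so that a page's offset within its
 * reservation is consistent with the physical alignment chosen when the
 * object was colored.
 */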
/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}
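/*
 * Example (editor's note): on LP64, NBPOPMAP is 64, so with 512 pages per
 * reservation NPOPMAP is 512 / 64 = 8.  Page index 100 maps to bit
 * 100 % 64 = 36 of word 100 / 64 = 1, so popmap_set(popmap, 100) performs
 * popmap[1] |= 1UL << 36.
 */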
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * r - vm_reserv_lock
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * c - constant after boot
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	domain;			/* (c) NUMA domain. */
	uint16_t	popcnt;			/* (r) # of pages in use */
	char		inpartpopq;		/* (d) */
	popmap_t	popmap[NPOPMAP];	/* (r) bit vector, used pages */
};

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	&vm_reserv_domain_locks[(d)]
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of reservation structures
 * than there are (physical) superpages.  These "invalid" reservation
 * structures exist to trade off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's "pages"
 * field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially populated reservation queue
 *
 * This queue enables the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of this queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because it is needed before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)				\
	(((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)				\
	&vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)					\
	mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)					\
	mtx_unlock(vm_reserv_object_lock_ptr((object)))

static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
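/*
 * Example output (editor's note; values are illustrative only):
 *
 *	# sysctl vm.reserv.partpopq
 *	vm.reserv.partpopq:
 *	DOMAIN    LEVEL     SIZE  NUMBER
 *
 *	     0,      -1,    712K,     23
 */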
/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
	int i;

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt != 0) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
	}
	vm_reserv_domain_unlock(rv->domain);
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
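/*
 * Example (editor's note, assuming VM_LEVEL_0_SHIFT == 21 as on amd64): a
 * page at physical address 0x40203000 maps to reservation number
 * 0x40203000 >> 21 = 0x201, i.e., &vm_reserv_array[0x201].  Every page in
 * the same 2MB-aligned region maps to the same element.
 */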
/*
 * Returns an existing reservation containing the given page index within the
 * given object, or NULL if none exists.  In either case, initializes
 * "*msuccp" to the page that follows "pindex" within the object, if any.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
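/*
 * Example (editor's note): with VM_LEVEL_0_NPAGES == 512 and
 * rv->pindex == 1024, the reservation covers pindex values [1024, 1536),
 * so vm_reserv_has_pindex(rv, 1300) is TRUE ((1300 - 1024) & ~511 == 0)
 * and vm_reserv_has_pindex(rv, 1536) is FALSE.
 */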
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 *
 * The reservation must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == 0,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = 1;
	}
	vm_reserv_domain_unlock(rv->domain);
}

/*
 * Attempts to allocate a contiguous set of physical pages from existing
 * reservations.  See vm_reserv_alloc_contig() for a description of the
 * function's parameters.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int i, index;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_extend_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);
	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend_contig: Domain mismatch from reservation."));
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	domain = rv->domain;
	vmd = VM_DOMAIN(domain);
	vm_reserv_lock(rv);
	if (rv->object != object)
		goto out;
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		goto out;
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++) {
		if (popmap_is_set(rv->popmap, index + i))
			goto out;
	}
	if (!vm_domain_allocate(vmd, req, npages))
		goto out;
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	vm_reserv_unlock(rv);
	return (m);

out:
	vm_reserv_unlock(rv);
	return (NULL);
}
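/*
 * Worked example for the boundary test above (editor's note): the run
 * [pa, pa + size - 1] crosses a "boundary"-aligned address exactly when
 * pa and pa + size - 1 differ in some bit at or above log2(boundary),
 * which is what ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0 detects.
 * For instance, with pa = 0x1F000, size = 0x2000, and boundary = 0x10000,
 * the XOR is 0x1F000 ^ 0x20FFF = 0x3FFFF, and 0x3FFFF & ~0xFFFF = 0x30000,
 * which is non-zero: the run crosses the boundary at 0x20000 and is
 * rejected.
 */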
/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from newly created reservations.  All of the physical pages must be at or
 * above the given physical address "low" and below the given physical address
 * "high".  The given value "alignment" determines the alignment of the first
 * physical page in the set.  If the given value "boundary" is non-zero, then
 * the set of physical pages cannot cross any physical address boundary that
 * is a multiple of that value.  Both "alignment" and "boundary" must be a
 * power of two.
 *
 * Callers should first invoke vm_reserv_extend_contig() to attempt an
 * allocation from existing reservations.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_phys_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}
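/*
 * Sizing example for the computation above (editor's note): with
 * VM_LEVEL_0_NPAGES == 512, pg_color == 0, pindex == 530, and
 * npages == 100, VM_RESERV_INDEX() is 530 & 511 = 18, so "first" is
 * 530 - 18 = 512, "minpages" is 18 + 100 = 118, and "maxpages" is
 * roundup2(118, 512) = 512: a single new reservation suffices.
 */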
/*
 * Attempts to extend an existing reservation and allocate the page to the
 * object.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
    vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Could a reservation currently exist?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);

	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend: Domain mismatch from reservation."));
	domain = rv->domain;
	vmd = VM_DOMAIN(domain);
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	vm_reserv_lock(rv);
	/* Handle reclaim race. */
	if (rv->object != object ||
	    /* Handle vm_page_rename(m, new_object, ...). */
	    popmap_is_set(rv->popmap, index)) {
		m = NULL;
		goto out;
	}
	if (vm_domain_allocate(vmd, req, 1) == 0)
		m = NULL;
	else
		vm_reserv_populate(rv, index);
out:
	vm_reserv_unlock(rv);

	return (m);
}

/*
 * Attempts to allocate a new reservation for the object, and allocates a
 * page from that reservation.  Callers should first invoke vm_reserv_extend()
 * to attempt an allocation from an existing reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}
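/*
 * Illustrative call order (editor's note): a sketch of the intended caller
 * protocol described in the comments above, not a verbatim excerpt from
 * the page allocator:
 *
 *	m = vm_reserv_extend(req, object, pindex, domain, mpred);
 *	if (m == NULL)
 *		m = vm_reserv_alloc_page(req, object, pindex, domain,
 *		    mpred);
 */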
/*
 * Breaks the given reservation.  All free pages in the reservation are
 * returned to the physical memory allocator.  The reservation's population
 * count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue, and it must be locked.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	int begin_zeroes, hi, i, lo;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	rv->pages->psind = 0;
	i = hi = 0;
	do {
		/* Find the next 0 bit.  Any previous 0 bits are < "hi". */
		lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
		if (lo == 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] = 0;
			rv->popcnt -= NBPOPMAP - hi;
			while (++i < NPOPMAP) {
				lo = ffsl(~rv->popmap[i]);
				if (lo == 0) {
					rv->popmap[i] = 0;
					rv->popcnt -= NBPOPMAP;
				} else
					break;
			}
			if (i == NPOPMAP)
				break;
			hi = 0;
		}
		KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
		/* Convert from ffsl() to ordinary bit numbering. */
		lo--;
		if (lo > 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] &= ~((1UL << lo) - 1);
			rv->popcnt -= lo - hi;
		}
		begin_zeroes = NBPOPMAP * i + lo;
		/* Find the next 1 bit. */
		do
			hi = ffsl(rv->popmap[i]);
		while (hi == 0 && ++i < NPOPMAP);
		if (i != NPOPMAP)
			/* Convert from ffsl() to ordinary bit numbering. */
			hi--;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
		    hi - begin_zeroes);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
	} while (i < NPOPMAP);
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
	counter_u64_add(vm_reserv_broken, 1);
}
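/*
 * Scan example (editor's note, using a hypothetical 64-page reservation so
 * that NPOPMAP == 1): suppose popmap[0] == 0xF0, i.e., only pages 4-7 are
 * in use and popcnt == 4.  The loop above first frees the run of free
 * pages [0, 3] to vm_phys, then clears bits 0-7 (reducing popcnt by the
 * four set bits) and frees the run [8, 63].  The in-use pages 4-7 remain
 * allocated to their object; the reservation simply stops tracking them.
 */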
/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		mtx_init(&vm_reserv_domain_locks[i], "VM reserv domain", NULL,
		    MTX_DEF);
		TAILQ_INIT(&vm_rvq_partpop[i]);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}
/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 *
 * The reservation must be locked.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_domain_lock(rv->domain);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_domain_unlock(rv->domain);
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks the reservation at the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 */
boolean_t
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	while ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
		vm_reserv_lock(rv);
		if (rv != TAILQ_FIRST(&vm_rvq_partpop[domain])) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (TRUE);
	}
	return (FALSE);
}
/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns TRUE if a reservation is broken and FALSE otherwise.
 */
boolean_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_reserv_t rv, rvn;
	int hi, i, lo, low_index, next_free;

	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
	vm_reserv_domain_lock(domain);
again:
	for (rv = TAILQ_FIRST(&vm_rvq_partpop[domain]); rv != NULL; rv = rvn) {
		rvn = TAILQ_NEXT(rv, partpopq);
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (vm_reserv_trylock(rv) == 0) {
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (!rv->inpartpopq) {
				vm_reserv_domain_lock(domain);
				/*
				 * "rvn" may be NULL if "rv" was the queue's
				 * tail, or stale if it was concurrently
				 * removed from the queue; restart the scan
				 * in either case.
				 */
				if (rvn == NULL || !rvn->inpartpopq)
					goto again;
				continue;
			}
		} else
			vm_reserv_domain_unlock(domain);
		if (pa < low) {
			/* Start the search for free pages at "low". */
			low_index = (low + PAGE_MASK - pa) >> PAGE_SHIFT;
			i = low_index / NBPOPMAP;
			hi = low_index % NBPOPMAP;
		} else
			i = hi = 0;
		do {
			/* Find the next free page. */
			lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
			while (lo == 0 && ++i < NPOPMAP)
				lo = ffsl(~rv->popmap[i]);
			if (i == NPOPMAP)
				break;
			/* Convert from ffsl() to ordinary bit numbering. */
			lo--;
			next_free = NBPOPMAP * i + lo;
			pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
			KASSERT(pa >= low,
			    ("vm_reserv_reclaim_contig: pa is too low"));
			if (pa + size > high) {
				/* The rest of this reservation is too high. */
				break;
			} else if ((pa & (alignment - 1)) != 0 ||
			    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/*
				 * The current page doesn't meet the alignment
				 * and/or boundary requirements.  Continue
				 * searching this reservation until the rest
				 * of its free pages are either excluded or
				 * exhausted.
				 */
				hi = lo + 1;
				if (hi >= NBPOPMAP) {
					hi = 0;
					i++;
				}
				continue;
			}
			/* Find the next used page. */
			hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
			while (hi == 0 && ++i < NPOPMAP) {
				if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
				    size) {
					vm_reserv_reclaim(rv);
					vm_reserv_unlock(rv);
					return (TRUE);
				}
				hi = ffsl(rv->popmap[i]);
			}
			/* Convert from ffsl() to ordinary bit numbering. */
			if (i != NPOPMAP)
				hi--;
			if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
			    size) {
				vm_reserv_reclaim(rv);
				vm_reserv_unlock(rv);
				return (TRUE);
			}
		} while (i < NPOPMAP);
		vm_reserv_unlock(rv);
		vm_reserv_domain_lock(domain);
		if (rvn != NULL && !rvn->inpartpopq)
			goto again;
	}
	vm_reserv_domain_unlock(domain);
	return (FALSE);
}
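/*
 * Search example (editor's note): for npages == 16 with an unconstrained
 * [low, high) window, alignment == PAGE_SIZE, and boundary == 0, the loop
 * above scans each partially populated reservation's popmap for a run of
 * at least 16 clear bits.  The first reservation found to contain such a
 * run is broken in its entirety, releasing all of its free pages,
 * including the qualifying run, to the physical memory allocator.
 */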
/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}
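/*
 * Sizing example (editor's note, assuming a 2MB VM_LEVEL_0_SIZE): with
 * "high_water" at 16GB, the array needs howmany(16GB, 2MB) = 8192
 * entries.  At roughly 128-192 bytes of struct vm_reserv apiece on LP64
 * (the popmap alone is 64 bytes; the exact size is machine-dependent),
 * that is on the order of 1-2MB of boot-time memory.
 */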
/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
 */
static void
vm_reserv_counter_init(void *unused)
{

	vm_reserv_freed = counter_u64_alloc(M_WAITOK);
	vm_reserv_broken = counter_u64_alloc(M_WAITOK);
	vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
		m = rv->pages;
	else
		m = NULL;

	return (m);
}

#endif	/* VM_NRESERVLEVEL > 0 */