1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002-2006 Rice University 5 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu> 6 * All rights reserved. 7 * 8 * This software was developed for the FreeBSD Project by Alan L. Cox, 9 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 30 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Superpage reservation management module 36 * 37 * Any external functions defined by this module are only to be used by the 38 * virtual memory system. 39 */ 40 41 #include <sys/cdefs.h> 42 __FBSDID("$FreeBSD$"); 43 44 #include "opt_vm.h" 45 46 #include <sys/param.h> 47 #include <sys/kernel.h> 48 #include <sys/lock.h> 49 #include <sys/malloc.h> 50 #include <sys/mutex.h> 51 #include <sys/queue.h> 52 #include <sys/rwlock.h> 53 #include <sys/sbuf.h> 54 #include <sys/sysctl.h> 55 #include <sys/systm.h> 56 #include <sys/counter.h> 57 #include <sys/ktr.h> 58 #include <sys/vmmeter.h> 59 #include <sys/smp.h> 60 61 #include <vm/vm.h> 62 #include <vm/vm_extern.h> 63 #include <vm/vm_param.h> 64 #include <vm/vm_object.h> 65 #include <vm/vm_page.h> 66 #include <vm/vm_pageout.h> 67 #include <vm/vm_pagequeue.h> 68 #include <vm/vm_phys.h> 69 #include <vm/vm_radix.h> 70 #include <vm/vm_reserv.h> 71 72 /* 73 * The reservation system supports the speculative allocation of large physical 74 * pages ("superpages"). Speculative allocation enables the fully automatic 75 * utilization of superpages by the virtual memory system. In other words, no 76 * programmatic directives are required to use superpages. 77 */ 78 79 #if VM_NRESERVLEVEL > 0 80 81 #ifndef VM_LEVEL_0_ORDER_MAX 82 #define VM_LEVEL_0_ORDER_MAX VM_LEVEL_0_ORDER 83 #endif 84 85 /* 86 * The number of small pages that are contained in a level 0 reservation 87 */ 88 #define VM_LEVEL_0_NPAGES (1 << VM_LEVEL_0_ORDER) 89 #define VM_LEVEL_0_NPAGES_MAX (1 << VM_LEVEL_0_ORDER_MAX) 90 91 /* 92 * The number of bits by which a physical address is shifted to obtain the 93 * reservation number 94 */ 95 #define VM_LEVEL_0_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT) 96 97 /* 98 * The size of a level 0 reservation in bytes 99 */ 100 #define VM_LEVEL_0_SIZE (1 << VM_LEVEL_0_SHIFT) 101 102 /* 103 * Computes the index of the small page underlying the given (object, pindex) 104 * within the reservation's array of small pages. 105 */ 106 #define VM_RESERV_INDEX(object, pindex) \ 107 (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1)) 108 109 /* 110 * The size of a population map entry 111 */ 112 typedef u_long popmap_t; 113 114 /* 115 * The number of bits in a population map entry 116 */ 117 #define NBPOPMAP (NBBY * sizeof(popmap_t)) 118 119 /* 120 * The number of population map entries in a reservation 121 */ 122 #define NPOPMAP howmany(VM_LEVEL_0_NPAGES, NBPOPMAP) 123 #define NPOPMAP_MAX howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP) 124 125 /* 126 * Number of elapsed ticks before we update the LRU queue position. Used 127 * to reduce contention and churn on the list. 128 */ 129 #define PARTPOPSLOP 1 130 131 /* 132 * Clear a bit in the population map. 133 */ 134 static __inline void 135 popmap_clear(popmap_t popmap[], int i) 136 { 137 138 popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP)); 139 } 140 141 /* 142 * Set a bit in the population map. 143 */ 144 static __inline void 145 popmap_set(popmap_t popmap[], int i) 146 { 147 148 popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP); 149 } 150 151 /* 152 * Is a bit in the population map clear? 153 */ 154 static __inline boolean_t 155 popmap_is_clear(popmap_t popmap[], int i) 156 { 157 158 return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0); 159 } 160 161 /* 162 * Is a bit in the population map set? 163 */ 164 static __inline boolean_t 165 popmap_is_set(popmap_t popmap[], int i) 166 { 167 168 return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0); 169 } 170 171 /* 172 * The reservation structure 173 * 174 * A reservation structure is constructed whenever a large physical page is 175 * speculatively allocated to an object. The reservation provides the small 176 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets 177 * within that object. The reservation's "popcnt" tracks the number of these 178 * small physical pages that are in use at any given time. When and if the 179 * reservation is not fully utilized, it appears in the queue of partially 180 * populated reservations. The reservation always appears on the containing 181 * object's list of reservations. 182 * 183 * A partially populated reservation can be broken and reclaimed at any time. 184 * 185 * c - constant after boot 186 * d - vm_reserv_domain_lock 187 * o - vm_reserv_object_lock 188 * r - vm_reserv_lock 189 * s - vm_reserv_domain_scan_lock 190 */ 191 struct vm_reserv { 192 struct mtx lock; /* reservation lock. */ 193 TAILQ_ENTRY(vm_reserv) partpopq; /* (d, r) per-domain queue. */ 194 LIST_ENTRY(vm_reserv) objq; /* (o, r) object queue */ 195 vm_object_t object; /* (o, r) containing object */ 196 vm_pindex_t pindex; /* (o, r) offset in object */ 197 vm_page_t pages; /* (c) first page */ 198 uint16_t popcnt; /* (r) # of pages in use */ 199 uint8_t domain; /* (c) NUMA domain. */ 200 char inpartpopq; /* (d, r) */ 201 int lasttick; /* (r) last pop update tick. */ 202 popmap_t popmap[NPOPMAP_MAX]; /* (r) bit vector, used pages */ 203 }; 204 205 TAILQ_HEAD(vm_reserv_queue, vm_reserv); 206 207 #define vm_reserv_lockptr(rv) (&(rv)->lock) 208 #define vm_reserv_assert_locked(rv) \ 209 mtx_assert(vm_reserv_lockptr(rv), MA_OWNED) 210 #define vm_reserv_lock(rv) mtx_lock(vm_reserv_lockptr(rv)) 211 #define vm_reserv_trylock(rv) mtx_trylock(vm_reserv_lockptr(rv)) 212 #define vm_reserv_unlock(rv) mtx_unlock(vm_reserv_lockptr(rv)) 213 214 /* 215 * The reservation array 216 * 217 * This array is analoguous in function to vm_page_array. It differs in the 218 * respect that it may contain a greater number of useful reservation 219 * structures than there are (physical) superpages. These "invalid" 220 * reservation structures exist to trade-off space for time in the 221 * implementation of vm_reserv_from_page(). Invalid reservation structures are 222 * distinguishable from "valid" reservation structures by inspecting the 223 * reservation's "pages" field. Invalid reservation structures have a NULL 224 * "pages" field. 225 * 226 * vm_reserv_from_page() maps a small (physical) page to an element of this 227 * array by computing a physical reservation number from the page's physical 228 * address. The physical reservation number is used as the array index. 229 * 230 * An "active" reservation is a valid reservation structure that has a non-NULL 231 * "object" field and a non-zero "popcnt" field. In other words, every active 232 * reservation belongs to a particular object. Moreover, every active 233 * reservation has an entry in the containing object's list of reservations. 234 */ 235 static vm_reserv_t vm_reserv_array; 236 237 /* 238 * The per-domain partially populated reservation queues 239 * 240 * These queues enable the fast recovery of an unused free small page from a 241 * partially populated reservation. The reservation at the head of a queue 242 * is the least recently changed, partially populated reservation. 243 * 244 * Access to this queue is synchronized by the per-domain reservation lock. 245 * Threads reclaiming free pages from the queue must hold the per-domain scan 246 * lock. 247 */ 248 struct vm_reserv_domain { 249 struct mtx lock; 250 struct vm_reserv_queue partpop; /* (d) */ 251 struct vm_reserv marker; /* (d, s) scan marker/lock */ 252 } __aligned(CACHE_LINE_SIZE); 253 254 static struct vm_reserv_domain vm_rvd[MAXMEMDOM]; 255 256 #define vm_reserv_domain_lockptr(d) (&vm_rvd[(d)].lock) 257 #define vm_reserv_domain_assert_locked(d) \ 258 mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED) 259 #define vm_reserv_domain_lock(d) mtx_lock(vm_reserv_domain_lockptr(d)) 260 #define vm_reserv_domain_unlock(d) mtx_unlock(vm_reserv_domain_lockptr(d)) 261 262 #define vm_reserv_domain_scan_lock(d) mtx_lock(&vm_rvd[(d)].marker.lock) 263 #define vm_reserv_domain_scan_unlock(d) mtx_unlock(&vm_rvd[(d)].marker.lock) 264 265 static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 266 "Reservation Info"); 267 268 static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken); 269 SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD, 270 &vm_reserv_broken, "Cumulative number of broken reservations"); 271 272 static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed); 273 SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD, 274 &vm_reserv_freed, "Cumulative number of freed reservations"); 275 276 static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS); 277 278 SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD, 279 NULL, 0, sysctl_vm_reserv_fullpop, "I", "Current number of full reservations"); 280 281 static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS); 282 283 SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, 284 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 285 sysctl_vm_reserv_partpopq, "A", 286 "Partially populated reservation queues"); 287 288 static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed); 289 SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD, 290 &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations"); 291 292 /* 293 * The object lock pool is used to synchronize the rvq. We can not use a 294 * pool mutex because it is required before malloc works. 295 * 296 * The "hash" function could be made faster without divide and modulo. 297 */ 298 #define VM_RESERV_OBJ_LOCK_COUNT MAXCPU 299 300 struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT]; 301 302 #define vm_reserv_object_lock_idx(object) \ 303 (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT) 304 #define vm_reserv_object_lock_ptr(object) \ 305 &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))] 306 #define vm_reserv_object_lock(object) \ 307 mtx_lock(vm_reserv_object_lock_ptr((object))) 308 #define vm_reserv_object_unlock(object) \ 309 mtx_unlock(vm_reserv_object_lock_ptr((object))) 310 311 static void vm_reserv_break(vm_reserv_t rv); 312 static void vm_reserv_depopulate(vm_reserv_t rv, int index); 313 static vm_reserv_t vm_reserv_from_page(vm_page_t m); 314 static boolean_t vm_reserv_has_pindex(vm_reserv_t rv, 315 vm_pindex_t pindex); 316 static void vm_reserv_populate(vm_reserv_t rv, int index); 317 static void vm_reserv_reclaim(vm_reserv_t rv); 318 319 /* 320 * Returns the current number of full reservations. 321 * 322 * Since the number of full reservations is computed without acquiring any 323 * locks, the returned value is inexact. 324 */ 325 static int 326 sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS) 327 { 328 vm_paddr_t paddr; 329 struct vm_phys_seg *seg; 330 vm_reserv_t rv; 331 int fullpop, segind; 332 333 fullpop = 0; 334 for (segind = 0; segind < vm_phys_nsegs; segind++) { 335 seg = &vm_phys_segs[segind]; 336 paddr = roundup2(seg->start, VM_LEVEL_0_SIZE); 337 #ifdef VM_PHYSSEG_SPARSE 338 rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) - 339 (seg->start >> VM_LEVEL_0_SHIFT); 340 #else 341 rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT]; 342 #endif 343 while (paddr + VM_LEVEL_0_SIZE > paddr && paddr + 344 VM_LEVEL_0_SIZE <= seg->end) { 345 fullpop += rv->popcnt == VM_LEVEL_0_NPAGES; 346 paddr += VM_LEVEL_0_SIZE; 347 rv++; 348 } 349 } 350 return (sysctl_handle_int(oidp, &fullpop, 0, req)); 351 } 352 353 /* 354 * Describes the current state of the partially populated reservation queue. 355 */ 356 static int 357 sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS) 358 { 359 struct sbuf sbuf; 360 vm_reserv_t rv; 361 int counter, error, domain, level, unused_pages; 362 363 error = sysctl_wire_old_buffer(req, 0); 364 if (error != 0) 365 return (error); 366 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 367 sbuf_printf(&sbuf, "\nDOMAIN LEVEL SIZE NUMBER\n\n"); 368 for (domain = 0; domain < vm_ndomains; domain++) { 369 for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) { 370 counter = 0; 371 unused_pages = 0; 372 vm_reserv_domain_lock(domain); 373 TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { 374 if (rv == &vm_rvd[domain].marker) 375 continue; 376 counter++; 377 unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt; 378 } 379 vm_reserv_domain_unlock(domain); 380 sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n", 381 domain, level, 382 unused_pages * ((int)PAGE_SIZE / 1024), counter); 383 } 384 } 385 error = sbuf_finish(&sbuf); 386 sbuf_delete(&sbuf); 387 return (error); 388 } 389 390 /* 391 * Remove a reservation from the object's objq. 392 */ 393 static void 394 vm_reserv_remove(vm_reserv_t rv) 395 { 396 vm_object_t object; 397 398 vm_reserv_assert_locked(rv); 399 CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 400 __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 401 KASSERT(rv->object != NULL, 402 ("vm_reserv_remove: reserv %p is free", rv)); 403 KASSERT(!rv->inpartpopq, 404 ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv)); 405 object = rv->object; 406 vm_reserv_object_lock(object); 407 LIST_REMOVE(rv, objq); 408 rv->object = NULL; 409 vm_reserv_object_unlock(object); 410 } 411 412 /* 413 * Insert a new reservation into the object's objq. 414 */ 415 static void 416 vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex) 417 { 418 int i; 419 420 vm_reserv_assert_locked(rv); 421 CTR6(KTR_VM, 422 "%s: rv %p(%p) object %p new %p popcnt %d", 423 __FUNCTION__, rv, rv->pages, rv->object, object, 424 rv->popcnt); 425 KASSERT(rv->object == NULL, 426 ("vm_reserv_insert: reserv %p isn't free", rv)); 427 KASSERT(rv->popcnt == 0, 428 ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv)); 429 KASSERT(!rv->inpartpopq, 430 ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv)); 431 for (i = 0; i < NPOPMAP; i++) 432 KASSERT(rv->popmap[i] == 0, 433 ("vm_reserv_insert: reserv %p's popmap is corrupted", rv)); 434 vm_reserv_object_lock(object); 435 rv->pindex = pindex; 436 rv->object = object; 437 rv->lasttick = ticks; 438 LIST_INSERT_HEAD(&object->rvq, rv, objq); 439 vm_reserv_object_unlock(object); 440 } 441 442 /* 443 * Reduces the given reservation's population count. If the population count 444 * becomes zero, the reservation is destroyed. Additionally, moves the 445 * reservation to the tail of the partially populated reservation queue if the 446 * population count is non-zero. 447 */ 448 static void 449 vm_reserv_depopulate(vm_reserv_t rv, int index) 450 { 451 struct vm_domain *vmd; 452 453 vm_reserv_assert_locked(rv); 454 CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 455 __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 456 KASSERT(rv->object != NULL, 457 ("vm_reserv_depopulate: reserv %p is free", rv)); 458 KASSERT(popmap_is_set(rv->popmap, index), 459 ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv, 460 index)); 461 KASSERT(rv->popcnt > 0, 462 ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv)); 463 KASSERT(rv->domain < vm_ndomains, 464 ("vm_reserv_depopulate: reserv %p's domain is corrupted %d", 465 rv, rv->domain)); 466 if (rv->popcnt == VM_LEVEL_0_NPAGES) { 467 KASSERT(rv->pages->psind == 1, 468 ("vm_reserv_depopulate: reserv %p is already demoted", 469 rv)); 470 rv->pages->psind = 0; 471 } 472 popmap_clear(rv->popmap, index); 473 rv->popcnt--; 474 if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP || 475 rv->popcnt == 0) { 476 vm_reserv_domain_lock(rv->domain); 477 if (rv->inpartpopq) { 478 TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); 479 rv->inpartpopq = FALSE; 480 } 481 if (rv->popcnt != 0) { 482 rv->inpartpopq = TRUE; 483 TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, 484 partpopq); 485 } 486 vm_reserv_domain_unlock(rv->domain); 487 rv->lasttick = ticks; 488 } 489 vmd = VM_DOMAIN(rv->domain); 490 if (rv->popcnt == 0) { 491 vm_reserv_remove(rv); 492 vm_domain_free_lock(vmd); 493 vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER); 494 vm_domain_free_unlock(vmd); 495 counter_u64_add(vm_reserv_freed, 1); 496 } 497 vm_domain_freecnt_inc(vmd, 1); 498 } 499 500 /* 501 * Returns the reservation to which the given page might belong. 502 */ 503 static __inline vm_reserv_t 504 vm_reserv_from_page(vm_page_t m) 505 { 506 #ifdef VM_PHYSSEG_SPARSE 507 struct vm_phys_seg *seg; 508 509 seg = &vm_phys_segs[m->segind]; 510 return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) - 511 (seg->start >> VM_LEVEL_0_SHIFT)); 512 #else 513 return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]); 514 #endif 515 } 516 517 /* 518 * Returns an existing reservation or NULL and initialized successor pointer. 519 */ 520 static vm_reserv_t 521 vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex, 522 vm_page_t mpred, vm_page_t *msuccp) 523 { 524 vm_reserv_t rv; 525 vm_page_t msucc; 526 527 msucc = NULL; 528 if (mpred != NULL) { 529 KASSERT(mpred->object == object, 530 ("vm_reserv_from_object: object doesn't contain mpred")); 531 KASSERT(mpred->pindex < pindex, 532 ("vm_reserv_from_object: mpred doesn't precede pindex")); 533 rv = vm_reserv_from_page(mpred); 534 if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) 535 goto found; 536 msucc = TAILQ_NEXT(mpred, listq); 537 } else 538 msucc = TAILQ_FIRST(&object->memq); 539 if (msucc != NULL) { 540 KASSERT(msucc->pindex > pindex, 541 ("vm_reserv_from_object: msucc doesn't succeed pindex")); 542 rv = vm_reserv_from_page(msucc); 543 if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) 544 goto found; 545 } 546 rv = NULL; 547 548 found: 549 *msuccp = msucc; 550 551 return (rv); 552 } 553 554 /* 555 * Returns TRUE if the given reservation contains the given page index and 556 * FALSE otherwise. 557 */ 558 static __inline boolean_t 559 vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex) 560 { 561 562 return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0); 563 } 564 565 /* 566 * Increases the given reservation's population count. Moves the reservation 567 * to the tail of the partially populated reservation queue. 568 */ 569 static void 570 vm_reserv_populate(vm_reserv_t rv, int index) 571 { 572 573 vm_reserv_assert_locked(rv); 574 CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 575 __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 576 KASSERT(rv->object != NULL, 577 ("vm_reserv_populate: reserv %p is free", rv)); 578 KASSERT(popmap_is_clear(rv->popmap, index), 579 ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv, 580 index)); 581 KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES, 582 ("vm_reserv_populate: reserv %p is already full", rv)); 583 KASSERT(rv->pages->psind == 0, 584 ("vm_reserv_populate: reserv %p is already promoted", rv)); 585 KASSERT(rv->domain < vm_ndomains, 586 ("vm_reserv_populate: reserv %p's domain is corrupted %d", 587 rv, rv->domain)); 588 popmap_set(rv->popmap, index); 589 rv->popcnt++; 590 if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP && 591 rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES) 592 return; 593 rv->lasttick = ticks; 594 vm_reserv_domain_lock(rv->domain); 595 if (rv->inpartpopq) { 596 TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); 597 rv->inpartpopq = FALSE; 598 } 599 if (rv->popcnt < VM_LEVEL_0_NPAGES) { 600 rv->inpartpopq = TRUE; 601 TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq); 602 } else { 603 KASSERT(rv->pages->psind == 0, 604 ("vm_reserv_populate: reserv %p is already promoted", 605 rv)); 606 rv->pages->psind = 1; 607 } 608 vm_reserv_domain_unlock(rv->domain); 609 } 610 611 /* 612 * Allocates a contiguous set of physical pages of the given size "npages" 613 * from existing or newly created reservations. All of the physical pages 614 * must be at or above the given physical address "low" and below the given 615 * physical address "high". The given value "alignment" determines the 616 * alignment of the first physical page in the set. If the given value 617 * "boundary" is non-zero, then the set of physical pages cannot cross any 618 * physical address boundary that is a multiple of that value. Both 619 * "alignment" and "boundary" must be a power of two. 620 * 621 * The page "mpred" must immediately precede the offset "pindex" within the 622 * specified object. 623 * 624 * The object must be locked. 625 */ 626 vm_page_t 627 vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain, 628 int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high, 629 u_long alignment, vm_paddr_t boundary) 630 { 631 struct vm_domain *vmd; 632 vm_paddr_t pa, size; 633 vm_page_t m, m_ret, msucc; 634 vm_pindex_t first, leftcap, rightcap; 635 vm_reserv_t rv; 636 u_long allocpages, maxpages, minpages; 637 int i, index, n; 638 639 VM_OBJECT_ASSERT_WLOCKED(object); 640 KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0")); 641 642 /* 643 * Is a reservation fundamentally impossible? 644 */ 645 if (pindex < VM_RESERV_INDEX(object, pindex) || 646 pindex + npages > object->size) 647 return (NULL); 648 649 /* 650 * All reservations of a particular size have the same alignment. 651 * Assuming that the first page is allocated from a reservation, the 652 * least significant bits of its physical address can be determined 653 * from its offset from the beginning of the reservation and the size 654 * of the reservation. 655 * 656 * Could the specified index within a reservation of the smallest 657 * possible size satisfy the alignment and boundary requirements? 658 */ 659 pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT; 660 size = npages << PAGE_SHIFT; 661 if (!vm_addr_ok(pa, size, alignment, boundary)) 662 return (NULL); 663 664 /* 665 * Look for an existing reservation. 666 */ 667 rv = vm_reserv_from_object(object, pindex, mpred, &msucc); 668 if (rv != NULL) { 669 KASSERT(object != kernel_object || rv->domain == domain, 670 ("vm_reserv_alloc_contig: domain mismatch")); 671 index = VM_RESERV_INDEX(object, pindex); 672 /* Does the allocation fit within the reservation? */ 673 if (index + npages > VM_LEVEL_0_NPAGES) 674 return (NULL); 675 domain = rv->domain; 676 vmd = VM_DOMAIN(domain); 677 vm_reserv_lock(rv); 678 /* Handle reclaim race. */ 679 if (rv->object != object) 680 goto out; 681 m = &rv->pages[index]; 682 pa = VM_PAGE_TO_PHYS(m); 683 if (pa < low || pa + size > high || 684 !vm_addr_ok(pa, size, alignment, boundary)) 685 goto out; 686 /* Handle vm_page_rename(m, new_object, ...). */ 687 for (i = 0; i < npages; i++) 688 if (popmap_is_set(rv->popmap, index + i)) 689 goto out; 690 if (!vm_domain_allocate(vmd, req, npages)) 691 goto out; 692 for (i = 0; i < npages; i++) 693 vm_reserv_populate(rv, index + i); 694 vm_reserv_unlock(rv); 695 return (m); 696 out: 697 vm_reserv_unlock(rv); 698 return (NULL); 699 } 700 701 /* 702 * Could at least one reservation fit between the first index to the 703 * left that can be used ("leftcap") and the first index to the right 704 * that cannot be used ("rightcap")? 705 * 706 * We must synchronize with the reserv object lock to protect the 707 * pindex/object of the resulting reservations against rename while 708 * we are inspecting. 709 */ 710 first = pindex - VM_RESERV_INDEX(object, pindex); 711 minpages = VM_RESERV_INDEX(object, pindex) + npages; 712 maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES); 713 allocpages = maxpages; 714 vm_reserv_object_lock(object); 715 if (mpred != NULL) { 716 if ((rv = vm_reserv_from_page(mpred))->object != object) 717 leftcap = mpred->pindex + 1; 718 else 719 leftcap = rv->pindex + VM_LEVEL_0_NPAGES; 720 if (leftcap > first) { 721 vm_reserv_object_unlock(object); 722 return (NULL); 723 } 724 } 725 if (msucc != NULL) { 726 if ((rv = vm_reserv_from_page(msucc))->object != object) 727 rightcap = msucc->pindex; 728 else 729 rightcap = rv->pindex; 730 if (first + maxpages > rightcap) { 731 if (maxpages == VM_LEVEL_0_NPAGES) { 732 vm_reserv_object_unlock(object); 733 return (NULL); 734 } 735 736 /* 737 * At least one reservation will fit between "leftcap" 738 * and "rightcap". However, a reservation for the 739 * last of the requested pages will not fit. Reduce 740 * the size of the upcoming allocation accordingly. 741 */ 742 allocpages = minpages; 743 } 744 } 745 vm_reserv_object_unlock(object); 746 747 /* 748 * Would the last new reservation extend past the end of the object? 749 * 750 * If the object is unlikely to grow don't allocate a reservation for 751 * the tail. 752 */ 753 if ((object->flags & OBJ_ANON) == 0 && 754 first + maxpages > object->size) { 755 if (maxpages == VM_LEVEL_0_NPAGES) 756 return (NULL); 757 allocpages = minpages; 758 } 759 760 /* 761 * Allocate the physical pages. The alignment and boundary specified 762 * for this allocation may be different from the alignment and 763 * boundary specified for the requested pages. For instance, the 764 * specified index may not be the first page within the first new 765 * reservation. 766 */ 767 m = NULL; 768 vmd = VM_DOMAIN(domain); 769 if (vm_domain_allocate(vmd, req, npages)) { 770 vm_domain_free_lock(vmd); 771 m = vm_phys_alloc_contig(domain, allocpages, low, high, 772 ulmax(alignment, VM_LEVEL_0_SIZE), 773 boundary > VM_LEVEL_0_SIZE ? boundary : 0); 774 vm_domain_free_unlock(vmd); 775 if (m == NULL) { 776 vm_domain_freecnt_inc(vmd, npages); 777 return (NULL); 778 } 779 } else 780 return (NULL); 781 KASSERT(vm_page_domain(m) == domain, 782 ("vm_reserv_alloc_contig: Page domain does not match requested.")); 783 784 /* 785 * The allocated physical pages always begin at a reservation 786 * boundary, but they do not always end at a reservation boundary. 787 * Initialize every reservation that is completely covered by the 788 * allocated physical pages. 789 */ 790 m_ret = NULL; 791 index = VM_RESERV_INDEX(object, pindex); 792 do { 793 rv = vm_reserv_from_page(m); 794 KASSERT(rv->pages == m, 795 ("vm_reserv_alloc_contig: reserv %p's pages is corrupted", 796 rv)); 797 vm_reserv_lock(rv); 798 vm_reserv_insert(rv, object, first); 799 n = ulmin(VM_LEVEL_0_NPAGES - index, npages); 800 for (i = 0; i < n; i++) 801 vm_reserv_populate(rv, index + i); 802 npages -= n; 803 if (m_ret == NULL) { 804 m_ret = &rv->pages[index]; 805 index = 0; 806 } 807 vm_reserv_unlock(rv); 808 m += VM_LEVEL_0_NPAGES; 809 first += VM_LEVEL_0_NPAGES; 810 allocpages -= VM_LEVEL_0_NPAGES; 811 } while (allocpages >= VM_LEVEL_0_NPAGES); 812 return (m_ret); 813 } 814 815 /* 816 * Allocate a physical page from an existing or newly created reservation. 817 * 818 * The page "mpred" must immediately precede the offset "pindex" within the 819 * specified object. 820 * 821 * The object must be locked. 822 */ 823 vm_page_t 824 vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain, 825 int req, vm_page_t mpred) 826 { 827 struct vm_domain *vmd; 828 vm_page_t m, msucc; 829 vm_pindex_t first, leftcap, rightcap; 830 vm_reserv_t rv; 831 int index; 832 833 VM_OBJECT_ASSERT_WLOCKED(object); 834 835 /* 836 * Is a reservation fundamentally impossible? 837 */ 838 if (pindex < VM_RESERV_INDEX(object, pindex) || 839 pindex >= object->size) 840 return (NULL); 841 842 /* 843 * Look for an existing reservation. 844 */ 845 rv = vm_reserv_from_object(object, pindex, mpred, &msucc); 846 if (rv != NULL) { 847 KASSERT(object != kernel_object || rv->domain == domain, 848 ("vm_reserv_alloc_page: domain mismatch")); 849 domain = rv->domain; 850 vmd = VM_DOMAIN(domain); 851 index = VM_RESERV_INDEX(object, pindex); 852 m = &rv->pages[index]; 853 vm_reserv_lock(rv); 854 /* Handle reclaim race. */ 855 if (rv->object != object || 856 /* Handle vm_page_rename(m, new_object, ...). */ 857 popmap_is_set(rv->popmap, index)) { 858 m = NULL; 859 goto out; 860 } 861 if (vm_domain_allocate(vmd, req, 1) == 0) 862 m = NULL; 863 else 864 vm_reserv_populate(rv, index); 865 out: 866 vm_reserv_unlock(rv); 867 return (m); 868 } 869 870 /* 871 * Could a reservation fit between the first index to the left that 872 * can be used and the first index to the right that cannot be used? 873 * 874 * We must synchronize with the reserv object lock to protect the 875 * pindex/object of the resulting reservations against rename while 876 * we are inspecting. 877 */ 878 first = pindex - VM_RESERV_INDEX(object, pindex); 879 vm_reserv_object_lock(object); 880 if (mpred != NULL) { 881 if ((rv = vm_reserv_from_page(mpred))->object != object) 882 leftcap = mpred->pindex + 1; 883 else 884 leftcap = rv->pindex + VM_LEVEL_0_NPAGES; 885 if (leftcap > first) { 886 vm_reserv_object_unlock(object); 887 return (NULL); 888 } 889 } 890 if (msucc != NULL) { 891 if ((rv = vm_reserv_from_page(msucc))->object != object) 892 rightcap = msucc->pindex; 893 else 894 rightcap = rv->pindex; 895 if (first + VM_LEVEL_0_NPAGES > rightcap) { 896 vm_reserv_object_unlock(object); 897 return (NULL); 898 } 899 } 900 vm_reserv_object_unlock(object); 901 902 /* 903 * Would the last new reservation extend past the end of the object? 904 * 905 * If the object is unlikely to grow don't allocate a reservation for 906 * the tail. 907 */ 908 if ((object->flags & OBJ_ANON) == 0 && 909 first + VM_LEVEL_0_NPAGES > object->size) 910 return (NULL); 911 912 /* 913 * Allocate and populate the new reservation. 914 */ 915 m = NULL; 916 vmd = VM_DOMAIN(domain); 917 if (vm_domain_allocate(vmd, req, 1)) { 918 vm_domain_free_lock(vmd); 919 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 920 VM_LEVEL_0_ORDER); 921 vm_domain_free_unlock(vmd); 922 if (m == NULL) { 923 vm_domain_freecnt_inc(vmd, 1); 924 return (NULL); 925 } 926 } else 927 return (NULL); 928 rv = vm_reserv_from_page(m); 929 vm_reserv_lock(rv); 930 KASSERT(rv->pages == m, 931 ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv)); 932 vm_reserv_insert(rv, object, first); 933 index = VM_RESERV_INDEX(object, pindex); 934 vm_reserv_populate(rv, index); 935 vm_reserv_unlock(rv); 936 937 return (&rv->pages[index]); 938 } 939 940 /* 941 * Breaks the given reservation. All free pages in the reservation 942 * are returned to the physical memory allocator. The reservation's 943 * population count and map are reset to their initial state. 944 * 945 * The given reservation must not be in the partially populated reservation 946 * queue. 947 */ 948 static void 949 vm_reserv_break(vm_reserv_t rv) 950 { 951 u_long changes; 952 int bitpos, hi, i, lo; 953 954 vm_reserv_assert_locked(rv); 955 CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 956 __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 957 vm_reserv_remove(rv); 958 rv->pages->psind = 0; 959 hi = lo = -1; 960 for (i = 0; i <= NPOPMAP; i++) { 961 /* 962 * "changes" is a bitmask that marks where a new sequence of 963 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1] 964 * considered to be 1 if and only if lo == hi. The bits of 965 * popmap[-1] and popmap[NPOPMAP] are considered all 1s. 966 */ 967 if (i == NPOPMAP) 968 changes = lo != hi; 969 else { 970 changes = rv->popmap[i]; 971 changes ^= (changes << 1) | (lo == hi); 972 rv->popmap[i] = 0; 973 } 974 while (changes != 0) { 975 /* 976 * If the next change marked begins a run of 0s, set 977 * lo to mark that position. Otherwise set hi and 978 * free pages from lo up to hi. 979 */ 980 bitpos = ffsl(changes) - 1; 981 changes ^= 1UL << bitpos; 982 if (lo == hi) 983 lo = NBPOPMAP * i + bitpos; 984 else { 985 hi = NBPOPMAP * i + bitpos; 986 vm_domain_free_lock(VM_DOMAIN(rv->domain)); 987 vm_phys_enqueue_contig(&rv->pages[lo], hi - lo); 988 vm_domain_free_unlock(VM_DOMAIN(rv->domain)); 989 lo = hi; 990 } 991 } 992 } 993 rv->popcnt = 0; 994 counter_u64_add(vm_reserv_broken, 1); 995 } 996 997 /* 998 * Breaks all reservations belonging to the given object. 999 */ 1000 void 1001 vm_reserv_break_all(vm_object_t object) 1002 { 1003 vm_reserv_t rv; 1004 1005 /* 1006 * This access of object->rvq is unsynchronized so that the 1007 * object rvq lock can nest after the domain_free lock. We 1008 * must check for races in the results. However, the object 1009 * lock prevents new additions, so we are guaranteed that when 1010 * it returns NULL the object is properly empty. 1011 */ 1012 while ((rv = LIST_FIRST(&object->rvq)) != NULL) { 1013 vm_reserv_lock(rv); 1014 /* Reclaim race. */ 1015 if (rv->object != object) { 1016 vm_reserv_unlock(rv); 1017 continue; 1018 } 1019 vm_reserv_domain_lock(rv->domain); 1020 if (rv->inpartpopq) { 1021 TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); 1022 rv->inpartpopq = FALSE; 1023 } 1024 vm_reserv_domain_unlock(rv->domain); 1025 vm_reserv_break(rv); 1026 vm_reserv_unlock(rv); 1027 } 1028 } 1029 1030 /* 1031 * Frees the given page if it belongs to a reservation. Returns TRUE if the 1032 * page is freed and FALSE otherwise. 1033 */ 1034 boolean_t 1035 vm_reserv_free_page(vm_page_t m) 1036 { 1037 vm_reserv_t rv; 1038 boolean_t ret; 1039 1040 rv = vm_reserv_from_page(m); 1041 if (rv->object == NULL) 1042 return (FALSE); 1043 vm_reserv_lock(rv); 1044 /* Re-validate after lock. */ 1045 if (rv->object != NULL) { 1046 vm_reserv_depopulate(rv, m - rv->pages); 1047 ret = TRUE; 1048 } else 1049 ret = FALSE; 1050 vm_reserv_unlock(rv); 1051 1052 return (ret); 1053 } 1054 1055 /* 1056 * Initializes the reservation management system. Specifically, initializes 1057 * the reservation array. 1058 * 1059 * Requires that vm_page_array and first_page are initialized! 1060 */ 1061 void 1062 vm_reserv_init(void) 1063 { 1064 vm_paddr_t paddr; 1065 struct vm_phys_seg *seg; 1066 struct vm_reserv *rv; 1067 struct vm_reserv_domain *rvd; 1068 #ifdef VM_PHYSSEG_SPARSE 1069 vm_pindex_t used; 1070 #endif 1071 int i, j, segind; 1072 1073 /* 1074 * Initialize the reservation array. Specifically, initialize the 1075 * "pages" field for every element that has an underlying superpage. 1076 */ 1077 #ifdef VM_PHYSSEG_SPARSE 1078 used = 0; 1079 #endif 1080 for (segind = 0; segind < vm_phys_nsegs; segind++) { 1081 seg = &vm_phys_segs[segind]; 1082 #ifdef VM_PHYSSEG_SPARSE 1083 seg->first_reserv = &vm_reserv_array[used]; 1084 used += howmany(seg->end, VM_LEVEL_0_SIZE) - 1085 seg->start / VM_LEVEL_0_SIZE; 1086 #else 1087 seg->first_reserv = 1088 &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT]; 1089 #endif 1090 paddr = roundup2(seg->start, VM_LEVEL_0_SIZE); 1091 rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) - 1092 (seg->start >> VM_LEVEL_0_SHIFT); 1093 while (paddr + VM_LEVEL_0_SIZE > paddr && paddr + 1094 VM_LEVEL_0_SIZE <= seg->end) { 1095 rv->pages = PHYS_TO_VM_PAGE(paddr); 1096 rv->domain = seg->domain; 1097 mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF); 1098 paddr += VM_LEVEL_0_SIZE; 1099 rv++; 1100 } 1101 } 1102 for (i = 0; i < MAXMEMDOM; i++) { 1103 rvd = &vm_rvd[i]; 1104 mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF); 1105 TAILQ_INIT(&rvd->partpop); 1106 mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF); 1107 1108 /* 1109 * Fully populated reservations should never be present in the 1110 * partially populated reservation queues. 1111 */ 1112 rvd->marker.popcnt = VM_LEVEL_0_NPAGES; 1113 for (j = 0; j < VM_LEVEL_0_NPAGES; j++) 1114 popmap_set(rvd->marker.popmap, j); 1115 } 1116 1117 for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++) 1118 mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL, 1119 MTX_DEF); 1120 } 1121 1122 /* 1123 * Returns true if the given page belongs to a reservation and that page is 1124 * free. Otherwise, returns false. 1125 */ 1126 bool 1127 vm_reserv_is_page_free(vm_page_t m) 1128 { 1129 vm_reserv_t rv; 1130 1131 rv = vm_reserv_from_page(m); 1132 if (rv->object == NULL) 1133 return (false); 1134 return (popmap_is_clear(rv->popmap, m - rv->pages)); 1135 } 1136 1137 /* 1138 * If the given page belongs to a reservation, returns the level of that 1139 * reservation. Otherwise, returns -1. 1140 */ 1141 int 1142 vm_reserv_level(vm_page_t m) 1143 { 1144 vm_reserv_t rv; 1145 1146 rv = vm_reserv_from_page(m); 1147 return (rv->object != NULL ? 0 : -1); 1148 } 1149 1150 /* 1151 * Returns a reservation level if the given page belongs to a fully populated 1152 * reservation and -1 otherwise. 1153 */ 1154 int 1155 vm_reserv_level_iffullpop(vm_page_t m) 1156 { 1157 vm_reserv_t rv; 1158 1159 rv = vm_reserv_from_page(m); 1160 return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1); 1161 } 1162 1163 /* 1164 * Remove a partially populated reservation from the queue. 1165 */ 1166 static void 1167 vm_reserv_dequeue(vm_reserv_t rv) 1168 { 1169 1170 vm_reserv_domain_assert_locked(rv->domain); 1171 vm_reserv_assert_locked(rv); 1172 CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 1173 __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 1174 KASSERT(rv->inpartpopq, 1175 ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv)); 1176 1177 TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); 1178 rv->inpartpopq = FALSE; 1179 } 1180 1181 /* 1182 * Breaks the given partially populated reservation, releasing its free pages 1183 * to the physical memory allocator. 1184 */ 1185 static void 1186 vm_reserv_reclaim(vm_reserv_t rv) 1187 { 1188 1189 vm_reserv_assert_locked(rv); 1190 CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", 1191 __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); 1192 if (rv->inpartpopq) { 1193 vm_reserv_domain_lock(rv->domain); 1194 vm_reserv_dequeue(rv); 1195 vm_reserv_domain_unlock(rv->domain); 1196 } 1197 vm_reserv_break(rv); 1198 counter_u64_add(vm_reserv_reclaimed, 1); 1199 } 1200 1201 /* 1202 * Breaks a reservation near the head of the partially populated reservation 1203 * queue, releasing its free pages to the physical memory allocator. Returns 1204 * TRUE if a reservation is broken and FALSE otherwise. 1205 */ 1206 bool 1207 vm_reserv_reclaim_inactive(int domain) 1208 { 1209 vm_reserv_t rv; 1210 1211 vm_reserv_domain_lock(domain); 1212 TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { 1213 /* 1214 * A locked reservation is likely being updated or reclaimed, 1215 * so just skip ahead. 1216 */ 1217 if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) { 1218 vm_reserv_dequeue(rv); 1219 break; 1220 } 1221 } 1222 vm_reserv_domain_unlock(domain); 1223 if (rv != NULL) { 1224 vm_reserv_reclaim(rv); 1225 vm_reserv_unlock(rv); 1226 return (true); 1227 } 1228 return (false); 1229 } 1230 1231 /* 1232 * Determine whether this reservation has free pages that satisfy the given 1233 * request for contiguous physical memory. Start searching from the lower 1234 * bound, defined by lo, and stop at the upper bound, hi. Return the index 1235 * of the first satisfactory free page, or -1 if none is found. 1236 */ 1237 static int 1238 vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo, 1239 int hi, int ppn_align, int ppn_bound) 1240 { 1241 u_long changes; 1242 int bitpos, bits_left, i, n; 1243 1244 vm_reserv_assert_locked(rv); 1245 KASSERT(npages <= VM_LEVEL_0_NPAGES - 1, 1246 ("%s: Too many pages", __func__)); 1247 KASSERT(ppn_bound <= VM_LEVEL_0_NPAGES, 1248 ("%s: Too big a boundary for reservation size", __func__)); 1249 KASSERT(npages <= ppn_bound, 1250 ("%s: Too many pages for given boundary", __func__)); 1251 KASSERT(ppn_align != 0 && powerof2(ppn_align), 1252 ("ppn_align is not a positive power of 2")); 1253 KASSERT(ppn_bound != 0 && powerof2(ppn_bound), 1254 ("ppn_bound is not a positive power of 2")); 1255 i = lo / NBPOPMAP; 1256 changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1); 1257 n = hi / NBPOPMAP; 1258 bits_left = hi % NBPOPMAP; 1259 hi = lo = -1; 1260 for (;;) { 1261 /* 1262 * "changes" is a bitmask that marks where a new sequence of 1263 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1] 1264 * considered to be 1 if and only if lo == hi. The bits of 1265 * popmap[-1] and popmap[NPOPMAP] are considered all 1s. 1266 */ 1267 changes ^= (changes << 1) | (lo == hi); 1268 while (changes != 0) { 1269 /* 1270 * If the next change marked begins a run of 0s, set 1271 * lo to mark that position. Otherwise set hi and 1272 * look for a satisfactory first page from lo up to hi. 1273 */ 1274 bitpos = ffsl(changes) - 1; 1275 changes ^= 1UL << bitpos; 1276 if (lo == hi) { 1277 lo = NBPOPMAP * i + bitpos; 1278 continue; 1279 } 1280 hi = NBPOPMAP * i + bitpos; 1281 if (lo < roundup2(lo, ppn_align)) { 1282 /* Skip to next aligned page. */ 1283 lo = roundup2(lo, ppn_align); 1284 if (lo >= VM_LEVEL_0_NPAGES) 1285 return (-1); 1286 } 1287 if (lo + npages > roundup2(lo, ppn_bound)) { 1288 /* Skip to next boundary-matching page. */ 1289 lo = roundup2(lo, ppn_bound); 1290 if (lo >= VM_LEVEL_0_NPAGES) 1291 return (-1); 1292 } 1293 if (lo + npages <= hi) 1294 return (lo); 1295 lo = hi; 1296 } 1297 if (++i < n) 1298 changes = rv->popmap[i]; 1299 else if (i == n) 1300 changes = bits_left == 0 ? -1UL : 1301 (rv->popmap[n] | (-1UL << bits_left)); 1302 else 1303 return (-1); 1304 } 1305 } 1306 1307 /* 1308 * Searches the partially populated reservation queue for the least recently 1309 * changed reservation with free pages that satisfy the given request for 1310 * contiguous physical memory. If a satisfactory reservation is found, it is 1311 * broken. Returns true if a reservation is broken and false otherwise. 1312 */ 1313 vm_page_t 1314 vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low, 1315 vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 1316 { 1317 struct vm_reserv_queue *queue; 1318 vm_paddr_t pa, size; 1319 vm_page_t m_ret; 1320 vm_reserv_t marker, rv, rvn; 1321 int hi, lo, posn, ppn_align, ppn_bound; 1322 1323 KASSERT(npages > 0, ("npages is 0")); 1324 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 1325 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 1326 if (npages > VM_LEVEL_0_NPAGES - 1) 1327 return (false); 1328 size = npages << PAGE_SHIFT; 1329 /* 1330 * Ensure that a free range starting at a boundary-multiple 1331 * doesn't include a boundary-multiple within it. Otherwise, 1332 * no boundary-constrained allocation is possible. 1333 */ 1334 if (!vm_addr_bound_ok(0, size, boundary)) 1335 return (NULL); 1336 marker = &vm_rvd[domain].marker; 1337 queue = &vm_rvd[domain].partpop; 1338 /* 1339 * Compute shifted alignment, boundary values for page-based 1340 * calculations. Constrain to range [1, VM_LEVEL_0_NPAGES] to 1341 * avoid overflow. 1342 */ 1343 ppn_align = (int)(ulmin(ulmax(PAGE_SIZE, alignment), 1344 VM_LEVEL_0_SIZE) >> PAGE_SHIFT); 1345 ppn_bound = boundary == 0 ? VM_LEVEL_0_NPAGES : 1346 (int)(MIN(MAX(PAGE_SIZE, boundary), 1347 VM_LEVEL_0_SIZE) >> PAGE_SHIFT); 1348 1349 vm_reserv_domain_scan_lock(domain); 1350 vm_reserv_domain_lock(domain); 1351 TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) { 1352 pa = VM_PAGE_TO_PHYS(&rv->pages[0]); 1353 if (pa + VM_LEVEL_0_SIZE - size < low) { 1354 /* This entire reservation is too low; go to next. */ 1355 continue; 1356 } 1357 if (pa + size > high) { 1358 /* This entire reservation is too high; go to next. */ 1359 continue; 1360 } 1361 if (!vm_addr_align_ok(pa, alignment)) { 1362 /* This entire reservation is unaligned; go to next. */ 1363 continue; 1364 } 1365 1366 if (vm_reserv_trylock(rv) == 0) { 1367 TAILQ_INSERT_AFTER(queue, rv, marker, partpopq); 1368 vm_reserv_domain_unlock(domain); 1369 vm_reserv_lock(rv); 1370 if (TAILQ_PREV(marker, vm_reserv_queue, partpopq) != 1371 rv) { 1372 vm_reserv_unlock(rv); 1373 vm_reserv_domain_lock(domain); 1374 rvn = TAILQ_NEXT(marker, partpopq); 1375 TAILQ_REMOVE(queue, marker, partpopq); 1376 continue; 1377 } 1378 vm_reserv_domain_lock(domain); 1379 TAILQ_REMOVE(queue, marker, partpopq); 1380 } 1381 vm_reserv_domain_unlock(domain); 1382 lo = (pa >= low) ? 0 : 1383 (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT); 1384 hi = (pa + VM_LEVEL_0_SIZE <= high) ? VM_LEVEL_0_NPAGES : 1385 (int)((high - pa) >> PAGE_SHIFT); 1386 posn = vm_reserv_find_contig(rv, (int)npages, lo, hi, 1387 ppn_align, ppn_bound); 1388 if (posn >= 0) { 1389 vm_reserv_domain_scan_unlock(domain); 1390 /* Allocate requested space */ 1391 rv->popcnt += npages; 1392 while (npages-- > 0) 1393 popmap_set(rv->popmap, posn + npages); 1394 vm_reserv_reclaim(rv); 1395 vm_reserv_unlock(rv); 1396 m_ret = &rv->pages[posn]; 1397 pa = VM_PAGE_TO_PHYS(m_ret); 1398 KASSERT(vm_addr_ok(pa, size, alignment, boundary), 1399 ("%s: adjusted address not aligned/bounded to " 1400 "%lx/%jx", 1401 __func__, alignment, (uintmax_t)boundary)); 1402 return (m_ret); 1403 } 1404 vm_reserv_domain_lock(domain); 1405 rvn = TAILQ_NEXT(rv, partpopq); 1406 vm_reserv_unlock(rv); 1407 } 1408 vm_reserv_domain_unlock(domain); 1409 vm_reserv_domain_scan_unlock(domain); 1410 return (NULL); 1411 } 1412 1413 /* 1414 * Transfers the reservation underlying the given page to a new object. 1415 * 1416 * The object must be locked. 1417 */ 1418 void 1419 vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object, 1420 vm_pindex_t old_object_offset) 1421 { 1422 vm_reserv_t rv; 1423 1424 VM_OBJECT_ASSERT_WLOCKED(new_object); 1425 rv = vm_reserv_from_page(m); 1426 if (rv->object == old_object) { 1427 vm_reserv_lock(rv); 1428 CTR6(KTR_VM, 1429 "%s: rv %p object %p new %p popcnt %d inpartpop %d", 1430 __FUNCTION__, rv, rv->object, new_object, rv->popcnt, 1431 rv->inpartpopq); 1432 if (rv->object == old_object) { 1433 vm_reserv_object_lock(old_object); 1434 rv->object = NULL; 1435 LIST_REMOVE(rv, objq); 1436 vm_reserv_object_unlock(old_object); 1437 vm_reserv_object_lock(new_object); 1438 rv->object = new_object; 1439 rv->pindex -= old_object_offset; 1440 LIST_INSERT_HEAD(&new_object->rvq, rv, objq); 1441 vm_reserv_object_unlock(new_object); 1442 } 1443 vm_reserv_unlock(rv); 1444 } 1445 } 1446 1447 /* 1448 * Returns the size (in bytes) of a reservation of the specified level. 1449 */ 1450 int 1451 vm_reserv_size(int level) 1452 { 1453 1454 switch (level) { 1455 case 0: 1456 return (VM_LEVEL_0_SIZE); 1457 case -1: 1458 return (PAGE_SIZE); 1459 default: 1460 return (0); 1461 } 1462 } 1463 1464 /* 1465 * Allocates the virtual and physical memory required by the reservation 1466 * management system's data structures, in particular, the reservation array. 1467 */ 1468 vm_paddr_t 1469 vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end) 1470 { 1471 vm_paddr_t new_end; 1472 vm_pindex_t count; 1473 size_t size; 1474 int i; 1475 1476 count = 0; 1477 for (i = 0; i < vm_phys_nsegs; i++) { 1478 #ifdef VM_PHYSSEG_SPARSE 1479 count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) - 1480 vm_phys_segs[i].start / VM_LEVEL_0_SIZE; 1481 #else 1482 count = MAX(count, 1483 howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE)); 1484 #endif 1485 } 1486 1487 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1488 #ifdef VM_PHYSSEG_SPARSE 1489 count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) - 1490 phys_avail[i] / VM_LEVEL_0_SIZE; 1491 #else 1492 count = MAX(count, 1493 howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE)); 1494 #endif 1495 } 1496 1497 /* 1498 * Calculate the size (in bytes) of the reservation array. Rounding up 1499 * for partial superpages at boundaries, as every small page is mapped 1500 * to an element in the reservation array based on its physical address. 1501 * Thus, the number of elements in the reservation array can be greater 1502 * than the number of superpages. 1503 */ 1504 size = count * sizeof(struct vm_reserv); 1505 1506 /* 1507 * Allocate and map the physical memory for the reservation array. The 1508 * next available virtual address is returned by reference. 1509 */ 1510 new_end = end - round_page(size); 1511 vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end, 1512 VM_PROT_READ | VM_PROT_WRITE); 1513 bzero(vm_reserv_array, size); 1514 1515 /* 1516 * Return the next available physical address. 1517 */ 1518 return (new_end); 1519 } 1520 1521 /* 1522 * Returns the superpage containing the given page. 1523 */ 1524 vm_page_t 1525 vm_reserv_to_superpage(vm_page_t m) 1526 { 1527 vm_reserv_t rv; 1528 1529 VM_OBJECT_ASSERT_LOCKED(m->object); 1530 rv = vm_reserv_from_page(m); 1531 if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES) 1532 m = rv->pages; 1533 else 1534 m = NULL; 1535 1536 return (m); 1537 } 1538 1539 #endif /* VM_NRESERVLEVEL > 0 */ 1540