/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sleepqueue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

struct vm_domain vm_dom[MAXMEMDOM];

DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);

struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];

struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
/* The following fields are protected by the domainset lock. */
domainset_t __exclusive_cache_line vm_min_domains;
domainset_t __exclusive_cache_line vm_severe_domains;
static int vm_min_waiters;
static int vm_severe_waiters;
static int vm_pageproc_waiters;

static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM page statistics");

static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
    CTLFLAG_RD, &pqstate_commit_retries,
    "Number of failed per-page atomic queue state updates");

static COUNTER_U64_DEFINE_EARLY(queue_ops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
    CTLFLAG_RD, &queue_ops,
    "Number of batched queue operations");

static COUNTER_U64_DEFINE_EARLY(queue_nops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
    CTLFLAG_RD, &queue_nops,
    "Number of batched queue operations with no effects");

/*
 * bogus page -- for I/O to/from partially complete buffers,
 * or for paging into sparsely invalid regions.
 */
vm_page_t bogus_page;

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;

struct bitset *vm_page_dump;
long vm_page_dump_pages;

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

static void vm_page_alloc_check(vm_page_t m);
static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
static bool vm_page_free_prep(vm_page_t m);
static void vm_page_free_toq(vm_page_t m);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
    const uint16_t nflag);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
    int req);
static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
    int flags);
static void vm_page_zone_release(void *arg, void **store, int cnt);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

static void
vm_page_init(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_NOFREE);
}

static int pgcache_zone_max_pcpu;
SYSCTL_INT(_vm, OID_AUTO, pgcache_zone_max_pcpu,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pgcache_zone_max_pcpu, 0,
    "Per-CPU page cache size");

/*
 * The cache page zone is initialized later since we need to be able to
 * allocate pages before UMA is fully initialized.
 */
static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	struct vm_pgcache *pgcache;
	int cache, domain, maxcache, pool;

	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &pgcache_zone_max_pcpu);
	maxcache = pgcache_zone_max_pcpu * mp_ncpus;
	for (domain = 0; domain < vm_ndomains; domain++) {
		vmd = VM_DOMAIN(domain);
		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
			pgcache = &vmd->vmd_pgcache[pool];
			pgcache->domain = domain;
			pgcache->pool = pool;
			pgcache->zone = uma_zcache_create("vm pgcache",
			    PAGE_SIZE, NULL, NULL, NULL, NULL,
			    vm_page_zone_import, vm_page_zone_release, pgcache,
			    UMA_ZONE_VM);

			/*
			 * Limit each pool's zone to 0.1% of the pages in the
			 * domain.
			 */
			cache = maxcache != 0 ? maxcache :
			    vmd->vmd_page_count / 1000;
			uma_zone_set_maxcache(pgcache->zone, cache);
		}
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
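
/*
 * Example (illustrative, not part of the original source): because the sysctl
 * above is declared CTLFLAG_RDTUN and fetched with TUNABLE_INT_FETCH(), the
 * per-CPU cache size can only be set as a boot-time tunable, e.g. in
 * loader.conf:
 *
 *	vm.pgcache_zone_max_pcpu="64"
 *
 * The value shown is arbitrary; the default of 0 makes each pool's zone fall
 * back to the 0.1%-of-domain limit computed above.
 */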

/*
 * Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K.
 */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

bool
vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
{
	struct vm_domain *vmd;
	vm_page_t m;
	bool found;

	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		return (true); /* page does not exist, no failure */

	vmd = VM_DOMAIN(vm_phys_domain(pa));
	vm_domain_free_lock(vmd);
	found = vm_phys_unfree_page(pa);
	vm_domain_free_unlock(vmd);
	if (found) {
		vm_domain_freecnt_inc(vmd, -1);
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		if (verbose)
			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
	}
	return (found);
}

/*
 *	vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	char *next;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		vm_page_blacklist_add(pa, bootverbose);
	}
}
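
/*
 * Illustrative example (not part of the original source): the blacklist is a
 * plain text list of physical addresses separated by spaces, commas, or
 * newlines.  Each entry is parsed by strtoq() with base 0, so decimal and
 * 0x-prefixed hexadecimal values are both accepted, and the result is
 * truncated to a page boundary.  The addresses below are made up:
 *
 *	vm.blacklist="0x7f654000,0x12a431000 2146959360"
 *
 * Such a list may be supplied through the "vm.blacklist" kernel environment
 * variable or as a preloaded "ram_blacklist" module, as handled below.
 */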

/*
 *	vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
	return;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Initialize a dummy page for use in scans of the specified paging queue.
 * In principle, this function only needs to set the flag PG_MARKER.
 * Nonetheless, it write busies the page as a safety precaution.
 */
void
vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->a.flags = aflags;
	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	marker->a.queue = queue;
}

static void
vm_page_domain_init(int domain)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int i;

	vmd = VM_DOMAIN(domain);
	bzero(vmd, sizeof(*vmd));
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(const char **,
	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	vmd->vmd_domain = domain;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
		pq->pq_pdpages = 0;
		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
	}
	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);

	/*
	 * inacthead is used to provide FIFO ordering for LRU-bypassing
	 * insertions.
	 */
	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
	    &vmd->vmd_inacthead, plinks.q);

	/*
	 * The clock pages are used to implement active queue scanning without
	 * requeues.  Scans start at clock[0], which is advanced after the scan
	 * ends.  When the two clock hands meet, they are reset and scanning
	 * resumes from the head of the queue.
	 */
	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[0], plinks.q);
	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[1], plinks.q);
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
void
vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool)
{
	m->object = NULL;
	m->ref_count = 0;
	m->busy_lock = VPB_FREED;
	m->flags = m->a.flags = 0;
	m->phys_addr = pa;
	m->a.queue = PQ_NONE;
	m->psind = 0;
	m->segind = segind;
	m->order = VM_NFREEORDER;
	m->pool = pool;
	m->valid = m->dirty = 0;
	pmap_page_init(m);
}

#ifndef PMAP_HAS_PAGE_ARRAY
static vm_paddr_t
vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
{
	vm_paddr_t new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 * However, because this page is allocated from KVM, out-of-bounds
	 * accesses using the direct map will not be trapped.
	 */
	*vaddr += PAGE_SIZE;

	/*
	 * Allocate physical memory for the page structures, and map it.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array_size = page_range;

	return (new_end);
}
#endif

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.  Allocates physical memory for
 *	bootstrapping UMA and some data structures that are used to manage
 *	physical pages.  Initializes these structures, and populates the free
 *	page queues.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	struct vm_phys_seg *seg;
	struct vm_domain *vmd;
	vm_page_t m;
	char *list, *listend;
	vm_paddr_t end, high_avail, low_avail, new_end, size;
	vm_paddr_t page_range __unused;
	vm_paddr_t last_pa, pa, startp, endp;
	u_long pagecount;
#if MINIDUMP_PAGE_TRACKING
	u_long vm_page_dump_size;
#endif
	int biggestone, i, segind;
#ifdef WITNESS
	vm_offset_t mapped;
	int witness_size;
#endif
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	long ii;
#endif
#ifdef VM_FREEPOOL_LAZYINIT
	int lazyinit;
#endif

	vaddr = round_page(vaddr);

	vm_phys_early_startup();
	biggestone = vm_phys_avail_largest();
	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(i);

	new_end = end;
#ifdef WITNESS
	witness_size = round_page(witness_startup_count());
	new_end -= witness_size;
	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, witness_size);
	witness_startup((void *)mapped);
#endif

#if MINIDUMP_PAGE_TRACKING
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	vm_page_dump_pages = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
		    dump_avail[i] / PAGE_SIZE;
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	}
	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#if MINIDUMP_STARTUP_PAGE_TRACKING
	/*
	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
	 * in a crash dump.  When pmap_map() uses the direct map, they are
	 * not automatically included.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
#else
	(void)last_pa;
#endif
	phys_avail[biggestone + 1] = new_end;
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use, taking into account the overhead of a page structure per page.
	 * In other words, solve
	 *	"available physical memory" - round_page(page_range *
	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE
	 * for page_range.
	 */
	low_avail = phys_avail[0];
	high_avail = phys_avail[1];
	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_avail)
			low_avail = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_avail)
			high_avail = vm_phys_segs[i].end;
	}
	/* Skip the first chunk.  It is already accounted for. */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i] < low_avail)
			low_avail = phys_avail[i];
		if (phys_avail[i + 1] > high_avail)
			high_avail = phys_avail[i + 1];
	}
	first_page = low_avail / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	size = 0;
	for (i = 0; i < vm_phys_nsegs; i++)
		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		size += phys_avail[i + 1] - phys_avail[i];
#elif defined(VM_PHYSSEG_DENSE)
	size = high_avail - low_avail;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

#ifdef PMAP_HAS_PAGE_ARRAY
	pmap_page_array_startup(size / PAGE_SIZE);
	biggestone = vm_phys_avail_largest();
	end = new_end = phys_avail[biggestone + 1];
#else
#ifdef VM_PHYSSEG_DENSE
	/*
	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
	 * the overhead of a page structure per page only if vm_page_array is
	 * allocated from the last physical memory chunk.
	 * Otherwise, we must allocate page structures representing the
	 * physical memory underlying vm_page_array, even though they will
	 * not be used.
	 */
	if (new_end != high_avail)
		page_range = size / PAGE_SIZE;
	else
#endif
	{
		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));

		/*
		 * If the partial bytes remaining are large enough for
		 * a page (PAGE_SIZE) without a corresponding
		 * 'struct vm_page', then new_end will contain an
		 * extra page after subtracting the length of the VM
		 * page array.  Compensate by subtracting an extra
		 * page from new_end.
		 */
		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
			if (new_end == high_avail)
				high_avail -= PAGE_SIZE;
			new_end -= PAGE_SIZE;
		}
	}
	end = new_end;
	new_end = vm_page_array_alloc(&vaddr, end, page_range);
#endif

#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate physical memory for the reservation management system's
	 * data structures, and map it.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end);
#endif
#if MINIDUMP_PAGE_TRACKING && MINIDUMP_STARTUP_PAGE_TRACKING
	/*
	 * Include vm_page_array and vm_reserv_array in a crash dump.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		if (vm_phys_avail_size(i) != 0)
			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

#ifdef VM_FREEPOOL_LAZYINIT
	lazyinit = 1;
	TUNABLE_INT_FETCH("debug.vm.lazy_page_init", &lazyinit);
#endif

	/*
	 * Initialize the page structures and add every available page to the
	 * physical memory allocator's free lists.
	 */
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	for (ii = 0; ii < vm_page_array_size; ii++) {
		m = &vm_page_array[ii];
		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0,
		    VM_FREEPOOL_DEFAULT);
		m->flags = PG_FICTITIOUS;
	}
#endif
	vm_cnt.v_page_count = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];

		/*
		 * If lazy vm_page initialization is not enabled, simply
		 * initialize all of the pages in the segment.  Otherwise, we
		 * only initialize:
		 * 1. Pages not covered by phys_avail[], since they might be
		 *    freed to the allocator at some future point, e.g., by
		 *    kmem_bootstrap_free().
		 * 2. The first page of each run of free pages handed to the
		 *    vm_phys allocator, which in turn defers initialization
		 *    of pages until they are needed.
		 * This avoids blocking the boot process for long periods, which
		 * may be relevant for VMs (which ought to boot as quickly as
		 * possible) and/or systems with large amounts of physical
		 * memory.
		 */
#ifdef VM_FREEPOOL_LAZYINIT
		if (lazyinit) {
			startp = seg->start;
			for (i = 0; phys_avail[i + 1] != 0; i += 2) {
				if (startp >= seg->end)
					break;

				if (phys_avail[i + 1] < startp)
					continue;
				if (phys_avail[i] <= startp) {
					startp = phys_avail[i + 1];
					continue;
				}

				m = vm_phys_seg_paddr_to_vm_page(seg, startp);
				for (endp = MIN(phys_avail[i], seg->end);
				    startp < endp; startp += PAGE_SIZE, m++) {
					vm_page_init_page(m, startp, segind,
					    VM_FREEPOOL_DEFAULT);
				}
			}
		} else
#endif
			for (m = seg->first_page, pa = seg->start;
			    pa < seg->end; m++, pa += PAGE_SIZE) {
				vm_page_init_page(m, pa, segind,
				    VM_FREEPOOL_DEFAULT);
			}

		/*
		 * Add the segment's pages that are covered by one of
		 * phys_avail's ranges to the free lists.
		 */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			if (seg->end <= phys_avail[i] ||
			    seg->start >= phys_avail[i + 1])
				continue;

			startp = MAX(seg->start, phys_avail[i]);
			endp = MIN(seg->end, phys_avail[i + 1]);
			pagecount = (u_long)atop(endp - startp);
			if (pagecount == 0)
				continue;

			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
#ifdef VM_FREEPOOL_LAZYINIT
			if (lazyinit) {
				vm_page_init_page(m, startp, segind,
				    VM_FREEPOOL_LAZYINIT);
			}
#endif
			vmd = VM_DOMAIN(seg->domain);
			vm_domain_free_lock(vmd);
			vm_phys_enqueue_contig(m, pagecount);
			vm_domain_free_unlock(vmd);
			vm_domain_freecnt_inc(vmd, pagecount);
			vm_cnt.v_page_count += (u_int)pagecount;
			vmd->vmd_page_count += (u_int)pagecount;
			vmd->vmd_segs |= 1UL << segind;
		}
	}

	/*
	 * Remove blacklisted pages from the physical memory allocator.
	 */
	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif

	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 *	vm_page_trybusy
 *
 *	Helper routine for grab functions to trylock busy.
 *
 *	Returns true on success and false on failure.
 */
static bool
vm_page_trybusy(vm_page_t m, int allocflags)
{

	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
		return (vm_page_trysbusy(m));
	else
		return (vm_page_tryxbusy(m));
}

/*
 *	vm_page_tryacquire
 *
 *	Helper routine for grab functions to trylock busy and wire.
 *
 *	Returns true on success and false on failure.
 */
static inline bool
vm_page_tryacquire(vm_page_t m, int allocflags)
{
	bool locked;

	locked = vm_page_trybusy(m, allocflags);
	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_wire(m);
	return (locked);
}

/*
 *	vm_page_busy_acquire:
 *
 *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
 *	and drop the object lock if necessary.
 */
bool
vm_page_busy_acquire(vm_page_t m, int allocflags)
{
	vm_object_t obj;
	bool locked;

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = atomic_load_ptr(&m->object);
	for (;;) {
		if (vm_page_tryacquire(m, allocflags))
			return (true);
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (false);
		if (obj != NULL)
			locked = VM_OBJECT_WOWNED(obj);
		else
			locked = false;
		MPASS(locked || vm_page_wired(m));
		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
		    locked) && locked)
			VM_OBJECT_WLOCK(obj);
		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
			return (false);
		KASSERT(m->object == obj || m->object == NULL,
		    ("vm_page_busy_acquire: page %p does not belong to %p",
		    m, obj));
	}
}

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;

	vm_page_assert_xbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (atomic_fcmpset_rel_int(&m->busy_lock,
		    &x, VPB_SHARERS_WORD(1)))
			break;
	}
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}

/*
 *	vm_page_busy_tryupgrade:
 *
 *	Attempt to upgrade a single shared busy into an exclusive busy.
 */
int
vm_page_busy_tryupgrade(vm_page_t m)
{
	u_int ce, x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	ce = VPB_CURTHREAD_EXCLUSIVE;
	for (;;) {
		if (VPB_SHARERS(x) > 1)
			return (0);
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_busy_tryupgrade: invalid lock state"));
		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    ce | (x & VPB_BIT_WAITERS)))
			continue;
		return (1);
	}
}

/*
 *	vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}

/*
 *	vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		KASSERT(x != VPB_FREED,
		    ("vm_page_sunbusy: Unlocking freed page."));
		if (VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_sunbusy: invalid lock state"));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) == 0)
			break;
		wakeup(m);
		break;
	}
}
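
/*
 * Illustrative sketch (not part of the original source): a typical consumer
 * of the busy interface looks the page up under the object lock, tries a
 * non-sleeping busy primitive, and falls back to vm_page_busy_sleep() with a
 * retry, re-checking the page identity because the sleep drops the object
 * lock.  The wait message "pgbusy" is an arbitrary placeholder:
 *
 *	VM_OBJECT_WLOCK(object);
 *	for (;;) {
 *		m = vm_page_lookup(object, pindex);
 *		if (m == NULL || vm_page_tryxbusy(m) != 0)
 *			break;
 *		if (vm_page_busy_sleep(m, "pgbusy", 0))
 *			VM_OBJECT_WLOCK(object);
 *	}
 *	... use the exclusively busied page ...
 *	VM_OBJECT_WUNLOCK(object);
 */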

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard-path of the busying mechanism.
 *
 *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
 *	will not sleep if the page is shared-busy.
 *
 *	The object lock must be held on entry.
 *
 *	Returns true if it slept and dropped the object lock, or false
 *	if there was no sleep and the lock is still held.
 */
bool
vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
{
	vm_object_t obj;

	obj = m->object;
	VM_OBJECT_ASSERT_LOCKED(obj);

	return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
	    true));
}

/*
 *	vm_page_busy_sleep_unlocked:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard-path of the busying mechanism.
 *
 *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
 *	will not sleep if the page is shared-busy.
 *
 *	The object lock must not be held on entry.  The operation will
 *	return if the page changes identity.
 */
void
vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags)
{
	VM_OBJECT_ASSERT_UNLOCKED(obj);

	(void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
}

/*
 *	_vm_page_busy_sleep:
 *
 *	Internal busy sleep function.  Verifies the page identity and
 *	lockstate against parameters.  Returns true if it sleeps and
 *	false otherwise.
 *
 *	allocflags uses VM_ALLOC_* flags to specify the lock required.
 *
 *	If locked is true the lock will be dropped for any true returns
 *	and held for any false returns.
 */
static bool
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)
{
	bool xsleep;
	u_int x;

	/*
	 * If the object is busy we must wait for that to drain to zero
	 * before trying the page again.
	 */
	if (obj != NULL && vm_object_busied(obj)) {
		if (locked)
			VM_OBJECT_DROP(obj);
		vm_object_busy_wait(obj, wmesg);
		return (true);
	}

	if (!vm_page_busied(m))
		return (false);

	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
	sleepq_lock(m);
	x = vm_page_busy_fetch(m);
	do {
		/*
		 * If the page changes objects or becomes unlocked we can
		 * simply return.
		 */
		if (x == VPB_UNBUSIED ||
		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
		    m->object != obj || m->pindex != pindex) {
			sleepq_release(m);
			return (false);
		}
		if ((x & VPB_BIT_WAITERS) != 0)
			break;
	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
	if (locked)
		VM_OBJECT_DROP(obj);
	DROP_GIANT();
	sleepq_add(m, NULL, wmesg, 0, 0);
	sleepq_wait(m, PVM);
	PICKUP_GIANT();
	return (true);
}

/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	vm_object_t obj;
	u_int x;

	obj = m->object;
	x = vm_page_busy_fetch(m);
	for (;;) {
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		/*
		 * Reduce the window for transient busies that will trigger
		 * false negatives in vm_page_ps_test().
		 */
		if (obj != NULL && vm_object_busied(obj))
			return (0);
		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    x + VPB_ONE_SHARER))
			break;
	}

	/* Refetch the object now that we're guaranteed that it is stable. */
	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_sunbusy(m);
		return (0);
	}
	return (1);
}

/*
 *	vm_page_tryxbusy:
 *
 *	Try to exclusive busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_tryxbusy(vm_page_t m)
{
	vm_object_t obj;

	if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
	    VPB_CURTHREAD_EXCLUSIVE) == 0)
		return (0);

	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_xunbusy(m);
		return (0);
	}
	return (1);
}

static void
vm_page_xunbusy_hard_tail(vm_page_t m)
{
	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* Wake the waiter. */
	wakeup(m);
}

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called when unbusy has failed because there is a waiter.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{
	vm_page_assert_xbusied(m);
	vm_page_xunbusy_hard_tail(m);
}

void
vm_page_xunbusy_hard_unchecked(vm_page_t m)
{
	vm_page_assert_xbusied_unchecked(m);
	vm_page_xunbusy_hard_tail(m);
}

static void
vm_page_busy_free(vm_page_t m)
{
	u_int x;

	atomic_thread_fence_rel();
	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{

	for (; count != 0; count--) {
		vm_page_unwire(*ma, PQ_ACTIVE);
		ma++;
	}
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->a.queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	/* Fictitious pages are unevictable. */
	m->ref_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}
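
/*
 * Illustrative sketch (not part of the original source): fictitious pages are
 * typically used by device pagers to describe memory that the VM system does
 * not manage.  A minimal life cycle, with an arbitrary physical address and
 * memory attribute, might look like:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_DEFAULT);
 *	... hand the page to a pager or map it ...
 *	vm_page_putfake(m);
 *
 * vm_page_updatefake() below changes the physical address and memory
 * attribute of an existing fictitious page in place.
 */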

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	vm_page_assert_xbusied(m);
	vm_page_busy_free(m);
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from a getpages request that
 * was optionally read ahead or behind.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	/* We shouldn't put invalid pages on queues. */
	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);
	vm_page_xunbusy_unchecked(m);
}

/*
 * Destroy the identity of an invalid page and free it if possible.
 * This is intended to be used when reading a page from backing store fails.
 */
void
vm_page_free_invalid(vm_page_t m)
{

	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
	KASSERT(m->object != NULL, ("page %p has no object", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * We may be attempting to free the page as part of the handling for an
	 * I/O error, in which case the page was xbusied by a different thread.
	 */
	vm_page_xbusy_claim(m);

	/*
	 * If someone has wired this page while the object lock
	 * was not held, then the thread that unwires is responsible
	 * for freeing the page.  Otherwise just free the page now.
	 * The wire count of this unmapped page cannot change while
	 * we have the page xbusy and the page's object wlocked.
	 */
	if (vm_page_remove(m))
		vm_page_free(m);
}

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* Refer to this operation by its public name. */
	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Insert the given page into the given object at the given pindex.  mpred is
 * used for memq linkage.  From vm_page_insert, lookup is true, mpred is
 * initially NULL, and this procedure looks it up.  From vm_page_insert_after,
 * lookup is false and mpred is known to the caller to be valid, and may be
 * NULL if this will be the page with the lowest pindex.
 *
 * The procedure is marked __always_inline to suggest to the compiler to
 * eliminate the lookup parameter and the associated alternate branch.
 */
static __always_inline int
vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, bool lookup)
{
	int error;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert: page %p already inserted", m));

	/*
	 * Record the object/offset pair in this page.
	 */
	m->object = object;
	m->pindex = pindex;
	m->ref_count |= VPRC_OBJREF;

	/*
	 * Add this page to the object's radix tree, and look up mpred if
	 * needed.
	 */
	if (lookup)
		error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
	else
		error = vm_radix_insert(&object->rtree, m);
	if (__predict_false(error != 0)) {
		m->object = NULL;
		m->pindex = 0;
		m->ref_count &= ~VPRC_OBJREF;
		return (1);
	}

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	vm_page_insert_radixdone(m, object, mpred);
	vm_pager_page_inserted(object, m);
	return (0);
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	return (vm_page_insert_lookup(m, object, pindex, NULL, true));
}

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	return (vm_page_insert_lookup(m, object, pindex, mpred, false));
}

/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
		KASSERT(TAILQ_NEXT(mpred, listq) == NULL ||
		    m->pindex < TAILQ_NEXT(mpred, listq)->pindex,
		    ("vm_page_insert_radixdone: pindex doesn't precede msucc"));
	} else {
		KASSERT(TAILQ_EMPTY(&object->memq) ||
		    m->pindex < TAILQ_FIRST(&object->memq)->pindex,
		    ("vm_page_insert_radixdone: no mpred but not first page"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's generation count.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 * Do the work to remove a page from its object.  The caller is responsible for
 * updating the page's fields to reflect this removal.
 */
static void
vm_page_object_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mrem __diagused;

	vm_page_assert_xbusied(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("page %p is missing its object ref", m));

	/* Deferred free of swap space. */
	if ((m->a.flags & PGA_SWAP_FREE) != 0)
		vm_pager_page_unswapped(m);

	vm_pager_page_removed(object, m);

	m->object = NULL;
	mrem = vm_radix_remove(&object->rtree, m->pindex);
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);
}

/*
 *	vm_page_remove:
 *
 *	Removes the specified page from its containing object, but does not
 *	invalidate any backing storage.  Returns true if the object's reference
 *	was the last reference to the page, and false otherwise.
 *
 *	The object must be locked and the page must be exclusively busied.
 *	The exclusive busy will be released on return.  If this is not the
 *	final ref and the caller does not hold a wire reference it may not
 *	continue to access the page.
 */
bool
vm_page_remove(vm_page_t m)
{
	bool dropped;

	dropped = vm_page_remove_xbusy(m);
	vm_page_xunbusy(m);

	return (dropped);
}

/*
 *	vm_page_remove_xbusy
 *
 *	Removes the page but leaves the xbusy held.  Returns true if this
 *	removed the final ref and false otherwise.
 */
bool
vm_page_remove_xbusy(vm_page_t m)
{

	vm_page_object_remove(m);
	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 *	vm_page_iter_init:
 *
 *	Initialize iterator for vm pages.
 */
void
vm_page_iter_init(struct pctrie_iter *pages, vm_object_t object)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	vm_radix_iter_init(pages, &object->rtree);
}

/*
 *	vm_page_iter_limit_init:
 *
 *	Initialize limited iterator for vm pages.
 */
void
vm_page_iter_limit_init(struct pctrie_iter *pages, vm_object_t object,
    vm_pindex_t limit)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	vm_radix_iter_limit_init(pages, &object->rtree, limit);
}

/*
 *	vm_page_iter_lookup:
 *
 *	Returns the page associated with the object/offset pair specified, and
 *	stores the path to its position; if none is found, NULL is returned.
 *
 *	The iter pctrie must be locked.
 */
vm_page_t
vm_page_iter_lookup(struct pctrie_iter *pages, vm_pindex_t pindex)
{

	return (vm_radix_iter_lookup(pages, pindex));
}

/*
 *	vm_page_lookup_unlocked:
 *
 *	Returns the page associated with the object/offset pair specified;
 *	if none is found, NULL is returned.  The page may no longer be
 *	present in the object at the time that this function returns.  Only
 *	useful for opportunistic checks such as inmem().
 */
vm_page_t
vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
{

	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
}

/*
 *	vm_page_relookup:
 *
 *	Returns a page that must already have been busied by
 *	the caller.  Used for bogus page replacement.
 */
vm_page_t
vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
	    m->object == object && m->pindex == pindex,
	    ("vm_page_relookup: Invalid page %p", m));
	return (m);
}

/*
 * This should only be used by lockless functions for releasing transient
 * incorrect acquires.  The page may have been freed after we acquired a
 * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
 * further to do.
 */
static void
vm_page_busy_release(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (x == VPB_FREED)
			break;
		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & VPB_BIT_SHARED) != 0 ||
		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
		    ("vm_page_busy_release: %p xbusy not owned.", m));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) != 0)
			wakeup(m);
		break;
	}
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}

/*
 *	vm_page_iter_lookup_ge:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.  Initializes
 *	the iterator to point to that page.
 *
 *	The iter pctrie must be locked.
 */
vm_page_t
vm_page_iter_lookup_ge(struct pctrie_iter *pages, vm_pindex_t pindex)
{

	return (vm_radix_iter_lookup_ge(pages, pindex));
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * Both pages must be exclusively busied on enter.  The old page is
 * unbusied on exit.
 *
 * A return value of true means mold is now free.  If this is not the
 * final ref and the caller does not hold a wire reference it may not
 * continue to access the page.
 */
static bool
vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret __diagused;
	bool dropped;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_assert_xbusied(mold);
	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
	    ("vm_page_replace: page %p already in object", mnew));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
	mret = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
	KASSERT((mold->oflags & VPO_UNMANAGED) ==
	    (mnew->oflags & VPO_UNMANAGED),
	    ("vm_page_replace: mismatched VPO_UNMANAGED"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);
	mold->object = NULL;

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but the generation count should
	 * change if the page is dirty.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
	vm_page_xunbusy(mold);

	return (dropped);
}

void
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{

	vm_page_assert_xbusied(mnew);

	if (vm_page_replace_hold(mnew, object, pindex, mold))
		vm_page_free(mold);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on m_prev and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_object_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
*/ 2007 m->pindex = new_pindex; 2008 m->object = new_object; 2009 2010 vm_page_insert_radixdone(m, new_object, mpred); 2011 vm_page_dirty(m); 2012 vm_pager_page_inserted(new_object, m); 2013 return (0); 2014 } 2015 2016 /* 2017 * vm_page_alloc: 2018 * 2019 * Allocate and return a page that is associated with the specified 2020 * object and offset pair. By default, this page is exclusive busied. 2021 * 2022 * The caller must always specify an allocation class. 2023 * 2024 * allocation classes: 2025 * VM_ALLOC_NORMAL normal process request 2026 * VM_ALLOC_SYSTEM system *really* needs a page 2027 * VM_ALLOC_INTERRUPT interrupt time request 2028 * 2029 * optional allocation flags: 2030 * VM_ALLOC_COUNT(number) the number of additional pages that the caller 2031 * intends to allocate 2032 * VM_ALLOC_NOBUSY do not exclusive busy the page 2033 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 2034 * VM_ALLOC_SBUSY shared busy the allocated page 2035 * VM_ALLOC_WIRED wire the allocated page 2036 * VM_ALLOC_ZERO prefer a zeroed page 2037 */ 2038 vm_page_t 2039 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) 2040 { 2041 2042 return (vm_page_alloc_after(object, pindex, req, 2043 vm_radix_lookup_le(&object->rtree, pindex))); 2044 } 2045 2046 vm_page_t 2047 vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2048 int req) 2049 { 2050 2051 return (vm_page_alloc_domain_after(object, pindex, domain, req, 2052 vm_radix_lookup_le(&object->rtree, pindex))); 2053 } 2054 2055 /* 2056 * Allocate a page in the specified object with the given page index. To 2057 * optimize insertion of the page into the object, the caller must also specify 2058 * the resident page in the object with largest index smaller than the given 2059 * page index, or NULL if no such page exists. 2060 */ 2061 vm_page_t 2062 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, 2063 int req, vm_page_t mpred) 2064 { 2065 struct vm_domainset_iter di; 2066 vm_page_t m; 2067 int domain; 2068 2069 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2070 do { 2071 m = vm_page_alloc_domain_after(object, pindex, domain, req, 2072 mpred); 2073 if (m != NULL) 2074 break; 2075 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2076 2077 return (m); 2078 } 2079 2080 /* 2081 * Returns true if the number of free pages exceeds the minimum 2082 * for the request class and false otherwise. 2083 */ 2084 static int 2085 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages) 2086 { 2087 u_int limit, old, new; 2088 2089 if (req_class == VM_ALLOC_INTERRUPT) 2090 limit = 0; 2091 else if (req_class == VM_ALLOC_SYSTEM) 2092 limit = vmd->vmd_interrupt_free_min; 2093 else 2094 limit = vmd->vmd_free_reserved; 2095 2096 /* 2097 * Attempt to reserve the pages. Fail if we're below the limit. 2098 */ 2099 limit += npages; 2100 old = atomic_load_int(&vmd->vmd_free_count); 2101 do { 2102 if (old < limit) 2103 return (0); 2104 new = old - npages; 2105 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0); 2106 2107 /* Wake the page daemon if we've crossed the threshold. */ 2108 if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old)) 2109 pagedaemon_wakeup(vmd->vmd_domain); 2110 2111 /* Only update bitsets on transitions. 
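 * That is, vm_domain_set() (which takes the domainset lock) runs only when
 * the free count crosses vmd_free_min or vmd_free_severe on the way down;
 * allocations that stay below a threshold do not re-enter that path.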
*/ 2112 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) || 2113 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe)) 2114 vm_domain_set(vmd); 2115 2116 return (1); 2117 } 2118 2119 int 2120 vm_domain_allocate(struct vm_domain *vmd, int req, int npages) 2121 { 2122 int req_class; 2123 2124 /* 2125 * The page daemon is allowed to dig deeper into the free page list. 2126 */ 2127 req_class = req & VM_ALLOC_CLASS_MASK; 2128 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2129 req_class = VM_ALLOC_SYSTEM; 2130 return (_vm_domain_allocate(vmd, req_class, npages)); 2131 } 2132 2133 vm_page_t 2134 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, 2135 int req, vm_page_t mpred) 2136 { 2137 struct vm_domain *vmd; 2138 vm_page_t m; 2139 int flags; 2140 2141 #define VPA_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \ 2142 VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY | \ 2143 VM_ALLOC_SBUSY | VM_ALLOC_WIRED | \ 2144 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | \ 2145 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK) 2146 KASSERT((req & ~VPA_FLAGS) == 0, 2147 ("invalid request %#x", req)); 2148 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2149 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2150 ("invalid request %#x", req)); 2151 KASSERT(mpred == NULL || mpred->pindex < pindex, 2152 ("mpred %p doesn't precede pindex 0x%jx", mpred, 2153 (uintmax_t)pindex)); 2154 VM_OBJECT_ASSERT_WLOCKED(object); 2155 2156 flags = 0; 2157 m = NULL; 2158 if (!vm_pager_can_alloc_page(object, pindex)) 2159 return (NULL); 2160 again: 2161 if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) { 2162 m = vm_page_alloc_nofree_domain(domain, req); 2163 if (m != NULL) 2164 goto found; 2165 } 2166 #if VM_NRESERVLEVEL > 0 2167 /* 2168 * Can we allocate the page from a reservation? 2169 */ 2170 if (vm_object_reserv(object) && 2171 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != 2172 NULL) { 2173 goto found; 2174 } 2175 #endif 2176 vmd = VM_DOMAIN(domain); 2177 if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) { 2178 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone, 2179 M_NOWAIT | M_NOVM); 2180 if (m != NULL) { 2181 flags |= PG_PCPU_CACHE; 2182 goto found; 2183 } 2184 } 2185 if (vm_domain_allocate(vmd, req, 1)) { 2186 /* 2187 * If not, allocate it from the free page queues. 2188 */ 2189 vm_domain_free_lock(vmd); 2190 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0); 2191 vm_domain_free_unlock(vmd); 2192 if (m == NULL) { 2193 vm_domain_freecnt_inc(vmd, 1); 2194 #if VM_NRESERVLEVEL > 0 2195 if (vm_reserv_reclaim_inactive(domain)) 2196 goto again; 2197 #endif 2198 } 2199 } 2200 if (m == NULL) { 2201 /* 2202 * Not allocatable, give up. 2203 */ 2204 if (vm_domain_alloc_fail(vmd, object, req)) 2205 goto again; 2206 return (NULL); 2207 } 2208 2209 /* 2210 * At this point we had better have found a good page. 2211 */ 2212 found: 2213 vm_page_dequeue(m); 2214 vm_page_alloc_check(m); 2215 2216 /* 2217 * Initialize the page. Only the PG_ZERO flag is inherited. 2218 */ 2219 flags |= m->flags & PG_ZERO; 2220 if ((req & VM_ALLOC_NODUMP) != 0) 2221 flags |= PG_NODUMP; 2222 if ((req & VM_ALLOC_NOFREE) != 0) 2223 flags |= PG_NOFREE; 2224 m->flags = flags; 2225 m->a.flags = 0; 2226 m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? 
VPO_UNMANAGED : 0; 2227 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) 2228 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2229 else if ((req & VM_ALLOC_SBUSY) != 0) 2230 m->busy_lock = VPB_SHARERS_WORD(1); 2231 else 2232 m->busy_lock = VPB_UNBUSIED; 2233 if (req & VM_ALLOC_WIRED) { 2234 vm_wire_add(1); 2235 m->ref_count = 1; 2236 } 2237 m->a.act_count = 0; 2238 2239 if (vm_page_insert_after(m, object, pindex, mpred)) { 2240 if (req & VM_ALLOC_WIRED) { 2241 vm_wire_sub(1); 2242 m->ref_count = 0; 2243 } 2244 KASSERT(m->object == NULL, ("page %p has object", m)); 2245 m->oflags = VPO_UNMANAGED; 2246 m->busy_lock = VPB_UNBUSIED; 2247 /* Don't change PG_ZERO. */ 2248 vm_page_free_toq(m); 2249 if (req & VM_ALLOC_WAITFAIL) { 2250 VM_OBJECT_WUNLOCK(object); 2251 vm_radix_wait(); 2252 VM_OBJECT_WLOCK(object); 2253 } 2254 return (NULL); 2255 } 2256 2257 /* Ignore device objects; the pager sets "memattr" for them. */ 2258 if (object->memattr != VM_MEMATTR_DEFAULT && 2259 (object->flags & OBJ_FICTITIOUS) == 0) 2260 pmap_page_set_memattr(m, object->memattr); 2261 2262 return (m); 2263 } 2264 2265 /* 2266 * vm_page_alloc_contig: 2267 * 2268 * Allocate a contiguous set of physical pages of the given size "npages" 2269 * from the free lists. All of the physical pages must be at or above 2270 * the given physical address "low" and below the given physical address 2271 * "high". The given value "alignment" determines the alignment of the 2272 * first physical page in the set. If the given value "boundary" is 2273 * non-zero, then the set of physical pages cannot cross any physical 2274 * address boundary that is a multiple of that value. Both "alignment" 2275 * and "boundary" must be a power of two. 2276 * 2277 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT, 2278 * then the memory attribute setting for the physical pages is configured 2279 * to the object's memory attribute setting. Otherwise, the memory 2280 * attribute setting for the physical pages is configured to "memattr", 2281 * overriding the object's memory attribute setting. However, if the 2282 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the 2283 * memory attribute setting for the physical pages cannot be configured 2284 * to VM_MEMATTR_DEFAULT. 2285 * 2286 * The specified object may not contain fictitious pages. 2287 * 2288 * The caller must always specify an allocation class. 
2289 * 2290 * allocation classes: 2291 * VM_ALLOC_NORMAL normal process request 2292 * VM_ALLOC_SYSTEM system *really* needs a page 2293 * VM_ALLOC_INTERRUPT interrupt time request 2294 * 2295 * optional allocation flags: 2296 * VM_ALLOC_NOBUSY do not exclusive busy the page 2297 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 2298 * VM_ALLOC_SBUSY shared busy the allocated page 2299 * VM_ALLOC_WIRED wire the allocated page 2300 * VM_ALLOC_ZERO prefer a zeroed page 2301 */ 2302 vm_page_t 2303 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, 2304 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2305 vm_paddr_t boundary, vm_memattr_t memattr) 2306 { 2307 struct vm_domainset_iter di; 2308 vm_page_t bounds[2]; 2309 vm_page_t m; 2310 int domain; 2311 int start_segind; 2312 2313 start_segind = -1; 2314 2315 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2316 do { 2317 m = vm_page_alloc_contig_domain(object, pindex, domain, req, 2318 npages, low, high, alignment, boundary, memattr); 2319 if (m != NULL) 2320 break; 2321 if (start_segind == -1) 2322 start_segind = vm_phys_lookup_segind(low); 2323 if (vm_phys_find_range(bounds, start_segind, domain, 2324 npages, low, high) == -1) { 2325 vm_domainset_iter_ignore(&di, domain); 2326 } 2327 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2328 2329 return (m); 2330 } 2331 2332 static vm_page_t 2333 vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low, 2334 vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 2335 { 2336 struct vm_domain *vmd; 2337 vm_page_t m_ret; 2338 2339 /* 2340 * Can we allocate the pages without the number of free pages falling 2341 * below the lower bound for the allocation class? 2342 */ 2343 vmd = VM_DOMAIN(domain); 2344 if (!vm_domain_allocate(vmd, req, npages)) 2345 return (NULL); 2346 /* 2347 * Try to allocate the pages from the free page queues. 2348 */ 2349 vm_domain_free_lock(vmd); 2350 m_ret = vm_phys_alloc_contig(domain, npages, low, high, 2351 alignment, boundary); 2352 vm_domain_free_unlock(vmd); 2353 if (m_ret != NULL) 2354 return (m_ret); 2355 #if VM_NRESERVLEVEL > 0 2356 /* 2357 * Try to break a reservation to allocate the pages. 
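 * This step is skipped when the caller passes VM_ALLOC_NORECLAIM; in that
 * case, or if reclamation fails, the pages reserved above by
 * vm_domain_allocate() are handed back via vm_domain_freecnt_inc() and
 * NULL is returned.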
2358 */ 2359 if ((req & VM_ALLOC_NORECLAIM) == 0) { 2360 m_ret = vm_reserv_reclaim_contig(domain, npages, low, 2361 high, alignment, boundary); 2362 if (m_ret != NULL) 2363 return (m_ret); 2364 } 2365 #endif 2366 vm_domain_freecnt_inc(vmd, npages); 2367 return (NULL); 2368 } 2369 2370 vm_page_t 2371 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2372 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2373 vm_paddr_t boundary, vm_memattr_t memattr) 2374 { 2375 vm_page_t m, m_ret, mpred; 2376 u_int busy_lock, flags, oflags; 2377 2378 #define VPAC_FLAGS (VPA_FLAGS | VM_ALLOC_NORECLAIM) 2379 KASSERT((req & ~VPAC_FLAGS) == 0, 2380 ("invalid request %#x", req)); 2381 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2382 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2383 ("invalid request %#x", req)); 2384 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) != 2385 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM), 2386 ("invalid request %#x", req)); 2387 VM_OBJECT_ASSERT_WLOCKED(object); 2388 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, 2389 ("vm_page_alloc_contig: object %p has fictitious pages", 2390 object)); 2391 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2392 2393 mpred = vm_radix_lookup_le(&object->rtree, pindex); 2394 KASSERT(mpred == NULL || mpred->pindex != pindex, 2395 ("vm_page_alloc_contig: pindex already allocated")); 2396 for (;;) { 2397 #if VM_NRESERVLEVEL > 0 2398 /* 2399 * Can we allocate the pages from a reservation? 2400 */ 2401 if (vm_object_reserv(object) && 2402 (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req, 2403 mpred, npages, low, high, alignment, boundary)) != NULL) { 2404 break; 2405 } 2406 #endif 2407 if ((m_ret = vm_page_find_contig_domain(domain, req, npages, 2408 low, high, alignment, boundary)) != NULL) 2409 break; 2410 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), object, req)) 2411 return (NULL); 2412 } 2413 2414 /* 2415 * Initialize the pages. Only the PG_ZERO flag is inherited. 2416 */ 2417 flags = PG_ZERO; 2418 if ((req & VM_ALLOC_NODUMP) != 0) 2419 flags |= PG_NODUMP; 2420 oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; 2421 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) 2422 busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2423 else if ((req & VM_ALLOC_SBUSY) != 0) 2424 busy_lock = VPB_SHARERS_WORD(1); 2425 else 2426 busy_lock = VPB_UNBUSIED; 2427 if ((req & VM_ALLOC_WIRED) != 0) 2428 vm_wire_add(npages); 2429 if (object->memattr != VM_MEMATTR_DEFAULT && 2430 memattr == VM_MEMATTR_DEFAULT) 2431 memattr = object->memattr; 2432 for (m = m_ret; m < &m_ret[npages]; m++) { 2433 vm_page_dequeue(m); 2434 vm_page_alloc_check(m); 2435 m->a.flags = 0; 2436 m->flags = (m->flags | PG_NODUMP) & flags; 2437 m->busy_lock = busy_lock; 2438 if ((req & VM_ALLOC_WIRED) != 0) 2439 m->ref_count = 1; 2440 m->a.act_count = 0; 2441 m->oflags = oflags; 2442 if (vm_page_insert_after(m, object, pindex, mpred)) { 2443 if ((req & VM_ALLOC_WIRED) != 0) 2444 vm_wire_sub(npages); 2445 KASSERT(m->object == NULL, 2446 ("page %p has object", m)); 2447 mpred = m; 2448 for (m = m_ret; m < &m_ret[npages]; m++) { 2449 if (m <= mpred && 2450 (req & VM_ALLOC_WIRED) != 0) 2451 m->ref_count = 0; 2452 m->oflags = VPO_UNMANAGED; 2453 m->busy_lock = VPB_UNBUSIED; 2454 /* Don't change PG_ZERO. 
*/ 2455 vm_page_free_toq(m); 2456 } 2457 if (req & VM_ALLOC_WAITFAIL) { 2458 VM_OBJECT_WUNLOCK(object); 2459 vm_radix_wait(); 2460 VM_OBJECT_WLOCK(object); 2461 } 2462 return (NULL); 2463 } 2464 mpred = m; 2465 if (memattr != VM_MEMATTR_DEFAULT) 2466 pmap_page_set_memattr(m, memattr); 2467 pindex++; 2468 } 2469 return (m_ret); 2470 } 2471 2472 /* 2473 * Allocate a physical page that is not intended to be inserted into a VM 2474 * object. 2475 */ 2476 vm_page_t 2477 vm_page_alloc_noobj_domain(int domain, int req) 2478 { 2479 struct vm_domain *vmd; 2480 vm_page_t m; 2481 int flags; 2482 2483 #define VPAN_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \ 2484 VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | \ 2485 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | \ 2486 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | \ 2487 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK) 2488 KASSERT((req & ~VPAN_FLAGS) == 0, 2489 ("invalid request %#x", req)); 2490 2491 flags = ((req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0) | 2492 ((req & VM_ALLOC_NOFREE) != 0 ? PG_NOFREE : 0); 2493 vmd = VM_DOMAIN(domain); 2494 again: 2495 if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) { 2496 m = vm_page_alloc_nofree_domain(domain, req); 2497 if (m != NULL) 2498 goto found; 2499 } 2500 2501 if (vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) { 2502 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone, 2503 M_NOWAIT | M_NOVM); 2504 if (m != NULL) { 2505 flags |= PG_PCPU_CACHE; 2506 goto found; 2507 } 2508 } 2509 2510 if (vm_domain_allocate(vmd, req, 1)) { 2511 vm_domain_free_lock(vmd); 2512 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0); 2513 vm_domain_free_unlock(vmd); 2514 if (m == NULL) { 2515 vm_domain_freecnt_inc(vmd, 1); 2516 #if VM_NRESERVLEVEL > 0 2517 if (vm_reserv_reclaim_inactive(domain)) 2518 goto again; 2519 #endif 2520 } 2521 } 2522 if (m == NULL) { 2523 if (vm_domain_alloc_fail(vmd, NULL, req)) 2524 goto again; 2525 return (NULL); 2526 } 2527 2528 found: 2529 vm_page_dequeue(m); 2530 vm_page_alloc_check(m); 2531 2532 /* 2533 * Consumers should not rely on a useful default pindex value. 2534 */ 2535 m->pindex = 0xdeadc0dedeadc0de; 2536 m->flags = (m->flags & PG_ZERO) | flags; 2537 m->a.flags = 0; 2538 m->oflags = VPO_UNMANAGED; 2539 m->busy_lock = VPB_UNBUSIED; 2540 if ((req & VM_ALLOC_WIRED) != 0) { 2541 vm_wire_add(1); 2542 m->ref_count = 1; 2543 } 2544 2545 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) 2546 pmap_zero_page(m); 2547 2548 return (m); 2549 } 2550 2551 #if VM_NRESERVLEVEL > 1 2552 #define VM_NOFREE_IMPORT_ORDER (VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER) 2553 #elif VM_NRESERVLEVEL > 0 2554 #define VM_NOFREE_IMPORT_ORDER VM_LEVEL_0_ORDER 2555 #else 2556 #define VM_NOFREE_IMPORT_ORDER 8 2557 #endif 2558 2559 /* 2560 * Allocate a single NOFREE page. 2561 * 2562 * This routine hands out NOFREE pages from higher-order 2563 * physical memory blocks in order to reduce memory fragmentation. 2564 * When a NOFREE for a given domain chunk is used up, 2565 * the routine will try to fetch a new one from the freelists 2566 * and discard the old one. 
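 *
 * For example, when no reservation levels are configured,
 * VM_NOFREE_IMPORT_ORDER is 8, so a fresh chunk of 1 << 8 == 256 pages is
 * taken from the default free pool and handed out one page per call until
 * "offs" reaches the end of the chunk.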
2567 */ 2568 static vm_page_t 2569 vm_page_alloc_nofree_domain(int domain, int req) 2570 { 2571 vm_page_t m; 2572 struct vm_domain *vmd; 2573 struct vm_nofreeq *nqp; 2574 2575 KASSERT((req & VM_ALLOC_NOFREE) != 0, ("invalid request %#x", req)); 2576 2577 vmd = VM_DOMAIN(domain); 2578 nqp = &vmd->vmd_nofreeq; 2579 vm_domain_free_lock(vmd); 2580 if (nqp->offs >= (1 << VM_NOFREE_IMPORT_ORDER) || nqp->ma == NULL) { 2581 if (!vm_domain_allocate(vmd, req, 2582 1 << VM_NOFREE_IMPORT_ORDER)) { 2583 vm_domain_free_unlock(vmd); 2584 return (NULL); 2585 } 2586 nqp->ma = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 2587 VM_NOFREE_IMPORT_ORDER); 2588 if (nqp->ma == NULL) { 2589 vm_domain_freecnt_inc(vmd, 1 << VM_NOFREE_IMPORT_ORDER); 2590 vm_domain_free_unlock(vmd); 2591 return (NULL); 2592 } 2593 nqp->offs = 0; 2594 } 2595 m = &nqp->ma[nqp->offs++]; 2596 vm_domain_free_unlock(vmd); 2597 VM_CNT_ADD(v_nofree_count, 1); 2598 2599 return (m); 2600 } 2601 2602 vm_page_t 2603 vm_page_alloc_noobj(int req) 2604 { 2605 struct vm_domainset_iter di; 2606 vm_page_t m; 2607 int domain; 2608 2609 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2610 do { 2611 m = vm_page_alloc_noobj_domain(domain, req); 2612 if (m != NULL) 2613 break; 2614 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2615 2616 return (m); 2617 } 2618 2619 vm_page_t 2620 vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low, 2621 vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 2622 vm_memattr_t memattr) 2623 { 2624 struct vm_domainset_iter di; 2625 vm_page_t m; 2626 int domain; 2627 2628 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2629 do { 2630 m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low, 2631 high, alignment, boundary, memattr); 2632 if (m != NULL) 2633 break; 2634 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2635 2636 return (m); 2637 } 2638 2639 vm_page_t 2640 vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages, 2641 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 2642 vm_memattr_t memattr) 2643 { 2644 vm_page_t m, m_ret; 2645 u_int flags; 2646 2647 #define VPANC_FLAGS (VPAN_FLAGS | VM_ALLOC_NORECLAIM) 2648 KASSERT((req & ~VPANC_FLAGS) == 0, 2649 ("invalid request %#x", req)); 2650 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) != 2651 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM), 2652 ("invalid request %#x", req)); 2653 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2654 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2655 ("invalid request %#x", req)); 2656 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2657 2658 while ((m_ret = vm_page_find_contig_domain(domain, req, npages, 2659 low, high, alignment, boundary)) == NULL) { 2660 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req)) 2661 return (NULL); 2662 } 2663 2664 /* 2665 * Initialize the pages. Only the PG_ZERO flag is inherited. 2666 */ 2667 flags = PG_ZERO; 2668 if ((req & VM_ALLOC_NODUMP) != 0) 2669 flags |= PG_NODUMP; 2670 if ((req & VM_ALLOC_WIRED) != 0) 2671 vm_wire_add(npages); 2672 for (m = m_ret; m < &m_ret[npages]; m++) { 2673 vm_page_dequeue(m); 2674 vm_page_alloc_check(m); 2675 2676 /* 2677 * Consumers should not rely on a useful default pindex value. 
2678 */ 2679 m->pindex = 0xdeadc0dedeadc0de; 2680 m->a.flags = 0; 2681 m->flags = (m->flags | PG_NODUMP) & flags; 2682 m->busy_lock = VPB_UNBUSIED; 2683 if ((req & VM_ALLOC_WIRED) != 0) 2684 m->ref_count = 1; 2685 m->a.act_count = 0; 2686 m->oflags = VPO_UNMANAGED; 2687 2688 /* 2689 * Zero the page before updating any mappings since the page is 2690 * not yet shared with any devices which might require the 2691 * non-default memory attribute. pmap_page_set_memattr() 2692 * flushes data caches before returning. 2693 */ 2694 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) 2695 pmap_zero_page(m); 2696 if (memattr != VM_MEMATTR_DEFAULT) 2697 pmap_page_set_memattr(m, memattr); 2698 } 2699 return (m_ret); 2700 } 2701 2702 /* 2703 * Check a page that has been freshly dequeued from a freelist. 2704 */ 2705 static void 2706 vm_page_alloc_check(vm_page_t m) 2707 { 2708 2709 KASSERT(m->object == NULL, ("page %p has object", m)); 2710 KASSERT(m->a.queue == PQ_NONE && 2711 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 2712 ("page %p has unexpected queue %d, flags %#x", 2713 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK))); 2714 KASSERT(m->ref_count == 0, ("page %p has references", m)); 2715 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m)); 2716 KASSERT(m->dirty == 0, ("page %p is dirty", m)); 2717 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 2718 ("page %p has unexpected memattr %d", 2719 m, pmap_page_get_memattr(m))); 2720 KASSERT(vm_page_none_valid(m), ("free page %p is valid", m)); 2721 pmap_vm_page_alloc_check(m); 2722 } 2723 2724 static int 2725 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags) 2726 { 2727 struct vm_domain *vmd; 2728 struct vm_pgcache *pgcache; 2729 int i; 2730 2731 pgcache = arg; 2732 vmd = VM_DOMAIN(pgcache->domain); 2733 2734 /* 2735 * The page daemon should avoid creating extra memory pressure since its 2736 * main purpose is to replenish the store of free pages. 2737 */ 2738 if (vmd->vmd_severeset || curproc == pageproc || 2739 !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt)) 2740 return (0); 2741 domain = vmd->vmd_domain; 2742 vm_domain_free_lock(vmd); 2743 i = vm_phys_alloc_npages(domain, pgcache->pool, cnt, 2744 (vm_page_t *)store); 2745 vm_domain_free_unlock(vmd); 2746 if (cnt != i) 2747 vm_domain_freecnt_inc(vmd, cnt - i); 2748 2749 return (i); 2750 } 2751 2752 static void 2753 vm_page_zone_release(void *arg, void **store, int cnt) 2754 { 2755 struct vm_domain *vmd; 2756 struct vm_pgcache *pgcache; 2757 vm_page_t m; 2758 int i; 2759 2760 pgcache = arg; 2761 vmd = VM_DOMAIN(pgcache->domain); 2762 vm_domain_free_lock(vmd); 2763 for (i = 0; i < cnt; i++) { 2764 m = (vm_page_t)store[i]; 2765 vm_phys_free_pages(m, 0); 2766 } 2767 vm_domain_free_unlock(vmd); 2768 vm_domain_freecnt_inc(vmd, cnt); 2769 } 2770 2771 #define VPSC_ANY 0 /* No restrictions. */ 2772 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ 2773 #define VPSC_NOSUPER 2 /* Skip superpages. */ 2774 2775 /* 2776 * vm_page_scan_contig: 2777 * 2778 * Scan vm_page_array[] between the specified entries "m_start" and 2779 * "m_end" for a run of contiguous physical pages that satisfy the 2780 * specified conditions, and return the lowest page in the run. The 2781 * specified "alignment" determines the alignment of the lowest physical 2782 * page in the run. If the specified "boundary" is non-zero, then the 2783 * run of physical pages cannot span a physical address that is a 2784 * multiple of "boundary". 
2785 * 2786 * "m_end" is never dereferenced, so it need not point to a vm_page 2787 * structure within vm_page_array[]. 2788 * 2789 * "npages" must be greater than zero. "m_start" and "m_end" must not 2790 * span a hole (or discontiguity) in the physical address space. Both 2791 * "alignment" and "boundary" must be a power of two. 2792 */ 2793 static vm_page_t 2794 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, 2795 u_long alignment, vm_paddr_t boundary, int options) 2796 { 2797 vm_object_t object; 2798 vm_paddr_t pa; 2799 vm_page_t m, m_run; 2800 #if VM_NRESERVLEVEL > 0 2801 int level; 2802 #endif 2803 int m_inc, order, run_ext, run_len; 2804 2805 KASSERT(npages > 0, ("npages is 0")); 2806 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2807 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2808 m_run = NULL; 2809 run_len = 0; 2810 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { 2811 KASSERT((m->flags & PG_MARKER) == 0, 2812 ("page %p is PG_MARKER", m)); 2813 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1, 2814 ("fictitious page %p has invalid ref count", m)); 2815 2816 /* 2817 * If the current page would be the start of a run, check its 2818 * physical address against the end, alignment, and boundary 2819 * conditions. If it doesn't satisfy these conditions, either 2820 * terminate the scan or advance to the next page that 2821 * satisfies the failed condition. 2822 */ 2823 if (run_len == 0) { 2824 KASSERT(m_run == NULL, ("m_run != NULL")); 2825 if (m + npages > m_end) 2826 break; 2827 pa = VM_PAGE_TO_PHYS(m); 2828 if (!vm_addr_align_ok(pa, alignment)) { 2829 m_inc = atop(roundup2(pa, alignment) - pa); 2830 continue; 2831 } 2832 if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) { 2833 m_inc = atop(roundup2(pa, boundary) - pa); 2834 continue; 2835 } 2836 } else 2837 KASSERT(m_run != NULL, ("m_run == NULL")); 2838 2839 retry: 2840 m_inc = 1; 2841 if (vm_page_wired(m)) 2842 run_ext = 0; 2843 #if VM_NRESERVLEVEL > 0 2844 else if ((level = vm_reserv_level(m)) >= 0 && 2845 (options & VPSC_NORESERV) != 0) { 2846 run_ext = 0; 2847 /* Advance to the end of the reservation. */ 2848 pa = VM_PAGE_TO_PHYS(m); 2849 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2850 pa); 2851 } 2852 #endif 2853 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2854 /* 2855 * The page is considered eligible for relocation if 2856 * and only if it could be laundered or reclaimed by 2857 * the page daemon. 2858 */ 2859 VM_OBJECT_RLOCK(object); 2860 if (object != m->object) { 2861 VM_OBJECT_RUNLOCK(object); 2862 goto retry; 2863 } 2864 /* Don't care: PG_NODUMP, PG_ZERO. */ 2865 if ((object->flags & OBJ_SWAP) == 0 && 2866 object->type != OBJT_VNODE) { 2867 run_ext = 0; 2868 #if VM_NRESERVLEVEL > 0 2869 } else if ((options & VPSC_NOSUPER) != 0 && 2870 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2871 run_ext = 0; 2872 /* Advance to the end of the superpage. */ 2873 pa = VM_PAGE_TO_PHYS(m); 2874 m_inc = atop(roundup2(pa + 1, 2875 vm_reserv_size(level)) - pa); 2876 #endif 2877 } else if (object->memattr == VM_MEMATTR_DEFAULT && 2878 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { 2879 /* 2880 * The page is allocated but eligible for 2881 * relocation. Extend the current run by one 2882 * page. 
2883 */ 2884 KASSERT(pmap_page_get_memattr(m) == 2885 VM_MEMATTR_DEFAULT, 2886 ("page %p has an unexpected memattr", m)); 2887 KASSERT((m->oflags & (VPO_SWAPINPROG | 2888 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2889 ("page %p has unexpected oflags", m)); 2890 /* Don't care: PGA_NOSYNC. */ 2891 run_ext = 1; 2892 } else 2893 run_ext = 0; 2894 VM_OBJECT_RUNLOCK(object); 2895 #if VM_NRESERVLEVEL > 0 2896 } else if (level >= 0) { 2897 /* 2898 * The page is reserved but not yet allocated. In 2899 * other words, it is still free. Extend the current 2900 * run by one page. 2901 */ 2902 run_ext = 1; 2903 #endif 2904 } else if ((order = m->order) < VM_NFREEORDER) { 2905 /* 2906 * The page is enqueued in the physical memory 2907 * allocator's free page queues. Moreover, it is the 2908 * first page in a power-of-two-sized run of 2909 * contiguous free pages. Add these pages to the end 2910 * of the current run, and jump ahead. 2911 */ 2912 run_ext = 1 << order; 2913 m_inc = 1 << order; 2914 } else { 2915 /* 2916 * Skip the page for one of the following reasons: (1) 2917 * It is enqueued in the physical memory allocator's 2918 * free page queues. However, it is not the first 2919 * page in a run of contiguous free pages. (This case 2920 * rarely occurs because the scan is performed in 2921 * ascending order.) (2) It is not reserved, and it is 2922 * transitioning from free to allocated. (Conversely, 2923 * the transition from allocated to free for managed 2924 * pages is blocked by the page busy lock.) (3) It is 2925 * allocated but not contained by an object and not 2926 * wired, e.g., allocated by Xen's balloon driver. 2927 */ 2928 run_ext = 0; 2929 } 2930 2931 /* 2932 * Extend or reset the current run of pages. 2933 */ 2934 if (run_ext > 0) { 2935 if (run_len == 0) 2936 m_run = m; 2937 run_len += run_ext; 2938 } else { 2939 if (run_len > 0) { 2940 m_run = NULL; 2941 run_len = 0; 2942 } 2943 } 2944 } 2945 if (run_len >= npages) 2946 return (m_run); 2947 return (NULL); 2948 } 2949 2950 /* 2951 * vm_page_reclaim_run: 2952 * 2953 * Try to relocate each of the allocated virtual pages within the 2954 * specified run of physical pages to a new physical address. Free the 2955 * physical pages underlying the relocated virtual pages. A virtual page 2956 * is relocatable if and only if it could be laundered or reclaimed by 2957 * the page daemon. Whenever possible, a virtual page is relocated to a 2958 * physical address above "high". 2959 * 2960 * Returns 0 if every physical page within the run was already free or 2961 * just freed by a successful relocation. Otherwise, returns a non-zero 2962 * value indicating why the last attempt to relocate a virtual page was 2963 * unsuccessful. 2964 * 2965 * "req_class" must be an allocation class. 2966 */ 2967 static int 2968 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run, 2969 vm_paddr_t high) 2970 { 2971 struct vm_domain *vmd; 2972 struct spglist free; 2973 vm_object_t object; 2974 vm_paddr_t pa; 2975 vm_page_t m, m_end, m_new; 2976 int error, order, req; 2977 2978 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 2979 ("req_class is not an allocation class")); 2980 SLIST_INIT(&free); 2981 error = 0; 2982 m = m_run; 2983 m_end = m_run + npages; 2984 for (; error == 0 && m < m_end; m++) { 2985 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 2986 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 2987 2988 /* 2989 * Racily check for wirings. Races are handled once the object 2990 * lock is held and the page is unmapped. 
2991 */ 2992 if (vm_page_wired(m)) 2993 error = EBUSY; 2994 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2995 /* 2996 * The page is relocated if and only if it could be 2997 * laundered or reclaimed by the page daemon. 2998 */ 2999 VM_OBJECT_WLOCK(object); 3000 /* Don't care: PG_NODUMP, PG_ZERO. */ 3001 if (m->object != object || 3002 ((object->flags & OBJ_SWAP) == 0 && 3003 object->type != OBJT_VNODE)) 3004 error = EINVAL; 3005 else if (object->memattr != VM_MEMATTR_DEFAULT) 3006 error = EINVAL; 3007 else if (vm_page_queue(m) != PQ_NONE && 3008 vm_page_tryxbusy(m) != 0) { 3009 if (vm_page_wired(m)) { 3010 vm_page_xunbusy(m); 3011 error = EBUSY; 3012 goto unlock; 3013 } 3014 KASSERT(pmap_page_get_memattr(m) == 3015 VM_MEMATTR_DEFAULT, 3016 ("page %p has an unexpected memattr", m)); 3017 KASSERT(m->oflags == 0, 3018 ("page %p has unexpected oflags", m)); 3019 /* Don't care: PGA_NOSYNC. */ 3020 if (!vm_page_none_valid(m)) { 3021 /* 3022 * First, try to allocate a new page 3023 * that is above "high". Failing 3024 * that, try to allocate a new page 3025 * that is below "m_run". Allocate 3026 * the new page between the end of 3027 * "m_run" and "high" only as a last 3028 * resort. 3029 */ 3030 req = req_class; 3031 if ((m->flags & PG_NODUMP) != 0) 3032 req |= VM_ALLOC_NODUMP; 3033 if (trunc_page(high) != 3034 ~(vm_paddr_t)PAGE_MASK) { 3035 m_new = 3036 vm_page_alloc_noobj_contig( 3037 req, 1, round_page(high), 3038 ~(vm_paddr_t)0, PAGE_SIZE, 3039 0, VM_MEMATTR_DEFAULT); 3040 } else 3041 m_new = NULL; 3042 if (m_new == NULL) { 3043 pa = VM_PAGE_TO_PHYS(m_run); 3044 m_new = 3045 vm_page_alloc_noobj_contig( 3046 req, 1, 0, pa - 1, 3047 PAGE_SIZE, 0, 3048 VM_MEMATTR_DEFAULT); 3049 } 3050 if (m_new == NULL) { 3051 pa += ptoa(npages); 3052 m_new = 3053 vm_page_alloc_noobj_contig( 3054 req, 1, pa, high, PAGE_SIZE, 3055 0, VM_MEMATTR_DEFAULT); 3056 } 3057 if (m_new == NULL) { 3058 vm_page_xunbusy(m); 3059 error = ENOMEM; 3060 goto unlock; 3061 } 3062 3063 /* 3064 * Unmap the page and check for new 3065 * wirings that may have been acquired 3066 * through a pmap lookup. 3067 */ 3068 if (object->ref_count != 0 && 3069 !vm_page_try_remove_all(m)) { 3070 vm_page_xunbusy(m); 3071 vm_page_free(m_new); 3072 error = EBUSY; 3073 goto unlock; 3074 } 3075 3076 /* 3077 * Replace "m" with the new page. For 3078 * vm_page_replace(), "m" must be busy 3079 * and dequeued. Finally, change "m" 3080 * as if vm_page_free() was called. 3081 */ 3082 m_new->a.flags = m->a.flags & 3083 ~PGA_QUEUE_STATE_MASK; 3084 KASSERT(m_new->oflags == VPO_UNMANAGED, 3085 ("page %p is managed", m_new)); 3086 m_new->oflags = 0; 3087 pmap_copy_page(m, m_new); 3088 m_new->valid = m->valid; 3089 m_new->dirty = m->dirty; 3090 m->flags &= ~PG_ZERO; 3091 vm_page_dequeue(m); 3092 if (vm_page_replace_hold(m_new, object, 3093 m->pindex, m) && 3094 vm_page_free_prep(m)) 3095 SLIST_INSERT_HEAD(&free, m, 3096 plinks.s.ss); 3097 3098 /* 3099 * The new page must be deactivated 3100 * before the object is unlocked. 
3101 */ 3102 vm_page_deactivate(m_new); 3103 } else { 3104 m->flags &= ~PG_ZERO; 3105 vm_page_dequeue(m); 3106 if (vm_page_free_prep(m)) 3107 SLIST_INSERT_HEAD(&free, m, 3108 plinks.s.ss); 3109 KASSERT(m->dirty == 0, 3110 ("page %p is dirty", m)); 3111 } 3112 } else 3113 error = EBUSY; 3114 unlock: 3115 VM_OBJECT_WUNLOCK(object); 3116 } else { 3117 MPASS(vm_page_domain(m) == domain); 3118 vmd = VM_DOMAIN(domain); 3119 vm_domain_free_lock(vmd); 3120 order = m->order; 3121 if (order < VM_NFREEORDER) { 3122 /* 3123 * The page is enqueued in the physical memory 3124 * allocator's free page queues. Moreover, it 3125 * is the first page in a power-of-two-sized 3126 * run of contiguous free pages. Jump ahead 3127 * to the last page within that run, and 3128 * continue from there. 3129 */ 3130 m += (1 << order) - 1; 3131 } 3132 #if VM_NRESERVLEVEL > 0 3133 else if (vm_reserv_is_page_free(m)) 3134 order = 0; 3135 #endif 3136 vm_domain_free_unlock(vmd); 3137 if (order == VM_NFREEORDER) 3138 error = EINVAL; 3139 } 3140 } 3141 if ((m = SLIST_FIRST(&free)) != NULL) { 3142 int cnt; 3143 3144 vmd = VM_DOMAIN(domain); 3145 cnt = 0; 3146 vm_domain_free_lock(vmd); 3147 do { 3148 MPASS(vm_page_domain(m) == domain); 3149 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 3150 vm_phys_free_pages(m, 0); 3151 cnt++; 3152 } while ((m = SLIST_FIRST(&free)) != NULL); 3153 vm_domain_free_unlock(vmd); 3154 vm_domain_freecnt_inc(vmd, cnt); 3155 } 3156 return (error); 3157 } 3158 3159 #define NRUNS 16 3160 3161 #define RUN_INDEX(count, nruns) ((count) % (nruns)) 3162 3163 #define MIN_RECLAIM 8 3164 3165 /* 3166 * vm_page_reclaim_contig: 3167 * 3168 * Reclaim allocated, contiguous physical memory satisfying the specified 3169 * conditions by relocating the virtual pages using that physical memory. 3170 * Returns 0 if reclamation is successful, ERANGE if the specified domain 3171 * can't possibly satisfy the reclamation request, or ENOMEM if not 3172 * currently able to reclaim the requested number of pages. Since 3173 * relocation requires the allocation of physical pages, reclamation may 3174 * fail with ENOMEM due to a shortage of free pages. When reclamation 3175 * fails in this manner, callers are expected to perform vm_wait() before 3176 * retrying a failed allocation operation, e.g., vm_page_alloc_contig(). 3177 * 3178 * The caller must always specify an allocation class through "req". 3179 * 3180 * allocation classes: 3181 * VM_ALLOC_NORMAL normal process request 3182 * VM_ALLOC_SYSTEM system *really* needs a page 3183 * VM_ALLOC_INTERRUPT interrupt time request 3184 * 3185 * The optional allocation flags are ignored. 3186 * 3187 * "npages" must be greater than zero. Both "alignment" and "boundary" 3188 * must be a power of two. 3189 */ 3190 int 3191 vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages, 3192 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 3193 int desired_runs) 3194 { 3195 struct vm_domain *vmd; 3196 vm_page_t bounds[2], m_run, _m_runs[NRUNS], *m_runs; 3197 u_long count, minalign, reclaimed; 3198 int error, i, min_reclaim, nruns, options, req_class; 3199 int segind, start_segind; 3200 int ret; 3201 3202 KASSERT(npages > 0, ("npages is 0")); 3203 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 3204 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 3205 3206 ret = ENOMEM; 3207 3208 /* 3209 * If the caller wants to reclaim multiple runs, try to allocate 3210 * space to store the runs. 
If that fails, fall back to the old 3211 * behavior of just reclaiming MIN_RECLAIM pages. 3212 */ 3213 if (desired_runs > 1) 3214 m_runs = malloc((NRUNS + desired_runs) * sizeof(*m_runs), 3215 M_TEMP, M_NOWAIT); 3216 else 3217 m_runs = NULL; 3218 3219 if (m_runs == NULL) { 3220 m_runs = _m_runs; 3221 nruns = NRUNS; 3222 } else { 3223 nruns = NRUNS + desired_runs - 1; 3224 } 3225 min_reclaim = MAX(desired_runs * npages, MIN_RECLAIM); 3226 3227 /* 3228 * The caller will attempt an allocation after some runs have been 3229 * reclaimed and added to the vm_phys buddy lists. Due to limitations 3230 * of vm_phys_alloc_contig(), round up the requested length to the next 3231 * power of two or maximum chunk size, and ensure that each run is 3232 * suitably aligned. 3233 */ 3234 minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1); 3235 npages = roundup2(npages, minalign); 3236 if (alignment < ptoa(minalign)) 3237 alignment = ptoa(minalign); 3238 3239 /* 3240 * The page daemon is allowed to dig deeper into the free page list. 3241 */ 3242 req_class = req & VM_ALLOC_CLASS_MASK; 3243 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 3244 req_class = VM_ALLOC_SYSTEM; 3245 3246 start_segind = vm_phys_lookup_segind(low); 3247 3248 /* 3249 * Return if the number of free pages cannot satisfy the requested 3250 * allocation. 3251 */ 3252 vmd = VM_DOMAIN(domain); 3253 count = vmd->vmd_free_count; 3254 if (count < npages + vmd->vmd_free_reserved || (count < npages + 3255 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 3256 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 3257 goto done; 3258 3259 /* 3260 * Scan up to three times, relaxing the restrictions ("options") on 3261 * the reclamation of reservations and superpages each time. 3262 */ 3263 for (options = VPSC_NORESERV;;) { 3264 bool phys_range_exists = false; 3265 3266 /* 3267 * Find the highest runs that satisfy the given constraints 3268 * and restrictions, and record them in "m_runs". 3269 */ 3270 count = 0; 3271 segind = start_segind; 3272 while ((segind = vm_phys_find_range(bounds, segind, domain, 3273 npages, low, high)) != -1) { 3274 phys_range_exists = true; 3275 while ((m_run = vm_page_scan_contig(npages, bounds[0], 3276 bounds[1], alignment, boundary, options))) { 3277 bounds[0] = m_run + npages; 3278 m_runs[RUN_INDEX(count, nruns)] = m_run; 3279 count++; 3280 } 3281 segind++; 3282 } 3283 3284 if (!phys_range_exists) { 3285 ret = ERANGE; 3286 goto done; 3287 } 3288 3289 /* 3290 * Reclaim the highest runs in LIFO (descending) order until 3291 * the number of reclaimed pages, "reclaimed", is at least 3292 * "min_reclaim". Reset "reclaimed" each time because each 3293 * reclamation is idempotent, and runs will (likely) recur 3294 * from one scan to the next as restrictions are relaxed. 3295 */ 3296 reclaimed = 0; 3297 for (i = 0; count > 0 && i < nruns; i++) { 3298 count--; 3299 m_run = m_runs[RUN_INDEX(count, nruns)]; 3300 error = vm_page_reclaim_run(req_class, domain, npages, 3301 m_run, high); 3302 if (error == 0) { 3303 reclaimed += npages; 3304 if (reclaimed >= min_reclaim) { 3305 ret = 0; 3306 goto done; 3307 } 3308 } 3309 } 3310 3311 /* 3312 * Either relax the restrictions on the next scan or return if 3313 * the last scan had no restrictions. 
3314 */ 3315 if (options == VPSC_NORESERV) 3316 options = VPSC_NOSUPER; 3317 else if (options == VPSC_NOSUPER) 3318 options = VPSC_ANY; 3319 else if (options == VPSC_ANY) { 3320 if (reclaimed != 0) 3321 ret = 0; 3322 goto done; 3323 } 3324 } 3325 done: 3326 if (m_runs != _m_runs) 3327 free(m_runs, M_TEMP); 3328 return (ret); 3329 } 3330 3331 int 3332 vm_page_reclaim_contig_domain(int domain, int req, u_long npages, 3333 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 3334 { 3335 return (vm_page_reclaim_contig_domain_ext(domain, req, npages, low, high, 3336 alignment, boundary, 1)); 3337 } 3338 3339 int 3340 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 3341 u_long alignment, vm_paddr_t boundary) 3342 { 3343 struct vm_domainset_iter di; 3344 int domain, ret, status; 3345 3346 ret = ERANGE; 3347 3348 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 3349 do { 3350 status = vm_page_reclaim_contig_domain(domain, req, npages, low, 3351 high, alignment, boundary); 3352 if (status == 0) 3353 return (0); 3354 else if (status == ERANGE) 3355 vm_domainset_iter_ignore(&di, domain); 3356 else { 3357 KASSERT(status == ENOMEM, ("Unrecognized error %d " 3358 "from vm_page_reclaim_contig_domain()", status)); 3359 ret = ENOMEM; 3360 } 3361 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 3362 3363 return (ret); 3364 } 3365 3366 /* 3367 * Set the domain in the appropriate page level domainset. 3368 */ 3369 void 3370 vm_domain_set(struct vm_domain *vmd) 3371 { 3372 3373 mtx_lock(&vm_domainset_lock); 3374 if (!vmd->vmd_minset && vm_paging_min(vmd)) { 3375 vmd->vmd_minset = 1; 3376 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); 3377 } 3378 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { 3379 vmd->vmd_severeset = 1; 3380 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); 3381 } 3382 mtx_unlock(&vm_domainset_lock); 3383 } 3384 3385 /* 3386 * Clear the domain from the appropriate page level domainset. 3387 */ 3388 void 3389 vm_domain_clear(struct vm_domain *vmd) 3390 { 3391 3392 mtx_lock(&vm_domainset_lock); 3393 if (vmd->vmd_minset && !vm_paging_min(vmd)) { 3394 vmd->vmd_minset = 0; 3395 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); 3396 if (vm_min_waiters != 0) { 3397 vm_min_waiters = 0; 3398 wakeup(&vm_min_domains); 3399 } 3400 } 3401 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { 3402 vmd->vmd_severeset = 0; 3403 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); 3404 if (vm_severe_waiters != 0) { 3405 vm_severe_waiters = 0; 3406 wakeup(&vm_severe_domains); 3407 } 3408 } 3409 3410 /* 3411 * If pageout daemon needs pages, then tell it that there are 3412 * some free. 3413 */ 3414 if (vmd->vmd_pageout_pages_needed && 3415 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { 3416 wakeup(&vmd->vmd_pageout_pages_needed); 3417 vmd->vmd_pageout_pages_needed = 0; 3418 } 3419 3420 /* See comments in vm_wait_doms(). */ 3421 if (vm_pageproc_waiters) { 3422 vm_pageproc_waiters = 0; 3423 wakeup(&vm_pageproc_waiters); 3424 } 3425 mtx_unlock(&vm_domainset_lock); 3426 } 3427 3428 /* 3429 * Wait for free pages to exceed the min threshold globally. 3430 */ 3431 void 3432 vm_wait_min(void) 3433 { 3434 3435 mtx_lock(&vm_domainset_lock); 3436 while (vm_page_count_min()) { 3437 vm_min_waiters++; 3438 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); 3439 } 3440 mtx_unlock(&vm_domainset_lock); 3441 } 3442 3443 /* 3444 * Wait for free pages to exceed the severe threshold globally. 
3445 */ 3446 void 3447 vm_wait_severe(void) 3448 { 3449 3450 mtx_lock(&vm_domainset_lock); 3451 while (vm_page_count_severe()) { 3452 vm_severe_waiters++; 3453 msleep(&vm_severe_domains, &vm_domainset_lock, PVM, 3454 "vmwait", 0); 3455 } 3456 mtx_unlock(&vm_domainset_lock); 3457 } 3458 3459 u_int 3460 vm_wait_count(void) 3461 { 3462 3463 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); 3464 } 3465 3466 int 3467 vm_wait_doms(const domainset_t *wdoms, int mflags) 3468 { 3469 int error; 3470 3471 error = 0; 3472 3473 /* 3474 * We use racey wakeup synchronization to avoid expensive global 3475 * locking for the pageproc when sleeping with a non-specific vm_wait. 3476 * To handle this, we only sleep for one tick in this instance. It 3477 * is expected that most allocations for the pageproc will come from 3478 * kmem or vm_page_grab* which will use the more specific and 3479 * race-free vm_wait_domain(). 3480 */ 3481 if (curproc == pageproc) { 3482 mtx_lock(&vm_domainset_lock); 3483 vm_pageproc_waiters++; 3484 error = msleep(&vm_pageproc_waiters, &vm_domainset_lock, 3485 PVM | PDROP | mflags, "pageprocwait", 1); 3486 } else { 3487 /* 3488 * XXX Ideally we would wait only until the allocation could 3489 * be satisfied. This condition can cause new allocators to 3490 * consume all freed pages while old allocators wait. 3491 */ 3492 mtx_lock(&vm_domainset_lock); 3493 if (vm_page_count_min_set(wdoms)) { 3494 if (pageproc == NULL) 3495 panic("vm_wait in early boot"); 3496 vm_min_waiters++; 3497 error = msleep(&vm_min_domains, &vm_domainset_lock, 3498 PVM | PDROP | mflags, "vmwait", 0); 3499 } else 3500 mtx_unlock(&vm_domainset_lock); 3501 } 3502 return (error); 3503 } 3504 3505 /* 3506 * vm_wait_domain: 3507 * 3508 * Sleep until free pages are available for allocation. 3509 * - Called in various places after failed memory allocations. 3510 */ 3511 void 3512 vm_wait_domain(int domain) 3513 { 3514 struct vm_domain *vmd; 3515 domainset_t wdom; 3516 3517 vmd = VM_DOMAIN(domain); 3518 vm_domain_free_assert_unlocked(vmd); 3519 3520 if (curproc == pageproc) { 3521 mtx_lock(&vm_domainset_lock); 3522 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { 3523 vmd->vmd_pageout_pages_needed = 1; 3524 msleep(&vmd->vmd_pageout_pages_needed, 3525 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); 3526 } else 3527 mtx_unlock(&vm_domainset_lock); 3528 } else { 3529 DOMAINSET_ZERO(&wdom); 3530 DOMAINSET_SET(vmd->vmd_domain, &wdom); 3531 vm_wait_doms(&wdom, 0); 3532 } 3533 } 3534 3535 static int 3536 vm_wait_flags(vm_object_t obj, int mflags) 3537 { 3538 struct domainset *d; 3539 3540 d = NULL; 3541 3542 /* 3543 * Carefully fetch pointers only once: the struct domainset 3544 * itself is ummutable but the pointer might change. 3545 */ 3546 if (obj != NULL) 3547 d = obj->domain.dr_policy; 3548 if (d == NULL) 3549 d = curthread->td_domain.dr_policy; 3550 3551 return (vm_wait_doms(&d->ds_mask, mflags)); 3552 } 3553 3554 /* 3555 * vm_wait: 3556 * 3557 * Sleep until free pages are available for allocation in the 3558 * affinity domains of the obj. If obj is NULL, the domain set 3559 * for the calling thread is used. 3560 * Called in various places after failed memory allocations. 3561 */ 3562 void 3563 vm_wait(vm_object_t obj) 3564 { 3565 (void)vm_wait_flags(obj, 0); 3566 } 3567 3568 int 3569 vm_wait_intr(vm_object_t obj) 3570 { 3571 return (vm_wait_flags(obj, PCATCH)); 3572 } 3573 3574 /* 3575 * vm_domain_alloc_fail: 3576 * 3577 * Called when a page allocation function fails. 
Informs the 3578 * pagedaemon and performs the requested wait. The object lock, if 3579 * given, must be held on entry and is still held on return; the 3580 * domain free lock must not be held. Returns an error when 3581 * retry is necessary. 3582 * 3583 */ 3584 static int 3585 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) 3586 { 3587 3588 vm_domain_free_assert_unlocked(vmd); 3589 3590 atomic_add_int(&vmd->vmd_pageout_deficit, 3591 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 3592 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { 3593 if (object != NULL) 3594 VM_OBJECT_WUNLOCK(object); 3595 vm_wait_domain(vmd->vmd_domain); 3596 if (object != NULL) 3597 VM_OBJECT_WLOCK(object); 3598 if (req & VM_ALLOC_WAITOK) 3599 return (EAGAIN); 3600 } 3601 3602 return (0); 3603 } 3604 3605 /* 3606 * vm_waitpfault: 3607 * 3608 * Sleep until free pages are available for allocation. 3609 * - Called only in vm_fault so that processes page faulting 3610 * can be easily tracked. 3611 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 3612 * processes will be able to grab memory first. Do not change 3613 * this balance without careful testing first. 3614 */ 3615 void 3616 vm_waitpfault(struct domainset *dset, int timo) 3617 { 3618 3619 /* 3620 * XXX Ideally we would wait only until the allocation could 3621 * be satisfied. This condition can cause new allocators to 3622 * consume all freed pages while old allocators wait. 3623 */ 3624 mtx_lock(&vm_domainset_lock); 3625 if (vm_page_count_min_set(&dset->ds_mask)) { 3626 vm_min_waiters++; 3627 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP, 3628 "pfault", timo); 3629 } else 3630 mtx_unlock(&vm_domainset_lock); 3631 } 3632 3633 static struct vm_pagequeue * 3634 _vm_page_pagequeue(vm_page_t m, uint8_t queue) 3635 { 3636 3637 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]); 3638 } 3639 3640 #ifdef INVARIANTS 3641 static struct vm_pagequeue * 3642 vm_page_pagequeue(vm_page_t m) 3643 { 3644 3645 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue)); 3646 } 3647 #endif 3648 3649 static __always_inline bool 3650 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3651 { 3652 vm_page_astate_t tmp; 3653 3654 tmp = *old; 3655 do { 3656 if (__predict_true(vm_page_astate_fcmpset(m, old, new))) 3657 return (true); 3658 counter_u64_add(pqstate_commit_retries, 1); 3659 } while (old->_bits == tmp._bits); 3660 3661 return (false); 3662 } 3663 3664 /* 3665 * Do the work of committing a queue state update that moves the page out of 3666 * its current queue. 3667 */ 3668 static bool 3669 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m, 3670 vm_page_astate_t *old, vm_page_astate_t new) 3671 { 3672 vm_page_t next; 3673 3674 vm_pagequeue_assert_locked(pq); 3675 KASSERT(vm_page_pagequeue(m) == pq, 3676 ("%s: queue %p does not match page %p", __func__, pq, m)); 3677 KASSERT(old->queue != PQ_NONE && new.queue != old->queue, 3678 ("%s: invalid queue indices %d %d", 3679 __func__, old->queue, new.queue)); 3680 3681 /* 3682 * Once the queue index of the page changes there is nothing 3683 * synchronizing with further updates to the page's physical 3684 * queue state. Therefore we must speculatively remove the page 3685 * from the queue now and be prepared to roll back if the queue 3686 * state update fails. If the page is not physically enqueued then 3687 * we just update its queue index.
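 * Concretely: if the fcmpset below loses a race and fails, the page is
 * relinked at its old position, the queue counter is restored, and false
 * is returned so that the caller can retry with the updated astate.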
3688 */ 3689 if ((old->flags & PGA_ENQUEUED) != 0) { 3690 new.flags &= ~PGA_ENQUEUED; 3691 next = TAILQ_NEXT(m, plinks.q); 3692 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3693 vm_pagequeue_cnt_dec(pq); 3694 if (!vm_page_pqstate_fcmpset(m, old, new)) { 3695 if (next == NULL) 3696 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3697 else 3698 TAILQ_INSERT_BEFORE(next, m, plinks.q); 3699 vm_pagequeue_cnt_inc(pq); 3700 return (false); 3701 } else { 3702 return (true); 3703 } 3704 } else { 3705 return (vm_page_pqstate_fcmpset(m, old, new)); 3706 } 3707 } 3708 3709 static bool 3710 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old, 3711 vm_page_astate_t new) 3712 { 3713 struct vm_pagequeue *pq; 3714 vm_page_astate_t as; 3715 bool ret; 3716 3717 pq = _vm_page_pagequeue(m, old->queue); 3718 3719 /* 3720 * The queue field and PGA_ENQUEUED flag are stable only so long as the 3721 * corresponding page queue lock is held. 3722 */ 3723 vm_pagequeue_lock(pq); 3724 as = vm_page_astate_load(m); 3725 if (__predict_false(as._bits != old->_bits)) { 3726 *old = as; 3727 ret = false; 3728 } else { 3729 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new); 3730 } 3731 vm_pagequeue_unlock(pq); 3732 return (ret); 3733 } 3734 3735 /* 3736 * Commit a queue state update that enqueues or requeues a page. 3737 */ 3738 static bool 3739 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m, 3740 vm_page_astate_t *old, vm_page_astate_t new) 3741 { 3742 struct vm_domain *vmd; 3743 3744 vm_pagequeue_assert_locked(pq); 3745 KASSERT(old->queue != PQ_NONE && new.queue == old->queue, 3746 ("%s: invalid queue indices %d %d", 3747 __func__, old->queue, new.queue)); 3748 3749 new.flags |= PGA_ENQUEUED; 3750 if (!vm_page_pqstate_fcmpset(m, old, new)) 3751 return (false); 3752 3753 if ((old->flags & PGA_ENQUEUED) != 0) 3754 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3755 else 3756 vm_pagequeue_cnt_inc(pq); 3757 3758 /* 3759 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE. In particular, if 3760 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be 3761 * applied, even if it was set first. 3762 */ 3763 if ((old->flags & PGA_REQUEUE_HEAD) != 0) { 3764 vmd = vm_pagequeue_domain(m); 3765 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE], 3766 ("%s: invalid page queue for page %p", __func__, m)); 3767 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 3768 } else { 3769 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3770 } 3771 return (true); 3772 } 3773 3774 /* 3775 * Commit a queue state update that encodes a request for a deferred queue 3776 * operation. 3777 */ 3778 static bool 3779 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old, 3780 vm_page_astate_t new) 3781 { 3782 3783 KASSERT(old->queue == new.queue || new.queue != PQ_NONE, 3784 ("%s: invalid state, queue %d flags %x", 3785 __func__, new.queue, new.flags)); 3786 3787 if (old->_bits != new._bits && 3788 !vm_page_pqstate_fcmpset(m, old, new)) 3789 return (false); 3790 vm_page_pqbatch_submit(m, new.queue); 3791 return (true); 3792 } 3793 3794 /* 3795 * A generic queue state update function. This handles more cases than the 3796 * specialized functions above. 
3797 */ 3798 bool 3799 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3800 { 3801 3802 if (old->_bits == new._bits) 3803 return (true); 3804 3805 if (old->queue != PQ_NONE && new.queue != old->queue) { 3806 if (!vm_page_pqstate_commit_dequeue(m, old, new)) 3807 return (false); 3808 if (new.queue != PQ_NONE) 3809 vm_page_pqbatch_submit(m, new.queue); 3810 } else { 3811 if (!vm_page_pqstate_fcmpset(m, old, new)) 3812 return (false); 3813 if (new.queue != PQ_NONE && 3814 ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0) 3815 vm_page_pqbatch_submit(m, new.queue); 3816 } 3817 return (true); 3818 } 3819 3820 /* 3821 * Apply deferred queue state updates to a page. 3822 */ 3823 static inline void 3824 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue) 3825 { 3826 vm_page_astate_t new, old; 3827 3828 CRITICAL_ASSERT(curthread); 3829 vm_pagequeue_assert_locked(pq); 3830 KASSERT(queue < PQ_COUNT, 3831 ("%s: invalid queue index %d", __func__, queue)); 3832 KASSERT(pq == _vm_page_pagequeue(m, queue), 3833 ("%s: page %p does not belong to queue %p", __func__, m, pq)); 3834 3835 for (old = vm_page_astate_load(m);;) { 3836 if (__predict_false(old.queue != queue || 3837 (old.flags & PGA_QUEUE_OP_MASK) == 0)) { 3838 counter_u64_add(queue_nops, 1); 3839 break; 3840 } 3841 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3842 ("%s: page %p is unmanaged", __func__, m)); 3843 3844 new = old; 3845 if ((old.flags & PGA_DEQUEUE) != 0) { 3846 new.flags &= ~PGA_QUEUE_OP_MASK; 3847 new.queue = PQ_NONE; 3848 if (__predict_true(_vm_page_pqstate_commit_dequeue(pq, 3849 m, &old, new))) { 3850 counter_u64_add(queue_ops, 1); 3851 break; 3852 } 3853 } else { 3854 new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD); 3855 if (__predict_true(_vm_page_pqstate_commit_requeue(pq, 3856 m, &old, new))) { 3857 counter_u64_add(queue_ops, 1); 3858 break; 3859 } 3860 } 3861 } 3862 } 3863 3864 static void 3865 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq, 3866 uint8_t queue) 3867 { 3868 int i; 3869 3870 for (i = 0; i < bq->bq_cnt; i++) 3871 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue); 3872 vm_batchqueue_init(bq); 3873 } 3874 3875 /* 3876 * vm_page_pqbatch_submit: [ internal use only ] 3877 * 3878 * Enqueue a page in the specified page queue's batched work queue. 3879 * The caller must have encoded the requested operation in the page 3880 * structure's a.flags field. 
3881 */ 3882 void 3883 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue) 3884 { 3885 struct vm_batchqueue *bq; 3886 struct vm_pagequeue *pq; 3887 int domain, slots_remaining; 3888 3889 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); 3890 3891 domain = vm_page_domain(m); 3892 critical_enter(); 3893 bq = DPCPU_PTR(pqbatch[domain][queue]); 3894 slots_remaining = vm_batchqueue_insert(bq, m); 3895 if (slots_remaining > (VM_BATCHQUEUE_SIZE >> 1)) { 3896 /* keep building the bq */ 3897 critical_exit(); 3898 return; 3899 } else if (slots_remaining > 0 ) { 3900 /* Try to process the bq if we can get the lock */ 3901 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; 3902 if (vm_pagequeue_trylock(pq)) { 3903 vm_pqbatch_process(pq, bq, queue); 3904 vm_pagequeue_unlock(pq); 3905 } 3906 critical_exit(); 3907 return; 3908 } 3909 critical_exit(); 3910 3911 /* if we make it here, the bq is full so wait for the lock */ 3912 3913 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; 3914 vm_pagequeue_lock(pq); 3915 critical_enter(); 3916 bq = DPCPU_PTR(pqbatch[domain][queue]); 3917 vm_pqbatch_process(pq, bq, queue); 3918 vm_pqbatch_process_page(pq, m, queue); 3919 vm_pagequeue_unlock(pq); 3920 critical_exit(); 3921 } 3922 3923 /* 3924 * vm_page_pqbatch_drain: [ internal use only ] 3925 * 3926 * Force all per-CPU page queue batch queues to be drained. This is 3927 * intended for use in severe memory shortages, to ensure that pages 3928 * do not remain stuck in the batch queues. 3929 */ 3930 void 3931 vm_page_pqbatch_drain(void) 3932 { 3933 struct thread *td; 3934 struct vm_domain *vmd; 3935 struct vm_pagequeue *pq; 3936 int cpu, domain, queue; 3937 3938 td = curthread; 3939 CPU_FOREACH(cpu) { 3940 thread_lock(td); 3941 sched_bind(td, cpu); 3942 thread_unlock(td); 3943 3944 for (domain = 0; domain < vm_ndomains; domain++) { 3945 vmd = VM_DOMAIN(domain); 3946 for (queue = 0; queue < PQ_COUNT; queue++) { 3947 pq = &vmd->vmd_pagequeues[queue]; 3948 vm_pagequeue_lock(pq); 3949 critical_enter(); 3950 vm_pqbatch_process(pq, 3951 DPCPU_PTR(pqbatch[domain][queue]), queue); 3952 critical_exit(); 3953 vm_pagequeue_unlock(pq); 3954 } 3955 } 3956 } 3957 thread_lock(td); 3958 sched_unbind(td); 3959 thread_unlock(td); 3960 } 3961 3962 /* 3963 * vm_page_dequeue_deferred: [ internal use only ] 3964 * 3965 * Request removal of the given page from its current page 3966 * queue. Physical removal from the queue may be deferred 3967 * indefinitely. 3968 */ 3969 void 3970 vm_page_dequeue_deferred(vm_page_t m) 3971 { 3972 vm_page_astate_t new, old; 3973 3974 old = vm_page_astate_load(m); 3975 do { 3976 if (old.queue == PQ_NONE) { 3977 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 3978 ("%s: page %p has unexpected queue state", 3979 __func__, m)); 3980 break; 3981 } 3982 new = old; 3983 new.flags |= PGA_DEQUEUE; 3984 } while (!vm_page_pqstate_commit_request(m, &old, new)); 3985 } 3986 3987 /* 3988 * vm_page_dequeue: 3989 * 3990 * Remove the page from whichever page queue it's in, if any, before 3991 * returning. 
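 *
 * This differs from vm_page_dequeue_deferred() above: the deferred variant
 * merely records a PGA_DEQUEUE request for the batching machinery, whereas
 * this function loops until the dequeue has actually been committed or the
 * page is observed to be in no queue at all.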
3992 */ 3993 void 3994 vm_page_dequeue(vm_page_t m) 3995 { 3996 vm_page_astate_t new, old; 3997 3998 old = vm_page_astate_load(m); 3999 do { 4000 if (old.queue == PQ_NONE) { 4001 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 4002 ("%s: page %p has unexpected queue state", 4003 __func__, m)); 4004 break; 4005 } 4006 new = old; 4007 new.flags &= ~PGA_QUEUE_OP_MASK; 4008 new.queue = PQ_NONE; 4009 } while (!vm_page_pqstate_commit_dequeue(m, &old, new)); 4010 4011 } 4012 4013 /* 4014 * Schedule the given page for insertion into the specified page queue. 4015 * Physical insertion of the page may be deferred indefinitely. 4016 */ 4017 static void 4018 vm_page_enqueue(vm_page_t m, uint8_t queue) 4019 { 4020 4021 KASSERT(m->a.queue == PQ_NONE && 4022 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 4023 ("%s: page %p is already enqueued", __func__, m)); 4024 KASSERT(m->ref_count > 0, 4025 ("%s: page %p does not carry any references", __func__, m)); 4026 4027 m->a.queue = queue; 4028 if ((m->a.flags & PGA_REQUEUE) == 0) 4029 vm_page_aflag_set(m, PGA_REQUEUE); 4030 vm_page_pqbatch_submit(m, queue); 4031 } 4032 4033 /* 4034 * vm_page_free_prep: 4035 * 4036 * Prepares the given page to be put on the free list, 4037 * disassociating it from any VM object. The caller may return 4038 * the page to the free list only if this function returns true. 4039 * 4040 * The object, if it exists, must be locked, and then the page must 4041 * be xbusy. Otherwise the page must be not busied. A managed 4042 * page must be unmapped. 4043 */ 4044 static bool 4045 vm_page_free_prep(vm_page_t m) 4046 { 4047 4048 /* 4049 * Synchronize with threads that have dropped a reference to this 4050 * page. 4051 */ 4052 atomic_thread_fence_acq(); 4053 4054 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) 4055 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) { 4056 uint64_t *p; 4057 int i; 4058 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 4059 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) 4060 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", 4061 m, i, (uintmax_t)*p)); 4062 } 4063 #endif 4064 KASSERT((m->flags & PG_NOFREE) == 0, 4065 ("%s: attempting to free a PG_NOFREE page", __func__)); 4066 if ((m->oflags & VPO_UNMANAGED) == 0) { 4067 KASSERT(!pmap_page_is_mapped(m), 4068 ("vm_page_free_prep: freeing mapped page %p", m)); 4069 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0, 4070 ("vm_page_free_prep: mapping flags set in page %p", m)); 4071 } else { 4072 KASSERT(m->a.queue == PQ_NONE, 4073 ("vm_page_free_prep: unmanaged page %p is queued", m)); 4074 } 4075 VM_CNT_INC(v_tfree); 4076 4077 if (m->object != NULL) { 4078 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) == 4079 ((m->object->flags & OBJ_UNMANAGED) != 0), 4080 ("vm_page_free_prep: managed flag mismatch for page %p", 4081 m)); 4082 vm_page_assert_xbusied(m); 4083 4084 /* 4085 * The object reference can be released without an atomic 4086 * operation. 4087 */ 4088 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4089 m->ref_count == VPRC_OBJREF, 4090 ("vm_page_free_prep: page %p has unexpected ref_count %u", 4091 m, m->ref_count)); 4092 vm_page_object_remove(m); 4093 m->ref_count -= VPRC_OBJREF; 4094 } else 4095 vm_page_assert_unbusied(m); 4096 4097 vm_page_busy_free(m); 4098 4099 /* 4100 * If fictitious remove object association and 4101 * return. 
4102 */ 4103 if ((m->flags & PG_FICTITIOUS) != 0) { 4104 KASSERT(m->ref_count == 1, 4105 ("fictitious page %p is referenced", m)); 4106 KASSERT(m->a.queue == PQ_NONE, 4107 ("fictitious page %p is queued", m)); 4108 return (false); 4109 } 4110 4111 /* 4112 * Pages need not be dequeued before they are returned to the physical 4113 * memory allocator, but they must at least be marked for a deferred 4114 * dequeue. 4115 */ 4116 if ((m->oflags & VPO_UNMANAGED) == 0) 4117 vm_page_dequeue_deferred(m); 4118 4119 m->valid = 0; 4120 vm_page_undirty(m); 4121 4122 if (m->ref_count != 0) 4123 panic("vm_page_free_prep: page %p has references", m); 4124 4125 /* 4126 * Restore the default memory attribute to the page. 4127 */ 4128 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 4129 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 4130 4131 #if VM_NRESERVLEVEL > 0 4132 /* 4133 * Determine whether the page belongs to a reservation. If the page was 4134 * allocated from a per-CPU cache, it cannot belong to a reservation, so 4135 * as an optimization, we avoid the check in that case. 4136 */ 4137 if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m)) 4138 return (false); 4139 #endif 4140 4141 return (true); 4142 } 4143 4144 /* 4145 * vm_page_free_toq: 4146 * 4147 * Returns the given page to the free list, disassociating it 4148 * from any VM object. 4149 * 4150 * The object must be locked. The page must be exclusively busied if it 4151 * belongs to an object. 4152 */ 4153 static void 4154 vm_page_free_toq(vm_page_t m) 4155 { 4156 struct vm_domain *vmd; 4157 uma_zone_t zone; 4158 4159 if (!vm_page_free_prep(m)) 4160 return; 4161 4162 vmd = vm_pagequeue_domain(m); 4163 zone = vmd->vmd_pgcache[m->pool].zone; 4164 if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) { 4165 uma_zfree(zone, m); 4166 return; 4167 } 4168 vm_domain_free_lock(vmd); 4169 vm_phys_free_pages(m, 0); 4170 vm_domain_free_unlock(vmd); 4171 vm_domain_freecnt_inc(vmd, 1); 4172 } 4173 4174 /* 4175 * vm_page_free_pages_toq: 4176 * 4177 * Returns a list of pages to the free list, disassociating it 4178 * from any VM object. In other words, this is equivalent to 4179 * calling vm_page_free_toq() for each page of a list of VM objects. 4180 */ 4181 int 4182 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count) 4183 { 4184 vm_page_t m; 4185 int count; 4186 4187 if (SLIST_EMPTY(free)) 4188 return (0); 4189 4190 count = 0; 4191 while ((m = SLIST_FIRST(free)) != NULL) { 4192 count++; 4193 SLIST_REMOVE_HEAD(free, plinks.s.ss); 4194 vm_page_free_toq(m); 4195 } 4196 4197 if (update_wire_count) 4198 vm_wire_sub(count); 4199 return (count); 4200 } 4201 4202 /* 4203 * Mark this page as wired down. For managed pages, this prevents reclamation 4204 * by the page daemon, or when the containing object, if any, is destroyed. 
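 *
 * Wirings are counted in the low-order bits of the page's ref_count (see
 * VPRC_WIRE_COUNT()).  The first wiring of a managed page also sets
 * PGA_DEQUEUE so that the page is lazily removed from any page queue it
 * may currently be on.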
4205 */ 4206 void 4207 vm_page_wire(vm_page_t m) 4208 { 4209 u_int old; 4210 4211 #ifdef INVARIANTS 4212 if (m->object != NULL && !vm_page_busied(m) && 4213 !vm_object_busied(m->object)) 4214 VM_OBJECT_ASSERT_LOCKED(m->object); 4215 #endif 4216 KASSERT((m->flags & PG_FICTITIOUS) == 0 || 4217 VPRC_WIRE_COUNT(m->ref_count) >= 1, 4218 ("vm_page_wire: fictitious page %p has zero wirings", m)); 4219 4220 old = atomic_fetchadd_int(&m->ref_count, 1); 4221 KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX, 4222 ("vm_page_wire: counter overflow for page %p", m)); 4223 if (VPRC_WIRE_COUNT(old) == 0) { 4224 if ((m->oflags & VPO_UNMANAGED) == 0) 4225 vm_page_aflag_set(m, PGA_DEQUEUE); 4226 vm_wire_add(1); 4227 } 4228 } 4229 4230 /* 4231 * Attempt to wire a mapped page following a pmap lookup of that page. 4232 * This may fail if a thread is concurrently tearing down mappings of the page. 4233 * The transient failure is acceptable because it translates to the 4234 * failure of the caller pmap_extract_and_hold(), which should be then 4235 * followed by the vm_fault() fallback, see e.g. vm_fault_quick_hold_pages(). 4236 */ 4237 bool 4238 vm_page_wire_mapped(vm_page_t m) 4239 { 4240 u_int old; 4241 4242 old = atomic_load_int(&m->ref_count); 4243 do { 4244 KASSERT(old > 0, 4245 ("vm_page_wire_mapped: wiring unreferenced page %p", m)); 4246 if ((old & VPRC_BLOCKED) != 0) 4247 return (false); 4248 } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1)); 4249 4250 if (VPRC_WIRE_COUNT(old) == 0) { 4251 if ((m->oflags & VPO_UNMANAGED) == 0) 4252 vm_page_aflag_set(m, PGA_DEQUEUE); 4253 vm_wire_add(1); 4254 } 4255 return (true); 4256 } 4257 4258 /* 4259 * Release a wiring reference to a managed page. If the page still belongs to 4260 * an object, update its position in the page queues to reflect the reference. 4261 * If the wiring was the last reference to the page, free the page. 4262 */ 4263 static void 4264 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse) 4265 { 4266 u_int old; 4267 4268 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4269 ("%s: page %p is unmanaged", __func__, m)); 4270 4271 /* 4272 * Update LRU state before releasing the wiring reference. 4273 * Use a release store when updating the reference count to 4274 * synchronize with vm_page_free_prep(). 4275 */ 4276 old = atomic_load_int(&m->ref_count); 4277 do { 4278 u_int count; 4279 4280 KASSERT(VPRC_WIRE_COUNT(old) > 0, 4281 ("vm_page_unwire: wire count underflow for page %p", m)); 4282 4283 count = old & ~VPRC_BLOCKED; 4284 if (count > VPRC_OBJREF + 1) { 4285 /* 4286 * The page has at least one other wiring reference. An 4287 * earlier iteration of this loop may have called 4288 * vm_page_release_toq() and cleared PGA_DEQUEUE, so 4289 * re-set it if necessary. 4290 */ 4291 if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0) 4292 vm_page_aflag_set(m, PGA_DEQUEUE); 4293 } else if (count == VPRC_OBJREF + 1) { 4294 /* 4295 * This is the last wiring. Clear PGA_DEQUEUE and 4296 * update the page's queue state to reflect the 4297 * reference. If the page does not belong to an object 4298 * (i.e., the VPRC_OBJREF bit is clear), we only need to 4299 * clear leftover queue state. 
4300 */ 4301 vm_page_release_toq(m, nqueue, noreuse); 4302 } else if (count == 1) { 4303 vm_page_aflag_clear(m, PGA_DEQUEUE); 4304 } 4305 } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1)); 4306 4307 if (VPRC_WIRE_COUNT(old) == 1) { 4308 vm_wire_sub(1); 4309 if (old == 1) 4310 vm_page_free(m); 4311 } 4312 } 4313 4314 /* 4315 * Release one wiring of the specified page, potentially allowing it to be 4316 * paged out. 4317 * 4318 * Only managed pages belonging to an object can be paged out. If the number 4319 * of wirings transitions to zero and the page is eligible for page out, then 4320 * the page is added to the specified paging queue. If the released wiring 4321 * represented the last reference to the page, the page is freed. 4322 */ 4323 void 4324 vm_page_unwire(vm_page_t m, uint8_t nqueue) 4325 { 4326 4327 KASSERT(nqueue < PQ_COUNT, 4328 ("vm_page_unwire: invalid queue %u request for page %p", 4329 nqueue, m)); 4330 4331 if ((m->oflags & VPO_UNMANAGED) != 0) { 4332 if (vm_page_unwire_noq(m) && m->ref_count == 0) 4333 vm_page_free(m); 4334 return; 4335 } 4336 vm_page_unwire_managed(m, nqueue, false); 4337 } 4338 4339 /* 4340 * Unwire a page without (re-)inserting it into a page queue. It is up 4341 * to the caller to enqueue, requeue, or free the page as appropriate. 4342 * In most cases involving managed pages, vm_page_unwire() should be used 4343 * instead. 4344 */ 4345 bool 4346 vm_page_unwire_noq(vm_page_t m) 4347 { 4348 u_int old; 4349 4350 old = vm_page_drop(m, 1); 4351 KASSERT(VPRC_WIRE_COUNT(old) != 0, 4352 ("%s: counter underflow for page %p", __func__, m)); 4353 KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1, 4354 ("%s: missing ref on fictitious page %p", __func__, m)); 4355 4356 if (VPRC_WIRE_COUNT(old) > 1) 4357 return (false); 4358 if ((m->oflags & VPO_UNMANAGED) == 0) 4359 vm_page_aflag_clear(m, PGA_DEQUEUE); 4360 vm_wire_sub(1); 4361 return (true); 4362 } 4363 4364 /* 4365 * Ensure that the page ends up in the specified page queue. If the page is 4366 * active or being moved to the active queue, ensure that its act_count is 4367 * at least ACT_INIT but do not otherwise mess with it. 4368 */ 4369 static __always_inline void 4370 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag) 4371 { 4372 vm_page_astate_t old, new; 4373 4374 KASSERT(m->ref_count > 0, 4375 ("%s: page %p does not carry any references", __func__, m)); 4376 KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD, 4377 ("%s: invalid flags %x", __func__, nflag)); 4378 4379 if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m)) 4380 return; 4381 4382 old = vm_page_astate_load(m); 4383 do { 4384 if ((old.flags & PGA_DEQUEUE) != 0) 4385 break; 4386 new = old; 4387 new.flags &= ~PGA_QUEUE_OP_MASK; 4388 if (nqueue == PQ_ACTIVE) 4389 new.act_count = max(old.act_count, ACT_INIT); 4390 if (old.queue == nqueue) { 4391 /* 4392 * There is no need to requeue pages already in the 4393 * active queue. 4394 */ 4395 if (nqueue != PQ_ACTIVE || 4396 (old.flags & PGA_ENQUEUED) == 0) 4397 new.flags |= nflag; 4398 } else { 4399 new.flags |= nflag; 4400 new.queue = nqueue; 4401 } 4402 } while (!vm_page_pqstate_commit(m, &old, new)); 4403 } 4404 4405 /* 4406 * Put the specified page on the active list (if appropriate). 
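 *
 * This is a thin wrapper around vm_page_mvqueue(): requests for unmanaged
 * or wired pages are ignored there, and a page that is already active only
 * has its act_count raised to at least ACT_INIT.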
4407 */ 4408 void 4409 vm_page_activate(vm_page_t m) 4410 { 4411 4412 vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE); 4413 } 4414 4415 /* 4416 * Move the specified page to the tail of the inactive queue, or requeue 4417 * the page if it is already in the inactive queue. 4418 */ 4419 void 4420 vm_page_deactivate(vm_page_t m) 4421 { 4422 4423 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE); 4424 } 4425 4426 void 4427 vm_page_deactivate_noreuse(vm_page_t m) 4428 { 4429 4430 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD); 4431 } 4432 4433 /* 4434 * Put a page in the laundry, or requeue it if it is already there. 4435 */ 4436 void 4437 vm_page_launder(vm_page_t m) 4438 { 4439 4440 vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE); 4441 } 4442 4443 /* 4444 * Put a page in the PQ_UNSWAPPABLE holding queue. 4445 */ 4446 void 4447 vm_page_unswappable(vm_page_t m) 4448 { 4449 4450 VM_OBJECT_ASSERT_LOCKED(m->object); 4451 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4452 ("page %p already unswappable", m)); 4453 4454 vm_page_dequeue(m); 4455 vm_page_enqueue(m, PQ_UNSWAPPABLE); 4456 } 4457 4458 /* 4459 * Release a page back to the page queues in preparation for unwiring. 4460 */ 4461 static void 4462 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse) 4463 { 4464 vm_page_astate_t old, new; 4465 uint16_t nflag; 4466 4467 /* 4468 * Use a check of the valid bits to determine whether we should 4469 * accelerate reclamation of the page. The object lock might not be 4470 * held here, in which case the check is racy. At worst we will either 4471 * accelerate reclamation of a valid page and violate LRU, or 4472 * unnecessarily defer reclamation of an invalid page. 4473 * 4474 * If we were asked to not cache the page, place it near the head of the 4475 * inactive queue so that is reclaimed sooner. 4476 */ 4477 if (noreuse || vm_page_none_valid(m)) { 4478 nqueue = PQ_INACTIVE; 4479 nflag = PGA_REQUEUE_HEAD; 4480 } else { 4481 nflag = PGA_REQUEUE; 4482 } 4483 4484 old = vm_page_astate_load(m); 4485 do { 4486 new = old; 4487 4488 /* 4489 * If the page is already in the active queue and we are not 4490 * trying to accelerate reclamation, simply mark it as 4491 * referenced and avoid any queue operations. 4492 */ 4493 new.flags &= ~PGA_QUEUE_OP_MASK; 4494 if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE && 4495 (old.flags & PGA_ENQUEUED) != 0) 4496 new.flags |= PGA_REFERENCED; 4497 else { 4498 new.flags |= nflag; 4499 new.queue = nqueue; 4500 } 4501 } while (!vm_page_pqstate_commit(m, &old, new)); 4502 } 4503 4504 /* 4505 * Unwire a page and either attempt to free it or re-add it to the page queues. 4506 */ 4507 void 4508 vm_page_release(vm_page_t m, int flags) 4509 { 4510 vm_object_t object; 4511 4512 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4513 ("vm_page_release: page %p is unmanaged", m)); 4514 4515 if ((flags & VPR_TRYFREE) != 0) { 4516 for (;;) { 4517 object = atomic_load_ptr(&m->object); 4518 if (object == NULL) 4519 break; 4520 /* Depends on type-stability. */ 4521 if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object)) 4522 break; 4523 if (object == m->object) { 4524 vm_page_release_locked(m, flags); 4525 VM_OBJECT_WUNLOCK(object); 4526 return; 4527 } 4528 VM_OBJECT_WUNLOCK(object); 4529 } 4530 } 4531 vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0); 4532 } 4533 4534 /* See vm_page_release(). 
*/ 4535 void 4536 vm_page_release_locked(vm_page_t m, int flags) 4537 { 4538 4539 VM_OBJECT_ASSERT_WLOCKED(m->object); 4540 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4541 ("vm_page_release_locked: page %p is unmanaged", m)); 4542 4543 if (vm_page_unwire_noq(m)) { 4544 if ((flags & VPR_TRYFREE) != 0 && 4545 (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) && 4546 m->dirty == 0 && vm_page_tryxbusy(m)) { 4547 /* 4548 * An unlocked lookup may have wired the page before the 4549 * busy lock was acquired, in which case the page must 4550 * not be freed. 4551 */ 4552 if (__predict_true(!vm_page_wired(m))) { 4553 vm_page_free(m); 4554 return; 4555 } 4556 vm_page_xunbusy(m); 4557 } else { 4558 vm_page_release_toq(m, PQ_INACTIVE, flags != 0); 4559 } 4560 } 4561 } 4562 4563 static bool 4564 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t)) 4565 { 4566 u_int old; 4567 4568 KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0, 4569 ("vm_page_try_blocked_op: page %p has no object", m)); 4570 KASSERT(vm_page_busied(m), 4571 ("vm_page_try_blocked_op: page %p is not busy", m)); 4572 VM_OBJECT_ASSERT_LOCKED(m->object); 4573 4574 old = atomic_load_int(&m->ref_count); 4575 do { 4576 KASSERT(old != 0, 4577 ("vm_page_try_blocked_op: page %p has no references", m)); 4578 KASSERT((old & VPRC_BLOCKED) == 0, 4579 ("vm_page_try_blocked_op: page %p blocks wirings", m)); 4580 if (VPRC_WIRE_COUNT(old) != 0) 4581 return (false); 4582 } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED)); 4583 4584 (op)(m); 4585 4586 /* 4587 * If the object is read-locked, new wirings may be created via an 4588 * object lookup. 4589 */ 4590 old = vm_page_drop(m, VPRC_BLOCKED); 4591 KASSERT(!VM_OBJECT_WOWNED(m->object) || 4592 old == (VPRC_BLOCKED | VPRC_OBJREF), 4593 ("vm_page_try_blocked_op: unexpected refcount value %u for %p", 4594 old, m)); 4595 return (true); 4596 } 4597 4598 /* 4599 * Atomically check for wirings and remove all mappings of the page. 4600 */ 4601 bool 4602 vm_page_try_remove_all(vm_page_t m) 4603 { 4604 4605 return (vm_page_try_blocked_op(m, pmap_remove_all)); 4606 } 4607 4608 /* 4609 * Atomically check for wirings and remove all writeable mappings of the page. 4610 */ 4611 bool 4612 vm_page_try_remove_write(vm_page_t m) 4613 { 4614 4615 return (vm_page_try_blocked_op(m, pmap_remove_write)); 4616 } 4617 4618 /* 4619 * vm_page_advise 4620 * 4621 * Apply the specified advice to the given page. 4622 */ 4623 void 4624 vm_page_advise(vm_page_t m, int advice) 4625 { 4626 4627 VM_OBJECT_ASSERT_WLOCKED(m->object); 4628 vm_page_assert_xbusied(m); 4629 4630 if (advice == MADV_FREE) 4631 /* 4632 * Mark the page clean. This will allow the page to be freed 4633 * without first paging it out. MADV_FREE pages are often 4634 * quickly reused by malloc(3), so we do not do anything that 4635 * would result in a page fault on a later access. 4636 */ 4637 vm_page_undirty(m); 4638 else if (advice != MADV_DONTNEED) { 4639 if (advice == MADV_WILLNEED) 4640 vm_page_activate(m); 4641 return; 4642 } 4643 4644 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) 4645 vm_page_dirty(m); 4646 4647 /* 4648 * Clear any references to the page. Otherwise, the page daemon will 4649 * immediately reactivate the page. 4650 */ 4651 vm_page_aflag_clear(m, PGA_REFERENCED); 4652 4653 /* 4654 * Place clean pages near the head of the inactive queue rather than 4655 * the tail, thus defeating the queue's LRU operation and ensuring that 4656 * the page will be reused quickly. 
Dirty pages not already in the
4657 * laundry are moved there.
4658 */
4659 if (m->dirty == 0)
4660 vm_page_deactivate_noreuse(m);
4661 else if (!vm_page_in_laundry(m))
4662 vm_page_launder(m);
4663 }
4664
4665 /*
4666 * vm_page_grab_release
4667 *
4668 * Helper routine for grab functions to release busy on return.
4669 */
4670 static inline void
4671 vm_page_grab_release(vm_page_t m, int allocflags)
4672 {
4673
4674 if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
4675 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4676 vm_page_sunbusy(m);
4677 else
4678 vm_page_xunbusy(m);
4679 }
4680 }
4681
4682 /*
4683 * vm_page_grab_sleep
4684 *
4685 * Sleep for busy according to VM_ALLOC_ parameters. Returns true
4686 * if the caller should retry and false otherwise.
4687 *
4688 * If the object is locked on entry, it will be unlocked when this
4689 * function returns false; when it returns true the object is still
4690 * locked, though the lock may have been dropped in the meantime.
4691 */
4692 static bool
4693 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
4694 const char *wmesg, int allocflags, bool locked)
4695 {
4696
4697 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4698 return (false);
4699
4700 /*
4701 * Reference the page before unlocking and sleeping so that
4702 * the page daemon is less likely to reclaim it.
4703 */
4704 if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
4705 vm_page_reference(m);
4706
4707 if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
4708 locked)
4709 VM_OBJECT_WLOCK(object);
4710 if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
4711 return (false);
4712
4713 return (true);
4714 }
4715
4716 /*
4717 * Assert that the grab flags are valid.
4718 */
4719 static inline void
4720 vm_page_grab_check(int allocflags)
4721 {
4722
4723 KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
4724 (allocflags & VM_ALLOC_WIRED) != 0,
4725 ("vm_page_grab*: the pages must be busied or wired"));
4726
4727 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4728 (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4729 ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4730 }
4731
4732 /*
4733 * Calculate the page allocation flags for grab.
4734 */
4735 static inline int
4736 vm_page_grab_pflags(int allocflags)
4737 {
4738 int pflags;
4739
4740 pflags = allocflags &
4741 ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
4742 VM_ALLOC_NOBUSY | VM_ALLOC_IGN_SBUSY);
4743 if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4744 pflags |= VM_ALLOC_WAITFAIL;
4745 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4746 pflags |= VM_ALLOC_SBUSY;
4747
4748 return (pflags);
4749 }
4750
4751 /*
4752 * Grab a page, waiting until we are awakened due to the page
4753 * changing state. We keep on waiting if the page continues
4754 * to be in the object. If the page doesn't exist, first allocate it
4755 * and then conditionally zero it.
4756 *
4757 * This routine may sleep.
4758 *
4759 * The object must be locked on entry. The lock will, however, be released
4760 * and reacquired if the routine sleeps.
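 *
 * An illustrative (not prescriptive) use, with an example flag combination:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT);
 *	if (m != NULL) {
 *		... the page is returned exclusive busied ...
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);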
4761 */ 4762 vm_page_t 4763 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 4764 { 4765 vm_page_t m; 4766 4767 VM_OBJECT_ASSERT_WLOCKED(object); 4768 vm_page_grab_check(allocflags); 4769 4770 retrylookup: 4771 if ((m = vm_page_lookup(object, pindex)) != NULL) { 4772 if (!vm_page_tryacquire(m, allocflags)) { 4773 if (vm_page_grab_sleep(object, m, pindex, "pgrbwt", 4774 allocflags, true)) 4775 goto retrylookup; 4776 return (NULL); 4777 } 4778 goto out; 4779 } 4780 if ((allocflags & VM_ALLOC_NOCREAT) != 0) 4781 return (NULL); 4782 m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags)); 4783 if (m == NULL) { 4784 if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0) 4785 return (NULL); 4786 goto retrylookup; 4787 } 4788 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 4789 pmap_zero_page(m); 4790 4791 out: 4792 vm_page_grab_release(m, allocflags); 4793 4794 return (m); 4795 } 4796 4797 /* 4798 * Locklessly attempt to acquire a page given a (object, pindex) tuple 4799 * and an optional previous page to avoid the radix lookup. The resulting 4800 * page will be validated against the identity tuple and busied or wired 4801 * as requested. A NULL *mp return guarantees that the page was not in 4802 * radix at the time of the call but callers must perform higher level 4803 * synchronization or retry the operation under a lock if they require 4804 * an atomic answer. This is the only lock free validation routine, 4805 * other routines can depend on the resulting page state. 4806 * 4807 * The return value indicates whether the operation failed due to caller 4808 * flags. The return is tri-state with mp: 4809 * 4810 * (true, *mp != NULL) - The operation was successful. 4811 * (true, *mp == NULL) - The page was not found in tree. 4812 * (false, *mp == NULL) - WAITFAIL or NOWAIT prevented acquisition. 4813 */ 4814 static bool 4815 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex, 4816 vm_page_t prev, vm_page_t *mp, int allocflags) 4817 { 4818 vm_page_t m; 4819 4820 vm_page_grab_check(allocflags); 4821 MPASS(prev == NULL || vm_page_busied(prev) || vm_page_wired(prev)); 4822 4823 *mp = NULL; 4824 for (;;) { 4825 /* 4826 * We may see a false NULL here because the previous page 4827 * has been removed or just inserted and the list is loaded 4828 * without barriers. Switch to radix to verify. 4829 */ 4830 if (prev == NULL || (m = TAILQ_NEXT(prev, listq)) == NULL || 4831 QMD_IS_TRASHED(m) || m->pindex != pindex || 4832 atomic_load_ptr(&m->object) != object) { 4833 prev = NULL; 4834 /* 4835 * This guarantees the result is instantaneously 4836 * correct. 4837 */ 4838 m = vm_radix_lookup_unlocked(&object->rtree, pindex); 4839 } 4840 if (m == NULL) 4841 return (true); 4842 if (vm_page_trybusy(m, allocflags)) { 4843 if (m->object == object && m->pindex == pindex) 4844 break; 4845 /* relookup. */ 4846 vm_page_busy_release(m); 4847 cpu_spinwait(); 4848 continue; 4849 } 4850 if (!vm_page_grab_sleep(object, m, pindex, "pgnslp", 4851 allocflags, false)) 4852 return (false); 4853 } 4854 if ((allocflags & VM_ALLOC_WIRED) != 0) 4855 vm_page_wire(m); 4856 vm_page_grab_release(m, allocflags); 4857 *mp = m; 4858 return (true); 4859 } 4860 4861 /* 4862 * Try to locklessly grab a page and fall back to the object lock if NOCREAT 4863 * is not set. 
4864 */
4865 vm_page_t
4866 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
4867 {
4868 vm_page_t m;
4869
4870 vm_page_grab_check(allocflags);
4871
4872 if (!vm_page_acquire_unlocked(object, pindex, NULL, &m, allocflags))
4873 return (NULL);
4874 if (m != NULL)
4875 return (m);
4876
4877 /*
4878 * The radix lockless lookup should never return false negatives.
4879 * If the user specifies NOCREAT, they are guaranteed that there
4880 * was no page present at the instant of the call. A NOCREAT caller
4881 * must handle create races gracefully.
4882 */
4883 if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4884 return (NULL);
4885
4886 VM_OBJECT_WLOCK(object);
4887 m = vm_page_grab(object, pindex, allocflags);
4888 VM_OBJECT_WUNLOCK(object);
4889
4890 return (m);
4891 }
4892
4893 /*
4894 * Grab a page and make it valid, paging in if necessary. Pages missing from
4895 * their pager are zero filled and validated. If a VM_ALLOC_COUNT is supplied
4896 * and the page is not valid, as many as VM_INITIAL_PAGEIN pages can be brought
4897 * in simultaneously. Additional pages will be left on a paging queue but
4898 * will neither be wired nor busy regardless of allocflags.
4899 */
4900 int
4901 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
4902 {
4903 vm_page_t m;
4904 vm_page_t ma[VM_INITIAL_PAGEIN];
4905 int after, i, pflags, rv;
4906
4907 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4908 (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4909 ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4910 KASSERT((allocflags &
4911 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
4912 ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
4913 VM_OBJECT_ASSERT_WLOCKED(object);
4914 pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
4915 VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY);
4916 pflags |= VM_ALLOC_WAITFAIL;
4917
4918 retrylookup:
4919 if ((m = vm_page_lookup(object, pindex)) != NULL) {
4920 /*
4921 * If the page is fully valid it can only become invalid
4922 * with the object lock held. If it is not valid it can
4923 * become valid with the busy lock held. Therefore, we
4924 * may unnecessarily lock the exclusive busy here if we
4925 * race with I/O completion not using the object lock.
4926 * However, we will not end up with an invalid page and a
4927 * shared lock.
4928 */
4929 if (!vm_page_trybusy(m,
4930 vm_page_all_valid(m) ?
allocflags : 0)) { 4931 (void)vm_page_grab_sleep(object, m, pindex, "pgrbwt", 4932 allocflags, true); 4933 goto retrylookup; 4934 } 4935 if (vm_page_all_valid(m)) 4936 goto out; 4937 if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 4938 vm_page_busy_release(m); 4939 *mp = NULL; 4940 return (VM_PAGER_FAIL); 4941 } 4942 } else if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 4943 *mp = NULL; 4944 return (VM_PAGER_FAIL); 4945 } else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) { 4946 if (!vm_pager_can_alloc_page(object, pindex)) { 4947 *mp = NULL; 4948 return (VM_PAGER_AGAIN); 4949 } 4950 goto retrylookup; 4951 } 4952 4953 vm_page_assert_xbusied(m); 4954 if (vm_pager_has_page(object, pindex, NULL, &after)) { 4955 after = MIN(after, VM_INITIAL_PAGEIN); 4956 after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT); 4957 after = MAX(after, 1); 4958 ma[0] = m; 4959 for (i = 1; i < after; i++) { 4960 if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) { 4961 if (vm_page_any_valid(ma[i]) || 4962 !vm_page_tryxbusy(ma[i])) 4963 break; 4964 } else { 4965 ma[i] = vm_page_alloc(object, m->pindex + i, 4966 VM_ALLOC_NORMAL); 4967 if (ma[i] == NULL) 4968 break; 4969 } 4970 } 4971 after = i; 4972 vm_object_pip_add(object, after); 4973 VM_OBJECT_WUNLOCK(object); 4974 rv = vm_pager_get_pages(object, ma, after, NULL, NULL); 4975 VM_OBJECT_WLOCK(object); 4976 vm_object_pip_wakeupn(object, after); 4977 /* Pager may have replaced a page. */ 4978 m = ma[0]; 4979 if (rv != VM_PAGER_OK) { 4980 for (i = 0; i < after; i++) { 4981 if (!vm_page_wired(ma[i])) 4982 vm_page_free(ma[i]); 4983 else 4984 vm_page_xunbusy(ma[i]); 4985 } 4986 *mp = NULL; 4987 return (rv); 4988 } 4989 for (i = 1; i < after; i++) 4990 vm_page_readahead_finish(ma[i]); 4991 MPASS(vm_page_all_valid(m)); 4992 } else { 4993 vm_page_zero_invalid(m, TRUE); 4994 } 4995 out: 4996 if ((allocflags & VM_ALLOC_WIRED) != 0) 4997 vm_page_wire(m); 4998 if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m)) 4999 vm_page_busy_downgrade(m); 5000 else if ((allocflags & VM_ALLOC_NOBUSY) != 0) 5001 vm_page_busy_release(m); 5002 *mp = m; 5003 return (VM_PAGER_OK); 5004 } 5005 5006 /* 5007 * Locklessly grab a valid page. If the page is not valid or not yet 5008 * allocated this will fall back to the object lock method. 5009 */ 5010 int 5011 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object, 5012 vm_pindex_t pindex, int allocflags) 5013 { 5014 vm_page_t m; 5015 int flags; 5016 int error; 5017 5018 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 5019 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 5020 ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY " 5021 "mismatch")); 5022 KASSERT((allocflags & 5023 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0, 5024 ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags)); 5025 5026 /* 5027 * Attempt a lockless lookup and busy. We need at least an sbusy 5028 * before we can inspect the valid field and return a wired page. 
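 * VM_ALLOC_NOBUSY and VM_ALLOC_WIRED are therefore stripped for the
 * lockless attempt and applied by hand below, once the page has been
 * confirmed to be fully valid.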
5029 */ 5030 flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED); 5031 if (!vm_page_acquire_unlocked(object, pindex, NULL, mp, flags)) 5032 return (VM_PAGER_FAIL); 5033 if ((m = *mp) != NULL) { 5034 if (vm_page_all_valid(m)) { 5035 if ((allocflags & VM_ALLOC_WIRED) != 0) 5036 vm_page_wire(m); 5037 vm_page_grab_release(m, allocflags); 5038 return (VM_PAGER_OK); 5039 } 5040 vm_page_busy_release(m); 5041 } 5042 if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 5043 *mp = NULL; 5044 return (VM_PAGER_FAIL); 5045 } 5046 VM_OBJECT_WLOCK(object); 5047 error = vm_page_grab_valid(mp, object, pindex, allocflags); 5048 VM_OBJECT_WUNLOCK(object); 5049 5050 return (error); 5051 } 5052 5053 /* 5054 * Return the specified range of pages from the given object. For each 5055 * page offset within the range, if a page already exists within the object 5056 * at that offset and it is busy, then wait for it to change state. If, 5057 * instead, the page doesn't exist, then allocate it. 5058 * 5059 * The caller must always specify an allocation class. 5060 * 5061 * allocation classes: 5062 * VM_ALLOC_NORMAL normal process request 5063 * VM_ALLOC_SYSTEM system *really* needs the pages 5064 * 5065 * The caller must always specify that the pages are to be busied and/or 5066 * wired. 5067 * 5068 * optional allocation flags: 5069 * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages 5070 * VM_ALLOC_NOBUSY do not exclusive busy the page 5071 * VM_ALLOC_NOWAIT do not sleep 5072 * VM_ALLOC_SBUSY set page to sbusy state 5073 * VM_ALLOC_WIRED wire the pages 5074 * VM_ALLOC_ZERO zero and validate any invalid pages 5075 * 5076 * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it 5077 * may return a partial prefix of the requested range. 5078 */ 5079 int 5080 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags, 5081 vm_page_t *ma, int count) 5082 { 5083 vm_page_t m, mpred; 5084 int pflags; 5085 int i; 5086 5087 VM_OBJECT_ASSERT_WLOCKED(object); 5088 KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0, 5089 ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed")); 5090 KASSERT(count > 0, 5091 ("vm_page_grab_pages: invalid page count %d", count)); 5092 vm_page_grab_check(allocflags); 5093 5094 pflags = vm_page_grab_pflags(allocflags); 5095 i = 0; 5096 retrylookup: 5097 m = vm_radix_lookup_le(&object->rtree, pindex + i); 5098 if (m == NULL || m->pindex != pindex + i) { 5099 mpred = m; 5100 m = NULL; 5101 } else 5102 mpred = TAILQ_PREV(m, pglist, listq); 5103 for (; i < count; i++) { 5104 if (m != NULL) { 5105 if (!vm_page_tryacquire(m, allocflags)) { 5106 if (vm_page_grab_sleep(object, m, pindex + i, 5107 "grbmaw", allocflags, true)) 5108 goto retrylookup; 5109 break; 5110 } 5111 } else { 5112 if ((allocflags & VM_ALLOC_NOCREAT) != 0) 5113 break; 5114 m = vm_page_alloc_after(object, pindex + i, 5115 pflags | VM_ALLOC_COUNT(count - i), mpred); 5116 if (m == NULL) { 5117 if ((allocflags & (VM_ALLOC_NOWAIT | 5118 VM_ALLOC_WAITFAIL)) != 0) 5119 break; 5120 goto retrylookup; 5121 } 5122 } 5123 if (vm_page_none_valid(m) && 5124 (allocflags & VM_ALLOC_ZERO) != 0) { 5125 if ((m->flags & PG_ZERO) == 0) 5126 pmap_zero_page(m); 5127 vm_page_valid(m); 5128 } 5129 vm_page_grab_release(m, allocflags); 5130 ma[i] = mpred = m; 5131 m = vm_page_next(m); 5132 } 5133 return (i); 5134 } 5135 5136 /* 5137 * Unlocked variant of vm_page_grab_pages(). This accepts the same flags 5138 * and will fall back to the locked variant to handle allocation. 
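 *
 * A sketch of a possible call, using an example flag combination; "ma" must
 * have room for "count" page pointers, and the return value is the number
 * of pages actually obtained:
 *
 *	n = vm_page_grab_pages_unlocked(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT,
 *	    ma, count);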
5139 */ 5140 int 5141 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex, 5142 int allocflags, vm_page_t *ma, int count) 5143 { 5144 vm_page_t m, pred; 5145 int flags; 5146 int i; 5147 5148 KASSERT(count > 0, 5149 ("vm_page_grab_pages_unlocked: invalid page count %d", count)); 5150 vm_page_grab_check(allocflags); 5151 5152 /* 5153 * Modify flags for lockless acquire to hold the page until we 5154 * set it valid if necessary. 5155 */ 5156 flags = allocflags & ~VM_ALLOC_NOBUSY; 5157 pred = NULL; 5158 for (i = 0; i < count; i++, pindex++) { 5159 if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags)) 5160 return (i); 5161 if (m == NULL) 5162 break; 5163 if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) { 5164 if ((m->flags & PG_ZERO) == 0) 5165 pmap_zero_page(m); 5166 vm_page_valid(m); 5167 } 5168 /* m will still be wired or busy according to flags. */ 5169 vm_page_grab_release(m, allocflags); 5170 pred = ma[i] = m; 5171 } 5172 if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0) 5173 return (i); 5174 count -= i; 5175 VM_OBJECT_WLOCK(object); 5176 i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count); 5177 VM_OBJECT_WUNLOCK(object); 5178 5179 return (i); 5180 } 5181 5182 /* 5183 * Mapping function for valid or dirty bits in a page. 5184 * 5185 * Inputs are required to range within a page. 5186 */ 5187 vm_page_bits_t 5188 vm_page_bits(int base, int size) 5189 { 5190 int first_bit; 5191 int last_bit; 5192 5193 KASSERT( 5194 base + size <= PAGE_SIZE, 5195 ("vm_page_bits: illegal base/size %d/%d", base, size) 5196 ); 5197 5198 if (size == 0) /* handle degenerate case */ 5199 return (0); 5200 5201 first_bit = base >> DEV_BSHIFT; 5202 last_bit = (base + size - 1) >> DEV_BSHIFT; 5203 5204 return (((vm_page_bits_t)2 << last_bit) - 5205 ((vm_page_bits_t)1 << first_bit)); 5206 } 5207 5208 void 5209 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set) 5210 { 5211 5212 #if PAGE_SIZE == 32768 5213 atomic_set_64((uint64_t *)bits, set); 5214 #elif PAGE_SIZE == 16384 5215 atomic_set_32((uint32_t *)bits, set); 5216 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16) 5217 atomic_set_16((uint16_t *)bits, set); 5218 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8) 5219 atomic_set_8((uint8_t *)bits, set); 5220 #else /* PAGE_SIZE <= 8192 */ 5221 uintptr_t addr; 5222 int shift; 5223 5224 addr = (uintptr_t)bits; 5225 /* 5226 * Use a trick to perform a 32-bit atomic on the 5227 * containing aligned word, to not depend on the existence 5228 * of atomic_{set, clear}_{8, 16}. 
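 *
 * The computed "shift" is the field's byte offset within its aligned
 * 32-bit word converted into a bit count; on big-endian machines the
 * field sits at the opposite end of the word, so the offset is
 * mirrored before being scaled by NBBY.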
5229 */ 5230 shift = addr & (sizeof(uint32_t) - 1); 5231 #if BYTE_ORDER == BIG_ENDIAN 5232 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; 5233 #else 5234 shift *= NBBY; 5235 #endif 5236 addr &= ~(sizeof(uint32_t) - 1); 5237 atomic_set_32((uint32_t *)addr, set << shift); 5238 #endif /* PAGE_SIZE */ 5239 } 5240 5241 static inline void 5242 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear) 5243 { 5244 5245 #if PAGE_SIZE == 32768 5246 atomic_clear_64((uint64_t *)bits, clear); 5247 #elif PAGE_SIZE == 16384 5248 atomic_clear_32((uint32_t *)bits, clear); 5249 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16) 5250 atomic_clear_16((uint16_t *)bits, clear); 5251 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8) 5252 atomic_clear_8((uint8_t *)bits, clear); 5253 #else /* PAGE_SIZE <= 8192 */ 5254 uintptr_t addr; 5255 int shift; 5256 5257 addr = (uintptr_t)bits; 5258 /* 5259 * Use a trick to perform a 32-bit atomic on the 5260 * containing aligned word, to not depend on the existence 5261 * of atomic_{set, clear}_{8, 16}. 5262 */ 5263 shift = addr & (sizeof(uint32_t) - 1); 5264 #if BYTE_ORDER == BIG_ENDIAN 5265 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; 5266 #else 5267 shift *= NBBY; 5268 #endif 5269 addr &= ~(sizeof(uint32_t) - 1); 5270 atomic_clear_32((uint32_t *)addr, clear << shift); 5271 #endif /* PAGE_SIZE */ 5272 } 5273 5274 static inline vm_page_bits_t 5275 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits) 5276 { 5277 #if PAGE_SIZE == 32768 5278 uint64_t old; 5279 5280 old = *bits; 5281 while (atomic_fcmpset_64(bits, &old, newbits) == 0); 5282 return (old); 5283 #elif PAGE_SIZE == 16384 5284 uint32_t old; 5285 5286 old = *bits; 5287 while (atomic_fcmpset_32(bits, &old, newbits) == 0); 5288 return (old); 5289 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16) 5290 uint16_t old; 5291 5292 old = *bits; 5293 while (atomic_fcmpset_16(bits, &old, newbits) == 0); 5294 return (old); 5295 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8) 5296 uint8_t old; 5297 5298 old = *bits; 5299 while (atomic_fcmpset_8(bits, &old, newbits) == 0); 5300 return (old); 5301 #else /* PAGE_SIZE <= 4096*/ 5302 uintptr_t addr; 5303 uint32_t old, new, mask; 5304 int shift; 5305 5306 addr = (uintptr_t)bits; 5307 /* 5308 * Use a trick to perform a 32-bit atomic on the 5309 * containing aligned word, to not depend on the existence 5310 * of atomic_{set, swap, clear}_{8, 16}. 5311 */ 5312 shift = addr & (sizeof(uint32_t) - 1); 5313 #if BYTE_ORDER == BIG_ENDIAN 5314 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; 5315 #else 5316 shift *= NBBY; 5317 #endif 5318 addr &= ~(sizeof(uint32_t) - 1); 5319 mask = VM_PAGE_BITS_ALL << shift; 5320 5321 old = *bits; 5322 do { 5323 new = old & ~mask; 5324 new |= newbits << shift; 5325 } while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0); 5326 return (old >> shift); 5327 #endif /* PAGE_SIZE */ 5328 } 5329 5330 /* 5331 * vm_page_set_valid_range: 5332 * 5333 * Sets portions of a page valid. The arguments are expected 5334 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 5335 * of any partial chunks touched by the range. The invalid portion of 5336 * such chunks will be zeroed. 5337 * 5338 * (base + size) must be less then or equal to PAGE_SIZE. 
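 *
 * For example, assuming DEV_BSIZE is 512, a call with base == 100 and
 * size == 200 marks the whole first 512-byte block valid; if that block
 * was previously invalid, the untouched bytes [0, 100) and [300, 512) are
 * zeroed first.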
5339 */ 5340 void 5341 vm_page_set_valid_range(vm_page_t m, int base, int size) 5342 { 5343 int endoff, frag; 5344 vm_page_bits_t pagebits; 5345 5346 vm_page_assert_busied(m); 5347 if (size == 0) /* handle degenerate case */ 5348 return; 5349 5350 /* 5351 * If the base is not DEV_BSIZE aligned and the valid 5352 * bit is clear, we have to zero out a portion of the 5353 * first block. 5354 */ 5355 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 5356 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 5357 pmap_zero_page_area(m, frag, base - frag); 5358 5359 /* 5360 * If the ending offset is not DEV_BSIZE aligned and the 5361 * valid bit is clear, we have to zero out a portion of 5362 * the last block. 5363 */ 5364 endoff = base + size; 5365 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 5366 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 5367 pmap_zero_page_area(m, endoff, 5368 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 5369 5370 /* 5371 * Assert that no previously invalid block that is now being validated 5372 * is already dirty. 5373 */ 5374 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 5375 ("vm_page_set_valid_range: page %p is dirty", m)); 5376 5377 /* 5378 * Set valid bits inclusive of any overlap. 5379 */ 5380 pagebits = vm_page_bits(base, size); 5381 if (vm_page_xbusied(m)) 5382 m->valid |= pagebits; 5383 else 5384 vm_page_bits_set(m, &m->valid, pagebits); 5385 } 5386 5387 /* 5388 * Set the page dirty bits and free the invalid swap space if 5389 * present. Returns the previous dirty bits. 5390 */ 5391 vm_page_bits_t 5392 vm_page_set_dirty(vm_page_t m) 5393 { 5394 vm_page_bits_t old; 5395 5396 VM_PAGE_OBJECT_BUSY_ASSERT(m); 5397 5398 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) { 5399 old = m->dirty; 5400 m->dirty = VM_PAGE_BITS_ALL; 5401 } else 5402 old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL); 5403 if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0) 5404 vm_pager_page_unswapped(m); 5405 5406 return (old); 5407 } 5408 5409 /* 5410 * Clear the given bits from the specified page's dirty field. 5411 */ 5412 static __inline void 5413 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) 5414 { 5415 5416 vm_page_assert_busied(m); 5417 5418 /* 5419 * If the page is xbusied and not write mapped we are the 5420 * only thread that can modify dirty bits. Otherwise, The pmap 5421 * layer can call vm_page_dirty() without holding a distinguished 5422 * lock. The combination of page busy and atomic operations 5423 * suffice to guarantee consistency of the page dirty field. 5424 */ 5425 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) 5426 m->dirty &= ~pagebits; 5427 else 5428 vm_page_bits_clear(m, &m->dirty, pagebits); 5429 } 5430 5431 /* 5432 * vm_page_set_validclean: 5433 * 5434 * Sets portions of a page valid and clean. The arguments are expected 5435 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 5436 * of any partial chunks touched by the range. The invalid portion of 5437 * such chunks will be zero'd. 5438 * 5439 * (base + size) must be less then or equal to PAGE_SIZE. 5440 */ 5441 void 5442 vm_page_set_validclean(vm_page_t m, int base, int size) 5443 { 5444 vm_page_bits_t oldvalid, pagebits; 5445 int endoff, frag; 5446 5447 vm_page_assert_busied(m); 5448 if (size == 0) /* handle degenerate case */ 5449 return; 5450 5451 /* 5452 * If the base is not DEV_BSIZE aligned and the valid 5453 * bit is clear, we have to zero out a portion of the 5454 * first block. 
5455 */
5456 if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5457 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5458 pmap_zero_page_area(m, frag, base - frag);
5459
5460 /*
5461 * If the ending offset is not DEV_BSIZE aligned and the
5462 * valid bit is clear, we have to zero out a portion of
5463 * the last block.
5464 */
5465 endoff = base + size;
5466 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5467 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5468 pmap_zero_page_area(m, endoff,
5469 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5470
5471 /*
5472 * Set valid, clear dirty bits. If validating the entire
5473 * page, we can safely clear the pmap modify bit. We also
5474 * use this opportunity to clear the PGA_NOSYNC flag. If a process
5475 * takes a write fault on a MAP_NOSYNC memory area, the flag will
5476 * be set again.
5477 *
5478 * We set valid bits inclusive of any overlap, but we can only
5479 * clear dirty bits for DEV_BSIZE chunks that are fully within
5480 * the range.
5481 */
5482 oldvalid = m->valid;
5483 pagebits = vm_page_bits(base, size);
5484 if (vm_page_xbusied(m))
5485 m->valid |= pagebits;
5486 else
5487 vm_page_bits_set(m, &m->valid, pagebits);
5488 #if 0 /* NOT YET */
5489 if ((frag = base & (DEV_BSIZE - 1)) != 0) {
5490 frag = DEV_BSIZE - frag;
5491 base += frag;
5492 size -= frag;
5493 if (size < 0)
5494 size = 0;
5495 }
5496 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
5497 #endif
5498 if (base == 0 && size == PAGE_SIZE) {
5499 /*
5500 * The page can only be modified within the pmap if it is
5501 * mapped, and it can only be mapped if it was previously
5502 * fully valid.
5503 */
5504 if (oldvalid == VM_PAGE_BITS_ALL)
5505 /*
5506 * Perform the pmap_clear_modify() first. Otherwise,
5507 * a concurrent pmap operation, such as
5508 * pmap_protect(), could clear a modification in the
5509 * pmap and set the dirty field on the page before
5510 * pmap_clear_modify() had begun and after the dirty
5511 * field was cleared here.
5512 */
5513 pmap_clear_modify(m);
5514 m->dirty = 0;
5515 vm_page_aflag_clear(m, PGA_NOSYNC);
5516 } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
5517 m->dirty &= ~pagebits;
5518 else
5519 vm_page_clear_dirty_mask(m, pagebits);
5520 }
5521
5522 void
5523 vm_page_clear_dirty(vm_page_t m, int base, int size)
5524 {
5525
5526 vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
5527 }
5528
5529 /*
5530 * vm_page_set_invalid:
5531 *
5532 * Invalidates DEV_BSIZE'd chunks within a page. Both the
5533 * valid and dirty bits for the affected areas are cleared.
5534 */
5535 void
5536 vm_page_set_invalid(vm_page_t m, int base, int size)
5537 {
5538 vm_page_bits_t bits;
5539 vm_object_t object;
5540
5541 /*
5542 * The object lock is required so that pages can't be mapped
5543 * read-only while we're in the process of invalidating them.
5544 */
5545 object = m->object;
5546 VM_OBJECT_ASSERT_WLOCKED(object);
5547 vm_page_assert_busied(m);
5548
5549 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
5550 size >= object->un_pager.vnp.vnp_size)
5551 bits = VM_PAGE_BITS_ALL;
5552 else
5553 bits = vm_page_bits(base, size);
5554 if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
5555 pmap_remove_all(m);
5556 KASSERT((bits == 0 && vm_page_all_valid(m)) ||
5557 !pmap_page_is_mapped(m),
5558 ("vm_page_set_invalid: page %p is mapped", m));
5559 if (vm_page_xbusied(m)) {
5560 m->valid &= ~bits;
5561 m->dirty &= ~bits;
5562 } else {
5563 vm_page_bits_clear(m, &m->valid, bits);
5564 vm_page_bits_clear(m, &m->dirty, bits);
5565 }
5566 }
5567
5568 /*
5569 * vm_page_invalid:
5570 *
5571 * Invalidates the entire page. The page must be busy, unmapped, and
5572 * the enclosing object must be locked. The object lock protects
5573 * against concurrent read-only pmap enter, which is done without
5574 * busy.
5575 */
5576 void
5577 vm_page_invalid(vm_page_t m)
5578 {
5579
5580 vm_page_assert_busied(m);
5581 VM_OBJECT_ASSERT_WLOCKED(m->object);
5582 MPASS(!pmap_page_is_mapped(m));
5583
5584 if (vm_page_xbusied(m))
5585 m->valid = 0;
5586 else
5587 vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
5588 }
5589
5590 /*
5591 * vm_page_zero_invalid()
5592 *
5593 * The kernel assumes that the invalid portions of a page contain
5594 * garbage, but such pages can be mapped into memory by user code.
5595 * When this occurs, we must zero out the non-valid portions of the
5596 * page so user code sees what it expects.
5597 *
5598 * Pages are most often semi-valid when the end of a file is mapped
5599 * into memory and the file's size is not page aligned.
5600 */
5601 void
5602 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
5603 {
5604 int b;
5605 int i;
5606
5607 /*
5608 * Scan the valid bits looking for invalid sections that
5609 * must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the
5610 * valid bit may be set) have already been zeroed by
5611 * vm_page_set_validclean().
5612 */
5613 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
5614 if (i == (PAGE_SIZE / DEV_BSIZE) ||
5615 (m->valid & ((vm_page_bits_t)1 << i))) {
5616 if (i > b) {
5617 pmap_zero_page_area(m,
5618 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
5619 }
5620 b = i + 1;
5621 }
5622 }
5623
5624 /*
5625 * setvalid is TRUE when we can safely set the zeroed areas
5626 * as being valid. We can do this if there are no cache consistency
5627 * issues, e.g. it is ok to do with UFS, but not ok to do with NFS.
5628 */
5629 if (setvalid)
5630 vm_page_valid(m);
5631 }
5632
5633 /*
5634 * vm_page_is_valid:
5635 *
5636 * Is (partial) page valid? Note that the case where size == 0
5637 * will return FALSE in the degenerate case where the page is
5638 * entirely invalid, and TRUE otherwise.
5639 *
5640 * Some callers invoke this routine without the busy lock held and
5641 * handle races via higher level locks. Typical callers should
5642 * hold a busy lock to prevent invalidation.
5643 */
5644 int
5645 vm_page_is_valid(vm_page_t m, int base, int size)
5646 {
5647 vm_page_bits_t bits;
5648
5649 bits = vm_page_bits(base, size);
5650 return (vm_page_any_valid(m) && (m->valid & bits) == bits);
5651 }
5652
5653 /*
5654 * Returns true if all of the specified predicates are true for the entire
5655 * (super)page and false otherwise.
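 *
 * The predicates are selected with the PS_ALL_DIRTY, PS_ALL_VALID and
 * PS_NONE_BUSY flags; "skip_m", when non-NULL, names one constituent page
 * that is exempted from the busy check, although its object identity is
 * still verified.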
5656 */ 5657 bool 5658 vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m) 5659 { 5660 vm_object_t object; 5661 int i, npages; 5662 5663 object = m->object; 5664 if (skip_m != NULL && skip_m->object != object) 5665 return (false); 5666 VM_OBJECT_ASSERT_LOCKED(object); 5667 KASSERT(psind <= m->psind, 5668 ("psind %d > psind %d of m %p", psind, m->psind, m)); 5669 npages = atop(pagesizes[psind]); 5670 5671 /* 5672 * The physically contiguous pages that make up a superpage, i.e., a 5673 * page with a page size index ("psind") greater than zero, will 5674 * occupy adjacent entries in vm_page_array[]. 5675 */ 5676 for (i = 0; i < npages; i++) { 5677 /* Always test object consistency, including "skip_m". */ 5678 if (m[i].object != object) 5679 return (false); 5680 if (&m[i] == skip_m) 5681 continue; 5682 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i])) 5683 return (false); 5684 if ((flags & PS_ALL_DIRTY) != 0) { 5685 /* 5686 * Calling vm_page_test_dirty() or pmap_is_modified() 5687 * might stop this case from spuriously returning 5688 * "false". However, that would require a write lock 5689 * on the object containing "m[i]". 5690 */ 5691 if (m[i].dirty != VM_PAGE_BITS_ALL) 5692 return (false); 5693 } 5694 if ((flags & PS_ALL_VALID) != 0 && 5695 m[i].valid != VM_PAGE_BITS_ALL) 5696 return (false); 5697 } 5698 return (true); 5699 } 5700 5701 /* 5702 * Set the page's dirty bits if the page is modified. 5703 */ 5704 void 5705 vm_page_test_dirty(vm_page_t m) 5706 { 5707 5708 vm_page_assert_busied(m); 5709 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 5710 vm_page_dirty(m); 5711 } 5712 5713 void 5714 vm_page_valid(vm_page_t m) 5715 { 5716 5717 vm_page_assert_busied(m); 5718 if (vm_page_xbusied(m)) 5719 m->valid = VM_PAGE_BITS_ALL; 5720 else 5721 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL); 5722 } 5723 5724 void 5725 vm_page_lock_KBI(vm_page_t m, const char *file, int line) 5726 { 5727 5728 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); 5729 } 5730 5731 void 5732 vm_page_unlock_KBI(vm_page_t m, const char *file, int line) 5733 { 5734 5735 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line); 5736 } 5737 5738 int 5739 vm_page_trylock_KBI(vm_page_t m, const char *file, int line) 5740 { 5741 5742 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line)); 5743 } 5744 5745 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 5746 void 5747 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line) 5748 { 5749 5750 vm_page_lock_assert_KBI(m, MA_OWNED, file, line); 5751 } 5752 5753 void 5754 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line) 5755 { 5756 5757 mtx_assert_(vm_page_lockptr(m), a, file, line); 5758 } 5759 #endif 5760 5761 #ifdef INVARIANTS 5762 void 5763 vm_page_object_busy_assert(vm_page_t m) 5764 { 5765 5766 /* 5767 * Certain of the page's fields may only be modified by the 5768 * holder of a page or object busy. 5769 */ 5770 if (m->object != NULL && !vm_page_busied(m)) 5771 VM_OBJECT_ASSERT_BUSY(m->object); 5772 } 5773 5774 void 5775 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits) 5776 { 5777 5778 if ((bits & PGA_WRITEABLE) == 0) 5779 return; 5780 5781 /* 5782 * The PGA_WRITEABLE flag can only be set if the page is 5783 * managed, is exclusively busied or the object is locked. 5784 * Currently, this flag is only set by pmap_enter(). 
5785 */ 5786 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5787 ("PGA_WRITEABLE on unmanaged page")); 5788 if (!vm_page_xbusied(m)) 5789 VM_OBJECT_ASSERT_BUSY(m->object); 5790 } 5791 #endif 5792 5793 #include "opt_ddb.h" 5794 #ifdef DDB 5795 #include <sys/kernel.h> 5796 5797 #include <ddb/ddb.h> 5798 5799 DB_SHOW_COMMAND_FLAGS(page, vm_page_print_page_info, DB_CMD_MEMSAFE) 5800 { 5801 5802 db_printf("vm_cnt.v_free_count: %d\n", vm_free_count()); 5803 db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count()); 5804 db_printf("vm_cnt.v_active_count: %d\n", vm_active_count()); 5805 db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count()); 5806 db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count()); 5807 db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved); 5808 db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min); 5809 db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target); 5810 db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target); 5811 } 5812 5813 DB_SHOW_COMMAND_FLAGS(pageq, vm_page_print_pageq_info, DB_CMD_MEMSAFE) 5814 { 5815 int dom; 5816 5817 db_printf("pq_free %d\n", vm_free_count()); 5818 for (dom = 0; dom < vm_ndomains; dom++) { 5819 db_printf( 5820 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n", 5821 dom, 5822 vm_dom[dom].vmd_page_count, 5823 vm_dom[dom].vmd_free_count, 5824 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt, 5825 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt, 5826 vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt, 5827 vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt); 5828 } 5829 } 5830 5831 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) 5832 { 5833 vm_page_t m; 5834 boolean_t phys, virt; 5835 5836 if (!have_addr) { 5837 db_printf("show pginfo addr\n"); 5838 return; 5839 } 5840 5841 phys = strchr(modif, 'p') != NULL; 5842 virt = strchr(modif, 'v') != NULL; 5843 if (virt) 5844 m = PHYS_TO_VM_PAGE(pmap_kextract(addr)); 5845 else if (phys) 5846 m = PHYS_TO_VM_PAGE(addr); 5847 else 5848 m = (vm_page_t)addr; 5849 db_printf( 5850 "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n" 5851 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", 5852 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, 5853 m->a.queue, m->ref_count, m->a.flags, m->oflags, 5854 m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty); 5855 } 5856 #endif /* DDB */ 5857