/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sleepqueue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

struct vm_domain vm_dom[MAXMEMDOM];

DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);

struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];

struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
/* The following fields are protected by the domainset lock. */
domainset_t __exclusive_cache_line vm_min_domains;
domainset_t __exclusive_cache_line vm_severe_domains;
static int vm_min_waiters;
static int vm_severe_waiters;
static int vm_pageproc_waiters;

static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM page statistics");

static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
    CTLFLAG_RD, &pqstate_commit_retries,
    "Number of failed per-page atomic queue state updates");

static COUNTER_U64_DEFINE_EARLY(queue_ops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
    CTLFLAG_RD, &queue_ops,
    "Number of batched queue operations");

static COUNTER_U64_DEFINE_EARLY(queue_nops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
    CTLFLAG_RD, &queue_nops,
    "Number of batched queue operations with no effects");
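
/*
 * Example (illustrative note, not part of the original source): the counters
 * above are exported under the vm.stats.page sysctl node, so the
 * effectiveness of queue-operation batching can be inspected from userland,
 * e.g.:
 *
 *	sysctl vm.stats.page.queue_ops vm.stats.page.queue_nops
 *
 * A high ratio of queue_nops to queue_ops suggests that most deferred queue
 * operations had already become no-ops by the time their batch was processed.
 */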

/*
 * bogus page -- for I/O to/from partially complete buffers,
 * or for paging into sparsely invalid regions.
 */
vm_page_t bogus_page;

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;

struct bitset *vm_page_dump;
long vm_page_dump_pages;

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

static vm_page_t vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
    int req, vm_page_t mpred);
static void vm_page_alloc_check(vm_page_t m);
static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
static bool vm_page_free_prep(vm_page_t m);
static void vm_page_free_toq(vm_page_t m);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
    const uint16_t nflag);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
    int req);
static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
    int flags);
static void vm_page_zone_release(void *arg, void **store, int cnt);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

static void
vm_page_init(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_NOFREE);
}

static int pgcache_zone_max_pcpu;
SYSCTL_INT(_vm, OID_AUTO, pgcache_zone_max_pcpu,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pgcache_zone_max_pcpu, 0,
    "Per-CPU page cache size");
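
/*
 * Sizing note (illustrative, not part of the original source): if
 * vm.pgcache_zone_max_pcpu is left at its default of 0, each free pool's
 * cache below is capped at vmd_page_count / 1000, i.e. roughly 0.1% of the
 * domain's pages.  For example, a domain with 4 GiB of 4 KiB pages (about
 * one million pages) would allow roughly a thousand cached pages per pool.
 */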

/*
 * The cache page zone is initialized later since we need to be able to
 * allocate pages before UMA is fully initialized.
 */
static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	struct vm_pgcache *pgcache;
	int cache, domain, maxcache, pool;

	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &pgcache_zone_max_pcpu);
	maxcache = pgcache_zone_max_pcpu * mp_ncpus;
	for (domain = 0; domain < vm_ndomains; domain++) {
		vmd = VM_DOMAIN(domain);
		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
			pgcache = &vmd->vmd_pgcache[pool];
			pgcache->domain = domain;
			pgcache->pool = pool;
			pgcache->zone = uma_zcache_create("vm pgcache",
			    PAGE_SIZE, NULL, NULL, NULL, NULL,
			    vm_page_zone_import, vm_page_zone_release, pgcache,
			    UMA_ZONE_VM);

			/*
			 * Limit each pool's zone to 0.1% of the pages in the
			 * domain.
			 */
			cache = maxcache != 0 ? maxcache :
			    vmd->vmd_page_count / 1000;
			uma_zone_set_maxcache(pgcache->zone, cache);
		}
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

bool
vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
{
	struct vm_domain *vmd;
	vm_page_t m;
	bool found;

	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		return (true); /* page does not exist, no failure */

	vmd = VM_DOMAIN(vm_phys_domain(pa));
	vm_domain_free_lock(vmd);
	found = vm_phys_unfree_page(pa);
	vm_domain_free_unlock(vmd);
	if (found) {
		vm_domain_freecnt_inc(vmd, -1);
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		if (verbose)
			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
	}
	return (found);
}
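
/*
 * Example (illustrative): the blacklist can be supplied either as a
 * "ram_blacklist" file preloaded by the loader or through the vm.blacklist
 * kernel environment variable.  Entries are physical addresses separated by
 * spaces, commas, or newlines, and each is truncated to a page boundary,
 * e.g. in loader.conf:
 *
 *	vm.blacklist="0x12345000,0x23456000 0x34567000"
 */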

/*
 * vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	char *next;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		vm_page_blacklist_add(pa, bootverbose);
	}
}

/*
 * vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
	return;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Initialize a dummy page for use in scans of the specified paging queue.
 * In principle, this function only needs to set the flag PG_MARKER.
 * Nonetheless, it write busies the page as a safety precaution.
 */
void
vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->a.flags = aflags;
	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	marker->a.queue = queue;
}

static void
vm_page_domain_init(int domain)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int i;

	vmd = VM_DOMAIN(domain);
	bzero(vmd, sizeof(*vmd));
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(const char **,
	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	vmd->vmd_domain = domain;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = false;
	vmd->vmd_helper_threads_enabled = true;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
		pq->pq_pdpages = 0;
		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
	}
	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);

	/*
	 * inacthead is used to provide FIFO ordering for LRU-bypassing
	 * insertions.
	 */
	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
	    &vmd->vmd_inacthead, plinks.q);

	/*
	 * The clock pages are used to implement active queue scanning without
	 * requeues.  Scans start at clock[0], which is advanced after the scan
	 * ends.  When the two clock hands meet, they are reset and scanning
	 * resumes from the head of the queue.
	 */
	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[0], plinks.q);
	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[1], plinks.q);
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
void
vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool)
{
	m->object = NULL;
	m->ref_count = 0;
	m->busy_lock = VPB_FREED;
	m->flags = m->a.flags = 0;
	m->phys_addr = pa;
	m->a.queue = PQ_NONE;
	m->psind = 0;
	m->segind = segind;
	m->order = VM_NFREEORDER;
	m->pool = pool;
	m->valid = m->dirty = 0;
	pmap_page_init(m);
}

#ifndef PMAP_HAS_PAGE_ARRAY
static vm_paddr_t
vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
{
	vm_paddr_t new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 * However, because this page is allocated from KVM, out-of-bounds
	 * accesses using the direct map will not be trapped.
	 */
	*vaddr += PAGE_SIZE;

	/*
	 * Allocate physical memory for the page structures, and map it.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array_size = page_range;

	return (new_end);
}
#endif

/*
 * vm_page_startup:
 *
 *	Initializes the resident memory module.  Allocates physical memory for
 *	bootstrapping UMA and some data structures that are used to manage
 *	physical pages.  Initializes these structures, and populates the free
 *	page queues.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	struct vm_phys_seg *seg;
	struct vm_domain *vmd;
	vm_page_t m;
	char *list, *listend;
	vm_paddr_t end, high_avail, low_avail, new_end, size;
	vm_paddr_t page_range __unused;
	vm_paddr_t last_pa, pa, startp, endp;
	u_long pagecount;
#if MINIDUMP_PAGE_TRACKING
	u_long vm_page_dump_size;
#endif
	int biggestone, i, segind;
#ifdef WITNESS
	vm_offset_t mapped;
	int witness_size;
#endif
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	long ii;
#endif
	int pool;
#ifdef VM_FREEPOOL_LAZYINIT
	int lazyinit;
#endif

	vaddr = round_page(vaddr);

	vm_phys_early_startup();
	biggestone = vm_phys_avail_largest();
	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(i);

	new_end = end;
#ifdef WITNESS
	witness_size = round_page(witness_startup_count());
	new_end -= witness_size;
	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, witness_size);
	witness_startup((void *)mapped);
#endif

#if MINIDUMP_PAGE_TRACKING
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	vm_page_dump_pages = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
		    dump_avail[i] / PAGE_SIZE;
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	}
	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#if MINIDUMP_STARTUP_PAGE_TRACKING
	/*
	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
	 * in a crash dump.  When pmap_map() uses the direct map, they are
	 * not automatically included.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
#else
	(void)last_pa;
#endif
	phys_avail[biggestone + 1] = new_end;
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#else
	(void)pa;
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use, taking into account the overhead of a page structure per page.
	 * In other words, solve
	 *	"available physical memory" - round_page(page_range *
	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE
	 * for page_range.
	 */
	low_avail = phys_avail[0];
	high_avail = phys_avail[1];
	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_avail)
			low_avail = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_avail)
			high_avail = vm_phys_segs[i].end;
	}
	/* Skip the first chunk.  It is already accounted for. */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i] < low_avail)
			low_avail = phys_avail[i];
		if (phys_avail[i + 1] > high_avail)
			high_avail = phys_avail[i + 1];
	}
	first_page = low_avail / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	size = 0;
	for (i = 0; i < vm_phys_nsegs; i++)
		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		size += phys_avail[i + 1] - phys_avail[i];
#elif defined(VM_PHYSSEG_DENSE)
	size = high_avail - low_avail;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

#ifdef PMAP_HAS_PAGE_ARRAY
	pmap_page_array_startup(size / PAGE_SIZE);
	biggestone = vm_phys_avail_largest();
	end = new_end = phys_avail[biggestone + 1];
#else
#ifdef VM_PHYSSEG_DENSE
	/*
	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
	 * the overhead of a page structure per page only if vm_page_array is
	 * allocated from the last physical memory chunk.  Otherwise, we must
	 * allocate page structures representing the physical memory
	 * underlying vm_page_array, even though they will not be used.
	 */
	if (new_end != high_avail)
		page_range = size / PAGE_SIZE;
	else
#endif
	{
		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));

		/*
		 * If the partial bytes remaining are large enough for
		 * a page (PAGE_SIZE) without a corresponding
		 * 'struct vm_page', then new_end will contain an
		 * extra page after subtracting the length of the VM
		 * page array.  Compensate by subtracting an extra
		 * page from new_end.
		 */
		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
			if (new_end == high_avail)
				high_avail -= PAGE_SIZE;
			new_end -= PAGE_SIZE;
		}
	}
	end = new_end;
	new_end = vm_page_array_alloc(&vaddr, end, page_range);
#endif

#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate physical memory for the reservation management system's
	 * data structures, and map it.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end);
#endif
#if MINIDUMP_PAGE_TRACKING && MINIDUMP_STARTUP_PAGE_TRACKING
	/*
	 * Include vm_page_array and vm_reserv_array in a crash dump.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		if (vm_phys_avail_size(i) != 0)
			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	pool = VM_FREEPOOL_DEFAULT;
#ifdef VM_FREEPOOL_LAZYINIT
	lazyinit = 1;
	TUNABLE_INT_FETCH("debug.vm.lazy_page_init", &lazyinit);
	if (lazyinit)
		pool = VM_FREEPOOL_LAZYINIT;
#endif

	/*
	 * Initialize the page structures and add every available page to the
	 * physical memory allocator's free lists.
	 */
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	for (ii = 0; ii < vm_page_array_size; ii++) {
		m = &vm_page_array[ii];
		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0,
		    VM_FREEPOOL_DEFAULT);
		m->flags = PG_FICTITIOUS;
	}
#endif
	vm_cnt.v_page_count = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];

		/*
		 * Initialize pages not covered by phys_avail[], since they
		 * might be freed to the allocator at some future point, e.g.,
		 * by kmem_bootstrap_free().
		 */
		startp = seg->start;
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			if (startp >= seg->end)
				break;
			if (phys_avail[i + 1] < startp)
				continue;
			if (phys_avail[i] <= startp) {
				startp = phys_avail[i + 1];
				continue;
			}
			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
			for (endp = MIN(phys_avail[i], seg->end);
			    startp < endp; startp += PAGE_SIZE, m++) {
				vm_page_init_page(m, startp, segind,
				    VM_FREEPOOL_DEFAULT);
			}
		}

		/*
		 * Add the segment's pages that are covered by one of
		 * phys_avail's ranges to the free lists.
		 */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			if (seg->end <= phys_avail[i] ||
			    seg->start >= phys_avail[i + 1])
				continue;

			startp = MAX(seg->start, phys_avail[i]);
			endp = MIN(seg->end, phys_avail[i + 1]);
			pagecount = (u_long)atop(endp - startp);
			if (pagecount == 0)
				continue;

			/*
			 * If lazy vm_page initialization is not enabled, simply
			 * initialize all of the pages in the segment covered by
			 * phys_avail.  Otherwise, initialize only the first
			 * page of each run of free pages handed to the vm_phys
			 * allocator, which in turn defers initialization of
			 * pages until they are needed.
			 *
			 * This avoids blocking the boot process for long
			 * periods, which may be relevant for VMs (which ought
			 * to boot as quickly as possible) and/or systems with
			 * large amounts of physical memory.
			 */
			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
			vm_page_init_page(m, startp, segind, pool);
			if (pool == VM_FREEPOOL_DEFAULT) {
				for (int j = 1; j < pagecount; j++) {
					vm_page_init_page(&m[j],
					    startp + ptoa(j), segind, pool);
				}
			}
			vmd = VM_DOMAIN(seg->domain);
			vm_domain_free_lock(vmd);
			vm_phys_enqueue_contig(m, pool, pagecount);
			vm_domain_free_unlock(vmd);
			vm_domain_freecnt_inc(vmd, pagecount);
			vm_cnt.v_page_count += (u_int)pagecount;
			vmd->vmd_page_count += (u_int)pagecount;
			vmd->vmd_segs |= 1UL << segind;
		}
	}

	/*
	 * Remove blacklisted pages from the physical memory allocator.
	 */
	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif

	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 * vm_page_trybusy
 *
 *	Helper routine for grab functions to trylock busy.
 *
 *	Returns true on success and false on failure.
 */
static bool
vm_page_trybusy(vm_page_t m, int allocflags)
{

	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
		return (vm_page_trysbusy(m));
	else
		return (vm_page_tryxbusy(m));
}

/*
 * vm_page_tryacquire
 *
 *	Helper routine for grab functions to trylock busy and wire.
 *
 *	Returns true on success and false on failure.
 */
static inline bool
vm_page_tryacquire(vm_page_t m, int allocflags)
{
	bool locked;

	locked = vm_page_trybusy(m, allocflags);
	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_wire(m);
	return (locked);
}

/*
 * vm_page_busy_acquire:
 *
 *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
 *	and drop the object lock if necessary.
 */
bool
vm_page_busy_acquire(vm_page_t m, int allocflags)
{
	vm_object_t obj;
	bool locked;

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = atomic_load_ptr(&m->object);
	for (;;) {
		if (vm_page_tryacquire(m, allocflags))
			return (true);
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (false);
		if (obj != NULL)
			locked = VM_OBJECT_WOWNED(obj);
		else
			locked = false;
		MPASS(locked || vm_page_wired(m));
		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
		    locked) && locked)
			VM_OBJECT_WLOCK(obj);
		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
			return (false);
		KASSERT(m->object == obj || m->object == NULL,
		    ("vm_page_busy_acquire: page %p does not belong to %p",
		    m, obj));
	}
}

/*
 * vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;

	vm_page_assert_xbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (atomic_fcmpset_rel_int(&m->busy_lock,
		    &x, VPB_SHARERS_WORD(1)))
			break;
	}
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}

/*
 * vm_page_busy_tryupgrade:
 *
 *	Attempt to upgrade a single shared busy into an exclusive busy.
 */
int
vm_page_busy_tryupgrade(vm_page_t m)
{
	u_int ce, x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	ce = VPB_CURTHREAD_EXCLUSIVE;
	for (;;) {
		if (VPB_SHARERS(x) > 1)
			return (0);
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_busy_tryupgrade: invalid lock state"));
		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    ce | (x & VPB_BIT_WAITERS)))
			continue;
		return (1);
	}
}

/*
 * vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}
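
/*
 * Usage sketch (illustrative only, not part of the original source): a
 * caller that merely needs to examine a stable page, e.g. an opportunistic
 * reader, can pair the non-sleeping shared-busy primitives defined here:
 *
 *	if (vm_page_trysbusy(m)) {
 *		... inspect the page contents ...
 *		vm_page_sunbusy(m);
 *	}
 *
 * Writers take the exclusive variant via vm_page_tryxbusy() and
 * vm_page_xunbusy() instead.
 */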

/*
 * vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		KASSERT(x != VPB_FREED,
		    ("vm_page_sunbusy: Unlocking freed page."));
		if (VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_sunbusy: invalid lock state"));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) == 0)
			break;
		wakeup(m);
		break;
	}
}

/*
 * vm_page_busy_sleep:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard-path of the busying mechanism.
 *
 *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
 *	will not sleep if the page is shared-busy.
 *
 *	The object lock must be held on entry.
 *
 *	Returns true if it slept and dropped the object lock, or false
 *	if there was no sleep and the lock is still held.
 */
bool
vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
{
	vm_object_t obj;

	obj = m->object;
	VM_OBJECT_ASSERT_LOCKED(obj);

	return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
	    true));
}

/*
 * vm_page_busy_sleep_unlocked:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard-path of the busying mechanism.
 *
 *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
 *	will not sleep if the page is shared-busy.
 *
 *	The object lock must not be held on entry.  The operation will
 *	return if the page changes identity.
 */
void
vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags)
{
	VM_OBJECT_ASSERT_UNLOCKED(obj);

	(void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
}

/*
 * _vm_page_busy_sleep:
 *
 *	Internal busy sleep function.  Verifies the page identity and
 *	lockstate against parameters.  Returns true if it sleeps and
 *	false otherwise.
 *
 *	allocflags uses VM_ALLOC_* flags to specify the lock required.
 *
 *	If locked is true the lock will be dropped for any true returns
 *	and held for any false returns.
 */
static bool
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)
{
	bool xsleep;
	u_int x;

	/*
	 * If the object is busy we must wait for that to drain to zero
	 * before trying the page again.
	 */
	if (obj != NULL && vm_object_busied(obj)) {
		if (locked)
			VM_OBJECT_DROP(obj);
		vm_object_busy_wait(obj, wmesg);
		return (true);
	}

	if (!vm_page_busied(m))
		return (false);

	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
	sleepq_lock(m);
	x = vm_page_busy_fetch(m);
	do {
		/*
		 * If the page changes objects or becomes unlocked we can
		 * simply return.
		 */
		if (x == VPB_UNBUSIED ||
		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
		    m->object != obj || m->pindex != pindex) {
			sleepq_release(m);
			return (false);
		}
		if ((x & VPB_BIT_WAITERS) != 0)
			break;
	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
	if (locked)
		VM_OBJECT_DROP(obj);
	DROP_GIANT();
	sleepq_add(m, NULL, wmesg, 0, 0);
	sleepq_wait(m, PVM);
	PICKUP_GIANT();
	return (true);
}

/*
 * vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	vm_object_t obj;
	u_int x;

	obj = m->object;
	x = vm_page_busy_fetch(m);
	for (;;) {
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		/*
		 * Reduce the window for transient busies that will trigger
		 * false negatives in vm_page_ps_test().
		 */
		if (obj != NULL && vm_object_busied(obj))
			return (0);
		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    x + VPB_ONE_SHARER))
			break;
	}

	/* Refetch the object now that we're guaranteed that it is stable. */
	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_sunbusy(m);
		return (0);
	}
	return (1);
}

/*
 * vm_page_tryxbusy:
 *
 *	Try to exclusive busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_tryxbusy(vm_page_t m)
{
	vm_object_t obj;

	if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
	    VPB_CURTHREAD_EXCLUSIVE) == 0)
		return (0);

	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_xunbusy(m);
		return (0);
	}
	return (1);
}

static void
vm_page_xunbusy_hard_tail(vm_page_t m)
{
	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* Wake the waiter. */
	wakeup(m);
}

/*
 * vm_page_xunbusy_hard:
 *
 *	Called when unbusy has failed because there is a waiter.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{
	vm_page_assert_xbusied(m);
	vm_page_xunbusy_hard_tail(m);
}

void
vm_page_xunbusy_hard_unchecked(vm_page_t m)
{
	vm_page_assert_xbusied_unchecked(m);
	vm_page_xunbusy_hard_tail(m);
}

static void
vm_page_busy_free(vm_page_t m)
{
	u_int x;

	atomic_thread_fence_rel();
	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}
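
/*
 * Illustrative note (sketch, not part of the original source):
 * vm_page_unhold_pages() below is the usual counterpart to
 * vm_fault_quick_hold_pages() (declared in vm_extern.h); a typical caller
 * wires a run of user pages, performs I/O on them, and then releases every
 * wiring in one call:
 *
 *	cnt = vm_fault_quick_hold_pages(map, addr, len, prot, ma, nitems(ma));
 *	if (cnt != -1) {
 *		... use ma[0 .. cnt - 1] ...
 *		vm_page_unhold_pages(ma, cnt);
 *	}
 */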

/*
 * vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{

	for (; count != 0; count--) {
		vm_page_unwire(*ma, PQ_ACTIVE);
		ma++;
	}
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 * vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->a.queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	/* Fictitious pages are unevictable. */
	m->ref_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 * vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	vm_page_assert_xbusied(m);
	vm_page_busy_free(m);
	uma_zfree(fakepg_zone, m);
}

/*
 * vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 * vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from a getpages request that
 * was optionally read ahead or behind.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	/* We shouldn't put invalid pages on queues. */
	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);
	vm_page_xunbusy_unchecked(m);
}

/*
 * Destroy the identity of an invalid page and free it if possible.
 * This is intended to be used when reading a page from backing store fails.
 */
void
vm_page_free_invalid(vm_page_t m)
{

	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
	KASSERT(m->object != NULL, ("page %p has no object", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * We may be attempting to free the page as part of the handling for an
	 * I/O error, in which case the page was xbusied by a different thread.
	 */
	vm_page_xbusy_claim(m);

	/*
	 * If someone has wired this page while the object lock
	 * was not held, then the thread that unwires is responsible
	 * for freeing the page.  Otherwise just free the page now.
	 * The wire count of this unmapped page cannot change while
	 * we have the page xbusy and the page's object wlocked.
	 */
	if (vm_page_remove(m))
		vm_page_free(m);
}

/*
 * vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* Refer to this operation by its public name. */
	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Insert the given page into the given object at the given pindex.  mpred is
 * used for memq linkage.  From vm_page_insert, lookup is true, mpred is
 * initially NULL, and this procedure looks it up.  From vm_page_insert_after
 * and vm_page_iter_insert, lookup is false and mpred is known to the caller
 * to be valid, and may be NULL if this will be the page with the lowest
 * pindex.
 *
 * The procedure is marked __always_inline to suggest to the compiler to
 * eliminate the lookup parameter and the associated alternate branch.
 */
static __always_inline int
vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    struct pctrie_iter *pages, bool iter, vm_page_t mpred, bool lookup)
{
	int error;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert: page %p already inserted", m));

	/*
	 * Record the object/offset pair in this page.
	 */
	m->object = object;
	m->pindex = pindex;
	m->ref_count |= VPRC_OBJREF;

	/*
	 * Add this page to the object's radix tree, and look up mpred if
	 * needed.
	 */
	if (iter) {
		KASSERT(!lookup, ("%s: cannot lookup mpred", __func__));
		error = vm_radix_iter_insert(pages, m);
	} else if (lookup)
		error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
	else
		error = vm_radix_insert(&object->rtree, m);
	if (__predict_false(error != 0)) {
		m->object = NULL;
		m->pindex = 0;
		m->ref_count &= ~VPRC_OBJREF;
		return (1);
	}

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	vm_page_insert_radixdone(m, object, mpred);
	vm_pager_page_inserted(object, m);
	return (0);
}

/*
 * vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	return (vm_page_insert_lookup(m, object, pindex, NULL, false, NULL,
	    true));
}

/*
 * vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	return (vm_page_insert_lookup(m, object, pindex, NULL, false, mpred,
	    false));
}

/*
 * vm_page_iter_insert:
 *
 *	Tries to insert the page "m" into the specified object at offset
 *	"pindex" using the iterator "pages".  Returns 0 if the insertion was
 *	successful.
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_iter_insert(struct pctrie_iter *pages, vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred)
{
	return (vm_page_insert_lookup(m, object, pindex, pages, true, mpred,
	    false));
}
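
/*
 * Caller sketch (illustrative only): code that wants to place an
 * already-allocated page at a known offset typically holds the object write
 * lock and checks the return value, since the radix insertion above can
 * fail if a trie node cannot be allocated:
 *
 *	VM_OBJECT_WLOCK(object);
 *	if (vm_page_insert(m, object, pindex) != 0) {
 *		VM_OBJECT_WUNLOCK(object);
 *		... back off, free "m", or retry later ...
 *	}
 */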

/*
 * vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
		KASSERT(TAILQ_NEXT(mpred, listq) == NULL ||
		    m->pindex < TAILQ_NEXT(mpred, listq)->pindex,
		    ("vm_page_insert_radixdone: pindex doesn't precede msucc"));
	} else {
		KASSERT(TAILQ_EMPTY(&object->memq) ||
		    m->pindex < TAILQ_FIRST(&object->memq)->pindex,
		    ("vm_page_insert_radixdone: no mpred but not first page"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's generation count.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 * vm_page_remove_radixdone
 *
 *	Complete page "m" removal from the specified object after the radix trie
 *	unhooking.
 *
 *	The caller is responsible for updating the page's fields to reflect this
 *	removal.
 */
static void
vm_page_remove_radixdone(vm_page_t m)
{
	vm_object_t object;

	vm_page_assert_xbusied(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("page %p is missing its object ref", m));

	/* Deferred free of swap space. */
	if ((m->a.flags & PGA_SWAP_FREE) != 0)
		vm_pager_page_unswapped(m);

	vm_pager_page_removed(object, m);
	m->object = NULL;

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);
}

/*
 * vm_page_free_object_prep:
 *
 *	Disassociates the given page from its VM object.
 *
 *	The object must be locked, and the page must be xbusy.
 */
static void
vm_page_free_object_prep(vm_page_t m)
{
	KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
	    ((m->object->flags & OBJ_UNMANAGED) != 0),
	    ("%s: managed flag mismatch for page %p",
	    __func__, m));
	vm_page_assert_xbusied(m);

	/*
	 * The object reference can be released without an atomic
	 * operation.
	 */
	KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
	    m->ref_count == VPRC_OBJREF,
	    ("%s: page %p has unexpected ref_count %u",
	    __func__, m, m->ref_count));
	vm_page_remove_radixdone(m);
	m->ref_count -= VPRC_OBJREF;
}

/*
 * vm_page_iter_free:
 *
 *	Free the given page, and use the iterator to remove it from the radix
 *	tree.
 */
void
vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m)
{
	vm_radix_iter_remove(pages);
	vm_page_free_object_prep(m);
	vm_page_xunbusy(m);
	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * vm_page_remove:
 *
 *	Removes the specified page from its containing object, but does not
 *	invalidate any backing storage.  Returns true if the object's reference
 *	was the last reference to the page, and false otherwise.
 *
 *	The object must be locked and the page must be exclusively busied.
 *	The exclusive busy will be released on return.  If this is not the
 *	final ref and the caller does not hold a wire reference it may not
 *	continue to access the page.
 */
bool
vm_page_remove(vm_page_t m)
{
	bool dropped;

	dropped = vm_page_remove_xbusy(m);
	vm_page_xunbusy(m);

	return (dropped);
}

/*
 * vm_page_iter_remove:
 *
 *	Remove the current page, and use the iterator to remove it from the
 *	radix tree.
 */
bool
vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m)
{
	bool dropped;

	vm_radix_iter_remove(pages);
	vm_page_remove_radixdone(m);
	dropped = (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
	vm_page_xunbusy(m);

	return (dropped);
}

/*
 * vm_page_radix_remove
 *
 *	Removes the specified page from the radix tree.
 */
static void
vm_page_radix_remove(vm_page_t m)
{
	vm_page_t mrem __diagused;

	mrem = vm_radix_remove(&m->object->rtree, m->pindex);
	KASSERT(mrem == m,
	    ("removed page %p, expected page %p", mrem, m));
}

/*
 * vm_page_remove_xbusy
 *
 *	Removes the page but leaves the xbusy held.  Returns true if this
 *	removed the final ref and false otherwise.
 */
bool
vm_page_remove_xbusy(vm_page_t m)
{

	vm_page_radix_remove(m);
	vm_page_remove_radixdone(m);
	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
}

/*
 * vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 * vm_page_iter_init:
 *
 *	Initialize iterator for vm pages.
 */
void
vm_page_iter_init(struct pctrie_iter *pages, vm_object_t object)
{

	vm_radix_iter_init(pages, &object->rtree);
}
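
/*
 * Iterator sketch (illustrative only, not part of the original source): a
 * lookup of a single resident page through the iterator interface defined
 * in this file might look like this, with the object lock held by the
 * caller:
 *
 *	struct pctrie_iter pages;
 *	vm_page_t m;
 *
 *	vm_page_iter_init(&pages, object);
 *	m = vm_page_iter_lookup(&pages, pindex);
 *	if (m != NULL)
 *		... the iterator now remembers the path to "m" ...
 */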

/*
 * vm_page_iter_limit_init:
 *
 *	Initialize iterator for vm pages, bounded by the given limit.
 */
void
vm_page_iter_limit_init(struct pctrie_iter *pages, vm_object_t object,
    vm_pindex_t limit)
{

	vm_radix_iter_limit_init(pages, &object->rtree, limit);
}

/*
 * vm_page_iter_lookup:
 *
 *	Returns the page associated with the object/offset pair specified, and
 *	stores the path to its position; if none is found, NULL is returned.
 *
 *	The iter pctrie must be locked.
 */
vm_page_t
vm_page_iter_lookup(struct pctrie_iter *pages, vm_pindex_t pindex)
{

	return (vm_radix_iter_lookup(pages, pindex));
}

/*
 * vm_page_lookup_unlocked:
 *
 *	Returns the page associated with the object/offset pair specified;
 *	if none is found, NULL is returned.  The page may no longer be
 *	present in the object at the time that this function returns.  Only
 *	useful for opportunistic checks such as inmem().
 */
vm_page_t
vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
{

	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
}

/*
 * vm_page_relookup:
 *
 *	Returns a page that must already have been busied by
 *	the caller.  Used for bogus page replacement.
 */
vm_page_t
vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_lookup_unlocked(object, pindex);
	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
	    m->object == object && m->pindex == pindex,
	    ("vm_page_relookup: Invalid page %p", m));
	return (m);
}

/*
 * This should only be used by lockless functions for releasing transient
 * incorrect acquires.  The page may have been freed after we acquired a
 * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
 * further to do.
 */
static void
vm_page_busy_release(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (x == VPB_FREED)
			break;
		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & VPB_BIT_SHARED) != 0 ||
		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
		    ("vm_page_busy_release: %p xbusy not owned.", m));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) != 0)
			wakeup(m);
		break;
	}
}

/*
 * vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}

/*
 * vm_page_iter_lookup_ge:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.  Initializes the
 *	iterator to point to that page.
 *
 *	The iter pctrie must be locked.
 */
vm_page_t
vm_page_iter_lookup_ge(struct pctrie_iter *pages, vm_pindex_t pindex)
{

	return (vm_radix_iter_lookup_ge(pages, pindex));
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * Both pages must be exclusively busied on enter.  The old page is
 * unbusied on exit.
 *
 * A return value of true means mold is now free.  If this is not the
 * final ref and the caller does not hold a wire reference it may not
 * continue to access the page.
 */
static bool
vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret __diagused;
	bool dropped;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_assert_xbusied(mold);
	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
	    ("vm_page_replace: page %p already in object", mnew));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
	mret = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
	KASSERT((mold->oflags & VPO_UNMANAGED) ==
	    (mnew->oflags & VPO_UNMANAGED),
	    ("vm_page_replace: mismatched VPO_UNMANAGED"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);
	mold->object = NULL;

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but the generation count should
	 * change if the page is dirty.
2045 */ 2046 if (pmap_page_is_write_mapped(mnew)) 2047 vm_object_set_writeable_dirty(object); 2048 dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF; 2049 vm_page_xunbusy(mold); 2050 2051 return (dropped); 2052 } 2053 2054 void 2055 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex, 2056 vm_page_t mold) 2057 { 2058 2059 vm_page_assert_xbusied(mnew); 2060 2061 if (vm_page_replace_hold(mnew, object, pindex, mold)) 2062 vm_page_free(mold); 2063 } 2064 2065 /* 2066 * vm_page_iter_rename: 2067 * 2068 * Tries to move the specified page from its current object to a new object 2069 * and pindex, using the given iterator to remove the page from its current 2070 * object. Returns true if the move was successful, and false if the move 2071 * was aborted due to a failed memory allocation. 2072 * 2073 * Panics if a page already resides in the new object at the new pindex. 2074 * 2075 * Note: swap associated with the page must be invalidated by the move. We 2076 * have to do this for several reasons: (1) we aren't freeing the 2077 * page, (2) we are dirtying the page, (3) the VM system is probably 2078 * moving the page from object A to B, and will then later move 2079 * the backing store from A to B and we can't have a conflict. 2080 * 2081 * Note: we *always* dirty the page. It is necessary both for the 2082 * fact that we moved it, and because we may be invalidating 2083 * swap. 2084 * 2085 * The objects must be locked. 2086 */ 2087 bool 2088 vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m, 2089 vm_object_t new_object, vm_pindex_t new_pindex) 2090 { 2091 vm_page_t mpred; 2092 vm_pindex_t opidx; 2093 2094 KASSERT((m->ref_count & VPRC_OBJREF) != 0, 2095 ("%s: page %p is missing object ref", __func__, m)); 2096 VM_OBJECT_ASSERT_WLOCKED(m->object); 2097 VM_OBJECT_ASSERT_WLOCKED(new_object); 2098 2099 /* 2100 * Create a custom version of vm_page_insert() which does not depend 2101 * by m_prev and can cheat on the implementation aspects of the 2102 * function. 2103 */ 2104 opidx = m->pindex; 2105 m->pindex = new_pindex; 2106 if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) { 2107 m->pindex = opidx; 2108 return (false); 2109 } 2110 2111 /* 2112 * The operation cannot fail anymore. The removal must happen before 2113 * the listq iterator is tainted. 2114 */ 2115 m->pindex = opidx; 2116 vm_radix_iter_remove(old_pages); 2117 vm_page_remove_radixdone(m); 2118 2119 /* Return back to the new pindex to complete vm_page_insert(). */ 2120 m->pindex = new_pindex; 2121 m->object = new_object; 2122 2123 vm_page_insert_radixdone(m, new_object, mpred); 2124 vm_page_dirty(m); 2125 vm_pager_page_inserted(new_object, m); 2126 return (true); 2127 } 2128 2129 /* 2130 * vm_page_mpred: 2131 * 2132 * Return the greatest page of the object with index <= pindex, 2133 * or NULL, if there is none. Assumes object lock is held. 2134 */ 2135 vm_page_t 2136 vm_page_mpred(vm_object_t object, vm_pindex_t pindex) 2137 { 2138 return (vm_radix_lookup_le(&object->rtree, pindex)); 2139 } 2140 2141 /* 2142 * vm_page_alloc: 2143 * 2144 * Allocate and return a page that is associated with the specified 2145 * object and offset pair. By default, this page is exclusive busied. 2146 * 2147 * The caller must always specify an allocation class. 
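 *
 * A minimal calling sketch, for illustration only; the caller holds the
 * object's write lock and combines one allocation class with optional
 * flags from the lists that follow:
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		vm_wait(object);
 *		... relock the object and retry the allocation ...
 *	}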
2148 * 2149 * allocation classes: 2150 * VM_ALLOC_NORMAL normal process request 2151 * VM_ALLOC_SYSTEM system *really* needs a page 2152 * VM_ALLOC_INTERRUPT interrupt time request 2153 * 2154 * optional allocation flags: 2155 * VM_ALLOC_COUNT(number) the number of additional pages that the caller 2156 * intends to allocate 2157 * VM_ALLOC_NOBUSY do not exclusive busy the page 2158 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 2159 * VM_ALLOC_SBUSY shared busy the allocated page 2160 * VM_ALLOC_WIRED wire the allocated page 2161 * VM_ALLOC_ZERO prefer a zeroed page 2162 */ 2163 vm_page_t 2164 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) 2165 { 2166 2167 return (vm_page_alloc_after(object, pindex, req, 2168 vm_page_mpred(object, pindex))); 2169 } 2170 2171 /* 2172 * Allocate a page in the specified object with the given page index. To 2173 * optimize insertion of the page into the object, the caller must also specify 2174 * the resident page in the object with largest index smaller than the given 2175 * page index, or NULL if no such page exists. 2176 */ 2177 static vm_page_t 2178 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, 2179 int req, vm_page_t mpred) 2180 { 2181 struct vm_domainset_iter di; 2182 vm_page_t m; 2183 int domain; 2184 2185 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2186 do { 2187 m = vm_page_alloc_domain_after(object, pindex, domain, req, 2188 mpred); 2189 if (m != NULL) 2190 break; 2191 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2192 2193 return (m); 2194 } 2195 2196 /* 2197 * Returns true if the number of free pages exceeds the minimum 2198 * for the request class and false otherwise. 2199 */ 2200 static int 2201 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages) 2202 { 2203 u_int limit, old, new; 2204 2205 if (req_class == VM_ALLOC_INTERRUPT) 2206 limit = 0; 2207 else if (req_class == VM_ALLOC_SYSTEM) 2208 limit = vmd->vmd_interrupt_free_min; 2209 else 2210 limit = vmd->vmd_free_reserved; 2211 2212 /* 2213 * Attempt to reserve the pages. Fail if we're below the limit. 2214 */ 2215 limit += npages; 2216 old = atomic_load_int(&vmd->vmd_free_count); 2217 do { 2218 if (old < limit) 2219 return (0); 2220 new = old - npages; 2221 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0); 2222 2223 /* Wake the page daemon if we've crossed the threshold. */ 2224 if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old)) 2225 pagedaemon_wakeup(vmd->vmd_domain); 2226 2227 /* Only update bitsets on transitions. */ 2228 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) || 2229 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe)) 2230 vm_domain_set(vmd); 2231 2232 return (1); 2233 } 2234 2235 int 2236 vm_domain_allocate(struct vm_domain *vmd, int req, int npages) 2237 { 2238 int req_class; 2239 2240 /* 2241 * The page daemon is allowed to dig deeper into the free page list. 
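 *
 * In terms of the limits applied by _vm_domain_allocate() above, a
 * VM_ALLOC_NORMAL request may draw the free count down only to
 * vmd_free_reserved, a VM_ALLOC_SYSTEM request down to
 * vmd_interrupt_free_min, and a VM_ALLOC_INTERRUPT request all the way
 * to zero.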
2242 */ 2243 req_class = req & VM_ALLOC_CLASS_MASK; 2244 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2245 req_class = VM_ALLOC_SYSTEM; 2246 return (_vm_domain_allocate(vmd, req_class, npages)); 2247 } 2248 2249 vm_page_t 2250 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, 2251 int req, vm_page_t mpred) 2252 { 2253 struct vm_domain *vmd; 2254 vm_page_t m; 2255 int flags; 2256 2257 #define VPA_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \ 2258 VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY | \ 2259 VM_ALLOC_SBUSY | VM_ALLOC_WIRED | \ 2260 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | \ 2261 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK) 2262 KASSERT((req & ~VPA_FLAGS) == 0, 2263 ("invalid request %#x", req)); 2264 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2265 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2266 ("invalid request %#x", req)); 2267 KASSERT(mpred == NULL || mpred->pindex < pindex, 2268 ("mpred %p doesn't precede pindex 0x%jx", mpred, 2269 (uintmax_t)pindex)); 2270 VM_OBJECT_ASSERT_WLOCKED(object); 2271 2272 flags = 0; 2273 m = NULL; 2274 if (!vm_pager_can_alloc_page(object, pindex)) 2275 return (NULL); 2276 again: 2277 if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) { 2278 m = vm_page_alloc_nofree_domain(domain, req); 2279 if (m != NULL) 2280 goto found; 2281 } 2282 #if VM_NRESERVLEVEL > 0 2283 /* 2284 * Can we allocate the page from a reservation? 2285 */ 2286 if (vm_object_reserv(object) && 2287 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != 2288 NULL) { 2289 goto found; 2290 } 2291 #endif 2292 vmd = VM_DOMAIN(domain); 2293 if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) { 2294 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone, 2295 M_NOWAIT | M_NOVM); 2296 if (m != NULL) { 2297 flags |= PG_PCPU_CACHE; 2298 goto found; 2299 } 2300 } 2301 if (vm_domain_allocate(vmd, req, 1)) { 2302 /* 2303 * If not, allocate it from the free page queues. 2304 */ 2305 vm_domain_free_lock(vmd); 2306 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0); 2307 vm_domain_free_unlock(vmd); 2308 if (m == NULL) { 2309 vm_domain_freecnt_inc(vmd, 1); 2310 #if VM_NRESERVLEVEL > 0 2311 if (vm_reserv_reclaim_inactive(domain)) 2312 goto again; 2313 #endif 2314 } 2315 } 2316 if (m == NULL) { 2317 /* 2318 * Not allocatable, give up. 2319 */ 2320 if (vm_domain_alloc_fail(vmd, object, req)) 2321 goto again; 2322 return (NULL); 2323 } 2324 2325 /* 2326 * At this point we had better have found a good page. 2327 */ 2328 found: 2329 vm_page_dequeue(m); 2330 vm_page_alloc_check(m); 2331 2332 /* 2333 * Initialize the page. Only the PG_ZERO flag is inherited. 2334 */ 2335 flags |= m->flags & PG_ZERO; 2336 if ((req & VM_ALLOC_NODUMP) != 0) 2337 flags |= PG_NODUMP; 2338 if ((req & VM_ALLOC_NOFREE) != 0) 2339 flags |= PG_NOFREE; 2340 m->flags = flags; 2341 m->a.flags = 0; 2342 m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? 
VPO_UNMANAGED : 0; 2343 m->pool = VM_FREEPOOL_DEFAULT; 2344 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) 2345 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2346 else if ((req & VM_ALLOC_SBUSY) != 0) 2347 m->busy_lock = VPB_SHARERS_WORD(1); 2348 else 2349 m->busy_lock = VPB_UNBUSIED; 2350 if (req & VM_ALLOC_WIRED) { 2351 vm_wire_add(1); 2352 m->ref_count = 1; 2353 } 2354 m->a.act_count = 0; 2355 2356 if (vm_page_insert_after(m, object, pindex, mpred)) { 2357 if (req & VM_ALLOC_WIRED) { 2358 vm_wire_sub(1); 2359 m->ref_count = 0; 2360 } 2361 KASSERT(m->object == NULL, ("page %p has object", m)); 2362 m->oflags = VPO_UNMANAGED; 2363 m->busy_lock = VPB_UNBUSIED; 2364 /* Don't change PG_ZERO. */ 2365 vm_page_free_toq(m); 2366 if (req & VM_ALLOC_WAITFAIL) { 2367 VM_OBJECT_WUNLOCK(object); 2368 vm_radix_wait(); 2369 VM_OBJECT_WLOCK(object); 2370 } 2371 return (NULL); 2372 } 2373 2374 /* Ignore device objects; the pager sets "memattr" for them. */ 2375 if (object->memattr != VM_MEMATTR_DEFAULT && 2376 (object->flags & OBJ_FICTITIOUS) == 0) 2377 pmap_page_set_memattr(m, object->memattr); 2378 2379 return (m); 2380 } 2381 2382 /* 2383 * vm_page_alloc_contig: 2384 * 2385 * Allocate a contiguous set of physical pages of the given size "npages" 2386 * from the free lists. All of the physical pages must be at or above 2387 * the given physical address "low" and below the given physical address 2388 * "high". The given value "alignment" determines the alignment of the 2389 * first physical page in the set. If the given value "boundary" is 2390 * non-zero, then the set of physical pages cannot cross any physical 2391 * address boundary that is a multiple of that value. Both "alignment" 2392 * and "boundary" must be a power of two. 2393 * 2394 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT, 2395 * then the memory attribute setting for the physical pages is configured 2396 * to the object's memory attribute setting. Otherwise, the memory 2397 * attribute setting for the physical pages is configured to "memattr", 2398 * overriding the object's memory attribute setting. However, if the 2399 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the 2400 * memory attribute setting for the physical pages cannot be configured 2401 * to VM_MEMATTR_DEFAULT. 2402 * 2403 * The specified object may not contain fictitious pages. 2404 * 2405 * The caller must always specify an allocation class. 
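 *
 * A sketch of the usual retry pattern, for illustration only; the caller
 * may sleep and drops the object lock around the reclaim and wait steps.
 * Handling of ERANGE (the request can never be satisfied) and other
 * errors is elided:
 *
 *	m = vm_page_alloc_contig(object, pindex, req, npages, low, high,
 *	    alignment, boundary, memattr);
 *	while (m == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		if (vm_page_reclaim_contig(req, npages, low, high,
 *		    alignment, boundary) == ENOMEM)
 *			vm_wait(object);
 *		VM_OBJECT_WLOCK(object);
 *		m = vm_page_alloc_contig(object, pindex, req, npages,
 *		    low, high, alignment, boundary, memattr);
 *	}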
2406 * 2407 * allocation classes: 2408 * VM_ALLOC_NORMAL normal process request 2409 * VM_ALLOC_SYSTEM system *really* needs a page 2410 * VM_ALLOC_INTERRUPT interrupt time request 2411 * 2412 * optional allocation flags: 2413 * VM_ALLOC_NOBUSY do not exclusive busy the page 2414 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 2415 * VM_ALLOC_SBUSY shared busy the allocated page 2416 * VM_ALLOC_WIRED wire the allocated page 2417 * VM_ALLOC_ZERO prefer a zeroed page 2418 */ 2419 vm_page_t 2420 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, 2421 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2422 vm_paddr_t boundary, vm_memattr_t memattr) 2423 { 2424 struct vm_domainset_iter di; 2425 vm_page_t bounds[2]; 2426 vm_page_t m; 2427 int domain; 2428 int start_segind; 2429 2430 start_segind = -1; 2431 2432 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2433 do { 2434 m = vm_page_alloc_contig_domain(object, pindex, domain, req, 2435 npages, low, high, alignment, boundary, memattr); 2436 if (m != NULL) 2437 break; 2438 if (start_segind == -1) 2439 start_segind = vm_phys_lookup_segind(low); 2440 if (vm_phys_find_range(bounds, start_segind, domain, 2441 npages, low, high) == -1) { 2442 vm_domainset_iter_ignore(&di, domain); 2443 } 2444 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2445 2446 return (m); 2447 } 2448 2449 static vm_page_t 2450 vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low, 2451 vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 2452 { 2453 struct vm_domain *vmd; 2454 vm_page_t m_ret; 2455 2456 /* 2457 * Can we allocate the pages without the number of free pages falling 2458 * below the lower bound for the allocation class? 2459 */ 2460 vmd = VM_DOMAIN(domain); 2461 if (!vm_domain_allocate(vmd, req, npages)) 2462 return (NULL); 2463 /* 2464 * Try to allocate the pages from the free page queues. 2465 */ 2466 vm_domain_free_lock(vmd); 2467 m_ret = vm_phys_alloc_contig(domain, npages, low, high, 2468 alignment, boundary); 2469 vm_domain_free_unlock(vmd); 2470 if (m_ret != NULL) 2471 return (m_ret); 2472 #if VM_NRESERVLEVEL > 0 2473 /* 2474 * Try to break a reservation to allocate the pages. 
2475 */ 2476 if ((req & VM_ALLOC_NORECLAIM) == 0) { 2477 m_ret = vm_reserv_reclaim_contig(domain, npages, low, 2478 high, alignment, boundary); 2479 if (m_ret != NULL) 2480 return (m_ret); 2481 } 2482 #endif 2483 vm_domain_freecnt_inc(vmd, npages); 2484 return (NULL); 2485 } 2486 2487 vm_page_t 2488 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2489 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2490 vm_paddr_t boundary, vm_memattr_t memattr) 2491 { 2492 struct pctrie_iter pages; 2493 vm_page_t m, m_ret, mpred; 2494 u_int busy_lock, flags, oflags; 2495 2496 #define VPAC_FLAGS (VPA_FLAGS | VM_ALLOC_NORECLAIM) 2497 KASSERT((req & ~VPAC_FLAGS) == 0, 2498 ("invalid request %#x", req)); 2499 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2500 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2501 ("invalid request %#x", req)); 2502 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) != 2503 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM), 2504 ("invalid request %#x", req)); 2505 VM_OBJECT_ASSERT_WLOCKED(object); 2506 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, 2507 ("vm_page_alloc_contig: object %p has fictitious pages", 2508 object)); 2509 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2510 2511 vm_page_iter_init(&pages, object); 2512 mpred = vm_radix_iter_lookup_le(&pages, pindex); 2513 KASSERT(mpred == NULL || mpred->pindex != pindex, 2514 ("vm_page_alloc_contig: pindex already allocated")); 2515 for (;;) { 2516 #if VM_NRESERVLEVEL > 0 2517 /* 2518 * Can we allocate the pages from a reservation? 2519 */ 2520 if (vm_object_reserv(object) && 2521 (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req, 2522 mpred, npages, low, high, alignment, boundary)) != NULL) { 2523 break; 2524 } 2525 #endif 2526 if ((m_ret = vm_page_find_contig_domain(domain, req, npages, 2527 low, high, alignment, boundary)) != NULL) 2528 break; 2529 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), object, req)) 2530 return (NULL); 2531 } 2532 2533 /* 2534 * Initialize the pages. Only the PG_ZERO flag is inherited. 2535 */ 2536 flags = PG_ZERO; 2537 if ((req & VM_ALLOC_NODUMP) != 0) 2538 flags |= PG_NODUMP; 2539 oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; 2540 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) 2541 busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2542 else if ((req & VM_ALLOC_SBUSY) != 0) 2543 busy_lock = VPB_SHARERS_WORD(1); 2544 else 2545 busy_lock = VPB_UNBUSIED; 2546 if ((req & VM_ALLOC_WIRED) != 0) 2547 vm_wire_add(npages); 2548 if (object->memattr != VM_MEMATTR_DEFAULT && 2549 memattr == VM_MEMATTR_DEFAULT) 2550 memattr = object->memattr; 2551 for (m = m_ret; m < &m_ret[npages]; m++) { 2552 vm_page_dequeue(m); 2553 vm_page_alloc_check(m); 2554 m->a.flags = 0; 2555 m->flags = (m->flags | PG_NODUMP) & flags; 2556 m->busy_lock = busy_lock; 2557 if ((req & VM_ALLOC_WIRED) != 0) 2558 m->ref_count = 1; 2559 m->a.act_count = 0; 2560 m->oflags = oflags; 2561 m->pool = VM_FREEPOOL_DEFAULT; 2562 if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) { 2563 if ((req & VM_ALLOC_WIRED) != 0) 2564 vm_wire_sub(npages); 2565 KASSERT(m->object == NULL, 2566 ("page %p has object", m)); 2567 mpred = m; 2568 for (m = m_ret; m < &m_ret[npages]; m++) { 2569 if (m <= mpred && 2570 (req & VM_ALLOC_WIRED) != 0) 2571 m->ref_count = 0; 2572 m->oflags = VPO_UNMANAGED; 2573 m->busy_lock = VPB_UNBUSIED; 2574 /* Don't change PG_ZERO. 
*/ 2575 vm_page_free_toq(m); 2576 } 2577 if (req & VM_ALLOC_WAITFAIL) { 2578 VM_OBJECT_WUNLOCK(object); 2579 vm_radix_wait(); 2580 VM_OBJECT_WLOCK(object); 2581 } 2582 return (NULL); 2583 } 2584 mpred = m; 2585 if (memattr != VM_MEMATTR_DEFAULT) 2586 pmap_page_set_memattr(m, memattr); 2587 pindex++; 2588 } 2589 return (m_ret); 2590 } 2591 2592 /* 2593 * Allocate a physical page that is not intended to be inserted into a VM 2594 * object. 2595 */ 2596 vm_page_t 2597 vm_page_alloc_noobj_domain(int domain, int req) 2598 { 2599 struct vm_domain *vmd; 2600 vm_page_t m; 2601 int flags; 2602 2603 #define VPAN_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \ 2604 VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | \ 2605 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | \ 2606 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | \ 2607 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK) 2608 KASSERT((req & ~VPAN_FLAGS) == 0, 2609 ("invalid request %#x", req)); 2610 2611 flags = ((req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0) | 2612 ((req & VM_ALLOC_NOFREE) != 0 ? PG_NOFREE : 0); 2613 vmd = VM_DOMAIN(domain); 2614 again: 2615 if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) { 2616 m = vm_page_alloc_nofree_domain(domain, req); 2617 if (m != NULL) 2618 goto found; 2619 } 2620 2621 if (vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) { 2622 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone, 2623 M_NOWAIT | M_NOVM); 2624 if (m != NULL) { 2625 flags |= PG_PCPU_CACHE; 2626 goto found; 2627 } 2628 } 2629 2630 if (vm_domain_allocate(vmd, req, 1)) { 2631 vm_domain_free_lock(vmd); 2632 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0); 2633 vm_domain_free_unlock(vmd); 2634 if (m == NULL) { 2635 vm_domain_freecnt_inc(vmd, 1); 2636 #if VM_NRESERVLEVEL > 0 2637 if (vm_reserv_reclaim_inactive(domain)) 2638 goto again; 2639 #endif 2640 } 2641 } 2642 if (m == NULL) { 2643 if (vm_domain_alloc_fail(vmd, NULL, req)) 2644 goto again; 2645 return (NULL); 2646 } 2647 2648 found: 2649 vm_page_dequeue(m); 2650 vm_page_alloc_check(m); 2651 2652 /* 2653 * Consumers should not rely on a useful default pindex value. 2654 */ 2655 m->pindex = 0xdeadc0dedeadc0de; 2656 m->flags = (m->flags & PG_ZERO) | flags; 2657 m->a.flags = 0; 2658 m->oflags = VPO_UNMANAGED; 2659 m->pool = VM_FREEPOOL_DIRECT; 2660 m->busy_lock = VPB_UNBUSIED; 2661 if ((req & VM_ALLOC_WIRED) != 0) { 2662 vm_wire_add(1); 2663 m->ref_count = 1; 2664 } 2665 2666 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) 2667 pmap_zero_page(m); 2668 2669 return (m); 2670 } 2671 2672 #if VM_NRESERVLEVEL > 1 2673 #define VM_NOFREE_IMPORT_ORDER (VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER) 2674 #elif VM_NRESERVLEVEL > 0 2675 #define VM_NOFREE_IMPORT_ORDER VM_LEVEL_0_ORDER 2676 #else 2677 #define VM_NOFREE_IMPORT_ORDER 8 2678 #endif 2679 2680 /* 2681 * Allocate a single NOFREE page. 2682 * 2683 * This routine hands out NOFREE pages from higher-order 2684 * physical memory blocks in order to reduce memory fragmentation. 2685 * When a NOFREE for a given domain chunk is used up, 2686 * the routine will try to fetch a new one from the freelists 2687 * and discard the old one. 
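 *
 * As a concrete illustration: with 4 KB pages and an import order of 9
 * (the VM_LEVEL_0_ORDER used on amd64, for instance), each imported
 * chunk is 2 MB and satisfies 512 NOFREE allocations before a new chunk
 * must be fetched.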
2688 */ 2689 static vm_page_t 2690 vm_page_alloc_nofree_domain(int domain, int req) 2691 { 2692 vm_page_t m; 2693 struct vm_domain *vmd; 2694 struct vm_nofreeq *nqp; 2695 2696 KASSERT((req & VM_ALLOC_NOFREE) != 0, ("invalid request %#x", req)); 2697 2698 vmd = VM_DOMAIN(domain); 2699 nqp = &vmd->vmd_nofreeq; 2700 vm_domain_free_lock(vmd); 2701 if (nqp->offs >= (1 << VM_NOFREE_IMPORT_ORDER) || nqp->ma == NULL) { 2702 if (!vm_domain_allocate(vmd, req, 2703 1 << VM_NOFREE_IMPORT_ORDER)) { 2704 vm_domain_free_unlock(vmd); 2705 return (NULL); 2706 } 2707 nqp->ma = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 2708 VM_NOFREE_IMPORT_ORDER); 2709 if (nqp->ma == NULL) { 2710 vm_domain_freecnt_inc(vmd, 1 << VM_NOFREE_IMPORT_ORDER); 2711 vm_domain_free_unlock(vmd); 2712 return (NULL); 2713 } 2714 nqp->offs = 0; 2715 } 2716 m = &nqp->ma[nqp->offs++]; 2717 vm_domain_free_unlock(vmd); 2718 VM_CNT_ADD(v_nofree_count, 1); 2719 2720 return (m); 2721 } 2722 2723 vm_page_t 2724 vm_page_alloc_noobj(int req) 2725 { 2726 struct vm_domainset_iter di; 2727 vm_page_t m; 2728 int domain; 2729 2730 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2731 do { 2732 m = vm_page_alloc_noobj_domain(domain, req); 2733 if (m != NULL) 2734 break; 2735 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2736 2737 return (m); 2738 } 2739 2740 vm_page_t 2741 vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low, 2742 vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 2743 vm_memattr_t memattr) 2744 { 2745 struct vm_domainset_iter di; 2746 vm_page_t m; 2747 int domain; 2748 2749 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2750 do { 2751 m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low, 2752 high, alignment, boundary, memattr); 2753 if (m != NULL) 2754 break; 2755 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2756 2757 return (m); 2758 } 2759 2760 vm_page_t 2761 vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages, 2762 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 2763 vm_memattr_t memattr) 2764 { 2765 vm_page_t m, m_ret; 2766 u_int flags; 2767 2768 #define VPANC_FLAGS (VPAN_FLAGS | VM_ALLOC_NORECLAIM) 2769 KASSERT((req & ~VPANC_FLAGS) == 0, 2770 ("invalid request %#x", req)); 2771 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) != 2772 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM), 2773 ("invalid request %#x", req)); 2774 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2775 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2776 ("invalid request %#x", req)); 2777 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2778 2779 while ((m_ret = vm_page_find_contig_domain(domain, req, npages, 2780 low, high, alignment, boundary)) == NULL) { 2781 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req)) 2782 return (NULL); 2783 } 2784 2785 /* 2786 * Initialize the pages. Only the PG_ZERO flag is inherited. 2787 */ 2788 flags = PG_ZERO; 2789 if ((req & VM_ALLOC_NODUMP) != 0) 2790 flags |= PG_NODUMP; 2791 if ((req & VM_ALLOC_WIRED) != 0) 2792 vm_wire_add(npages); 2793 for (m = m_ret; m < &m_ret[npages]; m++) { 2794 vm_page_dequeue(m); 2795 vm_page_alloc_check(m); 2796 2797 /* 2798 * Consumers should not rely on a useful default pindex value. 
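 * The poison value stored below makes any stray use of that pindex easy
 * to recognize from a panic message or in the debugger.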
2799 */ 2800 m->pindex = 0xdeadc0dedeadc0de; 2801 m->a.flags = 0; 2802 m->flags = (m->flags | PG_NODUMP) & flags; 2803 m->busy_lock = VPB_UNBUSIED; 2804 if ((req & VM_ALLOC_WIRED) != 0) 2805 m->ref_count = 1; 2806 m->a.act_count = 0; 2807 m->oflags = VPO_UNMANAGED; 2808 m->pool = VM_FREEPOOL_DIRECT; 2809 2810 /* 2811 * Zero the page before updating any mappings since the page is 2812 * not yet shared with any devices which might require the 2813 * non-default memory attribute. pmap_page_set_memattr() 2814 * flushes data caches before returning. 2815 */ 2816 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) 2817 pmap_zero_page(m); 2818 if (memattr != VM_MEMATTR_DEFAULT) 2819 pmap_page_set_memattr(m, memattr); 2820 } 2821 return (m_ret); 2822 } 2823 2824 /* 2825 * Check a page that has been freshly dequeued from a freelist. 2826 */ 2827 static void 2828 vm_page_alloc_check(vm_page_t m) 2829 { 2830 2831 KASSERT(m->object == NULL, ("page %p has object", m)); 2832 KASSERT(m->a.queue == PQ_NONE && 2833 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 2834 ("page %p has unexpected queue %d, flags %#x", 2835 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK))); 2836 KASSERT(m->ref_count == 0, ("page %p has references", m)); 2837 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m)); 2838 KASSERT(m->dirty == 0, ("page %p is dirty", m)); 2839 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 2840 ("page %p has unexpected memattr %d", 2841 m, pmap_page_get_memattr(m))); 2842 KASSERT(vm_page_none_valid(m), ("free page %p is valid", m)); 2843 pmap_vm_page_alloc_check(m); 2844 } 2845 2846 static int 2847 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags) 2848 { 2849 struct vm_domain *vmd; 2850 struct vm_pgcache *pgcache; 2851 int i; 2852 2853 pgcache = arg; 2854 vmd = VM_DOMAIN(pgcache->domain); 2855 2856 /* 2857 * The page daemon should avoid creating extra memory pressure since its 2858 * main purpose is to replenish the store of free pages. 2859 */ 2860 if (vmd->vmd_severeset || curproc == pageproc || 2861 !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt)) 2862 return (0); 2863 domain = vmd->vmd_domain; 2864 vm_domain_free_lock(vmd); 2865 i = vm_phys_alloc_npages(domain, pgcache->pool, cnt, 2866 (vm_page_t *)store); 2867 vm_domain_free_unlock(vmd); 2868 if (cnt != i) 2869 vm_domain_freecnt_inc(vmd, cnt - i); 2870 2871 return (i); 2872 } 2873 2874 static void 2875 vm_page_zone_release(void *arg, void **store, int cnt) 2876 { 2877 struct vm_domain *vmd; 2878 struct vm_pgcache *pgcache; 2879 vm_page_t m; 2880 int i; 2881 2882 pgcache = arg; 2883 vmd = VM_DOMAIN(pgcache->domain); 2884 vm_domain_free_lock(vmd); 2885 for (i = 0; i < cnt; i++) { 2886 m = (vm_page_t)store[i]; 2887 vm_phys_free_pages(m, pgcache->pool, 0); 2888 } 2889 vm_domain_free_unlock(vmd); 2890 vm_domain_freecnt_inc(vmd, cnt); 2891 } 2892 2893 #define VPSC_ANY 0 /* No restrictions. */ 2894 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ 2895 #define VPSC_NOSUPER 2 /* Skip superpages. */ 2896 2897 /* 2898 * vm_page_scan_contig: 2899 * 2900 * Scan vm_page_array[] between the specified entries "m_start" and 2901 * "m_end" for a run of contiguous physical pages that satisfy the 2902 * specified conditions, and return the lowest page in the run. The 2903 * specified "alignment" determines the alignment of the lowest physical 2904 * page in the run. 
If the specified "boundary" is non-zero, then the 2905 * run of physical pages cannot span a physical address that is a 2906 * multiple of "boundary". 2907 * 2908 * "m_end" is never dereferenced, so it need not point to a vm_page 2909 * structure within vm_page_array[]. 2910 * 2911 * "npages" must be greater than zero. "m_start" and "m_end" must not 2912 * span a hole (or discontiguity) in the physical address space. Both 2913 * "alignment" and "boundary" must be a power of two. 2914 */ 2915 static vm_page_t 2916 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, 2917 u_long alignment, vm_paddr_t boundary, int options) 2918 { 2919 vm_object_t object; 2920 vm_paddr_t pa; 2921 vm_page_t m, m_run; 2922 #if VM_NRESERVLEVEL > 0 2923 int level; 2924 #endif 2925 int m_inc, order, run_ext, run_len; 2926 2927 KASSERT(npages > 0, ("npages is 0")); 2928 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2929 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2930 m_run = NULL; 2931 run_len = 0; 2932 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { 2933 KASSERT((m->flags & PG_MARKER) == 0, 2934 ("page %p is PG_MARKER", m)); 2935 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1, 2936 ("fictitious page %p has invalid ref count", m)); 2937 2938 /* 2939 * If the current page would be the start of a run, check its 2940 * physical address against the end, alignment, and boundary 2941 * conditions. If it doesn't satisfy these conditions, either 2942 * terminate the scan or advance to the next page that 2943 * satisfies the failed condition. 2944 */ 2945 if (run_len == 0) { 2946 KASSERT(m_run == NULL, ("m_run != NULL")); 2947 if (m + npages > m_end) 2948 break; 2949 pa = VM_PAGE_TO_PHYS(m); 2950 if (!vm_addr_align_ok(pa, alignment)) { 2951 m_inc = atop(roundup2(pa, alignment) - pa); 2952 continue; 2953 } 2954 if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) { 2955 m_inc = atop(roundup2(pa, boundary) - pa); 2956 continue; 2957 } 2958 } else 2959 KASSERT(m_run != NULL, ("m_run == NULL")); 2960 2961 retry: 2962 m_inc = 1; 2963 if (vm_page_wired(m)) 2964 run_ext = 0; 2965 #if VM_NRESERVLEVEL > 0 2966 else if ((level = vm_reserv_level(m)) >= 0 && 2967 (options & VPSC_NORESERV) != 0) { 2968 run_ext = 0; 2969 /* Advance to the end of the reservation. */ 2970 pa = VM_PAGE_TO_PHYS(m); 2971 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2972 pa); 2973 } 2974 #endif 2975 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2976 /* 2977 * The page is considered eligible for relocation if 2978 * and only if it could be laundered or reclaimed by 2979 * the page daemon. 2980 */ 2981 VM_OBJECT_RLOCK(object); 2982 if (object != m->object) { 2983 VM_OBJECT_RUNLOCK(object); 2984 goto retry; 2985 } 2986 /* Don't care: PG_NODUMP, PG_ZERO. */ 2987 if ((object->flags & OBJ_SWAP) == 0 && 2988 object->type != OBJT_VNODE) { 2989 run_ext = 0; 2990 #if VM_NRESERVLEVEL > 0 2991 } else if ((options & VPSC_NOSUPER) != 0 && 2992 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2993 run_ext = 0; 2994 /* Advance to the end of the superpage. */ 2995 pa = VM_PAGE_TO_PHYS(m); 2996 m_inc = atop(roundup2(pa + 1, 2997 vm_reserv_size(level)) - pa); 2998 #endif 2999 } else if (object->memattr == VM_MEMATTR_DEFAULT && 3000 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { 3001 /* 3002 * The page is allocated but eligible for 3003 * relocation. Extend the current run by one 3004 * page. 
3005 */ 3006 KASSERT(pmap_page_get_memattr(m) == 3007 VM_MEMATTR_DEFAULT, 3008 ("page %p has an unexpected memattr", m)); 3009 KASSERT((m->oflags & (VPO_SWAPINPROG | 3010 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 3011 ("page %p has unexpected oflags", m)); 3012 /* Don't care: PGA_NOSYNC. */ 3013 run_ext = 1; 3014 } else 3015 run_ext = 0; 3016 VM_OBJECT_RUNLOCK(object); 3017 #if VM_NRESERVLEVEL > 0 3018 } else if (level >= 0) { 3019 /* 3020 * The page is reserved but not yet allocated. In 3021 * other words, it is still free. Extend the current 3022 * run by one page. 3023 */ 3024 run_ext = 1; 3025 #endif 3026 } else if ((order = m->order) < VM_NFREEORDER) { 3027 /* 3028 * The page is enqueued in the physical memory 3029 * allocator's free page queues. Moreover, it is the 3030 * first page in a power-of-two-sized run of 3031 * contiguous free pages. Add these pages to the end 3032 * of the current run, and jump ahead. 3033 */ 3034 run_ext = 1 << order; 3035 m_inc = 1 << order; 3036 } else { 3037 /* 3038 * Skip the page for one of the following reasons: (1) 3039 * It is enqueued in the physical memory allocator's 3040 * free page queues. However, it is not the first 3041 * page in a run of contiguous free pages. (This case 3042 * rarely occurs because the scan is performed in 3043 * ascending order.) (2) It is not reserved, and it is 3044 * transitioning from free to allocated. (Conversely, 3045 * the transition from allocated to free for managed 3046 * pages is blocked by the page busy lock.) (3) It is 3047 * allocated but not contained by an object and not 3048 * wired, e.g., allocated by Xen's balloon driver. 3049 */ 3050 run_ext = 0; 3051 } 3052 3053 /* 3054 * Extend or reset the current run of pages. 3055 */ 3056 if (run_ext > 0) { 3057 if (run_len == 0) 3058 m_run = m; 3059 run_len += run_ext; 3060 } else { 3061 if (run_len > 0) { 3062 m_run = NULL; 3063 run_len = 0; 3064 } 3065 } 3066 } 3067 if (run_len >= npages) 3068 return (m_run); 3069 return (NULL); 3070 } 3071 3072 /* 3073 * vm_page_reclaim_run: 3074 * 3075 * Try to relocate each of the allocated virtual pages within the 3076 * specified run of physical pages to a new physical address. Free the 3077 * physical pages underlying the relocated virtual pages. A virtual page 3078 * is relocatable if and only if it could be laundered or reclaimed by 3079 * the page daemon. Whenever possible, a virtual page is relocated to a 3080 * physical address above "high". 3081 * 3082 * Returns 0 if every physical page within the run was already free or 3083 * just freed by a successful relocation. Otherwise, returns a non-zero 3084 * value indicating why the last attempt to relocate a virtual page was 3085 * unsuccessful. 3086 * 3087 * "req_class" must be an allocation class. 3088 */ 3089 static int 3090 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run, 3091 vm_paddr_t high) 3092 { 3093 struct vm_domain *vmd; 3094 struct spglist free; 3095 vm_object_t object; 3096 vm_paddr_t pa; 3097 vm_page_t m, m_end, m_new; 3098 int error, order, req; 3099 3100 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 3101 ("req_class is not an allocation class")); 3102 SLIST_INIT(&free); 3103 error = 0; 3104 m = m_run; 3105 m_end = m_run + npages; 3106 for (; error == 0 && m < m_end; m++) { 3107 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 3108 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 3109 3110 /* 3111 * Racily check for wirings. Races are handled once the object 3112 * lock is held and the page is unmapped. 
3113 */ 3114 if (vm_page_wired(m)) 3115 error = EBUSY; 3116 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 3117 /* 3118 * The page is relocated if and only if it could be 3119 * laundered or reclaimed by the page daemon. 3120 */ 3121 VM_OBJECT_WLOCK(object); 3122 /* Don't care: PG_NODUMP, PG_ZERO. */ 3123 if (m->object != object || 3124 ((object->flags & OBJ_SWAP) == 0 && 3125 object->type != OBJT_VNODE)) 3126 error = EINVAL; 3127 else if (object->memattr != VM_MEMATTR_DEFAULT) 3128 error = EINVAL; 3129 else if (vm_page_queue(m) != PQ_NONE && 3130 vm_page_tryxbusy(m) != 0) { 3131 if (vm_page_wired(m)) { 3132 vm_page_xunbusy(m); 3133 error = EBUSY; 3134 goto unlock; 3135 } 3136 KASSERT(pmap_page_get_memattr(m) == 3137 VM_MEMATTR_DEFAULT, 3138 ("page %p has an unexpected memattr", m)); 3139 KASSERT(m->oflags == 0, 3140 ("page %p has unexpected oflags", m)); 3141 /* Don't care: PGA_NOSYNC. */ 3142 if (!vm_page_none_valid(m)) { 3143 /* 3144 * First, try to allocate a new page 3145 * that is above "high". Failing 3146 * that, try to allocate a new page 3147 * that is below "m_run". Allocate 3148 * the new page between the end of 3149 * "m_run" and "high" only as a last 3150 * resort. 3151 */ 3152 req = req_class; 3153 if ((m->flags & PG_NODUMP) != 0) 3154 req |= VM_ALLOC_NODUMP; 3155 if (trunc_page(high) != 3156 ~(vm_paddr_t)PAGE_MASK) { 3157 m_new = 3158 vm_page_alloc_noobj_contig( 3159 req, 1, round_page(high), 3160 ~(vm_paddr_t)0, PAGE_SIZE, 3161 0, VM_MEMATTR_DEFAULT); 3162 } else 3163 m_new = NULL; 3164 if (m_new == NULL) { 3165 pa = VM_PAGE_TO_PHYS(m_run); 3166 m_new = 3167 vm_page_alloc_noobj_contig( 3168 req, 1, 0, pa - 1, 3169 PAGE_SIZE, 0, 3170 VM_MEMATTR_DEFAULT); 3171 } 3172 if (m_new == NULL) { 3173 pa += ptoa(npages); 3174 m_new = 3175 vm_page_alloc_noobj_contig( 3176 req, 1, pa, high, PAGE_SIZE, 3177 0, VM_MEMATTR_DEFAULT); 3178 } 3179 if (m_new == NULL) { 3180 vm_page_xunbusy(m); 3181 error = ENOMEM; 3182 goto unlock; 3183 } 3184 3185 /* 3186 * Unmap the page and check for new 3187 * wirings that may have been acquired 3188 * through a pmap lookup. 3189 */ 3190 if (object->ref_count != 0 && 3191 !vm_page_try_remove_all(m)) { 3192 vm_page_xunbusy(m); 3193 vm_page_free(m_new); 3194 error = EBUSY; 3195 goto unlock; 3196 } 3197 3198 /* 3199 * Replace "m" with the new page. For 3200 * vm_page_replace(), "m" must be busy 3201 * and dequeued. Finally, change "m" 3202 * as if vm_page_free() was called. 3203 */ 3204 m_new->a.flags = m->a.flags & 3205 ~PGA_QUEUE_STATE_MASK; 3206 KASSERT(m_new->oflags == VPO_UNMANAGED, 3207 ("page %p is managed", m_new)); 3208 m_new->oflags = 0; 3209 pmap_copy_page(m, m_new); 3210 m_new->valid = m->valid; 3211 m_new->dirty = m->dirty; 3212 m->flags &= ~PG_ZERO; 3213 vm_page_dequeue(m); 3214 if (vm_page_replace_hold(m_new, object, 3215 m->pindex, m) && 3216 vm_page_free_prep(m)) 3217 SLIST_INSERT_HEAD(&free, m, 3218 plinks.s.ss); 3219 3220 /* 3221 * The new page must be deactivated 3222 * before the object is unlocked. 
3223 */ 3224 vm_page_deactivate(m_new); 3225 } else { 3226 m->flags &= ~PG_ZERO; 3227 vm_page_dequeue(m); 3228 if (vm_page_free_prep(m)) 3229 SLIST_INSERT_HEAD(&free, m, 3230 plinks.s.ss); 3231 KASSERT(m->dirty == 0, 3232 ("page %p is dirty", m)); 3233 } 3234 } else 3235 error = EBUSY; 3236 unlock: 3237 VM_OBJECT_WUNLOCK(object); 3238 } else { 3239 MPASS(vm_page_domain(m) == domain); 3240 vmd = VM_DOMAIN(domain); 3241 vm_domain_free_lock(vmd); 3242 order = m->order; 3243 if (order < VM_NFREEORDER) { 3244 /* 3245 * The page is enqueued in the physical memory 3246 * allocator's free page queues. Moreover, it 3247 * is the first page in a power-of-two-sized 3248 * run of contiguous free pages. Jump ahead 3249 * to the last page within that run, and 3250 * continue from there. 3251 */ 3252 m += (1 << order) - 1; 3253 } 3254 #if VM_NRESERVLEVEL > 0 3255 else if (vm_reserv_is_page_free(m)) 3256 order = 0; 3257 #endif 3258 vm_domain_free_unlock(vmd); 3259 if (order == VM_NFREEORDER) 3260 error = EINVAL; 3261 } 3262 } 3263 if ((m = SLIST_FIRST(&free)) != NULL) { 3264 int cnt; 3265 3266 vmd = VM_DOMAIN(domain); 3267 cnt = 0; 3268 vm_domain_free_lock(vmd); 3269 do { 3270 MPASS(vm_page_domain(m) == domain); 3271 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 3272 vm_phys_free_pages(m, m->pool, 0); 3273 cnt++; 3274 } while ((m = SLIST_FIRST(&free)) != NULL); 3275 vm_domain_free_unlock(vmd); 3276 vm_domain_freecnt_inc(vmd, cnt); 3277 } 3278 return (error); 3279 } 3280 3281 #define NRUNS 16 3282 3283 #define RUN_INDEX(count, nruns) ((count) % (nruns)) 3284 3285 #define MIN_RECLAIM 8 3286 3287 /* 3288 * vm_page_reclaim_contig: 3289 * 3290 * Reclaim allocated, contiguous physical memory satisfying the specified 3291 * conditions by relocating the virtual pages using that physical memory. 3292 * Returns 0 if reclamation is successful, ERANGE if the specified domain 3293 * can't possibly satisfy the reclamation request, or ENOMEM if not 3294 * currently able to reclaim the requested number of pages. Since 3295 * relocation requires the allocation of physical pages, reclamation may 3296 * fail with ENOMEM due to a shortage of free pages. When reclamation 3297 * fails in this manner, callers are expected to perform vm_wait() before 3298 * retrying a failed allocation operation, e.g., vm_page_alloc_contig(). 3299 * 3300 * The caller must always specify an allocation class through "req". 3301 * 3302 * allocation classes: 3303 * VM_ALLOC_NORMAL normal process request 3304 * VM_ALLOC_SYSTEM system *really* needs a page 3305 * VM_ALLOC_INTERRUPT interrupt time request 3306 * 3307 * The optional allocation flags are ignored. 3308 * 3309 * "npages" must be greater than zero. Both "alignment" and "boundary" 3310 * must be a power of two. 3311 */ 3312 int 3313 vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages, 3314 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 3315 int desired_runs) 3316 { 3317 struct vm_domain *vmd; 3318 vm_page_t bounds[2], m_run, _m_runs[NRUNS], *m_runs; 3319 u_long count, minalign, reclaimed; 3320 int error, i, min_reclaim, nruns, options, req_class; 3321 int segind, start_segind; 3322 int ret; 3323 3324 KASSERT(npages > 0, ("npages is 0")); 3325 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 3326 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 3327 3328 ret = ENOMEM; 3329 3330 /* 3331 * If the caller wants to reclaim multiple runs, try to allocate 3332 * space to store the runs. 
If that fails, fall back to the old 3333 * behavior of just reclaiming MIN_RECLAIM pages. 3334 */ 3335 if (desired_runs > 1) 3336 m_runs = malloc((NRUNS + desired_runs) * sizeof(*m_runs), 3337 M_TEMP, M_NOWAIT); 3338 else 3339 m_runs = NULL; 3340 3341 if (m_runs == NULL) { 3342 m_runs = _m_runs; 3343 nruns = NRUNS; 3344 } else { 3345 nruns = NRUNS + desired_runs - 1; 3346 } 3347 min_reclaim = MAX(desired_runs * npages, MIN_RECLAIM); 3348 3349 /* 3350 * The caller will attempt an allocation after some runs have been 3351 * reclaimed and added to the vm_phys buddy lists. Due to limitations 3352 * of vm_phys_alloc_contig(), round up the requested length to the next 3353 * power of two or maximum chunk size, and ensure that each run is 3354 * suitably aligned. 3355 */ 3356 minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1); 3357 npages = roundup2(npages, minalign); 3358 if (alignment < ptoa(minalign)) 3359 alignment = ptoa(minalign); 3360 3361 /* 3362 * The page daemon is allowed to dig deeper into the free page list. 3363 */ 3364 req_class = req & VM_ALLOC_CLASS_MASK; 3365 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 3366 req_class = VM_ALLOC_SYSTEM; 3367 3368 start_segind = vm_phys_lookup_segind(low); 3369 3370 /* 3371 * Return if the number of free pages cannot satisfy the requested 3372 * allocation. 3373 */ 3374 vmd = VM_DOMAIN(domain); 3375 count = vmd->vmd_free_count; 3376 if (count < npages + vmd->vmd_free_reserved || (count < npages + 3377 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 3378 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 3379 goto done; 3380 3381 /* 3382 * Scan up to three times, relaxing the restrictions ("options") on 3383 * the reclamation of reservations and superpages each time. 3384 */ 3385 for (options = VPSC_NORESERV;;) { 3386 bool phys_range_exists = false; 3387 3388 /* 3389 * Find the highest runs that satisfy the given constraints 3390 * and restrictions, and record them in "m_runs". 3391 */ 3392 count = 0; 3393 segind = start_segind; 3394 while ((segind = vm_phys_find_range(bounds, segind, domain, 3395 npages, low, high)) != -1) { 3396 phys_range_exists = true; 3397 while ((m_run = vm_page_scan_contig(npages, bounds[0], 3398 bounds[1], alignment, boundary, options))) { 3399 bounds[0] = m_run + npages; 3400 m_runs[RUN_INDEX(count, nruns)] = m_run; 3401 count++; 3402 } 3403 segind++; 3404 } 3405 3406 if (!phys_range_exists) { 3407 ret = ERANGE; 3408 goto done; 3409 } 3410 3411 /* 3412 * Reclaim the highest runs in LIFO (descending) order until 3413 * the number of reclaimed pages, "reclaimed", is at least 3414 * "min_reclaim". Reset "reclaimed" each time because each 3415 * reclamation is idempotent, and runs will (likely) recur 3416 * from one scan to the next as restrictions are relaxed. 3417 */ 3418 reclaimed = 0; 3419 for (i = 0; count > 0 && i < nruns; i++) { 3420 count--; 3421 m_run = m_runs[RUN_INDEX(count, nruns)]; 3422 error = vm_page_reclaim_run(req_class, domain, npages, 3423 m_run, high); 3424 if (error == 0) { 3425 reclaimed += npages; 3426 if (reclaimed >= min_reclaim) { 3427 ret = 0; 3428 goto done; 3429 } 3430 } 3431 } 3432 3433 /* 3434 * Either relax the restrictions on the next scan or return if 3435 * the last scan had no restrictions. 
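 * The restrictions are relaxed in the order VPSC_NORESERV, then
 * VPSC_NOSUPER, then VPSC_ANY.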
3436 */ 3437 if (options == VPSC_NORESERV) 3438 options = VPSC_NOSUPER; 3439 else if (options == VPSC_NOSUPER) 3440 options = VPSC_ANY; 3441 else if (options == VPSC_ANY) { 3442 if (reclaimed != 0) 3443 ret = 0; 3444 goto done; 3445 } 3446 } 3447 done: 3448 if (m_runs != _m_runs) 3449 free(m_runs, M_TEMP); 3450 return (ret); 3451 } 3452 3453 int 3454 vm_page_reclaim_contig_domain(int domain, int req, u_long npages, 3455 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 3456 { 3457 return (vm_page_reclaim_contig_domain_ext(domain, req, npages, low, high, 3458 alignment, boundary, 1)); 3459 } 3460 3461 int 3462 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 3463 u_long alignment, vm_paddr_t boundary) 3464 { 3465 struct vm_domainset_iter di; 3466 int domain, ret, status; 3467 3468 ret = ERANGE; 3469 3470 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 3471 do { 3472 status = vm_page_reclaim_contig_domain(domain, req, npages, low, 3473 high, alignment, boundary); 3474 if (status == 0) 3475 return (0); 3476 else if (status == ERANGE) 3477 vm_domainset_iter_ignore(&di, domain); 3478 else { 3479 KASSERT(status == ENOMEM, ("Unrecognized error %d " 3480 "from vm_page_reclaim_contig_domain()", status)); 3481 ret = ENOMEM; 3482 } 3483 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 3484 3485 return (ret); 3486 } 3487 3488 /* 3489 * Set the domain in the appropriate page level domainset. 3490 */ 3491 void 3492 vm_domain_set(struct vm_domain *vmd) 3493 { 3494 3495 mtx_lock(&vm_domainset_lock); 3496 if (!vmd->vmd_minset && vm_paging_min(vmd)) { 3497 vmd->vmd_minset = 1; 3498 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); 3499 } 3500 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { 3501 vmd->vmd_severeset = 1; 3502 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); 3503 } 3504 mtx_unlock(&vm_domainset_lock); 3505 } 3506 3507 /* 3508 * Clear the domain from the appropriate page level domainset. 3509 */ 3510 void 3511 vm_domain_clear(struct vm_domain *vmd) 3512 { 3513 3514 mtx_lock(&vm_domainset_lock); 3515 if (vmd->vmd_minset && !vm_paging_min(vmd)) { 3516 vmd->vmd_minset = 0; 3517 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); 3518 if (vm_min_waiters != 0) { 3519 vm_min_waiters = 0; 3520 wakeup(&vm_min_domains); 3521 } 3522 } 3523 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { 3524 vmd->vmd_severeset = 0; 3525 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); 3526 if (vm_severe_waiters != 0) { 3527 vm_severe_waiters = 0; 3528 wakeup(&vm_severe_domains); 3529 } 3530 } 3531 3532 /* 3533 * If pageout daemon needs pages, then tell it that there are 3534 * some free. 3535 */ 3536 if (vmd->vmd_pageout_pages_needed && 3537 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { 3538 wakeup(&vmd->vmd_pageout_pages_needed); 3539 vmd->vmd_pageout_pages_needed = 0; 3540 } 3541 3542 /* See comments in vm_wait_doms(). */ 3543 if (vm_pageproc_waiters) { 3544 vm_pageproc_waiters = 0; 3545 wakeup(&vm_pageproc_waiters); 3546 } 3547 mtx_unlock(&vm_domainset_lock); 3548 } 3549 3550 /* 3551 * Wait for free pages to exceed the min threshold globally. 3552 */ 3553 void 3554 vm_wait_min(void) 3555 { 3556 3557 mtx_lock(&vm_domainset_lock); 3558 while (vm_page_count_min()) { 3559 vm_min_waiters++; 3560 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); 3561 } 3562 mtx_unlock(&vm_domainset_lock); 3563 } 3564 3565 /* 3566 * Wait for free pages to exceed the severe threshold globally. 
3567 */ 3568 void 3569 vm_wait_severe(void) 3570 { 3571 3572 mtx_lock(&vm_domainset_lock); 3573 while (vm_page_count_severe()) { 3574 vm_severe_waiters++; 3575 msleep(&vm_severe_domains, &vm_domainset_lock, PVM, 3576 "vmwait", 0); 3577 } 3578 mtx_unlock(&vm_domainset_lock); 3579 } 3580 3581 u_int 3582 vm_wait_count(void) 3583 { 3584 3585 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); 3586 } 3587 3588 int 3589 vm_wait_doms(const domainset_t *wdoms, int mflags) 3590 { 3591 int error; 3592 3593 error = 0; 3594 3595 /* 3596 * We use racey wakeup synchronization to avoid expensive global 3597 * locking for the pageproc when sleeping with a non-specific vm_wait. 3598 * To handle this, we only sleep for one tick in this instance. It 3599 * is expected that most allocations for the pageproc will come from 3600 * kmem or vm_page_grab* which will use the more specific and 3601 * race-free vm_wait_domain(). 3602 */ 3603 if (curproc == pageproc) { 3604 mtx_lock(&vm_domainset_lock); 3605 vm_pageproc_waiters++; 3606 error = msleep(&vm_pageproc_waiters, &vm_domainset_lock, 3607 PVM | PDROP | mflags, "pageprocwait", 1); 3608 } else { 3609 /* 3610 * XXX Ideally we would wait only until the allocation could 3611 * be satisfied. This condition can cause new allocators to 3612 * consume all freed pages while old allocators wait. 3613 */ 3614 mtx_lock(&vm_domainset_lock); 3615 if (vm_page_count_min_set(wdoms)) { 3616 if (pageproc == NULL) 3617 panic("vm_wait in early boot"); 3618 vm_min_waiters++; 3619 error = msleep(&vm_min_domains, &vm_domainset_lock, 3620 PVM | PDROP | mflags, "vmwait", 0); 3621 } else 3622 mtx_unlock(&vm_domainset_lock); 3623 } 3624 return (error); 3625 } 3626 3627 /* 3628 * vm_wait_domain: 3629 * 3630 * Sleep until free pages are available for allocation. 3631 * - Called in various places after failed memory allocations. 3632 */ 3633 void 3634 vm_wait_domain(int domain) 3635 { 3636 struct vm_domain *vmd; 3637 domainset_t wdom; 3638 3639 vmd = VM_DOMAIN(domain); 3640 vm_domain_free_assert_unlocked(vmd); 3641 3642 if (curproc == pageproc) { 3643 mtx_lock(&vm_domainset_lock); 3644 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { 3645 vmd->vmd_pageout_pages_needed = 1; 3646 msleep(&vmd->vmd_pageout_pages_needed, 3647 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); 3648 } else 3649 mtx_unlock(&vm_domainset_lock); 3650 } else { 3651 DOMAINSET_ZERO(&wdom); 3652 DOMAINSET_SET(vmd->vmd_domain, &wdom); 3653 vm_wait_doms(&wdom, 0); 3654 } 3655 } 3656 3657 static int 3658 vm_wait_flags(vm_object_t obj, int mflags) 3659 { 3660 struct domainset *d; 3661 3662 d = NULL; 3663 3664 /* 3665 * Carefully fetch pointers only once: the struct domainset 3666 * itself is ummutable but the pointer might change. 3667 */ 3668 if (obj != NULL) 3669 d = obj->domain.dr_policy; 3670 if (d == NULL) 3671 d = curthread->td_domain.dr_policy; 3672 3673 return (vm_wait_doms(&d->ds_mask, mflags)); 3674 } 3675 3676 /* 3677 * vm_wait: 3678 * 3679 * Sleep until free pages are available for allocation in the 3680 * affinity domains of the obj. If obj is NULL, the domain set 3681 * for the calling thread is used. 3682 * Called in various places after failed memory allocations. 3683 */ 3684 void 3685 vm_wait(vm_object_t obj) 3686 { 3687 (void)vm_wait_flags(obj, 0); 3688 } 3689 3690 int 3691 vm_wait_intr(vm_object_t obj) 3692 { 3693 return (vm_wait_flags(obj, PCATCH)); 3694 } 3695 3696 /* 3697 * vm_domain_alloc_fail: 3698 * 3699 * Called when a page allocation function fails. 
Informs the 3700 * pagedaemon and performs the requested wait. Requires the 3701 * domain_free and object lock on entry. Returns with the 3702 * object lock held and free lock released. Returns an error when 3703 * retry is necessary. 3704 * 3705 */ 3706 static int 3707 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) 3708 { 3709 3710 vm_domain_free_assert_unlocked(vmd); 3711 3712 atomic_add_int(&vmd->vmd_pageout_deficit, 3713 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 3714 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { 3715 if (object != NULL) 3716 VM_OBJECT_WUNLOCK(object); 3717 vm_wait_domain(vmd->vmd_domain); 3718 if (object != NULL) 3719 VM_OBJECT_WLOCK(object); 3720 if (req & VM_ALLOC_WAITOK) 3721 return (EAGAIN); 3722 } 3723 3724 return (0); 3725 } 3726 3727 /* 3728 * vm_waitpfault: 3729 * 3730 * Sleep until free pages are available for allocation. 3731 * - Called only in vm_fault so that processes page faulting 3732 * can be easily tracked. 3733 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 3734 * processes will be able to grab memory first. Do not change 3735 * this balance without careful testing first. 3736 */ 3737 void 3738 vm_waitpfault(struct domainset *dset, int timo) 3739 { 3740 3741 /* 3742 * XXX Ideally we would wait only until the allocation could 3743 * be satisfied. This condition can cause new allocators to 3744 * consume all freed pages while old allocators wait. 3745 */ 3746 mtx_lock(&vm_domainset_lock); 3747 if (vm_page_count_min_set(&dset->ds_mask)) { 3748 vm_min_waiters++; 3749 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP, 3750 "pfault", timo); 3751 } else 3752 mtx_unlock(&vm_domainset_lock); 3753 } 3754 3755 static struct vm_pagequeue * 3756 _vm_page_pagequeue(vm_page_t m, uint8_t queue) 3757 { 3758 3759 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]); 3760 } 3761 3762 #ifdef INVARIANTS 3763 static struct vm_pagequeue * 3764 vm_page_pagequeue(vm_page_t m) 3765 { 3766 3767 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue)); 3768 } 3769 #endif 3770 3771 static __always_inline bool 3772 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3773 { 3774 vm_page_astate_t tmp; 3775 3776 tmp = *old; 3777 do { 3778 if (__predict_true(vm_page_astate_fcmpset(m, old, new))) 3779 return (true); 3780 counter_u64_add(pqstate_commit_retries, 1); 3781 } while (old->_bits == tmp._bits); 3782 3783 return (false); 3784 } 3785 3786 /* 3787 * Do the work of committing a queue state update that moves the page out of 3788 * its current queue. 3789 */ 3790 static bool 3791 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m, 3792 vm_page_astate_t *old, vm_page_astate_t new) 3793 { 3794 vm_page_t next; 3795 3796 vm_pagequeue_assert_locked(pq); 3797 KASSERT(vm_page_pagequeue(m) == pq, 3798 ("%s: queue %p does not match page %p", __func__, pq, m)); 3799 KASSERT(old->queue != PQ_NONE && new.queue != old->queue, 3800 ("%s: invalid queue indices %d %d", 3801 __func__, old->queue, new.queue)); 3802 3803 /* 3804 * Once the queue index of the page changes there is nothing 3805 * synchronizing with further updates to the page's physical 3806 * queue state. Therefore we must speculatively remove the page 3807 * from the queue now and be prepared to roll back if the queue 3808 * state update fails. If the page is not physically enqueued then 3809 * we just update its queue index. 
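 * If the update does fail, the page is reinserted immediately before its
 * former successor, or at the tail if it had none, so its position in
 * the queue is preserved.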
3810 */ 3811 if ((old->flags & PGA_ENQUEUED) != 0) { 3812 new.flags &= ~PGA_ENQUEUED; 3813 next = TAILQ_NEXT(m, plinks.q); 3814 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3815 vm_pagequeue_cnt_dec(pq); 3816 if (!vm_page_pqstate_fcmpset(m, old, new)) { 3817 if (next == NULL) 3818 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3819 else 3820 TAILQ_INSERT_BEFORE(next, m, plinks.q); 3821 vm_pagequeue_cnt_inc(pq); 3822 return (false); 3823 } else { 3824 return (true); 3825 } 3826 } else { 3827 return (vm_page_pqstate_fcmpset(m, old, new)); 3828 } 3829 } 3830 3831 static bool 3832 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old, 3833 vm_page_astate_t new) 3834 { 3835 struct vm_pagequeue *pq; 3836 vm_page_astate_t as; 3837 bool ret; 3838 3839 pq = _vm_page_pagequeue(m, old->queue); 3840 3841 /* 3842 * The queue field and PGA_ENQUEUED flag are stable only so long as the 3843 * corresponding page queue lock is held. 3844 */ 3845 vm_pagequeue_lock(pq); 3846 as = vm_page_astate_load(m); 3847 if (__predict_false(as._bits != old->_bits)) { 3848 *old = as; 3849 ret = false; 3850 } else { 3851 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new); 3852 } 3853 vm_pagequeue_unlock(pq); 3854 return (ret); 3855 } 3856 3857 /* 3858 * Commit a queue state update that enqueues or requeues a page. 3859 */ 3860 static bool 3861 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m, 3862 vm_page_astate_t *old, vm_page_astate_t new) 3863 { 3864 struct vm_domain *vmd; 3865 3866 vm_pagequeue_assert_locked(pq); 3867 KASSERT(old->queue != PQ_NONE && new.queue == old->queue, 3868 ("%s: invalid queue indices %d %d", 3869 __func__, old->queue, new.queue)); 3870 3871 new.flags |= PGA_ENQUEUED; 3872 if (!vm_page_pqstate_fcmpset(m, old, new)) 3873 return (false); 3874 3875 if ((old->flags & PGA_ENQUEUED) != 0) 3876 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3877 else 3878 vm_pagequeue_cnt_inc(pq); 3879 3880 /* 3881 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE. In particular, if 3882 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be 3883 * applied, even if it was set first. 3884 */ 3885 if ((old->flags & PGA_REQUEUE_HEAD) != 0) { 3886 vmd = vm_pagequeue_domain(m); 3887 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE], 3888 ("%s: invalid page queue for page %p", __func__, m)); 3889 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 3890 } else { 3891 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3892 } 3893 return (true); 3894 } 3895 3896 /* 3897 * Commit a queue state update that encodes a request for a deferred queue 3898 * operation. 3899 */ 3900 static bool 3901 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old, 3902 vm_page_astate_t new) 3903 { 3904 3905 KASSERT(old->queue == new.queue || new.queue != PQ_NONE, 3906 ("%s: invalid state, queue %d flags %x", 3907 __func__, new.queue, new.flags)); 3908 3909 if (old->_bits != new._bits && 3910 !vm_page_pqstate_fcmpset(m, old, new)) 3911 return (false); 3912 vm_page_pqbatch_submit(m, new.queue); 3913 return (true); 3914 } 3915 3916 /* 3917 * A generic queue state update function. This handles more cases than the 3918 * specialized functions above. 
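 *
 * Callers typically use it in a load/retry loop, sketched here for
 * illustration (setting PGA_REQUEUE on a page that remains in its
 * current queue):
 *
 *	vm_page_astate_t new, old;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		new = old;
 *		new.flags |= PGA_REQUEUE;
 *	} while (!vm_page_pqstate_commit(m, &old, new));
 *
 * On failure "old" is refreshed with the page's current state, so each
 * retry recomputes "new" from up-to-date values.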
3919 */ 3920 bool 3921 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3922 { 3923 3924 if (old->_bits == new._bits) 3925 return (true); 3926 3927 if (old->queue != PQ_NONE && new.queue != old->queue) { 3928 if (!vm_page_pqstate_commit_dequeue(m, old, new)) 3929 return (false); 3930 if (new.queue != PQ_NONE) 3931 vm_page_pqbatch_submit(m, new.queue); 3932 } else { 3933 if (!vm_page_pqstate_fcmpset(m, old, new)) 3934 return (false); 3935 if (new.queue != PQ_NONE && 3936 ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0) 3937 vm_page_pqbatch_submit(m, new.queue); 3938 } 3939 return (true); 3940 } 3941 3942 /* 3943 * Apply deferred queue state updates to a page. 3944 */ 3945 static inline void 3946 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue) 3947 { 3948 vm_page_astate_t new, old; 3949 3950 CRITICAL_ASSERT(curthread); 3951 vm_pagequeue_assert_locked(pq); 3952 KASSERT(queue < PQ_COUNT, 3953 ("%s: invalid queue index %d", __func__, queue)); 3954 KASSERT(pq == _vm_page_pagequeue(m, queue), 3955 ("%s: page %p does not belong to queue %p", __func__, m, pq)); 3956 3957 for (old = vm_page_astate_load(m);;) { 3958 if (__predict_false(old.queue != queue || 3959 (old.flags & PGA_QUEUE_OP_MASK) == 0)) { 3960 counter_u64_add(queue_nops, 1); 3961 break; 3962 } 3963 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3964 ("%s: page %p is unmanaged", __func__, m)); 3965 3966 new = old; 3967 if ((old.flags & PGA_DEQUEUE) != 0) { 3968 new.flags &= ~PGA_QUEUE_OP_MASK; 3969 new.queue = PQ_NONE; 3970 if (__predict_true(_vm_page_pqstate_commit_dequeue(pq, 3971 m, &old, new))) { 3972 counter_u64_add(queue_ops, 1); 3973 break; 3974 } 3975 } else { 3976 new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD); 3977 if (__predict_true(_vm_page_pqstate_commit_requeue(pq, 3978 m, &old, new))) { 3979 counter_u64_add(queue_ops, 1); 3980 break; 3981 } 3982 } 3983 } 3984 } 3985 3986 static void 3987 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq, 3988 uint8_t queue) 3989 { 3990 int i; 3991 3992 for (i = 0; i < bq->bq_cnt; i++) 3993 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue); 3994 vm_batchqueue_init(bq); 3995 } 3996 3997 /* 3998 * vm_page_pqbatch_submit: [ internal use only ] 3999 * 4000 * Enqueue a page in the specified page queue's batched work queue. 4001 * The caller must have encoded the requested operation in the page 4002 * structure's a.flags field. 
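 *
 * For illustration, vm_page_enqueue() later in this file schedules an
 * insertion by encoding the request and then submitting the page:
 *
 *	m->a.queue = queue;
 *	if ((m->a.flags & PGA_REQUEUE) == 0)
 *		vm_page_aflag_set(m, PGA_REQUEUE);
 *	vm_page_pqbatch_submit(m, queue);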
4003 */ 4004 void 4005 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue) 4006 { 4007 struct vm_batchqueue *bq; 4008 struct vm_pagequeue *pq; 4009 int domain, slots_remaining; 4010 4011 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); 4012 4013 domain = vm_page_domain(m); 4014 critical_enter(); 4015 bq = DPCPU_PTR(pqbatch[domain][queue]); 4016 slots_remaining = vm_batchqueue_insert(bq, m); 4017 if (slots_remaining > (VM_BATCHQUEUE_SIZE >> 1)) { 4018 /* keep building the bq */ 4019 critical_exit(); 4020 return; 4021 } else if (slots_remaining > 0 ) { 4022 /* Try to process the bq if we can get the lock */ 4023 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; 4024 if (vm_pagequeue_trylock(pq)) { 4025 vm_pqbatch_process(pq, bq, queue); 4026 vm_pagequeue_unlock(pq); 4027 } 4028 critical_exit(); 4029 return; 4030 } 4031 critical_exit(); 4032 4033 /* if we make it here, the bq is full so wait for the lock */ 4034 4035 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; 4036 vm_pagequeue_lock(pq); 4037 critical_enter(); 4038 bq = DPCPU_PTR(pqbatch[domain][queue]); 4039 vm_pqbatch_process(pq, bq, queue); 4040 vm_pqbatch_process_page(pq, m, queue); 4041 vm_pagequeue_unlock(pq); 4042 critical_exit(); 4043 } 4044 4045 /* 4046 * vm_page_pqbatch_drain: [ internal use only ] 4047 * 4048 * Force all per-CPU page queue batch queues to be drained. This is 4049 * intended for use in severe memory shortages, to ensure that pages 4050 * do not remain stuck in the batch queues. 4051 */ 4052 void 4053 vm_page_pqbatch_drain(void) 4054 { 4055 struct thread *td; 4056 struct vm_domain *vmd; 4057 struct vm_pagequeue *pq; 4058 int cpu, domain, queue; 4059 4060 td = curthread; 4061 CPU_FOREACH(cpu) { 4062 thread_lock(td); 4063 sched_bind(td, cpu); 4064 thread_unlock(td); 4065 4066 for (domain = 0; domain < vm_ndomains; domain++) { 4067 vmd = VM_DOMAIN(domain); 4068 for (queue = 0; queue < PQ_COUNT; queue++) { 4069 pq = &vmd->vmd_pagequeues[queue]; 4070 vm_pagequeue_lock(pq); 4071 critical_enter(); 4072 vm_pqbatch_process(pq, 4073 DPCPU_PTR(pqbatch[domain][queue]), queue); 4074 critical_exit(); 4075 vm_pagequeue_unlock(pq); 4076 } 4077 } 4078 } 4079 thread_lock(td); 4080 sched_unbind(td); 4081 thread_unlock(td); 4082 } 4083 4084 /* 4085 * vm_page_dequeue_deferred: [ internal use only ] 4086 * 4087 * Request removal of the given page from its current page 4088 * queue. Physical removal from the queue may be deferred 4089 * indefinitely. 4090 */ 4091 void 4092 vm_page_dequeue_deferred(vm_page_t m) 4093 { 4094 vm_page_astate_t new, old; 4095 4096 old = vm_page_astate_load(m); 4097 do { 4098 if (old.queue == PQ_NONE) { 4099 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 4100 ("%s: page %p has unexpected queue state", 4101 __func__, m)); 4102 break; 4103 } 4104 new = old; 4105 new.flags |= PGA_DEQUEUE; 4106 } while (!vm_page_pqstate_commit_request(m, &old, new)); 4107 } 4108 4109 /* 4110 * vm_page_dequeue: 4111 * 4112 * Remove the page from whichever page queue it's in, if any, before 4113 * returning. 
4114 */ 4115 void 4116 vm_page_dequeue(vm_page_t m) 4117 { 4118 vm_page_astate_t new, old; 4119 4120 old = vm_page_astate_load(m); 4121 do { 4122 if (old.queue == PQ_NONE) { 4123 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 4124 ("%s: page %p has unexpected queue state", 4125 __func__, m)); 4126 break; 4127 } 4128 new = old; 4129 new.flags &= ~PGA_QUEUE_OP_MASK; 4130 new.queue = PQ_NONE; 4131 } while (!vm_page_pqstate_commit_dequeue(m, &old, new)); 4132 4133 } 4134 4135 /* 4136 * Schedule the given page for insertion into the specified page queue. 4137 * Physical insertion of the page may be deferred indefinitely. 4138 */ 4139 static void 4140 vm_page_enqueue(vm_page_t m, uint8_t queue) 4141 { 4142 4143 KASSERT(m->a.queue == PQ_NONE && 4144 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 4145 ("%s: page %p is already enqueued", __func__, m)); 4146 KASSERT(m->ref_count > 0, 4147 ("%s: page %p does not carry any references", __func__, m)); 4148 4149 m->a.queue = queue; 4150 if ((m->a.flags & PGA_REQUEUE) == 0) 4151 vm_page_aflag_set(m, PGA_REQUEUE); 4152 vm_page_pqbatch_submit(m, queue); 4153 } 4154 4155 /* 4156 * vm_page_free_prep: 4157 * 4158 * Prepares the given page to be put on the free list, 4159 * disassociating it from any VM object. The caller may return 4160 * the page to the free list only if this function returns true. 4161 * 4162 * The object, if it exists, must be locked, and then the page must 4163 * be xbusy. Otherwise the page must be not busied. A managed 4164 * page must be unmapped. 4165 */ 4166 static bool 4167 vm_page_free_prep(vm_page_t m) 4168 { 4169 4170 /* 4171 * Synchronize with threads that have dropped a reference to this 4172 * page. 4173 */ 4174 atomic_thread_fence_acq(); 4175 4176 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) 4177 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) { 4178 uint64_t *p; 4179 int i; 4180 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 4181 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) 4182 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", 4183 m, i, (uintmax_t)*p)); 4184 } 4185 #endif 4186 KASSERT((m->flags & PG_NOFREE) == 0, 4187 ("%s: attempting to free a PG_NOFREE page", __func__)); 4188 if ((m->oflags & VPO_UNMANAGED) == 0) { 4189 KASSERT(!pmap_page_is_mapped(m), 4190 ("vm_page_free_prep: freeing mapped page %p", m)); 4191 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0, 4192 ("vm_page_free_prep: mapping flags set in page %p", m)); 4193 } else { 4194 KASSERT(m->a.queue == PQ_NONE, 4195 ("vm_page_free_prep: unmanaged page %p is queued", m)); 4196 } 4197 VM_CNT_INC(v_tfree); 4198 4199 if (m->object != NULL) { 4200 vm_page_radix_remove(m); 4201 vm_page_free_object_prep(m); 4202 } else 4203 vm_page_assert_unbusied(m); 4204 4205 vm_page_busy_free(m); 4206 4207 /* 4208 * If fictitious remove object association and 4209 * return. 4210 */ 4211 if ((m->flags & PG_FICTITIOUS) != 0) { 4212 KASSERT(m->ref_count == 1, 4213 ("fictitious page %p is referenced", m)); 4214 KASSERT(m->a.queue == PQ_NONE, 4215 ("fictitious page %p is queued", m)); 4216 return (false); 4217 } 4218 4219 /* 4220 * Pages need not be dequeued before they are returned to the physical 4221 * memory allocator, but they must at least be marked for a deferred 4222 * dequeue. 
4223 */ 4224 if ((m->oflags & VPO_UNMANAGED) == 0) 4225 vm_page_dequeue_deferred(m); 4226 4227 m->valid = 0; 4228 vm_page_undirty(m); 4229 4230 if (m->ref_count != 0) 4231 panic("vm_page_free_prep: page %p has references", m); 4232 4233 /* 4234 * Restore the default memory attribute to the page. 4235 */ 4236 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 4237 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 4238 4239 #if VM_NRESERVLEVEL > 0 4240 /* 4241 * Determine whether the page belongs to a reservation. If the page was 4242 * allocated from a per-CPU cache, it cannot belong to a reservation, so 4243 * as an optimization, we avoid the check in that case. 4244 */ 4245 if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m)) 4246 return (false); 4247 #endif 4248 4249 return (true); 4250 } 4251 4252 /* 4253 * vm_page_free_toq: 4254 * 4255 * Returns the given page to the free list, disassociating it 4256 * from any VM object. 4257 * 4258 * The object must be locked. The page must be exclusively busied if it 4259 * belongs to an object. 4260 */ 4261 static void 4262 vm_page_free_toq(vm_page_t m) 4263 { 4264 struct vm_domain *vmd; 4265 uma_zone_t zone; 4266 4267 if (!vm_page_free_prep(m)) 4268 return; 4269 4270 vmd = vm_pagequeue_domain(m); 4271 zone = vmd->vmd_pgcache[m->pool].zone; 4272 if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) { 4273 uma_zfree(zone, m); 4274 return; 4275 } 4276 vm_domain_free_lock(vmd); 4277 vm_phys_free_pages(m, m->pool, 0); 4278 vm_domain_free_unlock(vmd); 4279 vm_domain_freecnt_inc(vmd, 1); 4280 } 4281 4282 /* 4283 * vm_page_free_pages_toq: 4284 * 4285 * Returns a list of pages to the free list, disassociating it 4286 * from any VM object. In other words, this is equivalent to 4287 * calling vm_page_free_toq() for each page of a list of VM objects. 4288 */ 4289 int 4290 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count) 4291 { 4292 vm_page_t m; 4293 int count; 4294 4295 if (SLIST_EMPTY(free)) 4296 return (0); 4297 4298 count = 0; 4299 while ((m = SLIST_FIRST(free)) != NULL) { 4300 count++; 4301 SLIST_REMOVE_HEAD(free, plinks.s.ss); 4302 vm_page_free_toq(m); 4303 } 4304 4305 if (update_wire_count) 4306 vm_wire_sub(count); 4307 return (count); 4308 } 4309 4310 /* 4311 * Mark this page as wired down. For managed pages, this prevents reclamation 4312 * by the page daemon, or when the containing object, if any, is destroyed. 4313 */ 4314 void 4315 vm_page_wire(vm_page_t m) 4316 { 4317 u_int old; 4318 4319 #ifdef INVARIANTS 4320 if (m->object != NULL && !vm_page_busied(m) && 4321 !vm_object_busied(m->object)) 4322 VM_OBJECT_ASSERT_LOCKED(m->object); 4323 #endif 4324 KASSERT((m->flags & PG_FICTITIOUS) == 0 || 4325 VPRC_WIRE_COUNT(m->ref_count) >= 1, 4326 ("vm_page_wire: fictitious page %p has zero wirings", m)); 4327 4328 old = atomic_fetchadd_int(&m->ref_count, 1); 4329 KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX, 4330 ("vm_page_wire: counter overflow for page %p", m)); 4331 if (VPRC_WIRE_COUNT(old) == 0) { 4332 if ((m->oflags & VPO_UNMANAGED) == 0) 4333 vm_page_aflag_set(m, PGA_DEQUEUE); 4334 vm_wire_add(1); 4335 } 4336 } 4337 4338 /* 4339 * Attempt to wire a mapped page following a pmap lookup of that page. 4340 * This may fail if a thread is concurrently tearing down mappings of the page. 4341 * The transient failure is acceptable because it translates to the 4342 * failure of the caller pmap_extract_and_hold(), which should be then 4343 * followed by the vm_fault() fallback, see e.g. vm_fault_quick_hold_pages(). 
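 *
 * In outline, an architecture's pmap_extract_and_hold() uses this roughly
 * as follows (a simplified, machine-independent sketch; the placeholders
 * in angle brackets stand for pmap-specific lookups):
 *
 *	m = NULL;
 *	PMAP_LOCK(pmap);
 *	if (<va is mapped with the requested protection>) {
 *		m = PHYS_TO_VM_PAGE(<physical address of the mapping>);
 *		if (!vm_page_wire_mapped(m))
 *			m = NULL;
 *	}
 *	PMAP_UNLOCK(pmap);
 *	return (m);
 *
 * A NULL result then sends the caller down the vm_fault() path described
 * above.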
4344 */ 4345 bool 4346 vm_page_wire_mapped(vm_page_t m) 4347 { 4348 u_int old; 4349 4350 old = atomic_load_int(&m->ref_count); 4351 do { 4352 KASSERT(old > 0, 4353 ("vm_page_wire_mapped: wiring unreferenced page %p", m)); 4354 if ((old & VPRC_BLOCKED) != 0) 4355 return (false); 4356 } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1)); 4357 4358 if (VPRC_WIRE_COUNT(old) == 0) { 4359 if ((m->oflags & VPO_UNMANAGED) == 0) 4360 vm_page_aflag_set(m, PGA_DEQUEUE); 4361 vm_wire_add(1); 4362 } 4363 return (true); 4364 } 4365 4366 /* 4367 * Release a wiring reference to a managed page. If the page still belongs to 4368 * an object, update its position in the page queues to reflect the reference. 4369 * If the wiring was the last reference to the page, free the page. 4370 */ 4371 static void 4372 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse) 4373 { 4374 u_int old; 4375 4376 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4377 ("%s: page %p is unmanaged", __func__, m)); 4378 4379 /* 4380 * Update LRU state before releasing the wiring reference. 4381 * Use a release store when updating the reference count to 4382 * synchronize with vm_page_free_prep(). 4383 */ 4384 old = atomic_load_int(&m->ref_count); 4385 do { 4386 u_int count; 4387 4388 KASSERT(VPRC_WIRE_COUNT(old) > 0, 4389 ("vm_page_unwire: wire count underflow for page %p", m)); 4390 4391 count = old & ~VPRC_BLOCKED; 4392 if (count > VPRC_OBJREF + 1) { 4393 /* 4394 * The page has at least one other wiring reference. An 4395 * earlier iteration of this loop may have called 4396 * vm_page_release_toq() and cleared PGA_DEQUEUE, so 4397 * re-set it if necessary. 4398 */ 4399 if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0) 4400 vm_page_aflag_set(m, PGA_DEQUEUE); 4401 } else if (count == VPRC_OBJREF + 1) { 4402 /* 4403 * This is the last wiring. Clear PGA_DEQUEUE and 4404 * update the page's queue state to reflect the 4405 * reference. If the page does not belong to an object 4406 * (i.e., the VPRC_OBJREF bit is clear), we only need to 4407 * clear leftover queue state. 4408 */ 4409 vm_page_release_toq(m, nqueue, noreuse); 4410 } else if (count == 1) { 4411 vm_page_aflag_clear(m, PGA_DEQUEUE); 4412 } 4413 } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1)); 4414 4415 if (VPRC_WIRE_COUNT(old) == 1) { 4416 vm_wire_sub(1); 4417 if (old == 1) 4418 vm_page_free(m); 4419 } 4420 } 4421 4422 /* 4423 * Release one wiring of the specified page, potentially allowing it to be 4424 * paged out. 4425 * 4426 * Only managed pages belonging to an object can be paged out. If the number 4427 * of wirings transitions to zero and the page is eligible for page out, then 4428 * the page is added to the specified paging queue. If the released wiring 4429 * represented the last reference to the page, the page is freed. 4430 */ 4431 void 4432 vm_page_unwire(vm_page_t m, uint8_t nqueue) 4433 { 4434 4435 KASSERT(nqueue < PQ_COUNT, 4436 ("vm_page_unwire: invalid queue %u request for page %p", 4437 nqueue, m)); 4438 4439 if ((m->oflags & VPO_UNMANAGED) != 0) { 4440 if (vm_page_unwire_noq(m) && m->ref_count == 0) 4441 vm_page_free(m); 4442 return; 4443 } 4444 vm_page_unwire_managed(m, nqueue, false); 4445 } 4446 4447 /* 4448 * Unwire a page without (re-)inserting it into a page queue. It is up 4449 * to the caller to enqueue, requeue, or free the page as appropriate. 4450 * In most cases involving managed pages, vm_page_unwire() should be used 4451 * instead. 
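 *
 * For example, vm_page_unwire() above relies on this routine for
 * unmanaged pages, freeing the page itself once the last wiring and the
 * last reference are gone:
 *
 *	if (vm_page_unwire_noq(m) && m->ref_count == 0)
 *		vm_page_free(m);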
4452 */ 4453 bool 4454 vm_page_unwire_noq(vm_page_t m) 4455 { 4456 u_int old; 4457 4458 old = vm_page_drop(m, 1); 4459 KASSERT(VPRC_WIRE_COUNT(old) != 0, 4460 ("%s: counter underflow for page %p", __func__, m)); 4461 KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1, 4462 ("%s: missing ref on fictitious page %p", __func__, m)); 4463 4464 if (VPRC_WIRE_COUNT(old) > 1) 4465 return (false); 4466 if ((m->oflags & VPO_UNMANAGED) == 0) 4467 vm_page_aflag_clear(m, PGA_DEQUEUE); 4468 vm_wire_sub(1); 4469 return (true); 4470 } 4471 4472 /* 4473 * Ensure that the page ends up in the specified page queue. If the page is 4474 * active or being moved to the active queue, ensure that its act_count is 4475 * at least ACT_INIT but do not otherwise mess with it. 4476 */ 4477 static __always_inline void 4478 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag) 4479 { 4480 vm_page_astate_t old, new; 4481 4482 KASSERT(m->ref_count > 0, 4483 ("%s: page %p does not carry any references", __func__, m)); 4484 KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD, 4485 ("%s: invalid flags %x", __func__, nflag)); 4486 4487 if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m)) 4488 return; 4489 4490 old = vm_page_astate_load(m); 4491 do { 4492 if ((old.flags & PGA_DEQUEUE) != 0) 4493 break; 4494 new = old; 4495 new.flags &= ~PGA_QUEUE_OP_MASK; 4496 if (nqueue == PQ_ACTIVE) 4497 new.act_count = max(old.act_count, ACT_INIT); 4498 if (old.queue == nqueue) { 4499 /* 4500 * There is no need to requeue pages already in the 4501 * active queue. 4502 */ 4503 if (nqueue != PQ_ACTIVE || 4504 (old.flags & PGA_ENQUEUED) == 0) 4505 new.flags |= nflag; 4506 } else { 4507 new.flags |= nflag; 4508 new.queue = nqueue; 4509 } 4510 } while (!vm_page_pqstate_commit(m, &old, new)); 4511 } 4512 4513 /* 4514 * Put the specified page on the active list (if appropriate). 4515 */ 4516 void 4517 vm_page_activate(vm_page_t m) 4518 { 4519 4520 vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE); 4521 } 4522 4523 /* 4524 * Move the specified page to the tail of the inactive queue, or requeue 4525 * the page if it is already in the inactive queue. 4526 */ 4527 void 4528 vm_page_deactivate(vm_page_t m) 4529 { 4530 4531 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE); 4532 } 4533 4534 void 4535 vm_page_deactivate_noreuse(vm_page_t m) 4536 { 4537 4538 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD); 4539 } 4540 4541 /* 4542 * Put a page in the laundry, or requeue it if it is already there. 4543 */ 4544 void 4545 vm_page_launder(vm_page_t m) 4546 { 4547 4548 vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE); 4549 } 4550 4551 /* 4552 * Put a page in the PQ_UNSWAPPABLE holding queue. 4553 */ 4554 void 4555 vm_page_unswappable(vm_page_t m) 4556 { 4557 4558 VM_OBJECT_ASSERT_LOCKED(m->object); 4559 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4560 ("page %p already unswappable", m)); 4561 4562 vm_page_dequeue(m); 4563 vm_page_enqueue(m, PQ_UNSWAPPABLE); 4564 } 4565 4566 /* 4567 * Release a page back to the page queues in preparation for unwiring. 4568 */ 4569 static void 4570 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse) 4571 { 4572 vm_page_astate_t old, new; 4573 uint16_t nflag; 4574 4575 /* 4576 * Use a check of the valid bits to determine whether we should 4577 * accelerate reclamation of the page. The object lock might not be 4578 * held here, in which case the check is racy. 
At worst we will either 4579 * accelerate reclamation of a valid page and violate LRU, or 4580 * unnecessarily defer reclamation of an invalid page. 4581 * 4582 * If we were asked to not cache the page, place it near the head of the 4583 * inactive queue so that is reclaimed sooner. 4584 */ 4585 if (noreuse || vm_page_none_valid(m)) { 4586 nqueue = PQ_INACTIVE; 4587 nflag = PGA_REQUEUE_HEAD; 4588 } else { 4589 nflag = PGA_REQUEUE; 4590 } 4591 4592 old = vm_page_astate_load(m); 4593 do { 4594 new = old; 4595 4596 /* 4597 * If the page is already in the active queue and we are not 4598 * trying to accelerate reclamation, simply mark it as 4599 * referenced and avoid any queue operations. 4600 */ 4601 new.flags &= ~PGA_QUEUE_OP_MASK; 4602 if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE && 4603 (old.flags & PGA_ENQUEUED) != 0) 4604 new.flags |= PGA_REFERENCED; 4605 else { 4606 new.flags |= nflag; 4607 new.queue = nqueue; 4608 } 4609 } while (!vm_page_pqstate_commit(m, &old, new)); 4610 } 4611 4612 /* 4613 * Unwire a page and either attempt to free it or re-add it to the page queues. 4614 */ 4615 void 4616 vm_page_release(vm_page_t m, int flags) 4617 { 4618 vm_object_t object; 4619 4620 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4621 ("vm_page_release: page %p is unmanaged", m)); 4622 4623 if ((flags & VPR_TRYFREE) != 0) { 4624 for (;;) { 4625 object = atomic_load_ptr(&m->object); 4626 if (object == NULL) 4627 break; 4628 /* Depends on type-stability. */ 4629 if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object)) 4630 break; 4631 if (object == m->object) { 4632 vm_page_release_locked(m, flags); 4633 VM_OBJECT_WUNLOCK(object); 4634 return; 4635 } 4636 VM_OBJECT_WUNLOCK(object); 4637 } 4638 } 4639 vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0); 4640 } 4641 4642 /* See vm_page_release(). */ 4643 void 4644 vm_page_release_locked(vm_page_t m, int flags) 4645 { 4646 4647 VM_OBJECT_ASSERT_WLOCKED(m->object); 4648 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4649 ("vm_page_release_locked: page %p is unmanaged", m)); 4650 4651 if (vm_page_unwire_noq(m)) { 4652 if ((flags & VPR_TRYFREE) != 0 && 4653 (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) && 4654 m->dirty == 0 && vm_page_tryxbusy(m)) { 4655 /* 4656 * An unlocked lookup may have wired the page before the 4657 * busy lock was acquired, in which case the page must 4658 * not be freed. 4659 */ 4660 if (__predict_true(!vm_page_wired(m))) { 4661 vm_page_free(m); 4662 return; 4663 } 4664 vm_page_xunbusy(m); 4665 } else { 4666 vm_page_release_toq(m, PQ_INACTIVE, flags != 0); 4667 } 4668 } 4669 } 4670 4671 static bool 4672 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t)) 4673 { 4674 u_int old; 4675 4676 KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0, 4677 ("vm_page_try_blocked_op: page %p has no object", m)); 4678 KASSERT(vm_page_busied(m), 4679 ("vm_page_try_blocked_op: page %p is not busy", m)); 4680 VM_OBJECT_ASSERT_LOCKED(m->object); 4681 4682 old = atomic_load_int(&m->ref_count); 4683 do { 4684 KASSERT(old != 0, 4685 ("vm_page_try_blocked_op: page %p has no references", m)); 4686 KASSERT((old & VPRC_BLOCKED) == 0, 4687 ("vm_page_try_blocked_op: page %p blocks wirings", m)); 4688 if (VPRC_WIRE_COUNT(old) != 0) 4689 return (false); 4690 } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED)); 4691 4692 (op)(m); 4693 4694 /* 4695 * If the object is read-locked, new wirings may be created via an 4696 * object lookup. 
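 * Consequently the reference count may already include new wirings by the
 * time VPRC_BLOCKED is cleared below, which is why the exact-value
 * assertion that follows is applied only when the object is write-locked.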
4697 */ 4698 old = vm_page_drop(m, VPRC_BLOCKED); 4699 KASSERT(!VM_OBJECT_WOWNED(m->object) || 4700 old == (VPRC_BLOCKED | VPRC_OBJREF), 4701 ("vm_page_try_blocked_op: unexpected refcount value %u for %p", 4702 old, m)); 4703 return (true); 4704 } 4705 4706 /* 4707 * Atomically check for wirings and remove all mappings of the page. 4708 */ 4709 bool 4710 vm_page_try_remove_all(vm_page_t m) 4711 { 4712 4713 return (vm_page_try_blocked_op(m, pmap_remove_all)); 4714 } 4715 4716 /* 4717 * Atomically check for wirings and remove all writeable mappings of the page. 4718 */ 4719 bool 4720 vm_page_try_remove_write(vm_page_t m) 4721 { 4722 4723 return (vm_page_try_blocked_op(m, pmap_remove_write)); 4724 } 4725 4726 /* 4727 * vm_page_advise 4728 * 4729 * Apply the specified advice to the given page. 4730 */ 4731 void 4732 vm_page_advise(vm_page_t m, int advice) 4733 { 4734 4735 VM_OBJECT_ASSERT_WLOCKED(m->object); 4736 vm_page_assert_xbusied(m); 4737 4738 if (advice == MADV_FREE) 4739 /* 4740 * Mark the page clean. This will allow the page to be freed 4741 * without first paging it out. MADV_FREE pages are often 4742 * quickly reused by malloc(3), so we do not do anything that 4743 * would result in a page fault on a later access. 4744 */ 4745 vm_page_undirty(m); 4746 else if (advice != MADV_DONTNEED) { 4747 if (advice == MADV_WILLNEED) 4748 vm_page_activate(m); 4749 return; 4750 } 4751 4752 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) 4753 vm_page_dirty(m); 4754 4755 /* 4756 * Clear any references to the page. Otherwise, the page daemon will 4757 * immediately reactivate the page. 4758 */ 4759 vm_page_aflag_clear(m, PGA_REFERENCED); 4760 4761 /* 4762 * Place clean pages near the head of the inactive queue rather than 4763 * the tail, thus defeating the queue's LRU operation and ensuring that 4764 * the page will be reused quickly. Dirty pages not already in the 4765 * laundry are moved there. 4766 */ 4767 if (m->dirty == 0) 4768 vm_page_deactivate_noreuse(m); 4769 else if (!vm_page_in_laundry(m)) 4770 vm_page_launder(m); 4771 } 4772 4773 /* 4774 * vm_page_grab_release 4775 * 4776 * Helper routine for grab functions to release busy on return. 4777 */ 4778 static inline void 4779 vm_page_grab_release(vm_page_t m, int allocflags) 4780 { 4781 4782 if ((allocflags & VM_ALLOC_NOBUSY) != 0) { 4783 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0) 4784 vm_page_sunbusy(m); 4785 else 4786 vm_page_xunbusy(m); 4787 } 4788 } 4789 4790 /* 4791 * vm_page_grab_sleep 4792 * 4793 * Sleep for busy according to VM_ALLOC_ parameters. Returns true 4794 * if the caller should retry and false otherwise. 4795 * 4796 * If the object is locked on entry the object will be unlocked with 4797 * false returns and still locked but possibly having been dropped 4798 * with true returns. 4799 */ 4800 static bool 4801 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex, 4802 const char *wmesg, int allocflags, bool locked) 4803 { 4804 4805 if ((allocflags & VM_ALLOC_NOWAIT) != 0) 4806 return (false); 4807 4808 /* 4809 * Reference the page before unlocking and sleeping so that 4810 * the page daemon is less likely to reclaim it. 4811 */ 4812 if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0) 4813 vm_page_reference(m); 4814 4815 if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) && 4816 locked) 4817 VM_OBJECT_WLOCK(object); 4818 if ((allocflags & VM_ALLOC_WAITFAIL) != 0) 4819 return (false); 4820 4821 return (true); 4822 } 4823 4824 /* 4825 * Assert that the grab flags are valid. 
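 * For example, VM_ALLOC_NOBUSY by itself is rejected, because the caller
 * would receive a page that is neither busied nor wired, whereas
 * VM_ALLOC_NOBUSY | VM_ALLOC_WIRED is accepted.  Likewise, VM_ALLOC_SBUSY
 * is only accepted together with VM_ALLOC_IGN_SBUSY.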
4826 */ 4827 static inline void 4828 vm_page_grab_check(int allocflags) 4829 { 4830 4831 KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 || 4832 (allocflags & VM_ALLOC_WIRED) != 0, 4833 ("vm_page_grab*: the pages must be busied or wired")); 4834 4835 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 4836 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 4837 ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch")); 4838 } 4839 4840 /* 4841 * Calculate the page allocation flags for grab. 4842 */ 4843 static inline int 4844 vm_page_grab_pflags(int allocflags) 4845 { 4846 int pflags; 4847 4848 pflags = allocflags & 4849 ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL | 4850 VM_ALLOC_NOBUSY | VM_ALLOC_IGN_SBUSY); 4851 if ((allocflags & VM_ALLOC_NOWAIT) == 0) 4852 pflags |= VM_ALLOC_WAITFAIL; 4853 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0) 4854 pflags |= VM_ALLOC_SBUSY; 4855 4856 return (pflags); 4857 } 4858 4859 /* 4860 * Grab a page, waiting until we are waken up due to the page 4861 * changing state. We keep on waiting, if the page continues 4862 * to be in the object. If the page doesn't exist, first allocate it 4863 * and then conditionally zero it. 4864 * 4865 * This routine may sleep. 4866 * 4867 * The object must be locked on entry. The lock will, however, be released 4868 * and reacquired if the routine sleeps. 4869 */ 4870 vm_page_t 4871 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 4872 { 4873 vm_page_t m; 4874 4875 VM_OBJECT_ASSERT_WLOCKED(object); 4876 vm_page_grab_check(allocflags); 4877 4878 retrylookup: 4879 if ((m = vm_page_lookup(object, pindex)) != NULL) { 4880 if (!vm_page_tryacquire(m, allocflags)) { 4881 if (vm_page_grab_sleep(object, m, pindex, "pgrbwt", 4882 allocflags, true)) 4883 goto retrylookup; 4884 return (NULL); 4885 } 4886 goto out; 4887 } 4888 if ((allocflags & VM_ALLOC_NOCREAT) != 0) 4889 return (NULL); 4890 m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags)); 4891 if (m == NULL) { 4892 if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0) 4893 return (NULL); 4894 goto retrylookup; 4895 } 4896 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 4897 pmap_zero_page(m); 4898 4899 out: 4900 vm_page_grab_release(m, allocflags); 4901 4902 return (m); 4903 } 4904 4905 /* 4906 * Attempt to validate a page, locklessly acquiring it if necessary, given a 4907 * (object, pindex) tuple and either an invalided page or NULL. The resulting 4908 * page will be validated against the identity tuple, and busied or wired as 4909 * requested. A NULL page returned guarantees that the page was not in radix at 4910 * the time of the call but callers must perform higher level synchronization or 4911 * retry the operation under a lock if they require an atomic answer. This is 4912 * the only lock free validation routine, other routines can depend on the 4913 * resulting page state. 4914 * 4915 * The return value PAGE_NOT_ACQUIRED indicates that the operation failed due to 4916 * caller flags. 
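 *
 * Callers therefore distinguish three outcomes, as vm_page_grab_unlocked()
 * below does.  Illustrative sketch:
 *
 *	m = vm_page_acquire_unlocked(object, pindex, NULL, allocflags);
 *	if (m == PAGE_NOT_ACQUIRED)
 *		return (NULL);	/* NOWAIT or WAITFAIL policy applied. */
 *	if (m == NULL) {
 *		/* Not resident; allocate or fail under the object lock. */
 *	}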
4917 */ 4918 #define PAGE_NOT_ACQUIRED ((vm_page_t)1) 4919 static vm_page_t 4920 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex, vm_page_t m, 4921 int allocflags) 4922 { 4923 if (m == NULL) 4924 m = vm_page_lookup_unlocked(object, pindex); 4925 for (; m != NULL; m = vm_page_lookup_unlocked(object, pindex)) { 4926 if (vm_page_trybusy(m, allocflags)) { 4927 if (m->object == object && m->pindex == pindex) { 4928 if ((allocflags & VM_ALLOC_WIRED) != 0) 4929 vm_page_wire(m); 4930 vm_page_grab_release(m, allocflags); 4931 break; 4932 } 4933 /* relookup. */ 4934 vm_page_busy_release(m); 4935 cpu_spinwait(); 4936 continue; 4937 } 4938 if (!vm_page_grab_sleep(object, m, pindex, "pgnslp", 4939 allocflags, false)) 4940 return (PAGE_NOT_ACQUIRED); 4941 } 4942 return (m); 4943 } 4944 4945 /* 4946 * Try to locklessly grab a page and fall back to the object lock if NOCREAT 4947 * is not set. 4948 */ 4949 vm_page_t 4950 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags) 4951 { 4952 vm_page_t m; 4953 4954 vm_page_grab_check(allocflags); 4955 m = vm_page_acquire_unlocked(object, pindex, NULL, allocflags); 4956 if (m == PAGE_NOT_ACQUIRED) 4957 return (NULL); 4958 if (m != NULL) 4959 return (m); 4960 4961 /* 4962 * The radix lockless lookup should never return a false negative 4963 * errors. If the user specifies NOCREAT they are guaranteed there 4964 * was no page present at the instant of the call. A NOCREAT caller 4965 * must handle create races gracefully. 4966 */ 4967 if ((allocflags & VM_ALLOC_NOCREAT) != 0) 4968 return (NULL); 4969 4970 VM_OBJECT_WLOCK(object); 4971 m = vm_page_grab(object, pindex, allocflags); 4972 VM_OBJECT_WUNLOCK(object); 4973 4974 return (m); 4975 } 4976 4977 /* 4978 * Grab a page and make it valid, paging in if necessary. Pages missing from 4979 * their pager are zero filled and validated. If a VM_ALLOC_COUNT is supplied 4980 * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought 4981 * in simultaneously. Additional pages will be left on a paging queue but 4982 * will neither be wired nor busy regardless of allocflags. 4983 */ 4984 int 4985 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags) 4986 { 4987 vm_page_t m; 4988 vm_page_t ma[VM_INITIAL_PAGEIN]; 4989 int after, i, pflags, rv; 4990 4991 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 4992 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 4993 ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch")); 4994 KASSERT((allocflags & 4995 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0, 4996 ("vm_page_grab_valid: Invalid flags 0x%X", allocflags)); 4997 VM_OBJECT_ASSERT_WLOCKED(object); 4998 pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY | 4999 VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY); 5000 pflags |= VM_ALLOC_WAITFAIL; 5001 5002 retrylookup: 5003 if ((m = vm_page_lookup(object, pindex)) != NULL) { 5004 /* 5005 * If the page is fully valid it can only become invalid 5006 * with the object lock held. If it is not valid it can 5007 * become valid with the busy lock held. Therefore, we 5008 * may unnecessarily lock the exclusive busy here if we 5009 * race with I/O completion not using the object lock. 5010 * However, we will not end up with an invalid page and a 5011 * shared lock. 5012 */ 5013 if (!vm_page_trybusy(m, 5014 vm_page_all_valid(m) ? 
allocflags : 0)) { 5015 (void)vm_page_grab_sleep(object, m, pindex, "pgrbwt", 5016 allocflags, true); 5017 goto retrylookup; 5018 } 5019 if (vm_page_all_valid(m)) 5020 goto out; 5021 if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 5022 vm_page_busy_release(m); 5023 *mp = NULL; 5024 return (VM_PAGER_FAIL); 5025 } 5026 } else if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 5027 *mp = NULL; 5028 return (VM_PAGER_FAIL); 5029 } else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) { 5030 if (!vm_pager_can_alloc_page(object, pindex)) { 5031 *mp = NULL; 5032 return (VM_PAGER_AGAIN); 5033 } 5034 goto retrylookup; 5035 } 5036 5037 vm_page_assert_xbusied(m); 5038 if (vm_pager_has_page(object, pindex, NULL, &after)) { 5039 after = MIN(after, VM_INITIAL_PAGEIN); 5040 after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT); 5041 after = MAX(after, 1); 5042 ma[0] = m; 5043 for (i = 1; i < after; i++) { 5044 if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) { 5045 if (vm_page_any_valid(ma[i]) || 5046 !vm_page_tryxbusy(ma[i])) 5047 break; 5048 } else { 5049 ma[i] = vm_page_alloc(object, m->pindex + i, 5050 VM_ALLOC_NORMAL); 5051 if (ma[i] == NULL) 5052 break; 5053 } 5054 } 5055 after = i; 5056 vm_object_pip_add(object, after); 5057 VM_OBJECT_WUNLOCK(object); 5058 rv = vm_pager_get_pages(object, ma, after, NULL, NULL); 5059 VM_OBJECT_WLOCK(object); 5060 vm_object_pip_wakeupn(object, after); 5061 /* Pager may have replaced a page. */ 5062 m = ma[0]; 5063 if (rv != VM_PAGER_OK) { 5064 for (i = 0; i < after; i++) { 5065 if (!vm_page_wired(ma[i])) 5066 vm_page_free(ma[i]); 5067 else 5068 vm_page_xunbusy(ma[i]); 5069 } 5070 *mp = NULL; 5071 return (rv); 5072 } 5073 for (i = 1; i < after; i++) 5074 vm_page_readahead_finish(ma[i]); 5075 MPASS(vm_page_all_valid(m)); 5076 } else { 5077 vm_page_zero_invalid(m, TRUE); 5078 } 5079 out: 5080 if ((allocflags & VM_ALLOC_WIRED) != 0) 5081 vm_page_wire(m); 5082 if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m)) 5083 vm_page_busy_downgrade(m); 5084 else if ((allocflags & VM_ALLOC_NOBUSY) != 0) 5085 vm_page_busy_release(m); 5086 *mp = m; 5087 return (VM_PAGER_OK); 5088 } 5089 5090 /* 5091 * Locklessly grab a valid page. If the page is not valid or not yet 5092 * allocated this will fall back to the object lock method. 5093 */ 5094 int 5095 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object, 5096 vm_pindex_t pindex, int allocflags) 5097 { 5098 vm_page_t m; 5099 int flags; 5100 int error; 5101 5102 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 5103 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 5104 ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY " 5105 "mismatch")); 5106 KASSERT((allocflags & 5107 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0, 5108 ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags)); 5109 5110 /* 5111 * Attempt a lockless lookup and busy. We need at least an sbusy 5112 * before we can inspect the valid field and return a wired page. 
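 * VM_ALLOC_NOBUSY and VM_ALLOC_WIRED are masked off for the lockless
 * attempt below; the page is inspected while busied, and is wired, and
 * unbusied if requested, only once it is known to be fully valid.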
5113 */ 5114 flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED); 5115 vm_page_grab_check(flags); 5116 m = vm_page_acquire_unlocked(object, pindex, NULL, flags); 5117 if (m == PAGE_NOT_ACQUIRED) 5118 return (VM_PAGER_FAIL); 5119 if (m != NULL) { 5120 if (vm_page_all_valid(m)) { 5121 if ((allocflags & VM_ALLOC_WIRED) != 0) 5122 vm_page_wire(m); 5123 vm_page_grab_release(m, allocflags); 5124 *mp = m; 5125 return (VM_PAGER_OK); 5126 } 5127 vm_page_busy_release(m); 5128 } 5129 if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 5130 *mp = NULL; 5131 return (VM_PAGER_FAIL); 5132 } 5133 VM_OBJECT_WLOCK(object); 5134 error = vm_page_grab_valid(mp, object, pindex, allocflags); 5135 VM_OBJECT_WUNLOCK(object); 5136 5137 return (error); 5138 } 5139 5140 /* 5141 * Return the specified range of pages from the given object. For each 5142 * page offset within the range, if a page already exists within the object 5143 * at that offset and it is busy, then wait for it to change state. If, 5144 * instead, the page doesn't exist, then allocate it. 5145 * 5146 * The caller must always specify an allocation class. 5147 * 5148 * allocation classes: 5149 * VM_ALLOC_NORMAL normal process request 5150 * VM_ALLOC_SYSTEM system *really* needs the pages 5151 * 5152 * The caller must always specify that the pages are to be busied and/or 5153 * wired. 5154 * 5155 * optional allocation flags: 5156 * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages 5157 * VM_ALLOC_NOBUSY do not exclusive busy the page 5158 * VM_ALLOC_NOWAIT do not sleep 5159 * VM_ALLOC_SBUSY set page to sbusy state 5160 * VM_ALLOC_WIRED wire the pages 5161 * VM_ALLOC_ZERO zero and validate any invalid pages 5162 * 5163 * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it 5164 * may return a partial prefix of the requested range. 5165 */ 5166 int 5167 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags, 5168 vm_page_t *ma, int count) 5169 { 5170 vm_page_t m, mpred; 5171 int pflags; 5172 int i; 5173 5174 VM_OBJECT_ASSERT_WLOCKED(object); 5175 KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0, 5176 ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed")); 5177 KASSERT(count > 0, 5178 ("vm_page_grab_pages: invalid page count %d", count)); 5179 vm_page_grab_check(allocflags); 5180 5181 pflags = vm_page_grab_pflags(allocflags); 5182 i = 0; 5183 retrylookup: 5184 m = vm_page_mpred(object, pindex + i); 5185 if (m == NULL || m->pindex != pindex + i) { 5186 mpred = m; 5187 m = NULL; 5188 } else 5189 mpred = TAILQ_PREV(m, pglist, listq); 5190 for (; i < count; i++) { 5191 if (m != NULL) { 5192 if (!vm_page_tryacquire(m, allocflags)) { 5193 if (vm_page_grab_sleep(object, m, pindex + i, 5194 "grbmaw", allocflags, true)) 5195 goto retrylookup; 5196 break; 5197 } 5198 } else { 5199 if ((allocflags & VM_ALLOC_NOCREAT) != 0) 5200 break; 5201 m = vm_page_alloc_after(object, pindex + i, 5202 pflags | VM_ALLOC_COUNT(count - i), mpred); 5203 if (m == NULL) { 5204 if ((allocflags & (VM_ALLOC_NOWAIT | 5205 VM_ALLOC_WAITFAIL)) != 0) 5206 break; 5207 goto retrylookup; 5208 } 5209 } 5210 if (vm_page_none_valid(m) && 5211 (allocflags & VM_ALLOC_ZERO) != 0) { 5212 if ((m->flags & PG_ZERO) == 0) 5213 pmap_zero_page(m); 5214 vm_page_valid(m); 5215 } 5216 vm_page_grab_release(m, allocflags); 5217 ma[i] = mpred = m; 5218 m = vm_page_next(m); 5219 } 5220 return (i); 5221 } 5222 5223 /* 5224 * Unlocked variant of vm_page_grab_pages(). This accepts the same flags 5225 * and will fall back to the locked variant to handle allocation. 
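 *
 * Illustrative call, in which "obj", "start", "ma" and "npages" are
 * caller-supplied placeholders rather than names from this file:
 *
 *	n = vm_page_grab_pages_unlocked(obj, start,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY, ma, npages);
 *
 * The call returns the length of the leading run of pages that was
 * grabbed; each grabbed page is left wired and unbusied in ma[].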
5226 */ 5227 int 5228 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex, 5229 int allocflags, vm_page_t *ma, int count) 5230 { 5231 vm_page_t m; 5232 int flags; 5233 int i; 5234 5235 KASSERT(count > 0, 5236 ("vm_page_grab_pages_unlocked: invalid page count %d", count)); 5237 vm_page_grab_check(allocflags); 5238 5239 /* 5240 * Modify flags for lockless acquire to hold the page until we 5241 * set it valid if necessary. 5242 */ 5243 flags = allocflags & ~VM_ALLOC_NOBUSY; 5244 vm_page_grab_check(flags); 5245 m = NULL; 5246 for (i = 0; i < count; i++, pindex++) { 5247 /* 5248 * We may see a false NULL here because the previous page has 5249 * been removed or just inserted and the list is loaded without 5250 * barriers. Switch to radix to verify. 5251 */ 5252 if (m == NULL || QMD_IS_TRASHED(m) || m->pindex != pindex || 5253 atomic_load_ptr(&m->object) != object) { 5254 /* 5255 * This guarantees the result is instantaneously 5256 * correct. 5257 */ 5258 m = NULL; 5259 } 5260 m = vm_page_acquire_unlocked(object, pindex, m, flags); 5261 if (m == PAGE_NOT_ACQUIRED) 5262 return (i); 5263 if (m == NULL) 5264 break; 5265 if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) { 5266 if ((m->flags & PG_ZERO) == 0) 5267 pmap_zero_page(m); 5268 vm_page_valid(m); 5269 } 5270 /* m will still be wired or busy according to flags. */ 5271 vm_page_grab_release(m, allocflags); 5272 ma[i] = m; 5273 m = TAILQ_NEXT(m, listq); 5274 } 5275 if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0) 5276 return (i); 5277 count -= i; 5278 VM_OBJECT_WLOCK(object); 5279 i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count); 5280 VM_OBJECT_WUNLOCK(object); 5281 5282 return (i); 5283 } 5284 5285 /* 5286 * Mapping function for valid or dirty bits in a page. 5287 * 5288 * Inputs are required to range within a page. 5289 */ 5290 vm_page_bits_t 5291 vm_page_bits(int base, int size) 5292 { 5293 int first_bit; 5294 int last_bit; 5295 5296 KASSERT( 5297 base + size <= PAGE_SIZE, 5298 ("vm_page_bits: illegal base/size %d/%d", base, size) 5299 ); 5300 5301 if (size == 0) /* handle degenerate case */ 5302 return (0); 5303 5304 first_bit = base >> DEV_BSHIFT; 5305 last_bit = (base + size - 1) >> DEV_BSHIFT; 5306 5307 return (((vm_page_bits_t)2 << last_bit) - 5308 ((vm_page_bits_t)1 << first_bit)); 5309 } 5310 5311 void 5312 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set) 5313 { 5314 5315 #if PAGE_SIZE == 32768 5316 atomic_set_64((uint64_t *)bits, set); 5317 #elif PAGE_SIZE == 16384 5318 atomic_set_32((uint32_t *)bits, set); 5319 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16) 5320 atomic_set_16((uint16_t *)bits, set); 5321 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8) 5322 atomic_set_8((uint8_t *)bits, set); 5323 #else /* PAGE_SIZE <= 8192 */ 5324 uintptr_t addr; 5325 int shift; 5326 5327 addr = (uintptr_t)bits; 5328 /* 5329 * Use a trick to perform a 32-bit atomic on the 5330 * containing aligned word, to not depend on the existence 5331 * of atomic_{set, clear}_{8, 16}. 
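 *
 * Worked example, assuming PAGE_SIZE is 4096 (so vm_page_bits_t is a
 * single byte) and atomic_set_8 is unavailable: if the bitmap byte lives
 * at offset 3 within its naturally aligned 32-bit word, then on a
 * little-endian machine shift is 3 * NBBY = 24 and the update becomes
 *
 *	atomic_set_32((uint32_t *)addr, set << 24);
 *
 * while on a big-endian machine shift is (4 - 1 - 3) * NBBY = 0.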
5332 */ 5333 shift = addr & (sizeof(uint32_t) - 1); 5334 #if BYTE_ORDER == BIG_ENDIAN 5335 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; 5336 #else 5337 shift *= NBBY; 5338 #endif 5339 addr &= ~(sizeof(uint32_t) - 1); 5340 atomic_set_32((uint32_t *)addr, set << shift); 5341 #endif /* PAGE_SIZE */ 5342 } 5343 5344 static inline void 5345 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear) 5346 { 5347 5348 #if PAGE_SIZE == 32768 5349 atomic_clear_64((uint64_t *)bits, clear); 5350 #elif PAGE_SIZE == 16384 5351 atomic_clear_32((uint32_t *)bits, clear); 5352 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16) 5353 atomic_clear_16((uint16_t *)bits, clear); 5354 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8) 5355 atomic_clear_8((uint8_t *)bits, clear); 5356 #else /* PAGE_SIZE <= 8192 */ 5357 uintptr_t addr; 5358 int shift; 5359 5360 addr = (uintptr_t)bits; 5361 /* 5362 * Use a trick to perform a 32-bit atomic on the 5363 * containing aligned word, to not depend on the existence 5364 * of atomic_{set, clear}_{8, 16}. 5365 */ 5366 shift = addr & (sizeof(uint32_t) - 1); 5367 #if BYTE_ORDER == BIG_ENDIAN 5368 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; 5369 #else 5370 shift *= NBBY; 5371 #endif 5372 addr &= ~(sizeof(uint32_t) - 1); 5373 atomic_clear_32((uint32_t *)addr, clear << shift); 5374 #endif /* PAGE_SIZE */ 5375 } 5376 5377 static inline vm_page_bits_t 5378 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits) 5379 { 5380 #if PAGE_SIZE == 32768 5381 uint64_t old; 5382 5383 old = *bits; 5384 while (atomic_fcmpset_64(bits, &old, newbits) == 0); 5385 return (old); 5386 #elif PAGE_SIZE == 16384 5387 uint32_t old; 5388 5389 old = *bits; 5390 while (atomic_fcmpset_32(bits, &old, newbits) == 0); 5391 return (old); 5392 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16) 5393 uint16_t old; 5394 5395 old = *bits; 5396 while (atomic_fcmpset_16(bits, &old, newbits) == 0); 5397 return (old); 5398 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8) 5399 uint8_t old; 5400 5401 old = *bits; 5402 while (atomic_fcmpset_8(bits, &old, newbits) == 0); 5403 return (old); 5404 #else /* PAGE_SIZE <= 4096*/ 5405 uintptr_t addr; 5406 uint32_t old, new, mask; 5407 int shift; 5408 5409 addr = (uintptr_t)bits; 5410 /* 5411 * Use a trick to perform a 32-bit atomic on the 5412 * containing aligned word, to not depend on the existence 5413 * of atomic_{set, swap, clear}_{8, 16}. 5414 */ 5415 shift = addr & (sizeof(uint32_t) - 1); 5416 #if BYTE_ORDER == BIG_ENDIAN 5417 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; 5418 #else 5419 shift *= NBBY; 5420 #endif 5421 addr &= ~(sizeof(uint32_t) - 1); 5422 mask = VM_PAGE_BITS_ALL << shift; 5423 5424 old = *bits; 5425 do { 5426 new = old & ~mask; 5427 new |= newbits << shift; 5428 } while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0); 5429 return (old >> shift); 5430 #endif /* PAGE_SIZE */ 5431 } 5432 5433 /* 5434 * vm_page_set_valid_range: 5435 * 5436 * Sets portions of a page valid. The arguments are expected 5437 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 5438 * of any partial chunks touched by the range. The invalid portion of 5439 * such chunks will be zeroed. 5440 * 5441 * (base + size) must be less then or equal to PAGE_SIZE. 
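 *
 * Worked example with DEV_BSIZE equal to 512: vm_page_set_valid_range(m,
 * 700, 900) covers bytes 700 through 1599.  If block 1 (bytes 512-1023)
 * is not yet valid, bytes 512-699 are zeroed; if block 3 (bytes 1536-2047)
 * is not yet valid, bytes 1600-2047 are zeroed.  The mask returned by
 * vm_page_bits(700, 900) then marks blocks 1 through 3 valid.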
5442 */ 5443 void 5444 vm_page_set_valid_range(vm_page_t m, int base, int size) 5445 { 5446 int endoff, frag; 5447 vm_page_bits_t pagebits; 5448 5449 vm_page_assert_busied(m); 5450 if (size == 0) /* handle degenerate case */ 5451 return; 5452 5453 /* 5454 * If the base is not DEV_BSIZE aligned and the valid 5455 * bit is clear, we have to zero out a portion of the 5456 * first block. 5457 */ 5458 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 5459 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 5460 pmap_zero_page_area(m, frag, base - frag); 5461 5462 /* 5463 * If the ending offset is not DEV_BSIZE aligned and the 5464 * valid bit is clear, we have to zero out a portion of 5465 * the last block. 5466 */ 5467 endoff = base + size; 5468 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 5469 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 5470 pmap_zero_page_area(m, endoff, 5471 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 5472 5473 /* 5474 * Assert that no previously invalid block that is now being validated 5475 * is already dirty. 5476 */ 5477 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 5478 ("vm_page_set_valid_range: page %p is dirty", m)); 5479 5480 /* 5481 * Set valid bits inclusive of any overlap. 5482 */ 5483 pagebits = vm_page_bits(base, size); 5484 if (vm_page_xbusied(m)) 5485 m->valid |= pagebits; 5486 else 5487 vm_page_bits_set(m, &m->valid, pagebits); 5488 } 5489 5490 /* 5491 * Set the page dirty bits and free the invalid swap space if 5492 * present. Returns the previous dirty bits. 5493 */ 5494 vm_page_bits_t 5495 vm_page_set_dirty(vm_page_t m) 5496 { 5497 vm_page_bits_t old; 5498 5499 VM_PAGE_OBJECT_BUSY_ASSERT(m); 5500 5501 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) { 5502 old = m->dirty; 5503 m->dirty = VM_PAGE_BITS_ALL; 5504 } else 5505 old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL); 5506 if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0) 5507 vm_pager_page_unswapped(m); 5508 5509 return (old); 5510 } 5511 5512 /* 5513 * Clear the given bits from the specified page's dirty field. 5514 */ 5515 static __inline void 5516 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) 5517 { 5518 5519 vm_page_assert_busied(m); 5520 5521 /* 5522 * If the page is xbusied and not write mapped we are the 5523 * only thread that can modify dirty bits. Otherwise, The pmap 5524 * layer can call vm_page_dirty() without holding a distinguished 5525 * lock. The combination of page busy and atomic operations 5526 * suffice to guarantee consistency of the page dirty field. 5527 */ 5528 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) 5529 m->dirty &= ~pagebits; 5530 else 5531 vm_page_bits_clear(m, &m->dirty, pagebits); 5532 } 5533 5534 /* 5535 * vm_page_set_validclean: 5536 * 5537 * Sets portions of a page valid and clean. The arguments are expected 5538 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 5539 * of any partial chunks touched by the range. The invalid portion of 5540 * such chunks will be zero'd. 5541 * 5542 * (base + size) must be less then or equal to PAGE_SIZE. 5543 */ 5544 void 5545 vm_page_set_validclean(vm_page_t m, int base, int size) 5546 { 5547 vm_page_bits_t oldvalid, pagebits; 5548 int endoff, frag; 5549 5550 vm_page_assert_busied(m); 5551 if (size == 0) /* handle degenerate case */ 5552 return; 5553 5554 /* 5555 * If the base is not DEV_BSIZE aligned and the valid 5556 * bit is clear, we have to zero out a portion of the 5557 * first block. 
5558 */ 5559 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 5560 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) 5561 pmap_zero_page_area(m, frag, base - frag); 5562 5563 /* 5564 * If the ending offset is not DEV_BSIZE aligned and the 5565 * valid bit is clear, we have to zero out a portion of 5566 * the last block. 5567 */ 5568 endoff = base + size; 5569 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 5570 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) 5571 pmap_zero_page_area(m, endoff, 5572 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 5573 5574 /* 5575 * Set valid, clear dirty bits. If validating the entire 5576 * page we can safely clear the pmap modify bit. We also 5577 * use this opportunity to clear the PGA_NOSYNC flag. If a process 5578 * takes a write fault on a MAP_NOSYNC memory area the flag will 5579 * be set again. 5580 * 5581 * We set valid bits inclusive of any overlap, but we can only 5582 * clear dirty bits for DEV_BSIZE chunks that are fully within 5583 * the range. 5584 */ 5585 oldvalid = m->valid; 5586 pagebits = vm_page_bits(base, size); 5587 if (vm_page_xbusied(m)) 5588 m->valid |= pagebits; 5589 else 5590 vm_page_bits_set(m, &m->valid, pagebits); 5591 #if 0 /* NOT YET */ 5592 if ((frag = base & (DEV_BSIZE - 1)) != 0) { 5593 frag = DEV_BSIZE - frag; 5594 base += frag; 5595 size -= frag; 5596 if (size < 0) 5597 size = 0; 5598 } 5599 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 5600 #endif 5601 if (base == 0 && size == PAGE_SIZE) { 5602 /* 5603 * The page can only be modified within the pmap if it is 5604 * mapped, and it can only be mapped if it was previously 5605 * fully valid. 5606 */ 5607 if (oldvalid == VM_PAGE_BITS_ALL) 5608 /* 5609 * Perform the pmap_clear_modify() first. Otherwise, 5610 * a concurrent pmap operation, such as 5611 * pmap_protect(), could clear a modification in the 5612 * pmap and set the dirty field on the page before 5613 * pmap_clear_modify() had begun and after the dirty 5614 * field was cleared here. 5615 */ 5616 pmap_clear_modify(m); 5617 m->dirty = 0; 5618 vm_page_aflag_clear(m, PGA_NOSYNC); 5619 } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m)) 5620 m->dirty &= ~pagebits; 5621 else 5622 vm_page_clear_dirty_mask(m, pagebits); 5623 } 5624 5625 void 5626 vm_page_clear_dirty(vm_page_t m, int base, int size) 5627 { 5628 5629 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 5630 } 5631 5632 /* 5633 * vm_page_set_invalid: 5634 * 5635 * Invalidates DEV_BSIZE'd chunks within a page. Both the 5636 * valid and dirty bits for the effected areas are cleared. 5637 */ 5638 void 5639 vm_page_set_invalid(vm_page_t m, int base, int size) 5640 { 5641 vm_page_bits_t bits; 5642 vm_object_t object; 5643 5644 /* 5645 * The object lock is required so that pages can't be mapped 5646 * read-only while we're in the process of invalidating them. 
5647 */ 5648 object = m->object; 5649 VM_OBJECT_ASSERT_WLOCKED(object); 5650 vm_page_assert_busied(m); 5651 5652 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + 5653 size >= object->un_pager.vnp.vnp_size) 5654 bits = VM_PAGE_BITS_ALL; 5655 else 5656 bits = vm_page_bits(base, size); 5657 if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0) 5658 pmap_remove_all(m); 5659 KASSERT((bits == 0 && vm_page_all_valid(m)) || 5660 !pmap_page_is_mapped(m), 5661 ("vm_page_set_invalid: page %p is mapped", m)); 5662 if (vm_page_xbusied(m)) { 5663 m->valid &= ~bits; 5664 m->dirty &= ~bits; 5665 } else { 5666 vm_page_bits_clear(m, &m->valid, bits); 5667 vm_page_bits_clear(m, &m->dirty, bits); 5668 } 5669 } 5670 5671 /* 5672 * vm_page_invalid: 5673 * 5674 * Invalidates the entire page. The page must be busy, unmapped, and 5675 * the enclosing object must be locked. The object locks protects 5676 * against concurrent read-only pmap enter which is done without 5677 * busy. 5678 */ 5679 void 5680 vm_page_invalid(vm_page_t m) 5681 { 5682 5683 vm_page_assert_busied(m); 5684 VM_OBJECT_ASSERT_WLOCKED(m->object); 5685 MPASS(!pmap_page_is_mapped(m)); 5686 5687 if (vm_page_xbusied(m)) 5688 m->valid = 0; 5689 else 5690 vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL); 5691 } 5692 5693 /* 5694 * vm_page_zero_invalid() 5695 * 5696 * The kernel assumes that the invalid portions of a page contain 5697 * garbage, but such pages can be mapped into memory by user code. 5698 * When this occurs, we must zero out the non-valid portions of the 5699 * page so user code sees what it expects. 5700 * 5701 * Pages are most often semi-valid when the end of a file is mapped 5702 * into memory and the file's size is not page aligned. 5703 */ 5704 void 5705 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 5706 { 5707 int b; 5708 int i; 5709 5710 /* 5711 * Scan the valid bits looking for invalid sections that 5712 * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the 5713 * valid bit may be set ) have already been zeroed by 5714 * vm_page_set_validclean(). 5715 */ 5716 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 5717 if (i == (PAGE_SIZE / DEV_BSIZE) || 5718 (m->valid & ((vm_page_bits_t)1 << i))) { 5719 if (i > b) { 5720 pmap_zero_page_area(m, 5721 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 5722 } 5723 b = i + 1; 5724 } 5725 } 5726 5727 /* 5728 * setvalid is TRUE when we can safely set the zero'd areas 5729 * as being valid. We can do this if there are no cache consistency 5730 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. 5731 */ 5732 if (setvalid) 5733 vm_page_valid(m); 5734 } 5735 5736 /* 5737 * vm_page_is_valid: 5738 * 5739 * Is (partial) page valid? Note that the case where size == 0 5740 * will return FALSE in the degenerate case where the page is 5741 * entirely invalid, and TRUE otherwise. 5742 * 5743 * Some callers envoke this routine without the busy lock held and 5744 * handle races via higher level locks. Typical callers should 5745 * hold a busy lock to prevent invalidation. 5746 */ 5747 int 5748 vm_page_is_valid(vm_page_t m, int base, int size) 5749 { 5750 vm_page_bits_t bits; 5751 5752 bits = vm_page_bits(base, size); 5753 return (vm_page_any_valid(m) && (m->valid & bits) == bits); 5754 } 5755 5756 /* 5757 * Returns true if all of the specified predicates are true for the entire 5758 * (super)page and false otherwise. 
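 *
 * For instance, a caller holding the object lock can ask whether a
 * psind == 1 superpage run beginning at "m" is entirely valid and
 * unbusied (illustrative only):
 *
 *	if (vm_page_ps_test(m, 1, PS_ALL_VALID | PS_NONE_BUSY, NULL))
 *		...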
5759 */ 5760 bool 5761 vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m) 5762 { 5763 vm_object_t object; 5764 int i, npages; 5765 5766 object = m->object; 5767 if (skip_m != NULL && skip_m->object != object) 5768 return (false); 5769 VM_OBJECT_ASSERT_LOCKED(object); 5770 KASSERT(psind <= m->psind, 5771 ("psind %d > psind %d of m %p", psind, m->psind, m)); 5772 npages = atop(pagesizes[psind]); 5773 5774 /* 5775 * The physically contiguous pages that make up a superpage, i.e., a 5776 * page with a page size index ("psind") greater than zero, will 5777 * occupy adjacent entries in vm_page_array[]. 5778 */ 5779 for (i = 0; i < npages; i++) { 5780 /* Always test object consistency, including "skip_m". */ 5781 if (m[i].object != object) 5782 return (false); 5783 if (&m[i] == skip_m) 5784 continue; 5785 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i])) 5786 return (false); 5787 if ((flags & PS_ALL_DIRTY) != 0) { 5788 /* 5789 * Calling vm_page_test_dirty() or pmap_is_modified() 5790 * might stop this case from spuriously returning 5791 * "false". However, that would require a write lock 5792 * on the object containing "m[i]". 5793 */ 5794 if (m[i].dirty != VM_PAGE_BITS_ALL) 5795 return (false); 5796 } 5797 if ((flags & PS_ALL_VALID) != 0 && 5798 m[i].valid != VM_PAGE_BITS_ALL) 5799 return (false); 5800 } 5801 return (true); 5802 } 5803 5804 /* 5805 * Set the page's dirty bits if the page is modified. 5806 */ 5807 void 5808 vm_page_test_dirty(vm_page_t m) 5809 { 5810 5811 vm_page_assert_busied(m); 5812 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 5813 vm_page_dirty(m); 5814 } 5815 5816 void 5817 vm_page_valid(vm_page_t m) 5818 { 5819 5820 vm_page_assert_busied(m); 5821 if (vm_page_xbusied(m)) 5822 m->valid = VM_PAGE_BITS_ALL; 5823 else 5824 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL); 5825 } 5826 5827 void 5828 vm_page_lock_KBI(vm_page_t m, const char *file, int line) 5829 { 5830 5831 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); 5832 } 5833 5834 void 5835 vm_page_unlock_KBI(vm_page_t m, const char *file, int line) 5836 { 5837 5838 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line); 5839 } 5840 5841 int 5842 vm_page_trylock_KBI(vm_page_t m, const char *file, int line) 5843 { 5844 5845 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line)); 5846 } 5847 5848 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 5849 void 5850 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line) 5851 { 5852 5853 vm_page_lock_assert_KBI(m, MA_OWNED, file, line); 5854 } 5855 5856 void 5857 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line) 5858 { 5859 5860 mtx_assert_(vm_page_lockptr(m), a, file, line); 5861 } 5862 #endif 5863 5864 #ifdef INVARIANTS 5865 void 5866 vm_page_object_busy_assert(vm_page_t m) 5867 { 5868 5869 /* 5870 * Certain of the page's fields may only be modified by the 5871 * holder of a page or object busy. 5872 */ 5873 if (m->object != NULL && !vm_page_busied(m)) 5874 VM_OBJECT_ASSERT_BUSY(m->object); 5875 } 5876 5877 void 5878 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits) 5879 { 5880 5881 if ((bits & PGA_WRITEABLE) == 0) 5882 return; 5883 5884 /* 5885 * The PGA_WRITEABLE flag can only be set if the page is 5886 * managed, is exclusively busied or the object is locked. 5887 * Currently, this flag is only set by pmap_enter(). 
5888 */ 5889 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5890 ("PGA_WRITEABLE on unmanaged page")); 5891 if (!vm_page_xbusied(m)) 5892 VM_OBJECT_ASSERT_BUSY(m->object); 5893 } 5894 #endif 5895 5896 #include "opt_ddb.h" 5897 #ifdef DDB 5898 #include <sys/kernel.h> 5899 5900 #include <ddb/ddb.h> 5901 5902 DB_SHOW_COMMAND_FLAGS(page, vm_page_print_page_info, DB_CMD_MEMSAFE) 5903 { 5904 5905 db_printf("vm_cnt.v_free_count: %d\n", vm_free_count()); 5906 db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count()); 5907 db_printf("vm_cnt.v_active_count: %d\n", vm_active_count()); 5908 db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count()); 5909 db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count()); 5910 db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved); 5911 db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min); 5912 db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target); 5913 db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target); 5914 } 5915 5916 DB_SHOW_COMMAND_FLAGS(pageq, vm_page_print_pageq_info, DB_CMD_MEMSAFE) 5917 { 5918 int dom; 5919 5920 db_printf("pq_free %d\n", vm_free_count()); 5921 for (dom = 0; dom < vm_ndomains; dom++) { 5922 db_printf( 5923 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n", 5924 dom, 5925 vm_dom[dom].vmd_page_count, 5926 vm_dom[dom].vmd_free_count, 5927 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt, 5928 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt, 5929 vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt, 5930 vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt); 5931 } 5932 } 5933 5934 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) 5935 { 5936 vm_page_t m; 5937 boolean_t phys, virt; 5938 5939 if (!have_addr) { 5940 db_printf("show pginfo addr\n"); 5941 return; 5942 } 5943 5944 phys = strchr(modif, 'p') != NULL; 5945 virt = strchr(modif, 'v') != NULL; 5946 if (virt) 5947 m = PHYS_TO_VM_PAGE(pmap_kextract(addr)); 5948 else if (phys) 5949 m = PHYS_TO_VM_PAGE(addr); 5950 else 5951 m = (vm_page_t)addr; 5952 db_printf( 5953 "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n" 5954 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", 5955 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, 5956 m->a.queue, m->ref_count, m->a.flags, m->oflags, 5957 m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty); 5958 } 5959 #endif /* DDB */ 5960