/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sleepqueue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

struct vm_domain vm_dom[MAXMEMDOM];

DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);

struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];

struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
/* The following fields are protected by the domainset lock. */
domainset_t __exclusive_cache_line vm_min_domains;
domainset_t __exclusive_cache_line vm_severe_domains;
static int vm_min_waiters;
static int vm_severe_waiters;
static int vm_pageproc_waiters;

static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM page statistics");

static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
    CTLFLAG_RD, &pqstate_commit_retries,
    "Number of failed per-page atomic queue state updates");

static COUNTER_U64_DEFINE_EARLY(queue_ops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
    CTLFLAG_RD, &queue_ops,
    "Number of batched queue operations");

static COUNTER_U64_DEFINE_EARLY(queue_nops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
    CTLFLAG_RD, &queue_nops,
    "Number of batched queue operations with no effects");
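/*
 * Example (an illustrative note, not part of the original source): the
 * counters above surface under the vm.stats.page sysctl node and can be
 * read from userspace, e.g.:
 *
 *	# sysctl vm.stats.page.queue_ops vm.stats.page.queue_nops
 */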
/*
 * bogus page -- for I/O to/from partially complete buffers,
 * or for paging into sparsely invalid regions.
 */
vm_page_t bogus_page;

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;

struct bitset *vm_page_dump;
long vm_page_dump_pages;

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

static void vm_page_alloc_check(vm_page_t m);
static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
static bool vm_page_free_prep(vm_page_t m);
static void vm_page_free_toq(vm_page_t m);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
    const uint16_t nflag);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
    int req);
static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
    int flags);
static void vm_page_zone_release(void *arg, void **store, int cnt);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

static void
vm_page_init(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
}

/*
 * The cache page zone is initialized later since we need to be able to
 * allocate pages before UMA is fully initialized.
 */
static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	struct vm_pgcache *pgcache;
	int cache, domain, maxcache, pool;

	maxcache = 0;
	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &maxcache);
	maxcache *= mp_ncpus;
	for (domain = 0; domain < vm_ndomains; domain++) {
		vmd = VM_DOMAIN(domain);
		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
			pgcache = &vmd->vmd_pgcache[pool];
			pgcache->domain = domain;
			pgcache->pool = pool;
			pgcache->zone = uma_zcache_create("vm pgcache",
			    PAGE_SIZE, NULL, NULL, NULL, NULL,
			    vm_page_zone_import, vm_page_zone_release, pgcache,
			    UMA_ZONE_VM);

			/*
			 * Limit each pool's zone to 0.1% of the pages in the
			 * domain.
			 */
			cache = maxcache != 0 ? maxcache :
			    vmd->vmd_page_count / 1000;
			uma_zone_set_maxcache(pgcache->zone, cache);
		}
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
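/*
 * Example (an illustrative sketch, not part of the original source):
 * the per-CPU cache limit above is a loader tunable.  A line such as
 *
 *	vm.pgcache_zone_max_pcpu=512
 *
 * in loader.conf would cap each pool's zone at 512 * mp_ncpus pages;
 * leaving it at the default of 0 falls back to 0.1% of the domain.
 */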
/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

bool
vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
{
	struct vm_domain *vmd;
	vm_page_t m;
	int ret;

	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		return (true); /* page does not exist, no failure */

	vmd = vm_pagequeue_domain(m);
	vm_domain_free_lock(vmd);
	ret = vm_phys_unfree_page(m);
	vm_domain_free_unlock(vmd);
	if (ret != 0) {
		vm_domain_freecnt_inc(vmd, -1);
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		if (verbose)
			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
	}
	return (ret);
}

/*
 *	vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	char *next;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		vm_page_blacklist_add(pa, bootverbose);
	}
}
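/*
 * Example (an illustrative sketch, not part of the original source):
 * the blacklist parsed above can be supplied through the vm.blacklist
 * kenv variable, with entries separated by space, comma, or newline,
 * e.g. in loader.conf:
 *
 *	vm.blacklist="0x7f654000,0x7f655000"
 */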
/*
 *	vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Initialize a dummy page for use in scans of the specified paging queue.
 * In principle, this function only needs to set the flag PG_MARKER.
 * Nonetheless, it write busies the page as a safety precaution.
 */
void
vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->a.flags = aflags;
	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	marker->a.queue = queue;
}
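/*
 * Example (an illustrative sketch, not part of the original source;
 * "pq" and "m" are hypothetical locals): a queue scan typically places
 * a marker after its current position so it can find its place again
 * after dropping the queue lock:
 *
 *	struct vm_page marker;
 *
 *	vm_page_init_marker(&marker, PQ_INACTIVE, 0);
 *	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
 */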
static void
vm_page_domain_init(int domain)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int i;

	vmd = VM_DOMAIN(domain);
	bzero(vmd, sizeof(*vmd));
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(const char **,
	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	vmd->vmd_domain = domain;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
		pq->pq_pdpages = 0;
		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
	}
	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);

	/*
	 * inacthead is used to provide FIFO ordering for LRU-bypassing
	 * insertions.
	 */
	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
	    &vmd->vmd_inacthead, plinks.q);

	/*
	 * The clock pages are used to implement active queue scanning without
	 * requeues.  Scans start at clock[0], which is advanced after the scan
	 * ends.  When the two clock hands meet, they are reset and scanning
	 * resumes from the head of the queue.
	 */
	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[0], plinks.q);
	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[1], plinks.q);
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
void
vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
{

	m->object = NULL;
	m->ref_count = 0;
	m->busy_lock = VPB_FREED;
	m->flags = m->a.flags = 0;
	m->phys_addr = pa;
	m->a.queue = PQ_NONE;
	m->psind = 0;
	m->segind = segind;
	m->order = VM_NFREEORDER;
	m->pool = VM_FREEPOOL_DEFAULT;
	m->valid = m->dirty = 0;
	pmap_page_init(m);
}

#ifndef PMAP_HAS_PAGE_ARRAY
static vm_paddr_t
vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
{
	vm_paddr_t new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 * However, because this page is allocated from KVM, out-of-bounds
	 * accesses using the direct map will not be trapped.
	 */
	*vaddr += PAGE_SIZE;

	/*
	 * Allocate physical memory for the page structures, and map it.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array_size = page_range;

	return (new_end);
}
#endif

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.  Allocates physical memory for
 *	bootstrapping UMA and some data structures that are used to manage
 *	physical pages.  Initializes these structures, and populates the free
 *	page queues.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	struct vm_phys_seg *seg;
	vm_page_t m;
	char *list, *listend;
	vm_paddr_t end, high_avail, low_avail, new_end, size;
	vm_paddr_t page_range __unused;
	vm_paddr_t last_pa, pa;
	u_long pagecount;
#if MINIDUMP_PAGE_TRACKING
	u_long vm_page_dump_size;
#endif
	int biggestone, i, segind;
#ifdef WITNESS
	vm_offset_t mapped;
	int witness_size;
#endif
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	long ii;
#endif

	vaddr = round_page(vaddr);

	vm_phys_early_startup();
	biggestone = vm_phys_avail_largest();
	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(i);

	new_end = end;
#ifdef WITNESS
	witness_size = round_page(witness_startup_count());
	new_end -= witness_size;
	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, witness_size);
	witness_startup((void *)mapped);
#endif
#if MINIDUMP_PAGE_TRACKING
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	vm_page_dump_pages = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
		    dump_avail[i] / PAGE_SIZE;
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	}
	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#else
	(void)last_pa;
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
	/*
	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
	 * in a crash dump.  When pmap_map() uses the direct map, they are
	 * not automatically included.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use, taking into account the overhead of a page structure per page.
	 * In other words, solve
	 *	"available physical memory" - round_page(page_range *
	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE
	 * for page_range.
	 */
	low_avail = phys_avail[0];
	high_avail = phys_avail[1];
	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_avail)
			low_avail = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_avail)
			high_avail = vm_phys_segs[i].end;
	}
	/* Skip the first chunk.  It is already accounted for. */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i] < low_avail)
			low_avail = phys_avail[i];
		if (phys_avail[i + 1] > high_avail)
			high_avail = phys_avail[i + 1];
	}
	first_page = low_avail / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	size = 0;
	for (i = 0; i < vm_phys_nsegs; i++)
		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		size += phys_avail[i + 1] - phys_avail[i];
#elif defined(VM_PHYSSEG_DENSE)
	size = high_avail - low_avail;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
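	/*
	 * Worked example (illustrative, not part of the original source) for
	 * the vm_page_array_alloc() path below: with PAGE_SIZE == 4096 and
	 * assuming sizeof(struct vm_page) == 104, a 1 GiB chunk gives
	 *
	 *	page_range = 1073741824 / (4096 + 104) = 255652
	 *
	 * usable pages; round_page(255652 * 104) = 26591232 bytes (6492
	 * pages) go to the page array, and 255652 * 4096 + 26591232 adds
	 * back up to exactly 1073741824.
	 */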
#ifdef PMAP_HAS_PAGE_ARRAY
	pmap_page_array_startup(size / PAGE_SIZE);
	biggestone = vm_phys_avail_largest();
	end = new_end = phys_avail[biggestone + 1];
#else
#ifdef VM_PHYSSEG_DENSE
	/*
	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
	 * the overhead of a page structure per page only if vm_page_array is
	 * allocated from the last physical memory chunk.  Otherwise, we must
	 * allocate page structures representing the physical memory
	 * underlying vm_page_array, even though they will not be used.
	 */
	if (new_end != high_avail)
		page_range = size / PAGE_SIZE;
	else
#endif
	{
		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));

		/*
		 * If the partial bytes remaining are large enough for
		 * a page (PAGE_SIZE) without a corresponding
		 * 'struct vm_page', then new_end will contain an
		 * extra page after subtracting the length of the VM
		 * page array.  Compensate by subtracting an extra
		 * page from new_end.
		 */
		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
			if (new_end == high_avail)
				high_avail -= PAGE_SIZE;
			new_end -= PAGE_SIZE;
		}
	}
	end = new_end;
	new_end = vm_page_array_alloc(&vaddr, end, page_range);
#endif

#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate physical memory for the reservation management system's
	 * data structures, and map it.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end);
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
	/*
	 * Include vm_page_array and vm_reserv_array in a crash dump.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		if (vm_phys_avail_size(i) != 0)
			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Initialize the page structures and add every available page to the
	 * physical memory allocator's free lists.
	 */
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	for (ii = 0; ii < vm_page_array_size; ii++) {
		m = &vm_page_array[ii];
		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
		m->flags = PG_FICTITIOUS;
	}
#endif
	vm_cnt.v_page_count = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		for (m = seg->first_page, pa = seg->start; pa < seg->end;
		    m++, pa += PAGE_SIZE)
			vm_page_init_page(m, pa, segind);

		/*
		 * Add the segment to the free lists only if it is covered by
		 * one of the ranges in phys_avail.  Because we've added the
		 * ranges to the vm_phys_segs array, we can assume that each
		 * segment is either entirely contained in one of the ranges,
		 * or doesn't overlap any of them.
		 */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			struct vm_domain *vmd;

			if (seg->start < phys_avail[i] ||
			    seg->end > phys_avail[i + 1])
				continue;

			m = seg->first_page;
			pagecount = (u_long)atop(seg->end - seg->start);

			vmd = VM_DOMAIN(seg->domain);
			vm_domain_free_lock(vmd);
			vm_phys_enqueue_contig(m, pagecount);
			vm_domain_free_unlock(vmd);
			vm_domain_freecnt_inc(vmd, pagecount);
			vm_cnt.v_page_count += (u_int)pagecount;

			vmd->vmd_page_count += (u_int)pagecount;
			vmd->vmd_segs |= 1UL << m->segind;
			break;
		}
	}
	/*
	 * Remove blacklisted pages from the physical memory allocator.
	 */
	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif

	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 *	vm_page_trybusy
 *
 *	Helper routine for grab functions to trylock busy.
 *
 *	Returns true on success and false on failure.
 */
static bool
vm_page_trybusy(vm_page_t m, int allocflags)
{

	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
		return (vm_page_trysbusy(m));
	else
		return (vm_page_tryxbusy(m));
}

/*
 *	vm_page_tryacquire
 *
 *	Helper routine for grab functions to trylock busy and wire.
 *
 *	Returns true on success and false on failure.
 */
static inline bool
vm_page_tryacquire(vm_page_t m, int allocflags)
{
	bool locked;

	locked = vm_page_trybusy(m, allocflags);
	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_wire(m);
	return (locked);
}

/*
 *	vm_page_busy_acquire:
 *
 *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
 *	and drop the object lock if necessary.
 */
bool
vm_page_busy_acquire(vm_page_t m, int allocflags)
{
	vm_object_t obj;
	bool locked;

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = atomic_load_ptr(&m->object);
	for (;;) {
		if (vm_page_tryacquire(m, allocflags))
			return (true);
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (false);
		if (obj != NULL)
			locked = VM_OBJECT_WOWNED(obj);
		else
			locked = false;
		MPASS(locked || vm_page_wired(m));
		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
		    locked) && locked)
			VM_OBJECT_WLOCK(obj);
		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
			return (false);
		KASSERT(m->object == obj || m->object == NULL,
		    ("vm_page_busy_acquire: page %p does not belong to %p",
		    m, obj));
	}
}

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;

	vm_page_assert_xbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (atomic_fcmpset_rel_int(&m->busy_lock,
		    &x, VPB_SHARERS_WORD(1)))
			break;
	}
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}
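/*
 * Example (an illustrative sketch, not part of the original source): a
 * caller holding an object reference can busy a page without blocking,
 * or let the routine sleep with the object lock dropped and reacquired
 * internally:
 *
 *	if (!vm_page_busy_acquire(m, VM_ALLOC_NOWAIT))
 *		return (EBUSY);		(hypothetical error path)
 */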
/*
 *	vm_page_busy_tryupgrade:
 *
 *	Attempt to upgrade a single shared busy into an exclusive busy.
 */
int
vm_page_busy_tryupgrade(vm_page_t m)
{
	u_int ce, x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	ce = VPB_CURTHREAD_EXCLUSIVE;
	for (;;) {
		if (VPB_SHARERS(x) > 1)
			return (0);
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_busy_tryupgrade: invalid lock state"));
		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    ce | (x & VPB_BIT_WAITERS)))
			continue;
		return (1);
	}
}

/*
 *	vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}

/*
 *	vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		KASSERT(x != VPB_FREED,
		    ("vm_page_sunbusy: Unlocking freed page."));
		if (VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_sunbusy: invalid lock state"));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) == 0)
			break;
		wakeup(m);
		break;
	}
}

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	If nonshared is true, sleep only if the page is xbusy.
 *
 *	The object lock must be held on entry and will be released on exit.
 */
void
vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
{
	vm_object_t obj;

	obj = m->object;
	VM_OBJECT_ASSERT_LOCKED(obj);
	vm_page_lock_assert(m, MA_NOTOWNED);

	if (!_vm_page_busy_sleep(obj, m, m->pindex, wmesg,
	    nonshared ? VM_ALLOC_SBUSY : 0, true))
		VM_OBJECT_DROP(obj);
}

/*
 *	vm_page_busy_sleep_unlocked:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	If nonshared is true, sleep only if the page is xbusy.
 *
 *	The object lock must not be held on entry.  The operation will
 *	return if the page changes identity.
 */
void
vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, bool nonshared)
{

	VM_OBJECT_ASSERT_UNLOCKED(obj);
	vm_page_lock_assert(m, MA_NOTOWNED);

	_vm_page_busy_sleep(obj, m, pindex, wmesg,
	    nonshared ? VM_ALLOC_SBUSY : 0, false);
}

/*
 *	_vm_page_busy_sleep:
 *
 *	Internal busy sleep function.  Verifies the page identity and
 *	lockstate against parameters.  Returns true if it sleeps and
 *	false otherwise.
 *
 *	If locked is true the lock will be dropped for any true returns
 *	and held for any false returns.
 */
static bool
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)
{
	bool xsleep;
	u_int x;

	/*
	 * If the object is busy we must wait for that to drain to zero
	 * before trying the page again.
	 */
	if (obj != NULL && vm_object_busied(obj)) {
		if (locked)
			VM_OBJECT_DROP(obj);
		vm_object_busy_wait(obj, wmesg);
		return (true);
	}

	if (!vm_page_busied(m))
		return (false);

	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
	sleepq_lock(m);
	x = vm_page_busy_fetch(m);
	do {
		/*
		 * If the page changes objects or becomes unlocked we can
		 * simply return.
		 */
		if (x == VPB_UNBUSIED ||
		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
		    m->object != obj || m->pindex != pindex) {
			sleepq_release(m);
			return (false);
		}
		if ((x & VPB_BIT_WAITERS) != 0)
			break;
	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
	if (locked)
		VM_OBJECT_DROP(obj);
	DROP_GIANT();
	sleepq_add(m, NULL, wmesg, 0, 0);
	sleepq_wait(m, PVM);
	PICKUP_GIANT();
	return (true);
}

/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	vm_object_t obj;
	u_int x;

	obj = m->object;
	x = vm_page_busy_fetch(m);
	for (;;) {
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		/*
		 * Reduce the window for transient busies that will trigger
		 * false negatives in vm_page_ps_test().
		 */
		if (obj != NULL && vm_object_busied(obj))
			return (0);
		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    x + VPB_ONE_SHARER))
			break;
	}

	/* Refetch the object now that we're guaranteed that it is stable. */
	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_sunbusy(m);
		return (0);
	}
	return (1);
}

/*
 *	vm_page_tryxbusy:
 *
 *	Try to exclusive busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_tryxbusy(vm_page_t m)
{
	vm_object_t obj;

	if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
	    VPB_CURTHREAD_EXCLUSIVE) == 0)
		return (0);

	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_xunbusy(m);
		return (0);
	}
	return (1);
}

static void
vm_page_xunbusy_hard_tail(vm_page_t m)
{
	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* Wake the waiter. */
	wakeup(m);
}

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called when unbusy has failed because there is a waiter.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{
	vm_page_assert_xbusied(m);
	vm_page_xunbusy_hard_tail(m);
}

void
vm_page_xunbusy_hard_unchecked(vm_page_t m)
{
	vm_page_assert_xbusied_unchecked(m);
	vm_page_xunbusy_hard_tail(m);
}

static void
vm_page_busy_free(vm_page_t m)
{
	u_int x;

	atomic_thread_fence_rel();
	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}
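/*
 * Example (an illustrative sketch, not part of the original source):
 * an opportunistic reader that must never sleep can pair the trylock
 * with its shared-unbusy counterpart:
 *
 *	if (vm_page_trysbusy(m)) {
 *		... inspect the page ...
 *		vm_page_sunbusy(m);
 *	}
 */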
/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{

	for (; count != 0; count--) {
		vm_page_unwire(*ma, PQ_ACTIVE);
		ma++;
	}
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->a.queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	/* Fictitious pages are unevictable. */
	m->ref_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	vm_page_assert_xbusied(m);
	vm_page_busy_free(m);
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}
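/*
 * Example (an illustrative sketch, not part of the original source):
 * a device pager might wrap a device register window in a fictitious
 * page; VM_MEMATTR_UNCACHEABLE stands in for whatever attribute the
 * hardware requires:
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
 *	... hand the page to the mapping code ...
 *	vm_page_putfake(m);
 */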
/*
 * Unbusy and handle the page queueing for a page from a getpages request that
 * was optionally read ahead or behind.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	/* We shouldn't put invalid pages on queues. */
	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);
	vm_page_xunbusy_unchecked(m);
}

/*
 * Destroy the identity of an invalid page and free it if possible.
 * This is intended to be used when reading a page from backing store fails.
 */
void
vm_page_free_invalid(vm_page_t m)
{

	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
	KASSERT(m->object != NULL, ("page %p has no object", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * We may be attempting to free the page as part of the handling for an
	 * I/O error, in which case the page was xbusied by a different thread.
	 */
	vm_page_xbusy_claim(m);

	/*
	 * If someone has wired this page while the object lock
	 * was not held, then the thread that unwires is responsible
	 * for freeing the page.  Otherwise just free the page now.
	 * The wire count of this unmapped page cannot change while
	 * we have the page xbusy and the page's object wlocked.
	 */
	if (vm_page_remove(m))
		vm_page_free(m);
}

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the object lock if the page is busied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and the object containing it
 *	must be locked.
 */
int
vm_page_sleep_if_busy(vm_page_t m, const char *wmesg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = m->object;
	if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, 0, true)) {
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}

/*
 *	vm_page_sleep_if_xbusy:
 *
 *	Sleep and release the object lock if the page is xbusied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and the object containing it
 *	must be locked.
 */
int
vm_page_sleep_if_xbusy(vm_page_t m, const char *wmesg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = m->object;
	if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, VM_ALLOC_SBUSY,
	    true)) {
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}
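/*
 * Example (an illustrative sketch, not part of the original source):
 * because the object lock is dropped while sleeping, callers retry the
 * lookup whenever vm_page_sleep_if_busy() reports that it slept:
 *
 *	while ((m = vm_page_lookup(object, pindex)) != NULL &&
 *	    vm_page_sleep_if_busy(m, "pgbusy"))
 *		continue;
 */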
/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* Refer to this operation by its public name. */
	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));
}

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	vm_page_t msucc;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page.
	 */
	m->object = object;
	m->pindex = pindex;
	m->ref_count |= VPRC_OBJREF;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {
		m->object = NULL;
		m->pindex = 0;
		m->ref_count &= ~VPRC_OBJREF;
		return (1);
	}
	vm_page_insert_radixdone(m, object, mpred);
	return (0);
}
/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's generation count.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 * Do the work to remove a page from its object.  The caller is responsible for
 * updating the page's fields to reflect this removal.
 */
static void
vm_page_object_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mrem;

	vm_page_assert_xbusied(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("page %p is missing its object ref", m));

	/* Deferred free of swap space. */
	if ((m->a.flags & PGA_SWAP_FREE) != 0)
		vm_pager_page_unswapped(m);

	m->object = NULL;
	mrem = vm_radix_remove(&object->rtree, m->pindex);
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);
}

/*
 *	vm_page_remove:
 *
 *	Removes the specified page from its containing object, but does not
 *	invalidate any backing storage.  Returns true if the object's reference
 *	was the last reference to the page, and false otherwise.
 *
 *	The object must be locked and the page must be exclusively busied.
 *	The exclusive busy will be released on return.  If this is not the
 *	final ref and the caller does not hold a wire reference it may not
 *	continue to access the page.
 */
bool
vm_page_remove(vm_page_t m)
{
	bool dropped;

	dropped = vm_page_remove_xbusy(m);
	vm_page_xunbusy(m);

	return (dropped);
}
/*
 *	vm_page_remove_xbusy
 *
 *	Removes the page but leaves the xbusy held.  Returns true if this
 *	removed the final ref and false otherwise.
 */
bool
vm_page_remove_xbusy(vm_page_t m)
{

	vm_page_object_remove(m);
	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 *	vm_page_lookup_unlocked:
 *
 *	Returns the page associated with the object/offset pair specified;
 *	if none is found, NULL is returned.  The page may no longer be
 *	present in the object at the time that this function returns.  Only
 *	useful for opportunistic checks such as inmem().
 */
vm_page_t
vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
{

	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
}

/*
 *	vm_page_relookup:
 *
 *	Returns a page that must already have been busied by
 *	the caller.  Used for bogus page replacement.
 */
vm_page_t
vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
	    m->object == object && m->pindex == pindex,
	    ("vm_page_relookup: Invalid page %p", m));
	return (m);
}

/*
 * This should only be used by lockless functions for releasing transient
 * incorrect acquires.  The page may have been freed after we acquired a
 * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
 * further to do.
 */
static void
vm_page_busy_release(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (x == VPB_FREED)
			break;
		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & VPB_BIT_SHARED) != 0 ||
		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
		    ("vm_page_busy_release: %p xbusy not owned.", m));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) != 0)
			wakeup(m);
		break;
	}
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}
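/*
 * Example (an illustrative sketch, not part of the original source):
 * with the object locked, the resident pages at or above "start" can
 * be walked in pindex order:
 *
 *	for (m = vm_page_find_least(object, start); m != NULL;
 *	    m = TAILQ_NEXT(m, listq)) {
 *		... process m ...
 *	}
 */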
/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * Both pages must be exclusively busied on enter.  The old page is
 * unbusied on exit.
 *
 * A return value of true means mold is now free.  If this is not the
 * final ref and the caller does not hold a wire reference it may not
 * continue to access the page.
 */
static bool
vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;
	bool dropped;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_assert_xbusied(mold);
	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
	    ("vm_page_replace: page %p already in object", mnew));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
	mret = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
	KASSERT((mold->oflags & VPO_UNMANAGED) ==
	    (mnew->oflags & VPO_UNMANAGED),
	    ("vm_page_replace: mismatched VPO_UNMANAGED"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);
	mold->object = NULL;

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but the generation count should
	 * change if the page is dirty.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
	vm_page_xunbusy(mold);

	return (dropped);
}

void
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{

	vm_page_assert_xbusied(mnew);

	if (vm_page_replace_hold(mnew, object, pindex, mold))
		vm_page_free(mold);
}
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.
 *	We have to do this for several reasons: (1) we aren't freeing the
 *	page, (2) we are dirtying the page, (3) the VM system is probably
 *	moving the page from object A to B, and will then later move
 *	the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	fact that we moved it, and because we may be invalidating
 *	swap.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));
	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on mpred and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_object_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;

	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	return (0);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{

	return (vm_page_alloc_after(object, pindex, req, object != NULL ?
	    vm_radix_lookup_le(&object->rtree, pindex) : NULL));
}

vm_page_t
vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
    int req)
{

	return (vm_page_alloc_domain_after(object, pindex, domain, req,
	    object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) :
	    NULL));
}
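/*
 * Example (an illustrative sketch, not part of the original source):
 * the common allocation pattern with the object write-locked.  Because
 * VM_ALLOC_ZERO only expresses a preference, the caller still checks
 * PG_ZERO:
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		vm_wait(object);
 *		VM_OBJECT_WLOCK(object);
 *		... and retry ...
 *	} else if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */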
1999 */ 2000 vm_page_t 2001 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, 2002 int req, vm_page_t mpred) 2003 { 2004 struct vm_domainset_iter di; 2005 vm_page_t m; 2006 int domain; 2007 2008 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2009 do { 2010 m = vm_page_alloc_domain_after(object, pindex, domain, req, 2011 mpred); 2012 if (m != NULL) 2013 break; 2014 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2015 2016 return (m); 2017 } 2018 2019 /* 2020 * Returns true if the number of free pages exceeds the minimum 2021 * for the request class and false otherwise. 2022 */ 2023 static int 2024 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages) 2025 { 2026 u_int limit, old, new; 2027 2028 if (req_class == VM_ALLOC_INTERRUPT) 2029 limit = 0; 2030 else if (req_class == VM_ALLOC_SYSTEM) 2031 limit = vmd->vmd_interrupt_free_min; 2032 else 2033 limit = vmd->vmd_free_reserved; 2034 2035 /* 2036 * Attempt to reserve the pages. Fail if we're below the limit. 2037 */ 2038 limit += npages; 2039 old = vmd->vmd_free_count; 2040 do { 2041 if (old < limit) 2042 return (0); 2043 new = old - npages; 2044 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0); 2045 2046 /* Wake the page daemon if we've crossed the threshold. */ 2047 if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old)) 2048 pagedaemon_wakeup(vmd->vmd_domain); 2049 2050 /* Only update bitsets on transitions. */ 2051 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) || 2052 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe)) 2053 vm_domain_set(vmd); 2054 2055 return (1); 2056 } 2057 2058 int 2059 vm_domain_allocate(struct vm_domain *vmd, int req, int npages) 2060 { 2061 int req_class; 2062 2063 /* 2064 * The page daemon is allowed to dig deeper into the free page list. 2065 */ 2066 req_class = req & VM_ALLOC_CLASS_MASK; 2067 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2068 req_class = VM_ALLOC_SYSTEM; 2069 return (_vm_domain_allocate(vmd, req_class, npages)); 2070 } 2071 2072 vm_page_t 2073 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, 2074 int req, vm_page_t mpred) 2075 { 2076 struct vm_domain *vmd; 2077 vm_page_t m; 2078 int flags, pool; 2079 2080 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && 2081 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && 2082 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2083 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2084 ("inconsistent object(%p)/req(%x)", object, req)); 2085 KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, 2086 ("Can't sleep and retry object insertion.")); 2087 KASSERT(mpred == NULL || mpred->pindex < pindex, 2088 ("mpred %p doesn't precede pindex 0x%jx", mpred, 2089 (uintmax_t)pindex)); 2090 if (object != NULL) 2091 VM_OBJECT_ASSERT_WLOCKED(object); 2092 2093 flags = 0; 2094 m = NULL; 2095 pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT; 2096 again: 2097 #if VM_NRESERVLEVEL > 0 2098 /* 2099 * Can we allocate the page from a reservation? 
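 * (A reservation is a large, physically contiguous, aligned chunk of
 * memory set aside for a single object in order to enable superpage
 * promotion; see vm_reserv.c.)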
2100 */ 2101 if (vm_object_reserv(object) && 2102 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != 2103 NULL) { 2104 goto found; 2105 } 2106 #endif 2107 vmd = VM_DOMAIN(domain); 2108 if (vmd->vmd_pgcache[pool].zone != NULL) { 2109 m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT | M_NOVM); 2110 if (m != NULL) { 2111 flags |= PG_PCPU_CACHE; 2112 goto found; 2113 } 2114 } 2115 if (vm_domain_allocate(vmd, req, 1)) { 2116 /* 2117 * If not, allocate it from the free page queues. 2118 */ 2119 vm_domain_free_lock(vmd); 2120 m = vm_phys_alloc_pages(domain, pool, 0); 2121 vm_domain_free_unlock(vmd); 2122 if (m == NULL) { 2123 vm_domain_freecnt_inc(vmd, 1); 2124 #if VM_NRESERVLEVEL > 0 2125 if (vm_reserv_reclaim_inactive(domain)) 2126 goto again; 2127 #endif 2128 } 2129 } 2130 if (m == NULL) { 2131 /* 2132 * Not allocatable, give up. 2133 */ 2134 if (vm_domain_alloc_fail(vmd, object, req)) 2135 goto again; 2136 return (NULL); 2137 } 2138 2139 /* 2140 * At this point we had better have found a good page. 2141 */ 2142 found: 2143 vm_page_dequeue(m); 2144 vm_page_alloc_check(m); 2145 2146 /* 2147 * Initialize the page. Only the PG_ZERO flag is inherited. 2148 */ 2149 if ((req & VM_ALLOC_ZERO) != 0) 2150 flags |= (m->flags & PG_ZERO); 2151 if ((req & VM_ALLOC_NODUMP) != 0) 2152 flags |= PG_NODUMP; 2153 m->flags = flags; 2154 m->a.flags = 0; 2155 m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? 2156 VPO_UNMANAGED : 0; 2157 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) 2158 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2159 else if ((req & VM_ALLOC_SBUSY) != 0) 2160 m->busy_lock = VPB_SHARERS_WORD(1); 2161 else 2162 m->busy_lock = VPB_UNBUSIED; 2163 if (req & VM_ALLOC_WIRED) { 2164 vm_wire_add(1); 2165 m->ref_count = 1; 2166 } 2167 m->a.act_count = 0; 2168 2169 if (object != NULL) { 2170 if (vm_page_insert_after(m, object, pindex, mpred)) { 2171 if (req & VM_ALLOC_WIRED) { 2172 vm_wire_sub(1); 2173 m->ref_count = 0; 2174 } 2175 KASSERT(m->object == NULL, ("page %p has object", m)); 2176 m->oflags = VPO_UNMANAGED; 2177 m->busy_lock = VPB_UNBUSIED; 2178 /* Don't change PG_ZERO. */ 2179 vm_page_free_toq(m); 2180 if (req & VM_ALLOC_WAITFAIL) { 2181 VM_OBJECT_WUNLOCK(object); 2182 vm_radix_wait(); 2183 VM_OBJECT_WLOCK(object); 2184 } 2185 return (NULL); 2186 } 2187 2188 /* Ignore device objects; the pager sets "memattr" for them. */ 2189 if (object->memattr != VM_MEMATTR_DEFAULT && 2190 (object->flags & OBJ_FICTITIOUS) == 0) 2191 pmap_page_set_memattr(m, object->memattr); 2192 } else 2193 m->pindex = pindex; 2194 2195 return (m); 2196 } 2197 2198 /* 2199 * vm_page_alloc_contig: 2200 * 2201 * Allocate a contiguous set of physical pages of the given size "npages" 2202 * from the free lists. All of the physical pages must be at or above 2203 * the given physical address "low" and below the given physical address 2204 * "high". The given value "alignment" determines the alignment of the 2205 * first physical page in the set. If the given value "boundary" is 2206 * non-zero, then the set of physical pages cannot cross any physical 2207 * address boundary that is a multiple of that value. Both "alignment" 2208 * and "boundary" must be a power of two. 2209 * 2210 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT, 2211 * then the memory attribute setting for the physical pages is configured 2212 * to the object's memory attribute setting. 
Otherwise, the memory 2213 * attribute setting for the physical pages is configured to "memattr", 2214 * overriding the object's memory attribute setting. However, if the 2215 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the 2216 * memory attribute setting for the physical pages cannot be configured 2217 * to VM_MEMATTR_DEFAULT. 2218 * 2219 * The specified object may not contain fictitious pages. 2220 * 2221 * The caller must always specify an allocation class. 2222 * 2223 * allocation classes: 2224 * VM_ALLOC_NORMAL normal process request 2225 * VM_ALLOC_SYSTEM system *really* needs a page 2226 * VM_ALLOC_INTERRUPT interrupt time request 2227 * 2228 * optional allocation flags: 2229 * VM_ALLOC_NOBUSY do not exclusive busy the page 2230 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 2231 * VM_ALLOC_NOOBJ page is not associated with an object and 2232 * should not be exclusive busy 2233 * VM_ALLOC_SBUSY shared busy the allocated page 2234 * VM_ALLOC_WIRED wire the allocated page 2235 * VM_ALLOC_ZERO prefer a zeroed page 2236 */ 2237 vm_page_t 2238 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, 2239 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2240 vm_paddr_t boundary, vm_memattr_t memattr) 2241 { 2242 struct vm_domainset_iter di; 2243 vm_page_t m; 2244 int domain; 2245 2246 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2247 do { 2248 m = vm_page_alloc_contig_domain(object, pindex, domain, req, 2249 npages, low, high, alignment, boundary, memattr); 2250 if (m != NULL) 2251 break; 2252 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2253 2254 return (m); 2255 } 2256 2257 vm_page_t 2258 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2259 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2260 vm_paddr_t boundary, vm_memattr_t memattr) 2261 { 2262 struct vm_domain *vmd; 2263 vm_page_t m, m_ret, mpred; 2264 u_int busy_lock, flags, oflags; 2265 2266 mpred = NULL; /* XXX: pacify gcc */ 2267 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && 2268 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && 2269 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2270 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2271 ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object, 2272 req)); 2273 KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, 2274 ("Can't sleep and retry object insertion.")); 2275 if (object != NULL) { 2276 VM_OBJECT_ASSERT_WLOCKED(object); 2277 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, 2278 ("vm_page_alloc_contig: object %p has fictitious pages", 2279 object)); 2280 } 2281 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2282 2283 if (object != NULL) { 2284 mpred = vm_radix_lookup_le(&object->rtree, pindex); 2285 KASSERT(mpred == NULL || mpred->pindex != pindex, 2286 ("vm_page_alloc_contig: pindex already allocated")); 2287 } 2288 2289 /* 2290 * Can we allocate the pages without the number of free pages falling 2291 * below the lower bound for the allocation class? 2292 */ 2293 m_ret = NULL; 2294 again: 2295 #if VM_NRESERVLEVEL > 0 2296 /* 2297 * Can we allocate the pages from a reservation? 
2298 */ 2299 if (vm_object_reserv(object) && 2300 (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req, 2301 mpred, npages, low, high, alignment, boundary)) != NULL) { 2302 goto found; 2303 } 2304 #endif 2305 vmd = VM_DOMAIN(domain); 2306 if (vm_domain_allocate(vmd, req, npages)) { 2307 /* 2308 * allocate them from the free page queues. 2309 */ 2310 vm_domain_free_lock(vmd); 2311 m_ret = vm_phys_alloc_contig(domain, npages, low, high, 2312 alignment, boundary); 2313 vm_domain_free_unlock(vmd); 2314 if (m_ret == NULL) { 2315 vm_domain_freecnt_inc(vmd, npages); 2316 #if VM_NRESERVLEVEL > 0 2317 if (vm_reserv_reclaim_contig(domain, npages, low, 2318 high, alignment, boundary)) 2319 goto again; 2320 #endif 2321 } 2322 } 2323 if (m_ret == NULL) { 2324 if (vm_domain_alloc_fail(vmd, object, req)) 2325 goto again; 2326 return (NULL); 2327 } 2328 #if VM_NRESERVLEVEL > 0 2329 found: 2330 #endif 2331 for (m = m_ret; m < &m_ret[npages]; m++) { 2332 vm_page_dequeue(m); 2333 vm_page_alloc_check(m); 2334 } 2335 2336 /* 2337 * Initialize the pages. Only the PG_ZERO flag is inherited. 2338 */ 2339 flags = 0; 2340 if ((req & VM_ALLOC_ZERO) != 0) 2341 flags = PG_ZERO; 2342 if ((req & VM_ALLOC_NODUMP) != 0) 2343 flags |= PG_NODUMP; 2344 oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? 2345 VPO_UNMANAGED : 0; 2346 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) 2347 busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2348 else if ((req & VM_ALLOC_SBUSY) != 0) 2349 busy_lock = VPB_SHARERS_WORD(1); 2350 else 2351 busy_lock = VPB_UNBUSIED; 2352 if ((req & VM_ALLOC_WIRED) != 0) 2353 vm_wire_add(npages); 2354 if (object != NULL) { 2355 if (object->memattr != VM_MEMATTR_DEFAULT && 2356 memattr == VM_MEMATTR_DEFAULT) 2357 memattr = object->memattr; 2358 } 2359 for (m = m_ret; m < &m_ret[npages]; m++) { 2360 m->a.flags = 0; 2361 m->flags = (m->flags | PG_NODUMP) & flags; 2362 m->busy_lock = busy_lock; 2363 if ((req & VM_ALLOC_WIRED) != 0) 2364 m->ref_count = 1; 2365 m->a.act_count = 0; 2366 m->oflags = oflags; 2367 if (object != NULL) { 2368 if (vm_page_insert_after(m, object, pindex, mpred)) { 2369 if ((req & VM_ALLOC_WIRED) != 0) 2370 vm_wire_sub(npages); 2371 KASSERT(m->object == NULL, 2372 ("page %p has object", m)); 2373 mpred = m; 2374 for (m = m_ret; m < &m_ret[npages]; m++) { 2375 if (m <= mpred && 2376 (req & VM_ALLOC_WIRED) != 0) 2377 m->ref_count = 0; 2378 m->oflags = VPO_UNMANAGED; 2379 m->busy_lock = VPB_UNBUSIED; 2380 /* Don't change PG_ZERO. */ 2381 vm_page_free_toq(m); 2382 } 2383 if (req & VM_ALLOC_WAITFAIL) { 2384 VM_OBJECT_WUNLOCK(object); 2385 vm_radix_wait(); 2386 VM_OBJECT_WLOCK(object); 2387 } 2388 return (NULL); 2389 } 2390 mpred = m; 2391 } else 2392 m->pindex = pindex; 2393 if (memattr != VM_MEMATTR_DEFAULT) 2394 pmap_page_set_memattr(m, memattr); 2395 pindex++; 2396 } 2397 return (m_ret); 2398 } 2399 2400 /* 2401 * Check a page that has been freshly dequeued from a freelist. 
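 * The KASSERTs below compile away entirely in kernels built without
 * INVARIANTS.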
2402 */ 2403 static void 2404 vm_page_alloc_check(vm_page_t m) 2405 { 2406 2407 KASSERT(m->object == NULL, ("page %p has object", m)); 2408 KASSERT(m->a.queue == PQ_NONE && 2409 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 2410 ("page %p has unexpected queue %d, flags %#x", 2411 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK))); 2412 KASSERT(m->ref_count == 0, ("page %p has references", m)); 2413 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m)); 2414 KASSERT(m->dirty == 0, ("page %p is dirty", m)); 2415 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 2416 ("page %p has unexpected memattr %d", 2417 m, pmap_page_get_memattr(m))); 2418 KASSERT(m->valid == 0, ("free page %p is valid", m)); 2419 pmap_vm_page_alloc_check(m); 2420 } 2421 2422 /* 2423 * vm_page_alloc_freelist: 2424 * 2425 * Allocate a physical page from the specified free page list. 2426 * 2427 * The caller must always specify an allocation class. 2428 * 2429 * allocation classes: 2430 * VM_ALLOC_NORMAL normal process request 2431 * VM_ALLOC_SYSTEM system *really* needs a page 2432 * VM_ALLOC_INTERRUPT interrupt time request 2433 * 2434 * optional allocation flags: 2435 * VM_ALLOC_COUNT(number) the number of additional pages that the caller 2436 * intends to allocate 2437 * VM_ALLOC_WIRED wire the allocated page 2438 * VM_ALLOC_ZERO prefer a zeroed page 2439 */ 2440 vm_page_t 2441 vm_page_alloc_freelist(int freelist, int req) 2442 { 2443 struct vm_domainset_iter di; 2444 vm_page_t m; 2445 int domain; 2446 2447 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2448 do { 2449 m = vm_page_alloc_freelist_domain(domain, freelist, req); 2450 if (m != NULL) 2451 break; 2452 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2453 2454 return (m); 2455 } 2456 2457 vm_page_t 2458 vm_page_alloc_freelist_domain(int domain, int freelist, int req) 2459 { 2460 struct vm_domain *vmd; 2461 vm_page_t m; 2462 u_int flags; 2463 2464 m = NULL; 2465 vmd = VM_DOMAIN(domain); 2466 again: 2467 if (vm_domain_allocate(vmd, req, 1)) { 2468 vm_domain_free_lock(vmd); 2469 m = vm_phys_alloc_freelist_pages(domain, freelist, 2470 VM_FREEPOOL_DIRECT, 0); 2471 vm_domain_free_unlock(vmd); 2472 if (m == NULL) 2473 vm_domain_freecnt_inc(vmd, 1); 2474 } 2475 if (m == NULL) { 2476 if (vm_domain_alloc_fail(vmd, NULL, req)) 2477 goto again; 2478 return (NULL); 2479 } 2480 vm_page_dequeue(m); 2481 vm_page_alloc_check(m); 2482 2483 /* 2484 * Initialize the page. Only the PG_ZERO flag is inherited. 2485 */ 2486 m->a.flags = 0; 2487 flags = 0; 2488 if ((req & VM_ALLOC_ZERO) != 0) 2489 flags = PG_ZERO; 2490 m->flags &= flags; 2491 if ((req & VM_ALLOC_WIRED) != 0) { 2492 vm_wire_add(1); 2493 m->ref_count = 1; 2494 } 2495 /* Unmanaged pages don't use "act_count". */ 2496 m->oflags = VPO_UNMANAGED; 2497 return (m); 2498 } 2499 2500 static int 2501 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags) 2502 { 2503 struct vm_domain *vmd; 2504 struct vm_pgcache *pgcache; 2505 int i; 2506 2507 pgcache = arg; 2508 vmd = VM_DOMAIN(pgcache->domain); 2509 2510 /* 2511 * The page daemon should avoid creating extra memory pressure since its 2512 * main purpose is to replenish the store of free pages. 
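 * Returning zero here is harmless: the uma_zalloc() call in
 * vm_page_alloc_domain_after() simply fails, and that caller falls
 * back to allocating directly from the physical memory allocator.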
2513 */ 2514 if (vmd->vmd_severeset || curproc == pageproc || 2515 !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt)) 2516 return (0); 2517 domain = vmd->vmd_domain; 2518 vm_domain_free_lock(vmd); 2519 i = vm_phys_alloc_npages(domain, pgcache->pool, cnt, 2520 (vm_page_t *)store); 2521 vm_domain_free_unlock(vmd); 2522 if (cnt != i) 2523 vm_domain_freecnt_inc(vmd, cnt - i); 2524 2525 return (i); 2526 } 2527 2528 static void 2529 vm_page_zone_release(void *arg, void **store, int cnt) 2530 { 2531 struct vm_domain *vmd; 2532 struct vm_pgcache *pgcache; 2533 vm_page_t m; 2534 int i; 2535 2536 pgcache = arg; 2537 vmd = VM_DOMAIN(pgcache->domain); 2538 vm_domain_free_lock(vmd); 2539 for (i = 0; i < cnt; i++) { 2540 m = (vm_page_t)store[i]; 2541 vm_phys_free_pages(m, 0); 2542 } 2543 vm_domain_free_unlock(vmd); 2544 vm_domain_freecnt_inc(vmd, cnt); 2545 } 2546 2547 #define VPSC_ANY 0 /* No restrictions. */ 2548 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ 2549 #define VPSC_NOSUPER 2 /* Skip superpages. */ 2550 2551 /* 2552 * vm_page_scan_contig: 2553 * 2554 * Scan vm_page_array[] between the specified entries "m_start" and 2555 * "m_end" for a run of contiguous physical pages that satisfy the 2556 * specified conditions, and return the lowest page in the run. The 2557 * specified "alignment" determines the alignment of the lowest physical 2558 * page in the run. If the specified "boundary" is non-zero, then the 2559 * run of physical pages cannot span a physical address that is a 2560 * multiple of "boundary". 2561 * 2562 * "m_end" is never dereferenced, so it need not point to a vm_page 2563 * structure within vm_page_array[]. 2564 * 2565 * "npages" must be greater than zero. "m_start" and "m_end" must not 2566 * span a hole (or discontiguity) in the physical address space. Both 2567 * "alignment" and "boundary" must be a power of two. 2568 */ 2569 vm_page_t 2570 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, 2571 u_long alignment, vm_paddr_t boundary, int options) 2572 { 2573 vm_object_t object; 2574 vm_paddr_t pa; 2575 vm_page_t m, m_run; 2576 #if VM_NRESERVLEVEL > 0 2577 int level; 2578 #endif 2579 int m_inc, order, run_ext, run_len; 2580 2581 KASSERT(npages > 0, ("npages is 0")); 2582 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2583 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2584 m_run = NULL; 2585 run_len = 0; 2586 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { 2587 KASSERT((m->flags & PG_MARKER) == 0, 2588 ("page %p is PG_MARKER", m)); 2589 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1, 2590 ("fictitious page %p has invalid ref count", m)); 2591 2592 /* 2593 * If the current page would be the start of a run, check its 2594 * physical address against the end, alignment, and boundary 2595 * conditions. If it doesn't satisfy these conditions, either 2596 * terminate the scan or advance to the next page that 2597 * satisfies the failed condition. 
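 * For example, with 4KB pages, pa == 0x5000, and alignment == 0x4000,
 * roundup2(pa, alignment) == 0x8000, so m_inc is set to 3 pages in
 * order to advance the scan to the next aligned address.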
2598 */ 2599 if (run_len == 0) { 2600 KASSERT(m_run == NULL, ("m_run != NULL")); 2601 if (m + npages > m_end) 2602 break; 2603 pa = VM_PAGE_TO_PHYS(m); 2604 if ((pa & (alignment - 1)) != 0) { 2605 m_inc = atop(roundup2(pa, alignment) - pa); 2606 continue; 2607 } 2608 if (rounddown2(pa ^ (pa + ptoa(npages) - 1), 2609 boundary) != 0) { 2610 m_inc = atop(roundup2(pa, boundary) - pa); 2611 continue; 2612 } 2613 } else 2614 KASSERT(m_run != NULL, ("m_run == NULL")); 2615 2616 retry: 2617 m_inc = 1; 2618 if (vm_page_wired(m)) 2619 run_ext = 0; 2620 #if VM_NRESERVLEVEL > 0 2621 else if ((level = vm_reserv_level(m)) >= 0 && 2622 (options & VPSC_NORESERV) != 0) { 2623 run_ext = 0; 2624 /* Advance to the end of the reservation. */ 2625 pa = VM_PAGE_TO_PHYS(m); 2626 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2627 pa); 2628 } 2629 #endif 2630 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2631 /* 2632 * The page is considered eligible for relocation if 2633 * and only if it could be laundered or reclaimed by 2634 * the page daemon. 2635 */ 2636 VM_OBJECT_RLOCK(object); 2637 if (object != m->object) { 2638 VM_OBJECT_RUNLOCK(object); 2639 goto retry; 2640 } 2641 /* Don't care: PG_NODUMP, PG_ZERO. */ 2642 if (object->type != OBJT_DEFAULT && 2643 (object->flags & OBJ_SWAP) == 0 && 2644 object->type != OBJT_VNODE) { 2645 run_ext = 0; 2646 #if VM_NRESERVLEVEL > 0 2647 } else if ((options & VPSC_NOSUPER) != 0 && 2648 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2649 run_ext = 0; 2650 /* Advance to the end of the superpage. */ 2651 pa = VM_PAGE_TO_PHYS(m); 2652 m_inc = atop(roundup2(pa + 1, 2653 vm_reserv_size(level)) - pa); 2654 #endif 2655 } else if (object->memattr == VM_MEMATTR_DEFAULT && 2656 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { 2657 /* 2658 * The page is allocated but eligible for 2659 * relocation. Extend the current run by one 2660 * page. 2661 */ 2662 KASSERT(pmap_page_get_memattr(m) == 2663 VM_MEMATTR_DEFAULT, 2664 ("page %p has an unexpected memattr", m)); 2665 KASSERT((m->oflags & (VPO_SWAPINPROG | 2666 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2667 ("page %p has unexpected oflags", m)); 2668 /* Don't care: PGA_NOSYNC. */ 2669 run_ext = 1; 2670 } else 2671 run_ext = 0; 2672 VM_OBJECT_RUNLOCK(object); 2673 #if VM_NRESERVLEVEL > 0 2674 } else if (level >= 0) { 2675 /* 2676 * The page is reserved but not yet allocated. In 2677 * other words, it is still free. Extend the current 2678 * run by one page. 2679 */ 2680 run_ext = 1; 2681 #endif 2682 } else if ((order = m->order) < VM_NFREEORDER) { 2683 /* 2684 * The page is enqueued in the physical memory 2685 * allocator's free page queues. Moreover, it is the 2686 * first page in a power-of-two-sized run of 2687 * contiguous free pages. Add these pages to the end 2688 * of the current run, and jump ahead. 2689 */ 2690 run_ext = 1 << order; 2691 m_inc = 1 << order; 2692 } else { 2693 /* 2694 * Skip the page for one of the following reasons: (1) 2695 * It is enqueued in the physical memory allocator's 2696 * free page queues. However, it is not the first 2697 * page in a run of contiguous free pages. (This case 2698 * rarely occurs because the scan is performed in 2699 * ascending order.) (2) It is not reserved, and it is 2700 * transitioning from free to allocated. (Conversely, 2701 * the transition from allocated to free for managed 2702 * pages is blocked by the page busy lock.) (3) It is 2703 * allocated but not contained by an object and not 2704 * wired, e.g., allocated by Xen's balloon driver. 
2705 */ 2706 run_ext = 0; 2707 } 2708 2709 /* 2710 * Extend or reset the current run of pages. 2711 */ 2712 if (run_ext > 0) { 2713 if (run_len == 0) 2714 m_run = m; 2715 run_len += run_ext; 2716 } else { 2717 if (run_len > 0) { 2718 m_run = NULL; 2719 run_len = 0; 2720 } 2721 } 2722 } 2723 if (run_len >= npages) 2724 return (m_run); 2725 return (NULL); 2726 } 2727 2728 /* 2729 * vm_page_reclaim_run: 2730 * 2731 * Try to relocate each of the allocated virtual pages within the 2732 * specified run of physical pages to a new physical address. Free the 2733 * physical pages underlying the relocated virtual pages. A virtual page 2734 * is relocatable if and only if it could be laundered or reclaimed by 2735 * the page daemon. Whenever possible, a virtual page is relocated to a 2736 * physical address above "high". 2737 * 2738 * Returns 0 if every physical page within the run was already free or 2739 * just freed by a successful relocation. Otherwise, returns a non-zero 2740 * value indicating why the last attempt to relocate a virtual page was 2741 * unsuccessful. 2742 * 2743 * "req_class" must be an allocation class. 2744 */ 2745 static int 2746 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run, 2747 vm_paddr_t high) 2748 { 2749 struct vm_domain *vmd; 2750 struct spglist free; 2751 vm_object_t object; 2752 vm_paddr_t pa; 2753 vm_page_t m, m_end, m_new; 2754 int error, order, req; 2755 2756 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 2757 ("req_class is not an allocation class")); 2758 SLIST_INIT(&free); 2759 error = 0; 2760 m = m_run; 2761 m_end = m_run + npages; 2762 for (; error == 0 && m < m_end; m++) { 2763 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 2764 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 2765 2766 /* 2767 * Racily check for wirings. Races are handled once the object 2768 * lock is held and the page is unmapped. 2769 */ 2770 if (vm_page_wired(m)) 2771 error = EBUSY; 2772 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2773 /* 2774 * The page is relocated if and only if it could be 2775 * laundered or reclaimed by the page daemon. 2776 */ 2777 VM_OBJECT_WLOCK(object); 2778 /* Don't care: PG_NODUMP, PG_ZERO. */ 2779 if (m->object != object || 2780 (object->type != OBJT_DEFAULT && 2781 (object->flags & OBJ_SWAP) == 0 && 2782 object->type != OBJT_VNODE)) 2783 error = EINVAL; 2784 else if (object->memattr != VM_MEMATTR_DEFAULT) 2785 error = EINVAL; 2786 else if (vm_page_queue(m) != PQ_NONE && 2787 vm_page_tryxbusy(m) != 0) { 2788 if (vm_page_wired(m)) { 2789 vm_page_xunbusy(m); 2790 error = EBUSY; 2791 goto unlock; 2792 } 2793 KASSERT(pmap_page_get_memattr(m) == 2794 VM_MEMATTR_DEFAULT, 2795 ("page %p has an unexpected memattr", m)); 2796 KASSERT(m->oflags == 0, 2797 ("page %p has unexpected oflags", m)); 2798 /* Don't care: PGA_NOSYNC. */ 2799 if (!vm_page_none_valid(m)) { 2800 /* 2801 * First, try to allocate a new page 2802 * that is above "high". Failing 2803 * that, try to allocate a new page 2804 * that is below "m_run". Allocate 2805 * the new page between the end of 2806 * "m_run" and "high" only as a last 2807 * resort. 
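 * Concretely, the candidate physical ranges tried below are,
 * in order: [round_page(high), ~0], then
 * [0, VM_PAGE_TO_PHYS(m_run) - 1], and finally
 * [VM_PAGE_TO_PHYS(m_run) + ptoa(npages), high].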
2808 */ 2809 req = req_class | VM_ALLOC_NOOBJ; 2810 if ((m->flags & PG_NODUMP) != 0) 2811 req |= VM_ALLOC_NODUMP; 2812 if (trunc_page(high) != 2813 ~(vm_paddr_t)PAGE_MASK) { 2814 m_new = vm_page_alloc_contig( 2815 NULL, 0, req, 1, 2816 round_page(high), 2817 ~(vm_paddr_t)0, 2818 PAGE_SIZE, 0, 2819 VM_MEMATTR_DEFAULT); 2820 } else 2821 m_new = NULL; 2822 if (m_new == NULL) { 2823 pa = VM_PAGE_TO_PHYS(m_run); 2824 m_new = vm_page_alloc_contig( 2825 NULL, 0, req, 1, 2826 0, pa - 1, PAGE_SIZE, 0, 2827 VM_MEMATTR_DEFAULT); 2828 } 2829 if (m_new == NULL) { 2830 pa += ptoa(npages); 2831 m_new = vm_page_alloc_contig( 2832 NULL, 0, req, 1, 2833 pa, high, PAGE_SIZE, 0, 2834 VM_MEMATTR_DEFAULT); 2835 } 2836 if (m_new == NULL) { 2837 vm_page_xunbusy(m); 2838 error = ENOMEM; 2839 goto unlock; 2840 } 2841 2842 /* 2843 * Unmap the page and check for new 2844 * wirings that may have been acquired 2845 * through a pmap lookup. 2846 */ 2847 if (object->ref_count != 0 && 2848 !vm_page_try_remove_all(m)) { 2849 vm_page_xunbusy(m); 2850 vm_page_free(m_new); 2851 error = EBUSY; 2852 goto unlock; 2853 } 2854 2855 /* 2856 * Replace "m" with the new page. For 2857 * vm_page_replace(), "m" must be busy 2858 * and dequeued. Finally, change "m" 2859 * as if vm_page_free() was called. 2860 */ 2861 m_new->a.flags = m->a.flags & 2862 ~PGA_QUEUE_STATE_MASK; 2863 KASSERT(m_new->oflags == VPO_UNMANAGED, 2864 ("page %p is managed", m_new)); 2865 m_new->oflags = 0; 2866 pmap_copy_page(m, m_new); 2867 m_new->valid = m->valid; 2868 m_new->dirty = m->dirty; 2869 m->flags &= ~PG_ZERO; 2870 vm_page_dequeue(m); 2871 if (vm_page_replace_hold(m_new, object, 2872 m->pindex, m) && 2873 vm_page_free_prep(m)) 2874 SLIST_INSERT_HEAD(&free, m, 2875 plinks.s.ss); 2876 2877 /* 2878 * The new page must be deactivated 2879 * before the object is unlocked. 2880 */ 2881 vm_page_deactivate(m_new); 2882 } else { 2883 m->flags &= ~PG_ZERO; 2884 vm_page_dequeue(m); 2885 if (vm_page_free_prep(m)) 2886 SLIST_INSERT_HEAD(&free, m, 2887 plinks.s.ss); 2888 KASSERT(m->dirty == 0, 2889 ("page %p is dirty", m)); 2890 } 2891 } else 2892 error = EBUSY; 2893 unlock: 2894 VM_OBJECT_WUNLOCK(object); 2895 } else { 2896 MPASS(vm_page_domain(m) == domain); 2897 vmd = VM_DOMAIN(domain); 2898 vm_domain_free_lock(vmd); 2899 order = m->order; 2900 if (order < VM_NFREEORDER) { 2901 /* 2902 * The page is enqueued in the physical memory 2903 * allocator's free page queues. Moreover, it 2904 * is the first page in a power-of-two-sized 2905 * run of contiguous free pages. Jump ahead 2906 * to the last page within that run, and 2907 * continue from there. 
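 * For example, a free run of order 3 spans 8 pages, so "m" is
 * advanced by (1 << 3) - 1 == 7 pages to the run's final page;
 * the loop's m++ then steps past the run.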
2908 */ 2909 m += (1 << order) - 1; 2910 } 2911 #if VM_NRESERVLEVEL > 0 2912 else if (vm_reserv_is_page_free(m)) 2913 order = 0; 2914 #endif 2915 vm_domain_free_unlock(vmd); 2916 if (order == VM_NFREEORDER) 2917 error = EINVAL; 2918 } 2919 } 2920 if ((m = SLIST_FIRST(&free)) != NULL) { 2921 int cnt; 2922 2923 vmd = VM_DOMAIN(domain); 2924 cnt = 0; 2925 vm_domain_free_lock(vmd); 2926 do { 2927 MPASS(vm_page_domain(m) == domain); 2928 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2929 vm_phys_free_pages(m, 0); 2930 cnt++; 2931 } while ((m = SLIST_FIRST(&free)) != NULL); 2932 vm_domain_free_unlock(vmd); 2933 vm_domain_freecnt_inc(vmd, cnt); 2934 } 2935 return (error); 2936 } 2937 2938 #define NRUNS 16 2939 2940 CTASSERT(powerof2(NRUNS)); 2941 2942 #define RUN_INDEX(count) ((count) & (NRUNS - 1)) 2943 2944 #define MIN_RECLAIM 8 2945 2946 /* 2947 * vm_page_reclaim_contig: 2948 * 2949 * Reclaim allocated, contiguous physical memory satisfying the specified 2950 * conditions by relocating the virtual pages using that physical memory. 2951 * Returns true if reclamation is successful and false otherwise. Since 2952 * relocation requires the allocation of physical pages, reclamation may 2953 * fail due to a shortage of free pages. When reclamation fails, callers 2954 * are expected to perform vm_wait() before retrying a failed allocation 2955 * operation, e.g., vm_page_alloc_contig(). 2956 * 2957 * The caller must always specify an allocation class through "req". 2958 * 2959 * allocation classes: 2960 * VM_ALLOC_NORMAL normal process request 2961 * VM_ALLOC_SYSTEM system *really* needs a page 2962 * VM_ALLOC_INTERRUPT interrupt time request 2963 * 2964 * The optional allocation flags are ignored. 2965 * 2966 * "npages" must be greater than zero. Both "alignment" and "boundary" 2967 * must be a power of two. 2968 */ 2969 bool 2970 vm_page_reclaim_contig_domain(int domain, int req, u_long npages, 2971 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 2972 { 2973 struct vm_domain *vmd; 2974 vm_paddr_t curr_low; 2975 vm_page_t m_run, m_runs[NRUNS]; 2976 u_long count, minalign, reclaimed; 2977 int error, i, options, req_class; 2978 2979 KASSERT(npages > 0, ("npages is 0")); 2980 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2981 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2982 2983 /* 2984 * The caller will attempt an allocation after some runs have been 2985 * reclaimed and added to the vm_phys buddy lists. Due to limitations 2986 * of vm_phys_alloc_contig(), round up the requested length to the next 2987 * power of two or maximum chunk size, and ensure that each run is 2988 * suitably aligned. 2989 */ 2990 minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1); 2991 npages = roundup2(npages, minalign); 2992 if (alignment < ptoa(minalign)) 2993 alignment = ptoa(minalign); 2994 2995 /* 2996 * The page daemon is allowed to dig deeper into the free page list. 2997 */ 2998 req_class = req & VM_ALLOC_CLASS_MASK; 2999 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 3000 req_class = VM_ALLOC_SYSTEM; 3001 3002 /* 3003 * Return if the number of free pages cannot satisfy the requested 3004 * allocation. 
3005 */ 3006 vmd = VM_DOMAIN(domain); 3007 count = vmd->vmd_free_count; 3008 if (count < npages + vmd->vmd_free_reserved || (count < npages + 3009 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 3010 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 3011 return (false); 3012 3013 /* 3014 * Scan up to three times, relaxing the restrictions ("options") on 3015 * the reclamation of reservations and superpages each time. 3016 */ 3017 for (options = VPSC_NORESERV;;) { 3018 /* 3019 * Find the highest runs that satisfy the given constraints 3020 * and restrictions, and record them in "m_runs". 3021 */ 3022 curr_low = low; 3023 count = 0; 3024 for (;;) { 3025 m_run = vm_phys_scan_contig(domain, npages, curr_low, 3026 high, alignment, boundary, options); 3027 if (m_run == NULL) 3028 break; 3029 curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages); 3030 m_runs[RUN_INDEX(count)] = m_run; 3031 count++; 3032 } 3033 3034 /* 3035 * Reclaim the highest runs in LIFO (descending) order until 3036 * the number of reclaimed pages, "reclaimed", is at least 3037 * MIN_RECLAIM. Reset "reclaimed" each time because each 3038 * reclamation is idempotent, and runs will (likely) recur 3039 * from one scan to the next as restrictions are relaxed. 3040 */ 3041 reclaimed = 0; 3042 for (i = 0; count > 0 && i < NRUNS; i++) { 3043 count--; 3044 m_run = m_runs[RUN_INDEX(count)]; 3045 error = vm_page_reclaim_run(req_class, domain, npages, 3046 m_run, high); 3047 if (error == 0) { 3048 reclaimed += npages; 3049 if (reclaimed >= MIN_RECLAIM) 3050 return (true); 3051 } 3052 } 3053 3054 /* 3055 * Either relax the restrictions on the next scan or return if 3056 * the last scan had no restrictions. 3057 */ 3058 if (options == VPSC_NORESERV) 3059 options = VPSC_NOSUPER; 3060 else if (options == VPSC_NOSUPER) 3061 options = VPSC_ANY; 3062 else if (options == VPSC_ANY) 3063 return (reclaimed != 0); 3064 } 3065 } 3066 3067 bool 3068 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 3069 u_long alignment, vm_paddr_t boundary) 3070 { 3071 struct vm_domainset_iter di; 3072 int domain; 3073 bool ret; 3074 3075 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 3076 do { 3077 ret = vm_page_reclaim_contig_domain(domain, req, npages, low, 3078 high, alignment, boundary); 3079 if (ret) 3080 break; 3081 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 3082 3083 return (ret); 3084 } 3085 3086 /* 3087 * Set the domain in the appropriate page level domainset. 3088 */ 3089 void 3090 vm_domain_set(struct vm_domain *vmd) 3091 { 3092 3093 mtx_lock(&vm_domainset_lock); 3094 if (!vmd->vmd_minset && vm_paging_min(vmd)) { 3095 vmd->vmd_minset = 1; 3096 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); 3097 } 3098 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { 3099 vmd->vmd_severeset = 1; 3100 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); 3101 } 3102 mtx_unlock(&vm_domainset_lock); 3103 } 3104 3105 /* 3106 * Clear the domain from the appropriate page level domainset. 
3107 */ 3108 void 3109 vm_domain_clear(struct vm_domain *vmd) 3110 { 3111 3112 mtx_lock(&vm_domainset_lock); 3113 if (vmd->vmd_minset && !vm_paging_min(vmd)) { 3114 vmd->vmd_minset = 0; 3115 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); 3116 if (vm_min_waiters != 0) { 3117 vm_min_waiters = 0; 3118 wakeup(&vm_min_domains); 3119 } 3120 } 3121 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { 3122 vmd->vmd_severeset = 0; 3123 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); 3124 if (vm_severe_waiters != 0) { 3125 vm_severe_waiters = 0; 3126 wakeup(&vm_severe_domains); 3127 } 3128 } 3129 3130 /* 3131 * If pageout daemon needs pages, then tell it that there are 3132 * some free. 3133 */ 3134 if (vmd->vmd_pageout_pages_needed && 3135 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { 3136 wakeup(&vmd->vmd_pageout_pages_needed); 3137 vmd->vmd_pageout_pages_needed = 0; 3138 } 3139 3140 /* See comments in vm_wait_doms(). */ 3141 if (vm_pageproc_waiters) { 3142 vm_pageproc_waiters = 0; 3143 wakeup(&vm_pageproc_waiters); 3144 } 3145 mtx_unlock(&vm_domainset_lock); 3146 } 3147 3148 /* 3149 * Wait for free pages to exceed the min threshold globally. 3150 */ 3151 void 3152 vm_wait_min(void) 3153 { 3154 3155 mtx_lock(&vm_domainset_lock); 3156 while (vm_page_count_min()) { 3157 vm_min_waiters++; 3158 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); 3159 } 3160 mtx_unlock(&vm_domainset_lock); 3161 } 3162 3163 /* 3164 * Wait for free pages to exceed the severe threshold globally. 3165 */ 3166 void 3167 vm_wait_severe(void) 3168 { 3169 3170 mtx_lock(&vm_domainset_lock); 3171 while (vm_page_count_severe()) { 3172 vm_severe_waiters++; 3173 msleep(&vm_severe_domains, &vm_domainset_lock, PVM, 3174 "vmwait", 0); 3175 } 3176 mtx_unlock(&vm_domainset_lock); 3177 } 3178 3179 u_int 3180 vm_wait_count(void) 3181 { 3182 3183 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); 3184 } 3185 3186 int 3187 vm_wait_doms(const domainset_t *wdoms, int mflags) 3188 { 3189 int error; 3190 3191 error = 0; 3192 3193 /* 3194 * We use racy wakeup synchronization to avoid expensive global 3195 * locking for the pageproc when sleeping with a non-specific vm_wait. 3196 * To handle this, we only sleep for one tick in this instance. It 3197 * is expected that most allocations for the pageproc will come from 3198 * kmem or vm_page_grab* which will use the more specific and 3199 * race-free vm_wait_domain(). 3200 */ 3201 if (curproc == pageproc) { 3202 mtx_lock(&vm_domainset_lock); 3203 vm_pageproc_waiters++; 3204 error = msleep(&vm_pageproc_waiters, &vm_domainset_lock, 3205 PVM | PDROP | mflags, "pageprocwait", 1); 3206 } else { 3207 /* 3208 * XXX Ideally we would wait only until the allocation could 3209 * be satisfied. This condition can cause new allocators to 3210 * consume all freed pages while old allocators wait. 3211 */ 3212 mtx_lock(&vm_domainset_lock); 3213 if (vm_page_count_min_set(wdoms)) { 3214 vm_min_waiters++; 3215 error = msleep(&vm_min_domains, &vm_domainset_lock, 3216 PVM | PDROP | mflags, "vmwait", 0); 3217 } else 3218 mtx_unlock(&vm_domainset_lock); 3219 } 3220 return (error); 3221 } 3222 3223 /* 3224 * vm_wait_domain: 3225 * 3226 * Sleep until free pages are available for allocation. 3227 * - Called in various places after failed memory allocations.
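 *
 * An illustrative retry loop (a sketch only, not code from this file;
 * "obj" and "pidx" are hypothetical, and the object lock must be
 * dropped before sleeping):
 *
 *	VM_OBJECT_WLOCK(obj);
 *	while ((m = vm_page_alloc(obj, pidx, VM_ALLOC_NORMAL)) == NULL) {
 *		VM_OBJECT_WUNLOCK(obj);
 *		vm_wait(obj);
 *		VM_OBJECT_WLOCK(obj);
 *	}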
3228 */ 3229 void 3230 vm_wait_domain(int domain) 3231 { 3232 struct vm_domain *vmd; 3233 domainset_t wdom; 3234 3235 vmd = VM_DOMAIN(domain); 3236 vm_domain_free_assert_unlocked(vmd); 3237 3238 if (curproc == pageproc) { 3239 mtx_lock(&vm_domainset_lock); 3240 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { 3241 vmd->vmd_pageout_pages_needed = 1; 3242 msleep(&vmd->vmd_pageout_pages_needed, 3243 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); 3244 } else 3245 mtx_unlock(&vm_domainset_lock); 3246 } else { 3247 if (pageproc == NULL) 3248 panic("vm_wait in early boot"); 3249 DOMAINSET_ZERO(&wdom); 3250 DOMAINSET_SET(vmd->vmd_domain, &wdom); 3251 vm_wait_doms(&wdom, 0); 3252 } 3253 } 3254 3255 static int 3256 vm_wait_flags(vm_object_t obj, int mflags) 3257 { 3258 struct domainset *d; 3259 3260 d = NULL; 3261 3262 /* 3263 * Carefully fetch pointers only once: the struct domainset 3264 * itself is immutable but the pointer might change. 3265 */ 3266 if (obj != NULL) 3267 d = obj->domain.dr_policy; 3268 if (d == NULL) 3269 d = curthread->td_domain.dr_policy; 3270 3271 return (vm_wait_doms(&d->ds_mask, mflags)); 3272 } 3273 3274 /* 3275 * vm_wait: 3276 * 3277 * Sleep until free pages are available for allocation in the 3278 * affinity domains of the object. If obj is NULL, the domain set 3279 * for the calling thread is used. 3280 * Called in various places after failed memory allocations. 3281 */ 3282 void 3283 vm_wait(vm_object_t obj) 3284 { 3285 (void)vm_wait_flags(obj, 0); 3286 } 3287 3288 int 3289 vm_wait_intr(vm_object_t obj) 3290 { 3291 return (vm_wait_flags(obj, PCATCH)); 3292 } 3293 3294 /* 3295 * vm_domain_alloc_fail: 3296 * 3297 * Called when a page allocation function fails. Informs the 3298 * pagedaemon and performs the requested wait. Requires the 3299 * object lock on entry; the domain free lock must not be held. 3300 * Returns with the object lock held. Returns an error when a 3301 * retry is necessary. 3302 * 3303 */ 3304 static int 3305 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) 3306 { 3307 3308 vm_domain_free_assert_unlocked(vmd); 3309 3310 atomic_add_int(&vmd->vmd_pageout_deficit, 3311 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 3312 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { 3313 if (object != NULL) 3314 VM_OBJECT_WUNLOCK(object); 3315 vm_wait_domain(vmd->vmd_domain); 3316 if (object != NULL) 3317 VM_OBJECT_WLOCK(object); 3318 if (req & VM_ALLOC_WAITOK) 3319 return (EAGAIN); 3320 } 3321 3322 return (0); 3323 } 3324 3325 /* 3326 * vm_waitpfault: 3327 * 3328 * Sleep until free pages are available for allocation. 3329 * - Called only in vm_fault so that processes page faulting 3330 * can be easily tracked. 3331 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 3332 * processes will be able to grab memory first. Do not change 3333 * this balance without careful testing first. 3334 */ 3335 void 3336 vm_waitpfault(struct domainset *dset, int timo) 3337 { 3338 3339 /* 3340 * XXX Ideally we would wait only until the allocation could 3341 * be satisfied. This condition can cause new allocators to 3342 * consume all freed pages while old allocators wait.
3343 */ 3344 mtx_lock(&vm_domainset_lock); 3345 if (vm_page_count_min_set(&dset->ds_mask)) { 3346 vm_min_waiters++; 3347 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP, 3348 "pfault", timo); 3349 } else 3350 mtx_unlock(&vm_domainset_lock); 3351 } 3352 3353 static struct vm_pagequeue * 3354 _vm_page_pagequeue(vm_page_t m, uint8_t queue) 3355 { 3356 3357 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]); 3358 } 3359 3360 #ifdef INVARIANTS 3361 static struct vm_pagequeue * 3362 vm_page_pagequeue(vm_page_t m) 3363 { 3364 3365 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue)); 3366 } 3367 #endif 3368 3369 static __always_inline bool 3370 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3371 { 3372 vm_page_astate_t tmp; 3373 3374 tmp = *old; 3375 do { 3376 if (__predict_true(vm_page_astate_fcmpset(m, old, new))) 3377 return (true); 3378 counter_u64_add(pqstate_commit_retries, 1); 3379 } while (old->_bits == tmp._bits); 3380 3381 return (false); 3382 } 3383 3384 /* 3385 * Do the work of committing a queue state update that moves the page out of 3386 * its current queue. 3387 */ 3388 static bool 3389 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m, 3390 vm_page_astate_t *old, vm_page_astate_t new) 3391 { 3392 vm_page_t next; 3393 3394 vm_pagequeue_assert_locked(pq); 3395 KASSERT(vm_page_pagequeue(m) == pq, 3396 ("%s: queue %p does not match page %p", __func__, pq, m)); 3397 KASSERT(old->queue != PQ_NONE && new.queue != old->queue, 3398 ("%s: invalid queue indices %d %d", 3399 __func__, old->queue, new.queue)); 3400 3401 /* 3402 * Once the queue index of the page changes there is nothing 3403 * synchronizing with further updates to the page's physical 3404 * queue state. Therefore we must speculatively remove the page 3405 * from the queue now and be prepared to roll back if the queue 3406 * state update fails. If the page is not physically enqueued then 3407 * we just update its queue index. 3408 */ 3409 if ((old->flags & PGA_ENQUEUED) != 0) { 3410 new.flags &= ~PGA_ENQUEUED; 3411 next = TAILQ_NEXT(m, plinks.q); 3412 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3413 vm_pagequeue_cnt_dec(pq); 3414 if (!vm_page_pqstate_fcmpset(m, old, new)) { 3415 if (next == NULL) 3416 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3417 else 3418 TAILQ_INSERT_BEFORE(next, m, plinks.q); 3419 vm_pagequeue_cnt_inc(pq); 3420 return (false); 3421 } else { 3422 return (true); 3423 } 3424 } else { 3425 return (vm_page_pqstate_fcmpset(m, old, new)); 3426 } 3427 } 3428 3429 static bool 3430 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old, 3431 vm_page_astate_t new) 3432 { 3433 struct vm_pagequeue *pq; 3434 vm_page_astate_t as; 3435 bool ret; 3436 3437 pq = _vm_page_pagequeue(m, old->queue); 3438 3439 /* 3440 * The queue field and PGA_ENQUEUED flag are stable only so long as the 3441 * corresponding page queue lock is held. 3442 */ 3443 vm_pagequeue_lock(pq); 3444 as = vm_page_astate_load(m); 3445 if (__predict_false(as._bits != old->_bits)) { 3446 *old = as; 3447 ret = false; 3448 } else { 3449 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new); 3450 } 3451 vm_pagequeue_unlock(pq); 3452 return (ret); 3453 } 3454 3455 /* 3456 * Commit a queue state update that enqueues or requeues a page. 
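 * Unlike the dequeue path above, the page's queue index does not
 * change here (see the KASSERT below); only its position within the
 * queue and its PGA_ENQUEUED flag may change.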
3457 */ 3458 static bool 3459 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m, 3460 vm_page_astate_t *old, vm_page_astate_t new) 3461 { 3462 struct vm_domain *vmd; 3463 3464 vm_pagequeue_assert_locked(pq); 3465 KASSERT(old->queue != PQ_NONE && new.queue == old->queue, 3466 ("%s: invalid queue indices %d %d", 3467 __func__, old->queue, new.queue)); 3468 3469 new.flags |= PGA_ENQUEUED; 3470 if (!vm_page_pqstate_fcmpset(m, old, new)) 3471 return (false); 3472 3473 if ((old->flags & PGA_ENQUEUED) != 0) 3474 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3475 else 3476 vm_pagequeue_cnt_inc(pq); 3477 3478 /* 3479 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE. In particular, if 3480 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be 3481 * applied, even if it was set first. 3482 */ 3483 if ((old->flags & PGA_REQUEUE_HEAD) != 0) { 3484 vmd = vm_pagequeue_domain(m); 3485 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE], 3486 ("%s: invalid page queue for page %p", __func__, m)); 3487 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 3488 } else { 3489 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3490 } 3491 return (true); 3492 } 3493 3494 /* 3495 * Commit a queue state update that encodes a request for a deferred queue 3496 * operation. 3497 */ 3498 static bool 3499 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old, 3500 vm_page_astate_t new) 3501 { 3502 3503 KASSERT(old->queue == new.queue || new.queue != PQ_NONE, 3504 ("%s: invalid state, queue %d flags %x", 3505 __func__, new.queue, new.flags)); 3506 3507 if (old->_bits != new._bits && 3508 !vm_page_pqstate_fcmpset(m, old, new)) 3509 return (false); 3510 vm_page_pqbatch_submit(m, new.queue); 3511 return (true); 3512 } 3513 3514 /* 3515 * A generic queue state update function. This handles more cases than the 3516 * specialized functions above. 3517 */ 3518 bool 3519 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3520 { 3521 3522 if (old->_bits == new._bits) 3523 return (true); 3524 3525 if (old->queue != PQ_NONE && new.queue != old->queue) { 3526 if (!vm_page_pqstate_commit_dequeue(m, old, new)) 3527 return (false); 3528 if (new.queue != PQ_NONE) 3529 vm_page_pqbatch_submit(m, new.queue); 3530 } else { 3531 if (!vm_page_pqstate_fcmpset(m, old, new)) 3532 return (false); 3533 if (new.queue != PQ_NONE && 3534 ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0) 3535 vm_page_pqbatch_submit(m, new.queue); 3536 } 3537 return (true); 3538 } 3539 3540 /* 3541 * Apply deferred queue state updates to a page. 
3542 */ 3543 static inline void 3544 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue) 3545 { 3546 vm_page_astate_t new, old; 3547 3548 CRITICAL_ASSERT(curthread); 3549 vm_pagequeue_assert_locked(pq); 3550 KASSERT(queue < PQ_COUNT, 3551 ("%s: invalid queue index %d", __func__, queue)); 3552 KASSERT(pq == _vm_page_pagequeue(m, queue), 3553 ("%s: page %p does not belong to queue %p", __func__, m, pq)); 3554 3555 for (old = vm_page_astate_load(m);;) { 3556 if (__predict_false(old.queue != queue || 3557 (old.flags & PGA_QUEUE_OP_MASK) == 0)) { 3558 counter_u64_add(queue_nops, 1); 3559 break; 3560 } 3561 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3562 ("%s: page %p is unmanaged", __func__, m)); 3563 3564 new = old; 3565 if ((old.flags & PGA_DEQUEUE) != 0) { 3566 new.flags &= ~PGA_QUEUE_OP_MASK; 3567 new.queue = PQ_NONE; 3568 if (__predict_true(_vm_page_pqstate_commit_dequeue(pq, 3569 m, &old, new))) { 3570 counter_u64_add(queue_ops, 1); 3571 break; 3572 } 3573 } else { 3574 new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD); 3575 if (__predict_true(_vm_page_pqstate_commit_requeue(pq, 3576 m, &old, new))) { 3577 counter_u64_add(queue_ops, 1); 3578 break; 3579 } 3580 } 3581 } 3582 } 3583 3584 static void 3585 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq, 3586 uint8_t queue) 3587 { 3588 int i; 3589 3590 for (i = 0; i < bq->bq_cnt; i++) 3591 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue); 3592 vm_batchqueue_init(bq); 3593 } 3594 3595 /* 3596 * vm_page_pqbatch_submit: [ internal use only ] 3597 * 3598 * Enqueue a page in the specified page queue's batched work queue. 3599 * The caller must have encoded the requested operation in the page 3600 * structure's a.flags field. 3601 */ 3602 void 3603 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue) 3604 { 3605 struct vm_batchqueue *bq; 3606 struct vm_pagequeue *pq; 3607 int domain; 3608 3609 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); 3610 3611 domain = vm_page_domain(m); 3612 critical_enter(); 3613 bq = DPCPU_PTR(pqbatch[domain][queue]); 3614 if (vm_batchqueue_insert(bq, m)) { 3615 critical_exit(); 3616 return; 3617 } 3618 critical_exit(); 3619 3620 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; 3621 vm_pagequeue_lock(pq); 3622 critical_enter(); 3623 bq = DPCPU_PTR(pqbatch[domain][queue]); 3624 vm_pqbatch_process(pq, bq, queue); 3625 vm_pqbatch_process_page(pq, m, queue); 3626 vm_pagequeue_unlock(pq); 3627 critical_exit(); 3628 } 3629 3630 /* 3631 * vm_page_pqbatch_drain: [ internal use only ] 3632 * 3633 * Force all per-CPU page queue batch queues to be drained. This is 3634 * intended for use in severe memory shortages, to ensure that pages 3635 * do not remain stuck in the batch queues. 
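 * (sched_bind() below ensures that each iteration's DPCPU_PTR()
 * lookup resolves to the batch queues of the CPU being drained.)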
3636 */ 3637 void 3638 vm_page_pqbatch_drain(void) 3639 { 3640 struct thread *td; 3641 struct vm_domain *vmd; 3642 struct vm_pagequeue *pq; 3643 int cpu, domain, queue; 3644 3645 td = curthread; 3646 CPU_FOREACH(cpu) { 3647 thread_lock(td); 3648 sched_bind(td, cpu); 3649 thread_unlock(td); 3650 3651 for (domain = 0; domain < vm_ndomains; domain++) { 3652 vmd = VM_DOMAIN(domain); 3653 for (queue = 0; queue < PQ_COUNT; queue++) { 3654 pq = &vmd->vmd_pagequeues[queue]; 3655 vm_pagequeue_lock(pq); 3656 critical_enter(); 3657 vm_pqbatch_process(pq, 3658 DPCPU_PTR(pqbatch[domain][queue]), queue); 3659 critical_exit(); 3660 vm_pagequeue_unlock(pq); 3661 } 3662 } 3663 } 3664 thread_lock(td); 3665 sched_unbind(td); 3666 thread_unlock(td); 3667 } 3668 3669 /* 3670 * vm_page_dequeue_deferred: [ internal use only ] 3671 * 3672 * Request removal of the given page from its current page 3673 * queue. Physical removal from the queue may be deferred 3674 * indefinitely. 3675 */ 3676 void 3677 vm_page_dequeue_deferred(vm_page_t m) 3678 { 3679 vm_page_astate_t new, old; 3680 3681 old = vm_page_astate_load(m); 3682 do { 3683 if (old.queue == PQ_NONE) { 3684 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 3685 ("%s: page %p has unexpected queue state", 3686 __func__, m)); 3687 break; 3688 } 3689 new = old; 3690 new.flags |= PGA_DEQUEUE; 3691 } while (!vm_page_pqstate_commit_request(m, &old, new)); 3692 } 3693 3694 /* 3695 * vm_page_dequeue: 3696 * 3697 * Remove the page from whichever page queue it's in, if any, before 3698 * returning. 3699 */ 3700 void 3701 vm_page_dequeue(vm_page_t m) 3702 { 3703 vm_page_astate_t new, old; 3704 3705 old = vm_page_astate_load(m); 3706 do { 3707 if (old.queue == PQ_NONE) { 3708 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 3709 ("%s: page %p has unexpected queue state", 3710 __func__, m)); 3711 break; 3712 } 3713 new = old; 3714 new.flags &= ~PGA_QUEUE_OP_MASK; 3715 new.queue = PQ_NONE; 3716 } while (!vm_page_pqstate_commit_dequeue(m, &old, new)); 3717 3718 } 3719 3720 /* 3721 * Schedule the given page for insertion into the specified page queue. 3722 * Physical insertion of the page may be deferred indefinitely. 3723 */ 3724 static void 3725 vm_page_enqueue(vm_page_t m, uint8_t queue) 3726 { 3727 3728 KASSERT(m->a.queue == PQ_NONE && 3729 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 3730 ("%s: page %p is already enqueued", __func__, m)); 3731 KASSERT(m->ref_count > 0, 3732 ("%s: page %p does not carry any references", __func__, m)); 3733 3734 m->a.queue = queue; 3735 if ((m->a.flags & PGA_REQUEUE) == 0) 3736 vm_page_aflag_set(m, PGA_REQUEUE); 3737 vm_page_pqbatch_submit(m, queue); 3738 } 3739 3740 /* 3741 * vm_page_free_prep: 3742 * 3743 * Prepares the given page to be put on the free list, 3744 * disassociating it from any VM object. The caller may return 3745 * the page to the free list only if this function returns true. 3746 * 3747 * If the page belongs to an object, the object must be locked and the 3748 * page must be xbusied; otherwise the page must not be busied. A 3749 * managed page must be unmapped. 3750 */ 3751 static bool 3752 vm_page_free_prep(vm_page_t m) 3753 { 3754 3755 /* 3756 * Synchronize with threads that have dropped a reference to this 3757 * page.
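 * This acquire fence pairs with the release fence issued when a
 * reference is dropped (see vm_page_drop()), ensuring that all
 * updates made by the last unreferencing thread are visible here.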
3758 */ 3759 atomic_thread_fence_acq(); 3760 3761 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) 3762 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) { 3763 uint64_t *p; 3764 int i; 3765 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 3766 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) 3767 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", 3768 m, i, (uintmax_t)*p)); 3769 } 3770 #endif 3771 if ((m->oflags & VPO_UNMANAGED) == 0) { 3772 KASSERT(!pmap_page_is_mapped(m), 3773 ("vm_page_free_prep: freeing mapped page %p", m)); 3774 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0, 3775 ("vm_page_free_prep: mapping flags set in page %p", m)); 3776 } else { 3777 KASSERT(m->a.queue == PQ_NONE, 3778 ("vm_page_free_prep: unmanaged page %p is queued", m)); 3779 } 3780 VM_CNT_INC(v_tfree); 3781 3782 if (m->object != NULL) { 3783 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) == 3784 ((m->object->flags & OBJ_UNMANAGED) != 0), 3785 ("vm_page_free_prep: managed flag mismatch for page %p", 3786 m)); 3787 vm_page_assert_xbusied(m); 3788 3789 /* 3790 * The object reference can be released without an atomic 3791 * operation. 3792 */ 3793 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 3794 m->ref_count == VPRC_OBJREF, 3795 ("vm_page_free_prep: page %p has unexpected ref_count %u", 3796 m, m->ref_count)); 3797 vm_page_object_remove(m); 3798 m->ref_count -= VPRC_OBJREF; 3799 } else 3800 vm_page_assert_unbusied(m); 3801 3802 vm_page_busy_free(m); 3803 3804 /* 3805 * If the page is fictitious, there is nothing more to do; 3806 * fictitious pages are never placed in the free page queues. 3807 */ 3808 if ((m->flags & PG_FICTITIOUS) != 0) { 3809 KASSERT(m->ref_count == 1, 3810 ("fictitious page %p is referenced", m)); 3811 KASSERT(m->a.queue == PQ_NONE, 3812 ("fictitious page %p is queued", m)); 3813 return (false); 3814 } 3815 3816 /* 3817 * Pages need not be dequeued before they are returned to the physical 3818 * memory allocator, but they must at least be marked for a deferred 3819 * dequeue. 3820 */ 3821 if ((m->oflags & VPO_UNMANAGED) == 0) 3822 vm_page_dequeue_deferred(m); 3823 3824 m->valid = 0; 3825 vm_page_undirty(m); 3826 3827 if (m->ref_count != 0) 3828 panic("vm_page_free_prep: page %p has references", m); 3829 3830 /* 3831 * Restore the default memory attribute to the page. 3832 */ 3833 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 3834 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 3835 3836 #if VM_NRESERVLEVEL > 0 3837 /* 3838 * Determine whether the page belongs to a reservation. If the page was 3839 * allocated from a per-CPU cache, it cannot belong to a reservation, so 3840 * as an optimization, we avoid the check in that case. 3841 */ 3842 if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m)) 3843 return (false); 3844 #endif 3845 3846 return (true); 3847 } 3848 3849 /* 3850 * vm_page_free_toq: 3851 * 3852 * Returns the given page to the free list, disassociating it 3853 * from any VM object. 3854 * 3855 * The object must be locked. The page must be exclusively busied if it 3856 * belongs to an object.
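 * Pages allocated from a per-CPU cache zone (PG_PCPU_CACHE) are
 * preferentially returned to that zone below; all other pages go
 * back to the physical memory allocator's free lists.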
3857 */ 3858 static void 3859 vm_page_free_toq(vm_page_t m) 3860 { 3861 struct vm_domain *vmd; 3862 uma_zone_t zone; 3863 3864 if (!vm_page_free_prep(m)) 3865 return; 3866 3867 vmd = vm_pagequeue_domain(m); 3868 zone = vmd->vmd_pgcache[m->pool].zone; 3869 if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) { 3870 uma_zfree(zone, m); 3871 return; 3872 } 3873 vm_domain_free_lock(vmd); 3874 vm_phys_free_pages(m, 0); 3875 vm_domain_free_unlock(vmd); 3876 vm_domain_freecnt_inc(vmd, 1); 3877 } 3878 3879 /* 3880 * vm_page_free_pages_toq: 3881 * 3882 * Returns a list of pages to the free list, disassociating each page 3883 * from any VM object. This is equivalent to calling 3884 * vm_page_free_toq() on each page in the list. 3885 */ 3886 void 3887 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count) 3888 { 3889 vm_page_t m; 3890 int count; 3891 3892 if (SLIST_EMPTY(free)) 3893 return; 3894 3895 count = 0; 3896 while ((m = SLIST_FIRST(free)) != NULL) { 3897 count++; 3898 SLIST_REMOVE_HEAD(free, plinks.s.ss); 3899 vm_page_free_toq(m); 3900 } 3901 3902 if (update_wire_count) 3903 vm_wire_sub(count); 3904 } 3905 3906 /* 3907 * Mark this page as wired down. For managed pages, this prevents reclamation 3908 * by the page daemon and prevents the page from being freed when the 3909 * containing object, if any, is destroyed. 3910 */ 3911 void 3912 vm_page_wire(vm_page_t m) 3913 { 3914 u_int old; 3915 3916 #ifdef INVARIANTS 3917 if (m->object != NULL && !vm_page_busied(m) && 3918 !vm_object_busied(m->object)) 3919 VM_OBJECT_ASSERT_LOCKED(m->object); 3920 #endif 3921 KASSERT((m->flags & PG_FICTITIOUS) == 0 || 3922 VPRC_WIRE_COUNT(m->ref_count) >= 1, 3923 ("vm_page_wire: fictitious page %p has zero wirings", m)); 3924 3925 old = atomic_fetchadd_int(&m->ref_count, 1); 3926 KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX, 3927 ("vm_page_wire: counter overflow for page %p", m)); 3928 if (VPRC_WIRE_COUNT(old) == 0) { 3929 if ((m->oflags & VPO_UNMANAGED) == 0) 3930 vm_page_aflag_set(m, PGA_DEQUEUE); 3931 vm_wire_add(1); 3932 } 3933 } 3934 3935 /* 3936 * Attempt to wire a mapped page following a pmap lookup of that page. 3937 * This may fail if a thread is concurrently tearing down mappings of the page. 3938 * The transient failure is acceptable because it translates to a 3939 * failure of the caller, pmap_extract_and_hold(), which should then be 3940 * followed by the vm_fault() fallback; see e.g. vm_fault_quick_hold_pages(). 3941 */ 3942 bool 3943 vm_page_wire_mapped(vm_page_t m) 3944 { 3945 u_int old; 3946 3947 old = m->ref_count; 3948 do { 3949 KASSERT(old > 0, 3950 ("vm_page_wire_mapped: wiring unreferenced page %p", m)); 3951 if ((old & VPRC_BLOCKED) != 0) 3952 return (false); 3953 } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1)); 3954 3955 if (VPRC_WIRE_COUNT(old) == 0) { 3956 if ((m->oflags & VPO_UNMANAGED) == 0) 3957 vm_page_aflag_set(m, PGA_DEQUEUE); 3958 vm_wire_add(1); 3959 } 3960 return (true); 3961 } 3962 3963 /* 3964 * Release a wiring reference to a managed page. If the page still belongs to 3965 * an object, update its position in the page queues to reflect the reference. 3966 * If the wiring was the last reference to the page, free the page. 3967 */ 3968 static void 3969 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse) 3970 { 3971 u_int old; 3972 3973 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3974 ("%s: page %p is unmanaged", __func__, m)); 3975 3976 /* 3977 * Update LRU state before releasing the wiring reference.
3977 * Use a release store when updating the reference count to 3978 * synchronize with vm_page_free_prep(). 3979 */ 3980 old = m->ref_count; 3981 do { 3982 KASSERT(VPRC_WIRE_COUNT(old) > 0, 3983 ("vm_page_unwire: wire count underflow for page %p", m)); 3984 3985 if (old > VPRC_OBJREF + 1) { 3986 /* 3987 * The page has at least one other wiring reference. An 3988 * earlier iteration of this loop may have called 3989 * vm_page_release_toq() and cleared PGA_DEQUEUE, so 3990 * re-set it if necessary. 3991 */ 3992 if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0) 3993 vm_page_aflag_set(m, PGA_DEQUEUE); 3994 } else if (old == VPRC_OBJREF + 1) { 3995 /* 3996 * This is the last wiring. Clear PGA_DEQUEUE and 3997 * update the page's queue state to reflect the 3998 * reference. If the page does not belong to an object 3999 * (i.e., the VPRC_OBJREF bit is clear), we only need to 4000 * clear leftover queue state. 4001 */ 4002 vm_page_release_toq(m, nqueue, noreuse); 4003 } else if (old == 1) { 4004 vm_page_aflag_clear(m, PGA_DEQUEUE); 4005 } 4006 } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1)); 4007 4008 if (VPRC_WIRE_COUNT(old) == 1) { 4009 vm_wire_sub(1); 4010 if (old == 1) 4011 vm_page_free(m); 4012 } 4013 } 4014 4015 /* 4016 * Release one wiring of the specified page, potentially allowing it to be 4017 * paged out. 4018 * 4019 * Only managed pages belonging to an object can be paged out. If the number 4020 * of wirings transitions to zero and the page is eligible for page out, then 4021 * the page is added to the specified paging queue. If the released wiring 4022 * represented the last reference to the page, the page is freed. 4023 */ 4024 void 4025 vm_page_unwire(vm_page_t m, uint8_t nqueue) 4026 { 4027 4028 KASSERT(nqueue < PQ_COUNT, 4029 ("vm_page_unwire: invalid queue %u request for page %p", 4030 nqueue, m)); 4031 4032 if ((m->oflags & VPO_UNMANAGED) != 0) { 4033 if (vm_page_unwire_noq(m) && m->ref_count == 0) 4034 vm_page_free(m); 4035 return; 4036 } 4037 vm_page_unwire_managed(m, nqueue, false); 4038 } 4039 4040 /* 4041 * Unwire a page without (re-)inserting it into a page queue. It is up 4042 * to the caller to enqueue, requeue, or free the page as appropriate. 4043 * In most cases involving managed pages, vm_page_unwire() should be used 4044 * instead. 4045 */ 4046 bool 4047 vm_page_unwire_noq(vm_page_t m) 4048 { 4049 u_int old; 4050 4051 old = vm_page_drop(m, 1); 4052 KASSERT(VPRC_WIRE_COUNT(old) != 0, 4053 ("vm_page_unref: counter underflow for page %p", m)); 4054 KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1, 4055 ("vm_page_unref: missing ref on fictitious page %p", m)); 4056 4057 if (VPRC_WIRE_COUNT(old) > 1) 4058 return (false); 4059 if ((m->oflags & VPO_UNMANAGED) == 0) 4060 vm_page_aflag_clear(m, PGA_DEQUEUE); 4061 vm_wire_sub(1); 4062 return (true); 4063 } 4064 4065 /* 4066 * Ensure that the page ends up in the specified page queue. If the page is 4067 * active or being moved to the active queue, ensure that its act_count is 4068 * at least ACT_INIT but do not otherwise mess with it. 
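 *
 * This is the common implementation behind vm_page_activate(),
 * vm_page_deactivate(), vm_page_deactivate_noreuse(), and
 * vm_page_launder() below.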
4069  */
4070 static __always_inline void
4071 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
4072 {
4073 	vm_page_astate_t old, new;
4074 
4075 	KASSERT(m->ref_count > 0,
4076 	    ("%s: page %p does not carry any references", __func__, m));
4077 	KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
4078 	    ("%s: invalid flags %x", __func__, nflag));
4079 
4080 	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
4081 		return;
4082 
4083 	old = vm_page_astate_load(m);
4084 	do {
4085 		if ((old.flags & PGA_DEQUEUE) != 0)
4086 			break;
4087 		new = old;
4088 		new.flags &= ~PGA_QUEUE_OP_MASK;
4089 		if (nqueue == PQ_ACTIVE)
4090 			new.act_count = max(old.act_count, ACT_INIT);
4091 		if (old.queue == nqueue) {
4092 			if (nqueue != PQ_ACTIVE)
4093 				new.flags |= nflag;
4094 		} else {
4095 			new.flags |= nflag;
4096 			new.queue = nqueue;
4097 		}
4098 	} while (!vm_page_pqstate_commit(m, &old, new));
4099 }
4100 
4101 /*
4102  * Put the specified page on the active list (if appropriate).
4103  */
4104 void
4105 vm_page_activate(vm_page_t m)
4106 {
4107 
4108 	vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
4109 }
4110 
4111 /*
4112  * Move the specified page to the tail of the inactive queue, or requeue
4113  * the page if it is already in the inactive queue.
4114  */
4115 void
4116 vm_page_deactivate(vm_page_t m)
4117 {
4118 
4119 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
4120 }
4121 
4122 void
4123 vm_page_deactivate_noreuse(vm_page_t m)
4124 {
4125 
4126 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
4127 }
4128 
4129 /*
4130  * Put a page in the laundry, or requeue it if it is already there.
4131  */
4132 void
4133 vm_page_launder(vm_page_t m)
4134 {
4135 
4136 	vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
4137 }
4138 
4139 /*
4140  * Put a page in the PQ_UNSWAPPABLE holding queue.
4141  */
4142 void
4143 vm_page_unswappable(vm_page_t m)
4144 {
4145 
4146 	KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0,
4147 	    ("page %p already unswappable", m));
4148 
4149 	vm_page_dequeue(m);
4150 	vm_page_enqueue(m, PQ_UNSWAPPABLE);
4151 }
4152 
4153 /*
4154  * Release a page back to the page queues in preparation for unwiring.
4155  */
4156 static void
4157 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
4158 {
4159 	vm_page_astate_t old, new;
4160 	uint16_t nflag;
4161 
4162 	/*
4163 	 * Use a check of the valid bits to determine whether we should
4164 	 * accelerate reclamation of the page.  The object lock might not be
4165 	 * held here, in which case the check is racy.  At worst we will either
4166 	 * accelerate reclamation of a valid page and violate LRU, or
4167 	 * unnecessarily defer reclamation of an invalid page.
4168 	 *
4169 	 * If we were asked not to cache the page, place it near the head of
4170 	 * the inactive queue so that it is reclaimed sooner.
4171 	 */
4172 	if (noreuse || m->valid == 0) {
4173 		nqueue = PQ_INACTIVE;
4174 		nflag = PGA_REQUEUE_HEAD;
4175 	} else {
4176 		nflag = PGA_REQUEUE;
4177 	}
4178 
4179 	old = vm_page_astate_load(m);
4180 	do {
4181 		new = old;
4182 
4183 		/*
4184 		 * If the page is already in the active queue and we are not
4185 		 * trying to accelerate reclamation, simply mark it as
4186 		 * referenced and avoid any queue operations.
4187 		 */
4188 		new.flags &= ~PGA_QUEUE_OP_MASK;
4189 		if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE)
4190 			new.flags |= PGA_REFERENCED;
4191 		else {
4192 			new.flags |= nflag;
4193 			new.queue = nqueue;
4194 		}
4195 	} while (!vm_page_pqstate_commit(m, &old, new));
4196 }
4197 
4198 /*
4199  * Unwire a page and either attempt to free it or re-add it to the page queues.
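 *
 * A nonzero flags argument (e.g., VPR_TRYFREE) requests eager treatment:
 * VPR_TRYFREE attempts an immediate free when the object lock can be
 * acquired, and any nonzero value places a page that returns to the
 * queues where it will be reclaimed sooner (see vm_page_unwire_managed()).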
4200 */ 4201 void 4202 vm_page_release(vm_page_t m, int flags) 4203 { 4204 vm_object_t object; 4205 4206 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4207 ("vm_page_release: page %p is unmanaged", m)); 4208 4209 if ((flags & VPR_TRYFREE) != 0) { 4210 for (;;) { 4211 object = atomic_load_ptr(&m->object); 4212 if (object == NULL) 4213 break; 4214 /* Depends on type-stability. */ 4215 if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object)) 4216 break; 4217 if (object == m->object) { 4218 vm_page_release_locked(m, flags); 4219 VM_OBJECT_WUNLOCK(object); 4220 return; 4221 } 4222 VM_OBJECT_WUNLOCK(object); 4223 } 4224 } 4225 vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0); 4226 } 4227 4228 /* See vm_page_release(). */ 4229 void 4230 vm_page_release_locked(vm_page_t m, int flags) 4231 { 4232 4233 VM_OBJECT_ASSERT_WLOCKED(m->object); 4234 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4235 ("vm_page_release_locked: page %p is unmanaged", m)); 4236 4237 if (vm_page_unwire_noq(m)) { 4238 if ((flags & VPR_TRYFREE) != 0 && 4239 (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) && 4240 m->dirty == 0 && vm_page_tryxbusy(m)) { 4241 /* 4242 * An unlocked lookup may have wired the page before the 4243 * busy lock was acquired, in which case the page must 4244 * not be freed. 4245 */ 4246 if (__predict_true(!vm_page_wired(m))) { 4247 vm_page_free(m); 4248 return; 4249 } 4250 vm_page_xunbusy(m); 4251 } else { 4252 vm_page_release_toq(m, PQ_INACTIVE, flags != 0); 4253 } 4254 } 4255 } 4256 4257 static bool 4258 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t)) 4259 { 4260 u_int old; 4261 4262 KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0, 4263 ("vm_page_try_blocked_op: page %p has no object", m)); 4264 KASSERT(vm_page_busied(m), 4265 ("vm_page_try_blocked_op: page %p is not busy", m)); 4266 VM_OBJECT_ASSERT_LOCKED(m->object); 4267 4268 old = m->ref_count; 4269 do { 4270 KASSERT(old != 0, 4271 ("vm_page_try_blocked_op: page %p has no references", m)); 4272 if (VPRC_WIRE_COUNT(old) != 0) 4273 return (false); 4274 } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED)); 4275 4276 (op)(m); 4277 4278 /* 4279 * If the object is read-locked, new wirings may be created via an 4280 * object lookup. 4281 */ 4282 old = vm_page_drop(m, VPRC_BLOCKED); 4283 KASSERT(!VM_OBJECT_WOWNED(m->object) || 4284 old == (VPRC_BLOCKED | VPRC_OBJREF), 4285 ("vm_page_try_blocked_op: unexpected refcount value %u for %p", 4286 old, m)); 4287 return (true); 4288 } 4289 4290 /* 4291 * Atomically check for wirings and remove all mappings of the page. 4292 */ 4293 bool 4294 vm_page_try_remove_all(vm_page_t m) 4295 { 4296 4297 return (vm_page_try_blocked_op(m, pmap_remove_all)); 4298 } 4299 4300 /* 4301 * Atomically check for wirings and remove all writeable mappings of the page. 4302 */ 4303 bool 4304 vm_page_try_remove_write(vm_page_t m) 4305 { 4306 4307 return (vm_page_try_blocked_op(m, pmap_remove_write)); 4308 } 4309 4310 /* 4311 * vm_page_advise 4312 * 4313 * Apply the specified advice to the given page. 4314 */ 4315 void 4316 vm_page_advise(vm_page_t m, int advice) 4317 { 4318 4319 VM_OBJECT_ASSERT_WLOCKED(m->object); 4320 vm_page_assert_xbusied(m); 4321 4322 if (advice == MADV_FREE) 4323 /* 4324 * Mark the page clean. This will allow the page to be freed 4325 * without first paging it out. MADV_FREE pages are often 4326 * quickly reused by malloc(3), so we do not do anything that 4327 * would result in a page fault on a later access. 
4328 		 */
4329 		vm_page_undirty(m);
4330 	else if (advice != MADV_DONTNEED) {
4331 		if (advice == MADV_WILLNEED)
4332 			vm_page_activate(m);
4333 		return;
4334 	}
4335 
4336 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
4337 		vm_page_dirty(m);
4338 
4339 	/*
4340 	 * Clear any references to the page.  Otherwise, the page daemon will
4341 	 * immediately reactivate the page.
4342 	 */
4343 	vm_page_aflag_clear(m, PGA_REFERENCED);
4344 
4345 	/*
4346 	 * Place clean pages near the head of the inactive queue rather than
4347 	 * the tail, thus defeating the queue's LRU operation and ensuring that
4348 	 * the page will be reused quickly.  Dirty pages not already in the
4349 	 * laundry are moved there.
4350 	 */
4351 	if (m->dirty == 0)
4352 		vm_page_deactivate_noreuse(m);
4353 	else if (!vm_page_in_laundry(m))
4354 		vm_page_launder(m);
4355 }
4356 
4357 /*
4358  * vm_page_grab_release
4359  *
4360  *	Helper routine for grab functions to release the busy lock on return.
4361  */
4362 static inline void
4363 vm_page_grab_release(vm_page_t m, int allocflags)
4364 {
4365 
4366 	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
4367 		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4368 			vm_page_sunbusy(m);
4369 		else
4370 			vm_page_xunbusy(m);
4371 	}
4372 }
4373 
4374 /*
4375  * vm_page_grab_sleep
4376  *
4377  *	Sleep waiting for a busy page according to the VM_ALLOC_ parameters.
4378  *	Returns true if the caller should retry and false otherwise.
4379  *
4380  *	If the object is locked on entry, it will have been unlocked when
4381  *	false is returned.  It is still locked on a true return, although it
4382  *	may have been dropped and reacquired while sleeping.
4383  */
4384 static bool
4385 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
4386     const char *wmesg, int allocflags, bool locked)
4387 {
4388 
4389 	if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4390 		return (false);
4391 
4392 	/*
4393 	 * Reference the page before unlocking and sleeping so that
4394 	 * the page daemon is less likely to reclaim it.
4395 	 */
4396 	if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
4397 		vm_page_reference(m);
4398 
4399 	if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
4400 	    locked)
4401 		VM_OBJECT_WLOCK(object);
4402 	if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
4403 		return (false);
4404 
4405 	return (true);
4406 }
4407 
4408 /*
4409  * Assert that the grab flags are valid.
4410  */
4411 static inline void
4412 vm_page_grab_check(int allocflags)
4413 {
4414 
4415 	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
4416 	    (allocflags & VM_ALLOC_WIRED) != 0,
4417 	    ("vm_page_grab*: the pages must be busied or wired"));
4418 
4419 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4420 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4421 	    ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4422 }
4423 
4424 /*
4425  * Calculate the page allocation flags for grab.
4426  */
4427 static inline int
4428 vm_page_grab_pflags(int allocflags)
4429 {
4430 	int pflags;
4431 
4432 	pflags = allocflags &
4433 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
4434 	    VM_ALLOC_NOBUSY);
4435 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4436 		pflags |= VM_ALLOC_WAITFAIL;
4437 	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4438 		pflags |= VM_ALLOC_SBUSY;
4439 
4440 	return (pflags);
4441 }
4442 
4443 /*
4444  * Grab a page, waiting until we are woken up due to the page
4445  * changing state.  We keep on waiting as long as the page continues
4446  * to exist in the object.  If the page doesn't exist, first allocate it
4447  * and then conditionally zero it.
4448  *
4449  * This routine may sleep.
4450  *
4451  * The object must be locked on entry.
The lock will, however, be released
4452  * and reacquired if the routine sleeps.
4453  */
4454 vm_page_t
4455 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
4456 {
4457 	vm_page_t m;
4458 
4459 	VM_OBJECT_ASSERT_WLOCKED(object);
4460 	vm_page_grab_check(allocflags);
4461 
4462 retrylookup:
4463 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
4464 		if (!vm_page_tryacquire(m, allocflags)) {
4465 			if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
4466 			    allocflags, true))
4467 				goto retrylookup;
4468 			return (NULL);
4469 		}
4470 		goto out;
4471 	}
4472 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4473 		return (NULL);
4474 	m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
4475 	if (m == NULL) {
4476 		if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
4477 			return (NULL);
4478 		goto retrylookup;
4479 	}
4480 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
4481 		pmap_zero_page(m);
4482 
4483 out:
4484 	vm_page_grab_release(m, allocflags);
4485 
4486 	return (m);
4487 }
4488 
4489 /*
4490  * Locklessly attempt to acquire a page given an (object, pindex) tuple
4491  * and an optional previous page to avoid the radix lookup.  The resulting
4492  * page will be validated against the identity tuple and busied or wired
4493  * as requested.  A NULL *mp return guarantees that the page was not in
4494  * the radix tree at the time of the call, but callers must perform higher
4495  * level synchronization or retry the operation under a lock if they
4496  * require an atomic answer.  This is the only lock-free validation
4497  * routine; other routines can depend on the resulting page state.
4498  *
4499  * The return value indicates whether the operation failed due to caller
4500  * flags.  The return is tri-state with mp:
4501  *
4502  * (true, *mp != NULL) - The operation was successful.
4503  * (true, *mp == NULL) - The page was not found in the tree.
4504  * (false, *mp == NULL) - WAITFAIL or NOWAIT prevented acquisition.
4505  */
4506 static bool
4507 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex,
4508     vm_page_t prev, vm_page_t *mp, int allocflags)
4509 {
4510 	vm_page_t m;
4511 
4512 	vm_page_grab_check(allocflags);
4513 	MPASS(prev == NULL || vm_page_busied(prev) || vm_page_wired(prev));
4514 
4515 	*mp = NULL;
4516 	for (;;) {
4517 		/*
4518 		 * We may see a false NULL here because the previous page
4519 		 * has been removed or just inserted and the list is loaded
4520 		 * without barriers.  Switch to a radix lookup to verify.
4521 		 */
4522 		if (prev == NULL || (m = TAILQ_NEXT(prev, listq)) == NULL ||
4523 		    QMD_IS_TRASHED(m) || m->pindex != pindex ||
4524 		    atomic_load_ptr(&m->object) != object) {
4525 			prev = NULL;
4526 			/*
4527 			 * This guarantees the result is instantaneously
4528 			 * correct.
4529 			 */
4530 			m = vm_radix_lookup_unlocked(&object->rtree, pindex);
4531 		}
4532 		if (m == NULL)
4533 			return (true);
4534 		if (vm_page_trybusy(m, allocflags)) {
4535 			if (m->object == object && m->pindex == pindex)
4536 				break;
4537 			/* relookup. */
4538 			vm_page_busy_release(m);
4539 			cpu_spinwait();
4540 			continue;
4541 		}
4542 		if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
4543 		    allocflags, false))
4544 			return (false);
4545 	}
4546 	if ((allocflags & VM_ALLOC_WIRED) != 0)
4547 		vm_page_wire(m);
4548 	vm_page_grab_release(m, allocflags);
4549 	*mp = m;
4550 	return (true);
4551 }
4552 
4553 /*
4554  * Try to locklessly grab a page and fall back to the object lock if NOCREAT
4555  * is not set.
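 *
 * A minimal usage sketch (illustrative; "object" and "pindex" are the
 * caller's):
 *
 *	m = vm_page_grab_unlocked(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
 *
 * Here NULL can be returned only when VM_ALLOC_NOWAIT, VM_ALLOC_WAITFAIL,
 * or VM_ALLOC_NOCREAT is also passed.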
4556  */
4557 vm_page_t
4558 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
4559 {
4560 	vm_page_t m;
4561 
4562 	vm_page_grab_check(allocflags);
4563 
4564 	if (!vm_page_acquire_unlocked(object, pindex, NULL, &m, allocflags))
4565 		return (NULL);
4566 	if (m != NULL)
4567 		return (m);
4568 
4569 	/*
4570 	 * The radix lockless lookup should never return a false
4571 	 * negative.  If the user specifies NOCREAT, they are guaranteed
4572 	 * that there was no page present at the instant of the call.  A
4573 	 * NOCREAT caller must handle create races gracefully.
4574 	 */
4575 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4576 		return (NULL);
4577 
4578 	VM_OBJECT_WLOCK(object);
4579 	m = vm_page_grab(object, pindex, allocflags);
4580 	VM_OBJECT_WUNLOCK(object);
4581 
4582 	return (m);
4583 }
4584 
4585 /*
4586  * Grab a page and make it valid, paging it in if necessary.  Pages missing
4587  * from their pager are zero filled and validated.  If a VM_ALLOC_COUNT is
4588  * supplied and the page is not valid, as many as VM_INITIAL_PAGEIN pages can
4589  * be brought in simultaneously.  Additional pages will be left on a paging
4590  * queue but will neither be wired nor busy regardless of allocflags.
4591  */
4592 int
4593 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
4594 {
4595 	vm_page_t m;
4596 	vm_page_t ma[VM_INITIAL_PAGEIN];
4597 	int after, i, pflags, rv;
4598 
4599 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4600 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4601 	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4602 	KASSERT((allocflags &
4603 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
4604 	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
4605 	VM_OBJECT_ASSERT_WLOCKED(object);
4606 	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
4607 	    VM_ALLOC_WIRED);
4608 	pflags |= VM_ALLOC_WAITFAIL;
4609 
4610 retrylookup:
4611 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
4612 		/*
4613 		 * If the page is fully valid, it can only become invalid
4614 		 * with the object lock held.  If it is not valid, it can
4615 		 * become valid with the busy lock held.  Therefore, we
4616 		 * may unnecessarily lock the exclusive busy here if we
4617 		 * race with I/O completion not using the object lock.
4618 		 * However, we will not end up with an invalid page and a
4619 		 * shared lock.
4620 		 */
4621 		if (!vm_page_trybusy(m,
4622 		    vm_page_all_valid(m) ?
allocflags : 0)) {
4623 			(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
4624 			    allocflags, true);
4625 			goto retrylookup;
4626 		}
4627 		if (vm_page_all_valid(m))
4628 			goto out;
4629 		if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
4630 			vm_page_busy_release(m);
4631 			*mp = NULL;
4632 			return (VM_PAGER_FAIL);
4633 		}
4634 	} else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
4635 		*mp = NULL;
4636 		return (VM_PAGER_FAIL);
4637 	} else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
4638 		goto retrylookup;
4639 	}
4640 
4641 	vm_page_assert_xbusied(m);
4642 	if (vm_pager_has_page(object, pindex, NULL, &after)) {
4643 		after = MIN(after, VM_INITIAL_PAGEIN);
4644 		after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
4645 		after = MAX(after, 1);
4646 		ma[0] = m;
4647 		for (i = 1; i < after; i++) {
4648 			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
4649 				if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
4650 					break;
4651 			} else {
4652 				ma[i] = vm_page_alloc(object, m->pindex + i,
4653 				    VM_ALLOC_NORMAL);
4654 				if (ma[i] == NULL)
4655 					break;
4656 			}
4657 		}
4658 		after = i;
4659 		vm_object_pip_add(object, after);
4660 		VM_OBJECT_WUNLOCK(object);
4661 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
4662 		VM_OBJECT_WLOCK(object);
4663 		vm_object_pip_wakeupn(object, after);
4664 		/* Pager may have replaced a page. */
4665 		m = ma[0];
4666 		if (rv != VM_PAGER_OK) {
4667 			for (i = 0; i < after; i++) {
4668 				if (!vm_page_wired(ma[i]))
4669 					vm_page_free(ma[i]);
4670 				else
4671 					vm_page_xunbusy(ma[i]);
4672 			}
4673 			*mp = NULL;
4674 			return (rv);
4675 		}
4676 		for (i = 1; i < after; i++)
4677 			vm_page_readahead_finish(ma[i]);
4678 		MPASS(vm_page_all_valid(m));
4679 	} else {
4680 		vm_page_zero_invalid(m, TRUE);
4681 	}
4682 out:
4683 	if ((allocflags & VM_ALLOC_WIRED) != 0)
4684 		vm_page_wire(m);
4685 	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
4686 		vm_page_busy_downgrade(m);
4687 	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
4688 		vm_page_busy_release(m);
4689 	*mp = m;
4690 	return (VM_PAGER_OK);
4691 }
4692 
4693 /*
4694  * Locklessly grab a valid page.  If the page is not valid or not yet
4695  * allocated, this will fall back to the object lock method.
4696  */
4697 int
4698 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
4699     vm_pindex_t pindex, int allocflags)
4700 {
4701 	vm_page_t m;
4702 	int flags;
4703 	int error;
4704 
4705 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4706 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4707 	    ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
4708 	    "mismatch"));
4709 	KASSERT((allocflags &
4710 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
4711 	    ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));
4712 
4713 	/*
4714 	 * Attempt a lockless lookup and busy.  We need at least an sbusy
4715 	 * before we can inspect the valid field and return a wired page.
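	 * VM_ALLOC_NOBUSY and VM_ALLOC_WIRED are therefore masked off below
	 * for the lockless attempt; wiring and the busy release are applied
	 * by hand once the page is known to be fully valid.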
4716 	 */
4717 	flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
4718 	if (!vm_page_acquire_unlocked(object, pindex, NULL, mp, flags))
4719 		return (VM_PAGER_FAIL);
4720 	if ((m = *mp) != NULL) {
4721 		if (vm_page_all_valid(m)) {
4722 			if ((allocflags & VM_ALLOC_WIRED) != 0)
4723 				vm_page_wire(m);
4724 			vm_page_grab_release(m, allocflags);
4725 			return (VM_PAGER_OK);
4726 		}
4727 		vm_page_busy_release(m);
4728 	}
4729 	if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
4730 		*mp = NULL;
4731 		return (VM_PAGER_FAIL);
4732 	}
4733 	VM_OBJECT_WLOCK(object);
4734 	error = vm_page_grab_valid(mp, object, pindex, allocflags);
4735 	VM_OBJECT_WUNLOCK(object);
4736 
4737 	return (error);
4738 }
4739 
4740 /*
4741  * Return the specified range of pages from the given object.  For each
4742  * page offset within the range, if a page already exists within the object
4743  * at that offset and it is busy, then wait for it to change state.  If,
4744  * instead, the page doesn't exist, then allocate it.
4745  *
4746  * The caller must always specify an allocation class.
4747  *
4748  * allocation classes:
4749  *	VM_ALLOC_NORMAL		normal process request
4750  *	VM_ALLOC_SYSTEM		system *really* needs the pages
4751  *
4752  * The caller must always specify that the pages are to be busied and/or
4753  * wired.
4754  *
4755  * optional allocation flags:
4756  *	VM_ALLOC_IGN_SBUSY	do not sleep on soft busy pages
4757  *	VM_ALLOC_NOBUSY		do not exclusively busy the page
4758  *	VM_ALLOC_NOWAIT		do not sleep
4759  *	VM_ALLOC_SBUSY		set page to sbusy state
4760  *	VM_ALLOC_WIRED		wire the pages
4761  *	VM_ALLOC_ZERO		zero and validate any invalid pages
4762  *
4763  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
4764  * may return a partial prefix of the requested range.
4765  */
4766 int
4767 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
4768     vm_page_t *ma, int count)
4769 {
4770 	vm_page_t m, mpred;
4771 	int pflags;
4772 	int i;
4773 
4774 	VM_OBJECT_ASSERT_WLOCKED(object);
4775 	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
4776 	    ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
4777 	KASSERT(count > 0,
4778 	    ("vm_page_grab_pages: invalid page count %d", count));
4779 	vm_page_grab_check(allocflags);
4780 
4781 	pflags = vm_page_grab_pflags(allocflags);
4782 	i = 0;
4783 retrylookup:
4784 	m = vm_radix_lookup_le(&object->rtree, pindex + i);
4785 	if (m == NULL || m->pindex != pindex + i) {
4786 		mpred = m;
4787 		m = NULL;
4788 	} else
4789 		mpred = TAILQ_PREV(m, pglist, listq);
4790 	for (; i < count; i++) {
4791 		if (m != NULL) {
4792 			if (!vm_page_tryacquire(m, allocflags)) {
4793 				if (vm_page_grab_sleep(object, m, pindex + i,
4794 				    "grbmaw", allocflags, true))
4795 					goto retrylookup;
4796 				break;
4797 			}
4798 		} else {
4799 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4800 				break;
4801 			m = vm_page_alloc_after(object, pindex + i,
4802 			    pflags | VM_ALLOC_COUNT(count - i), mpred);
4803 			if (m == NULL) {
4804 				if ((allocflags & (VM_ALLOC_NOWAIT |
4805 				    VM_ALLOC_WAITFAIL)) != 0)
4806 					break;
4807 				goto retrylookup;
4808 			}
4809 		}
4810 		if (vm_page_none_valid(m) &&
4811 		    (allocflags & VM_ALLOC_ZERO) != 0) {
4812 			if ((m->flags & PG_ZERO) == 0)
4813 				pmap_zero_page(m);
4814 			vm_page_valid(m);
4815 		}
4816 		vm_page_grab_release(m, allocflags);
4817 		ma[i] = mpred = m;
4818 		m = vm_page_next(m);
4819 	}
4820 	return (i);
4821 }
4822 
4823 /*
4824  * Unlocked variant of vm_page_grab_pages().  This accepts the same flags
4825  * and will fall back to the locked variant to handle allocation.
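 *
 * A minimal usage sketch (illustrative; the caller supplies "object",
 * "pindex", and the result array):
 *
 *	vm_page_t ma[4];
 *	int got;
 *
 *	got = vm_page_grab_pages_unlocked(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY, ma, 4);
 *
 * "got" counts the contiguous prefix of the range that was grabbed and
 * can fall short of the request only if VM_ALLOC_NOWAIT, VM_ALLOC_WAITFAIL,
 * or VM_ALLOC_NOCREAT is specified.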
4826  */
4827 int
4828 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
4829     int allocflags, vm_page_t *ma, int count)
4830 {
4831 	vm_page_t m, pred;
4832 	int flags;
4833 	int i;
4834 
4835 	KASSERT(count > 0,
4836 	    ("vm_page_grab_pages_unlocked: invalid page count %d", count));
4837 	vm_page_grab_check(allocflags);
4838 
4839 	/*
4840 	 * Modify the flags for the lockless acquire so that, if necessary,
4841 	 * the page is held busied until we set it valid.
4842 	 */
4843 	flags = allocflags & ~VM_ALLOC_NOBUSY;
4844 	pred = NULL;
4845 	for (i = 0; i < count; i++, pindex++) {
4846 		if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags))
4847 			return (i);
4848 		if (m == NULL)
4849 			break;
4850 		if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
4851 			if ((m->flags & PG_ZERO) == 0)
4852 				pmap_zero_page(m);
4853 			vm_page_valid(m);
4854 		}
4855 		/* m will still be wired or busy according to flags. */
4856 		vm_page_grab_release(m, allocflags);
4857 		pred = ma[i] = m;
4858 	}
4859 	if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0)
4860 		return (i);
4861 	count -= i;
4862 	VM_OBJECT_WLOCK(object);
4863 	i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
4864 	VM_OBJECT_WUNLOCK(object);
4865 
4866 	return (i);
4867 }
4868 
4869 /*
4870  * Mapping function for valid or dirty bits in a page.
4871  *
4872  * The inputs are required to fall within a single page.
4873  */
4874 vm_page_bits_t
4875 vm_page_bits(int base, int size)
4876 {
4877 	int first_bit;
4878 	int last_bit;
4879 
4880 	KASSERT(
4881 	    base + size <= PAGE_SIZE,
4882 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
4883 	);
4884 
4885 	if (size == 0)		/* handle degenerate case */
4886 		return (0);
4887 
4888 	first_bit = base >> DEV_BSHIFT;
4889 	last_bit = (base + size - 1) >> DEV_BSHIFT;
4890 
4891 	return (((vm_page_bits_t)2 << last_bit) -
4892 	    ((vm_page_bits_t)1 << first_bit));
4893 }
4894 
4895 void
4896 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
4897 {
4898 
4899 #if PAGE_SIZE == 32768
4900 	atomic_set_64((uint64_t *)bits, set);
4901 #elif PAGE_SIZE == 16384
4902 	atomic_set_32((uint32_t *)bits, set);
4903 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
4904 	atomic_set_16((uint16_t *)bits, set);
4905 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
4906 	atomic_set_8((uint8_t *)bits, set);
4907 #else /* PAGE_SIZE <= 8192 */
4908 	uintptr_t addr;
4909 	int shift;
4910 
4911 	addr = (uintptr_t)bits;
4912 	/*
4913 	 * Use a trick to perform a 32-bit atomic on the
4914 	 * containing aligned word, so as not to depend on the existence
4915 	 * of atomic_{set, clear}_{8, 16}.
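	 *
	 * For example (illustrative): with PAGE_SIZE == 8192 and no
	 * atomic_set_16(), vm_page_bits_t is 16 bits wide.  If the field
	 * sits at byte offset 2 within its aligned 32-bit word on a
	 * little-endian machine, shift becomes 2 * NBBY == 16 and the
	 * update is applied as atomic_set_32(word, set << 16).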
4916 	 */
4917 	shift = addr & (sizeof(uint32_t) - 1);
4918 #if BYTE_ORDER == BIG_ENDIAN
4919 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
4920 #else
4921 	shift *= NBBY;
4922 #endif
4923 	addr &= ~(sizeof(uint32_t) - 1);
4924 	atomic_set_32((uint32_t *)addr, set << shift);
4925 #endif /* PAGE_SIZE */
4926 }
4927 
4928 static inline void
4929 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
4930 {
4931 
4932 #if PAGE_SIZE == 32768
4933 	atomic_clear_64((uint64_t *)bits, clear);
4934 #elif PAGE_SIZE == 16384
4935 	atomic_clear_32((uint32_t *)bits, clear);
4936 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
4937 	atomic_clear_16((uint16_t *)bits, clear);
4938 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
4939 	atomic_clear_8((uint8_t *)bits, clear);
4940 #else /* PAGE_SIZE <= 8192 */
4941 	uintptr_t addr;
4942 	int shift;
4943 
4944 	addr = (uintptr_t)bits;
4945 	/*
4946 	 * Use a trick to perform a 32-bit atomic on the
4947 	 * containing aligned word, so as not to depend on the existence
4948 	 * of atomic_{set, clear}_{8, 16}.
4949 	 */
4950 	shift = addr & (sizeof(uint32_t) - 1);
4951 #if BYTE_ORDER == BIG_ENDIAN
4952 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
4953 #else
4954 	shift *= NBBY;
4955 #endif
4956 	addr &= ~(sizeof(uint32_t) - 1);
4957 	atomic_clear_32((uint32_t *)addr, clear << shift);
4958 #endif /* PAGE_SIZE */
4959 }
4960 
4961 static inline vm_page_bits_t
4962 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
4963 {
4964 #if PAGE_SIZE == 32768
4965 	uint64_t old;
4966 
4967 	old = *bits;
4968 	while (atomic_fcmpset_64(bits, &old, newbits) == 0);
4969 	return (old);
4970 #elif PAGE_SIZE == 16384
4971 	uint32_t old;
4972 
4973 	old = *bits;
4974 	while (atomic_fcmpset_32(bits, &old, newbits) == 0);
4975 	return (old);
4976 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
4977 	uint16_t old;
4978 
4979 	old = *bits;
4980 	while (atomic_fcmpset_16(bits, &old, newbits) == 0);
4981 	return (old);
4982 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
4983 	uint8_t old;
4984 
4985 	old = *bits;
4986 	while (atomic_fcmpset_8(bits, &old, newbits) == 0);
4987 	return (old);
4988 #else /* PAGE_SIZE <= 8192 */
4989 	uintptr_t addr;
4990 	uint32_t old, new, mask;
4991 	int shift;
4992 
4993 	addr = (uintptr_t)bits;
4994 	/*
4995 	 * Use a trick to perform a 32-bit atomic on the
4996 	 * containing aligned word, so as not to depend on the existence
4997 	 * of atomic_{set, swap, clear}_{8, 16}.
4998 	 */
4999 	shift = addr & (sizeof(uint32_t) - 1);
5000 #if BYTE_ORDER == BIG_ENDIAN
5001 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5002 #else
5003 	shift *= NBBY;
5004 #endif
5005 	addr &= ~(sizeof(uint32_t) - 1);
5006 	mask = VM_PAGE_BITS_ALL << shift;
5007 
5008 	old = *bits;
5009 	do {
5010 		new = old & ~mask;
5011 		new |= newbits << shift;
5012 	} while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
5013 	return (old >> shift);
5014 #endif /* PAGE_SIZE */
5015 }
5016 
5017 /*
5018  * vm_page_set_valid_range:
5019  *
5020  *	Sets portions of a page valid.  The arguments are expected to be
5021  *	DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive of
5022  *	any partial chunks touched by the range.  The invalid portion of
5023  *	such chunks will be zeroed.
5024  *
5025  *	(base + size) must be less than or equal to PAGE_SIZE.
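 *
 *	For example (illustrative), with DEV_BSIZE == 512 a call of
 *	vm_page_set_valid_range(m, 100, 1024) zeroes bytes [0, 100) of the
 *	first block and bytes [1124, 1536) of the last block if those
 *	blocks were previously invalid, and then sets the valid bits for
 *	blocks 0 through 2.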
5026  */
5027 void
5028 vm_page_set_valid_range(vm_page_t m, int base, int size)
5029 {
5030 	int endoff, frag;
5031 	vm_page_bits_t pagebits;
5032 
5033 	vm_page_assert_busied(m);
5034 	if (size == 0)	/* handle degenerate case */
5035 		return;
5036 
5037 	/*
5038 	 * If the base is not DEV_BSIZE aligned and the valid
5039 	 * bit is clear, we have to zero out a portion of the
5040 	 * first block.
5041 	 */
5042 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5043 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
5044 		pmap_zero_page_area(m, frag, base - frag);
5045 
5046 	/*
5047 	 * If the ending offset is not DEV_BSIZE aligned and the
5048 	 * valid bit is clear, we have to zero out a portion of
5049 	 * the last block.
5050 	 */
5051 	endoff = base + size;
5052 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5053 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
5054 		pmap_zero_page_area(m, endoff,
5055 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5056 
5057 	/*
5058 	 * Assert that no previously invalid block that is now being validated
5059 	 * is already dirty.
5060 	 */
5061 	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
5062 	    ("vm_page_set_valid_range: page %p is dirty", m));
5063 
5064 	/*
5065 	 * Set valid bits inclusive of any overlap.
5066 	 */
5067 	pagebits = vm_page_bits(base, size);
5068 	if (vm_page_xbusied(m))
5069 		m->valid |= pagebits;
5070 	else
5071 		vm_page_bits_set(m, &m->valid, pagebits);
5072 }
5073 
5074 /*
5075  * Set the page dirty bits and free the invalid swap space if
5076  * present.  Returns the previous dirty bits.
5077  */
5078 vm_page_bits_t
5079 vm_page_set_dirty(vm_page_t m)
5080 {
5081 	vm_page_bits_t old;
5082 
5083 	VM_PAGE_OBJECT_BUSY_ASSERT(m);
5084 
5085 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
5086 		old = m->dirty;
5087 		m->dirty = VM_PAGE_BITS_ALL;
5088 	} else
5089 		old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
5090 	if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
5091 		vm_pager_page_unswapped(m);
5092 
5093 	return (old);
5094 }
5095 
5096 /*
5097  * Clear the given bits from the specified page's dirty field.
5098  */
5099 static __inline void
5100 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
5101 {
5102 
5103 	vm_page_assert_busied(m);
5104 
5105 	/*
5106 	 * If the page is xbusied and not write mapped, we are the
5107 	 * only thread that can modify dirty bits.  Otherwise, the pmap
5108 	 * layer can call vm_page_dirty() without holding a distinguished
5109 	 * lock.  The combination of page busy and atomic operations
5110 	 * suffices to guarantee consistency of the page dirty field.
5111 	 */
5112 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
5113 		m->dirty &= ~pagebits;
5114 	else
5115 		vm_page_bits_clear(m, &m->dirty, pagebits);
5116 }
5117 
5118 /*
5119  * vm_page_set_validclean:
5120  *
5121  *	Sets portions of a page valid and clean.  The arguments are expected
5122  *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
5123  *	of any partial chunks touched by the range.  The invalid portion of
5124  *	such chunks will be zeroed.
5125  *
5126  *	(base + size) must be less than or equal to PAGE_SIZE.
5127  */
5128 void
5129 vm_page_set_validclean(vm_page_t m, int base, int size)
5130 {
5131 	vm_page_bits_t oldvalid, pagebits;
5132 	int endoff, frag;
5133 
5134 	vm_page_assert_busied(m);
5135 	if (size == 0)	/* handle degenerate case */
5136 		return;
5137 
5138 	/*
5139 	 * If the base is not DEV_BSIZE aligned and the valid
5140 	 * bit is clear, we have to zero out a portion of the
5141 	 * first block.
5142 	 */
5143 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5144 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5145 		pmap_zero_page_area(m, frag, base - frag);
5146 
5147 	/*
5148 	 * If the ending offset is not DEV_BSIZE aligned and the
5149 	 * valid bit is clear, we have to zero out a portion of
5150 	 * the last block.
5151 	 */
5152 	endoff = base + size;
5153 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5154 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5155 		pmap_zero_page_area(m, endoff,
5156 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5157 
5158 	/*
5159 	 * Set valid, clear dirty bits.  If validating the entire
5160 	 * page, we can safely clear the pmap modify bit.  We also
5161 	 * use this opportunity to clear the PGA_NOSYNC flag.  If a process
5162 	 * takes a write fault on a MAP_NOSYNC memory area, the flag will
5163 	 * be set again.
5164 	 *
5165 	 * We set valid bits inclusive of any overlap, but we can only
5166 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
5167 	 * the range.
5168 	 */
5169 	oldvalid = m->valid;
5170 	pagebits = vm_page_bits(base, size);
5171 	if (vm_page_xbusied(m))
5172 		m->valid |= pagebits;
5173 	else
5174 		vm_page_bits_set(m, &m->valid, pagebits);
5175 #if 0	/* NOT YET */
5176 	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
5177 		frag = DEV_BSIZE - frag;
5178 		base += frag;
5179 		size -= frag;
5180 		if (size < 0)
5181 			size = 0;
5182 	}
5183 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
5184 #endif
5185 	if (base == 0 && size == PAGE_SIZE) {
5186 		/*
5187 		 * The page can only be modified within the pmap if it is
5188 		 * mapped, and it can only be mapped if it was previously
5189 		 * fully valid.
5190 		 */
5191 		if (oldvalid == VM_PAGE_BITS_ALL)
5192 			/*
5193 			 * Perform the pmap_clear_modify() first.  Otherwise,
5194 			 * a concurrent pmap operation, such as
5195 			 * pmap_protect(), could clear a modification in the
5196 			 * pmap and set the dirty field on the page before
5197 			 * pmap_clear_modify() had begun and after the dirty
5198 			 * field was cleared here.
5199 			 */
5200 			pmap_clear_modify(m);
5201 		m->dirty = 0;
5202 		vm_page_aflag_clear(m, PGA_NOSYNC);
5203 	} else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
5204 		m->dirty &= ~pagebits;
5205 	else
5206 		vm_page_clear_dirty_mask(m, pagebits);
5207 }
5208 
5209 void
5210 vm_page_clear_dirty(vm_page_t m, int base, int size)
5211 {
5212 
5213 	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
5214 }
5215 
5216 /*
5217  * vm_page_set_invalid:
5218  *
5219  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
5220  *	valid and dirty bits for the affected areas are cleared.
5221  */
5222 void
5223 vm_page_set_invalid(vm_page_t m, int base, int size)
5224 {
5225 	vm_page_bits_t bits;
5226 	vm_object_t object;
5227 
5228 	/*
5229 	 * The object lock is required so that pages can't be mapped
5230 	 * read-only while we're in the process of invalidating them.
5231 	 */
5232 	object = m->object;
5233 	VM_OBJECT_ASSERT_WLOCKED(object);
5234 	vm_page_assert_busied(m);
5235 
5236 	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
5237 	    size >= object->un_pager.vnp.vnp_size)
5238 		bits = VM_PAGE_BITS_ALL;
5239 	else
5240 		bits = vm_page_bits(base, size);
5241 	if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
5242 		pmap_remove_all(m);
5243 	KASSERT((bits == 0 && vm_page_all_valid(m)) ||
5244 	    !pmap_page_is_mapped(m),
5245 	    ("vm_page_set_invalid: page %p is mapped", m));
5246 	if (vm_page_xbusied(m)) {
5247 		m->valid &= ~bits;
5248 		m->dirty &= ~bits;
5249 	} else {
5250 		vm_page_bits_clear(m, &m->valid, bits);
5251 		vm_page_bits_clear(m, &m->dirty, bits);
5252 	}
5253 }
5254 
5255 /*
5256  * vm_page_invalid:
5257  *
5258  *	Invalidates the entire page.  The page must be busy, unmapped, and
5259  *	the enclosing object must be locked.  The object lock protects
5260  *	against concurrent read-only pmap enters, which are done without
5261  *	busying the page.
5262  */
5263 void
5264 vm_page_invalid(vm_page_t m)
5265 {
5266 
5267 	vm_page_assert_busied(m);
5268 	VM_OBJECT_ASSERT_LOCKED(m->object);
5269 	MPASS(!pmap_page_is_mapped(m));
5270 
5271 	if (vm_page_xbusied(m))
5272 		m->valid = 0;
5273 	else
5274 		vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
5275 }
5276 
5277 /*
5278  * vm_page_zero_invalid()
5279  *
5280  *	The kernel assumes that the invalid portions of a page contain
5281  *	garbage, but such pages can be mapped into memory by user code.
5282  *	When this occurs, we must zero out the non-valid portions of the
5283  *	page so user code sees what it expects.
5284  *
5285  *	Pages are most often semi-valid when the end of a file is mapped
5286  *	into memory and the file's size is not page aligned.
5287  */
5288 void
5289 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
5290 {
5291 	int b;
5292 	int i;
5293 
5294 	/*
5295 	 * Scan the valid bits looking for invalid sections that
5296 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
5297 	 * valid bit may be set) have already been zeroed by
5298 	 * vm_page_set_validclean().
5299 	 */
5300 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
5301 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
5302 		    (m->valid & ((vm_page_bits_t)1 << i))) {
5303 			if (i > b) {
5304 				pmap_zero_page_area(m,
5305 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
5306 			}
5307 			b = i + 1;
5308 		}
5309 	}
5310 
5311 	/*
5312 	 * setvalid is TRUE when we can safely set the zeroed areas
5313 	 * as being valid.  We can do this if there are no cache consistency
5314 	 * issues.  E.g., it is ok to do so with UFS, but not ok with NFS.
5315 	 */
5316 	if (setvalid)
5317 		vm_page_valid(m);
5318 }
5319 
5320 /*
5321  * vm_page_is_valid:
5322  *
5323  *	Is (partial) page valid?  Note that in the degenerate case where
5324  *	size == 0, this returns FALSE if the page is entirely invalid and
5325  *	TRUE otherwise.
5326  *
5327  *	Some callers invoke this routine without the busy lock held and
5328  *	handle races via higher level locks.  Typical callers should
5329  *	hold a busy lock to prevent invalidation.
5330  */
5331 int
5332 vm_page_is_valid(vm_page_t m, int base, int size)
5333 {
5334 	vm_page_bits_t bits;
5335 
5336 	bits = vm_page_bits(base, size);
5337 	return (m->valid != 0 && (m->valid & bits) == bits);
5338 }
5339 
5340 /*
5341  * Returns true if all of the specified predicates are true for the entire
5342  * (super)page and false otherwise.
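 *
 * For example (illustrative), a caller can pass
 * PS_ALL_VALID | PS_NONE_BUSY to check that every base page of the
 * superpage containing "m" is fully valid and not busied.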
5343 */ 5344 bool 5345 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m) 5346 { 5347 vm_object_t object; 5348 int i, npages; 5349 5350 object = m->object; 5351 if (skip_m != NULL && skip_m->object != object) 5352 return (false); 5353 VM_OBJECT_ASSERT_LOCKED(object); 5354 npages = atop(pagesizes[m->psind]); 5355 5356 /* 5357 * The physically contiguous pages that make up a superpage, i.e., a 5358 * page with a page size index ("psind") greater than zero, will 5359 * occupy adjacent entries in vm_page_array[]. 5360 */ 5361 for (i = 0; i < npages; i++) { 5362 /* Always test object consistency, including "skip_m". */ 5363 if (m[i].object != object) 5364 return (false); 5365 if (&m[i] == skip_m) 5366 continue; 5367 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i])) 5368 return (false); 5369 if ((flags & PS_ALL_DIRTY) != 0) { 5370 /* 5371 * Calling vm_page_test_dirty() or pmap_is_modified() 5372 * might stop this case from spuriously returning 5373 * "false". However, that would require a write lock 5374 * on the object containing "m[i]". 5375 */ 5376 if (m[i].dirty != VM_PAGE_BITS_ALL) 5377 return (false); 5378 } 5379 if ((flags & PS_ALL_VALID) != 0 && 5380 m[i].valid != VM_PAGE_BITS_ALL) 5381 return (false); 5382 } 5383 return (true); 5384 } 5385 5386 /* 5387 * Set the page's dirty bits if the page is modified. 5388 */ 5389 void 5390 vm_page_test_dirty(vm_page_t m) 5391 { 5392 5393 vm_page_assert_busied(m); 5394 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 5395 vm_page_dirty(m); 5396 } 5397 5398 void 5399 vm_page_valid(vm_page_t m) 5400 { 5401 5402 vm_page_assert_busied(m); 5403 if (vm_page_xbusied(m)) 5404 m->valid = VM_PAGE_BITS_ALL; 5405 else 5406 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL); 5407 } 5408 5409 void 5410 vm_page_lock_KBI(vm_page_t m, const char *file, int line) 5411 { 5412 5413 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); 5414 } 5415 5416 void 5417 vm_page_unlock_KBI(vm_page_t m, const char *file, int line) 5418 { 5419 5420 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line); 5421 } 5422 5423 int 5424 vm_page_trylock_KBI(vm_page_t m, const char *file, int line) 5425 { 5426 5427 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line)); 5428 } 5429 5430 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 5431 void 5432 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line) 5433 { 5434 5435 vm_page_lock_assert_KBI(m, MA_OWNED, file, line); 5436 } 5437 5438 void 5439 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line) 5440 { 5441 5442 mtx_assert_(vm_page_lockptr(m), a, file, line); 5443 } 5444 #endif 5445 5446 #ifdef INVARIANTS 5447 void 5448 vm_page_object_busy_assert(vm_page_t m) 5449 { 5450 5451 /* 5452 * Certain of the page's fields may only be modified by the 5453 * holder of a page or object busy. 5454 */ 5455 if (m->object != NULL && !vm_page_busied(m)) 5456 VM_OBJECT_ASSERT_BUSY(m->object); 5457 } 5458 5459 void 5460 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits) 5461 { 5462 5463 if ((bits & PGA_WRITEABLE) == 0) 5464 return; 5465 5466 /* 5467 * The PGA_WRITEABLE flag can only be set if the page is 5468 * managed, is exclusively busied or the object is locked. 5469 * Currently, this flag is only set by pmap_enter(). 
5470 */ 5471 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5472 ("PGA_WRITEABLE on unmanaged page")); 5473 if (!vm_page_xbusied(m)) 5474 VM_OBJECT_ASSERT_BUSY(m->object); 5475 } 5476 #endif 5477 5478 #include "opt_ddb.h" 5479 #ifdef DDB 5480 #include <sys/kernel.h> 5481 5482 #include <ddb/ddb.h> 5483 5484 DB_SHOW_COMMAND(page, vm_page_print_page_info) 5485 { 5486 5487 db_printf("vm_cnt.v_free_count: %d\n", vm_free_count()); 5488 db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count()); 5489 db_printf("vm_cnt.v_active_count: %d\n", vm_active_count()); 5490 db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count()); 5491 db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count()); 5492 db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved); 5493 db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min); 5494 db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target); 5495 db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target); 5496 } 5497 5498 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 5499 { 5500 int dom; 5501 5502 db_printf("pq_free %d\n", vm_free_count()); 5503 for (dom = 0; dom < vm_ndomains; dom++) { 5504 db_printf( 5505 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n", 5506 dom, 5507 vm_dom[dom].vmd_page_count, 5508 vm_dom[dom].vmd_free_count, 5509 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt, 5510 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt, 5511 vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt, 5512 vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt); 5513 } 5514 } 5515 5516 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) 5517 { 5518 vm_page_t m; 5519 boolean_t phys, virt; 5520 5521 if (!have_addr) { 5522 db_printf("show pginfo addr\n"); 5523 return; 5524 } 5525 5526 phys = strchr(modif, 'p') != NULL; 5527 virt = strchr(modif, 'v') != NULL; 5528 if (virt) 5529 m = PHYS_TO_VM_PAGE(pmap_kextract(addr)); 5530 else if (phys) 5531 m = PHYS_TO_VM_PAGE(addr); 5532 else 5533 m = (vm_page_t)addr; 5534 db_printf( 5535 "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n" 5536 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", 5537 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, 5538 m->a.queue, m->ref_count, m->a.flags, m->oflags, 5539 m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty); 5540 } 5541 #endif /* DDB */ 5542
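
/*
 * Example ddb usage (illustrative): "show pginfo <addr>" interprets
 * <addr> as a vm_page_t pointer by default; with the "p" modifier the
 * address is treated as a physical address, and with "v" as a kernel
 * virtual address.
 */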