/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sleepqueue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

struct vm_domain vm_dom[MAXMEMDOM];

DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);

struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];

struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
/* The following fields are protected by the domainset lock. */
domainset_t __exclusive_cache_line vm_min_domains;
domainset_t __exclusive_cache_line vm_severe_domains;
static int vm_min_waiters;
static int vm_severe_waiters;
static int vm_pageproc_waiters;

static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM page statistics");

static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
    CTLFLAG_RD, &pqstate_commit_retries,
    "Number of failed per-page atomic queue state updates");

static COUNTER_U64_DEFINE_EARLY(queue_ops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
    CTLFLAG_RD, &queue_ops,
    "Number of batched queue operations");

static COUNTER_U64_DEFINE_EARLY(queue_nops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
    CTLFLAG_RD, &queue_nops,
    "Number of batched queue operations with no effects");
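
/*
 * Illustrative note (not part of the original file): the counters above
 * are exported read-only under the vm.stats.page sysctl node declared
 * earlier, so they can be inspected from userland, e.g.:
 *
 *	$ sysctl vm.stats.page.queue_ops vm.stats.page.queue_nops
 */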

/*
 * bogus page -- for I/O to/from partially complete buffers,
 * or for paging into sparsely invalid regions.
 */
vm_page_t bogus_page;

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;

struct bitset *vm_page_dump;
long vm_page_dump_pages;

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

static void vm_page_alloc_check(vm_page_t m);
static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
static bool vm_page_free_prep(vm_page_t m);
static void vm_page_free_toq(vm_page_t m);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
    const uint16_t nflag);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
    int req);
static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
    int flags);
static void vm_page_zone_release(void *arg, void **store, int cnt);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

static void
vm_page_init(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
}

/*
 * The cache page zone is initialized later since we need to be able to
 * allocate pages before UMA is fully initialized.
 */
static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	struct vm_pgcache *pgcache;
	int cache, domain, maxcache, pool;

	maxcache = 0;
	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &maxcache);
	maxcache *= mp_ncpus;
	for (domain = 0; domain < vm_ndomains; domain++) {
		vmd = VM_DOMAIN(domain);
		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
			pgcache = &vmd->vmd_pgcache[pool];
			pgcache->domain = domain;
			pgcache->pool = pool;
			pgcache->zone = uma_zcache_create("vm pgcache",
			    PAGE_SIZE, NULL, NULL, NULL, NULL,
			    vm_page_zone_import, vm_page_zone_release, pgcache,
			    UMA_ZONE_VM);

			/*
			 * Limit each pool's zone to 0.1% of the pages in the
			 * domain.
			 */
			cache = maxcache != 0 ? maxcache :
			    vmd->vmd_page_count / 1000;
			uma_zone_set_maxcache(pgcache->zone, cache);
		}
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
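
/*
 * Illustrative example (not part of the original file): the per-CPU
 * cache size can be capped from loader.conf(5) via the tunable fetched
 * above, e.g.
 *
 *	vm.pgcache_zone_max_pcpu=64
 *
 * which caps each pool's zone at 64 pages per CPU (the value is scaled
 * by mp_ncpus) instead of the default 0.1% of the domain's pages.
 */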

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

bool
vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
{
	struct vm_domain *vmd;
	vm_page_t m;
	int ret;

	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		return (true); /* page does not exist, no failure */

	vmd = vm_pagequeue_domain(m);
	vm_domain_free_lock(vmd);
	ret = vm_phys_unfree_page(m);
	vm_domain_free_unlock(vmd);
	if (ret != 0) {
		vm_domain_freecnt_inc(vmd, -1);
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		if (verbose)
			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
	}
	return (ret);
}

/*
 *	vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	char *next;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		vm_page_blacklist_add(pa, bootverbose);
	}
}
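
/*
 * Illustrative example (not part of the original file): entries
 * accepted by the parser above are physical addresses separated by
 * spaces, commas, or newlines, in any base understood by strtoq(),
 * e.g. in loader.conf(5):
 *
 *	vm.blacklist="0x7f654000,0x7f656000 2146304000"
 *
 * Each address is truncated to its page boundary and that page is
 * withheld from the free lists at boot.
 */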

/*
 *	vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
	return;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Initialize a dummy page for use in scans of the specified paging queue.
 * In principle, this function only needs to set the flag PG_MARKER.
 * Nonetheless, it write busies the page as a safety precaution.
 */
void
vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->a.flags = aflags;
	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	marker->a.queue = queue;
}

static void
vm_page_domain_init(int domain)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int i;

	vmd = VM_DOMAIN(domain);
	bzero(vmd, sizeof(*vmd));
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(const char **,
	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	vmd->vmd_domain = domain;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
		pq->pq_pdpages = 0;
		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
	}
	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);

	/*
	 * inacthead is used to provide FIFO ordering for LRU-bypassing
	 * insertions.
	 */
	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
	    &vmd->vmd_inacthead, plinks.q);

	/*
	 * The clock pages are used to implement active queue scanning without
	 * requeues.  Scans start at clock[0], which is advanced after the scan
	 * ends.  When the two clock hands meet, they are reset and scanning
	 * resumes from the head of the queue.
	 */
	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[0], plinks.q);
	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[1], plinks.q);
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
static void
vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
{

	m->object = NULL;
	m->ref_count = 0;
	m->busy_lock = VPB_FREED;
	m->flags = m->a.flags = 0;
	m->phys_addr = pa;
	m->a.queue = PQ_NONE;
	m->psind = 0;
	m->segind = segind;
	m->order = VM_NFREEORDER;
	m->pool = VM_FREEPOOL_DEFAULT;
	m->valid = m->dirty = 0;
	pmap_page_init(m);
}

#ifndef PMAP_HAS_PAGE_ARRAY
static vm_paddr_t
vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
{
	vm_paddr_t new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 * However, because this page is allocated from KVM, out-of-bounds
	 * accesses using the direct map will not be trapped.
	 */
	*vaddr += PAGE_SIZE;

	/*
	 * Allocate physical memory for the page structures, and map it.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array_size = page_range;

	return (new_end);
}
#endif

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.  Allocates physical memory for
 *	bootstrapping UMA and some data structures that are used to manage
 *	physical pages.  Initializes these structures, and populates the free
 *	page queues.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	struct vm_phys_seg *seg;
	vm_page_t m;
	char *list, *listend;
	vm_paddr_t end, high_avail, low_avail, new_end, size;
	vm_paddr_t page_range __unused;
	vm_paddr_t last_pa, pa;
	u_long pagecount;
#if MINIDUMP_PAGE_TRACKING
	u_long vm_page_dump_size;
#endif
	int biggestone, i, segind;
#ifdef WITNESS
	vm_offset_t mapped;
	int witness_size;
#endif
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	long ii;
#endif

	vaddr = round_page(vaddr);

	vm_phys_early_startup();
	biggestone = vm_phys_avail_largest();
	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(i);

	new_end = end;
#ifdef WITNESS
	witness_size = round_page(witness_startup_count());
	new_end -= witness_size;
	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, witness_size);
	witness_startup((void *)mapped);
#endif

#if MINIDUMP_PAGE_TRACKING
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	vm_page_dump_pages = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
		    dump_avail[i] / PAGE_SIZE;
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	}
	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#else
	(void)last_pa;
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
	/*
	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
	 * in a crash dump.  When pmap_map() uses the direct map, they are
	 * not automatically included.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use, taking into account the overhead of a page structure per page.
	 * In other words, solve
	 *	"available physical memory" - round_page(page_range *
	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE
	 * for page_range.
	 */
	low_avail = phys_avail[0];
	high_avail = phys_avail[1];
	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_avail)
			low_avail = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_avail)
			high_avail = vm_phys_segs[i].end;
	}
	/* Skip the first chunk.  It is already accounted for. */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i] < low_avail)
			low_avail = phys_avail[i];
		if (phys_avail[i + 1] > high_avail)
			high_avail = phys_avail[i + 1];
	}
	first_page = low_avail / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	size = 0;
	for (i = 0; i < vm_phys_nsegs; i++)
		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		size += phys_avail[i + 1] - phys_avail[i];
#elif defined(VM_PHYSSEG_DENSE)
	size = high_avail - low_avail;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

#ifdef PMAP_HAS_PAGE_ARRAY
	pmap_page_array_startup(size / PAGE_SIZE);
	biggestone = vm_phys_avail_largest();
	end = new_end = phys_avail[biggestone + 1];
#else
#ifdef VM_PHYSSEG_DENSE
	/*
	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
	 * the overhead of a page structure per page only if vm_page_array is
	 * allocated from the last physical memory chunk.  Otherwise, we must
	 * allocate page structures representing the physical memory
	 * underlying vm_page_array, even though they will not be used.
	 */
	if (new_end != high_avail)
		page_range = size / PAGE_SIZE;
	else
#endif
	{
		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));

		/*
		 * If the partial bytes remaining are large enough for
		 * a page (PAGE_SIZE) without a corresponding
		 * 'struct vm_page', then new_end will contain an
		 * extra page after subtracting the length of the VM
		 * page array.  Compensate by subtracting an extra
		 * page from new_end.
		 */
		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
			if (new_end == high_avail)
				high_avail -= PAGE_SIZE;
			new_end -= PAGE_SIZE;
		}
	}
	end = new_end;
	new_end = vm_page_array_alloc(&vaddr, end, page_range);
#endif

#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate physical memory for the reservation management system's
	 * data structures, and map it.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end);
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
	/*
	 * Include vm_page_array and vm_reserv_array in a crash dump.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		if (vm_phys_avail_size(i) != 0)
			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Initialize the page structures and add every available page to the
	 * physical memory allocator's free lists.
	 */
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	for (ii = 0; ii < vm_page_array_size; ii++) {
		m = &vm_page_array[ii];
		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
		m->flags = PG_FICTITIOUS;
	}
#endif
	vm_cnt.v_page_count = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		for (m = seg->first_page, pa = seg->start; pa < seg->end;
		    m++, pa += PAGE_SIZE)
			vm_page_init_page(m, pa, segind);

		/*
		 * Add the segment to the free lists only if it is covered by
		 * one of the ranges in phys_avail.  Because we've added the
		 * ranges to the vm_phys_segs array, we can assume that each
		 * segment is either entirely contained in one of the ranges,
		 * or doesn't overlap any of them.
		 */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			struct vm_domain *vmd;

			if (seg->start < phys_avail[i] ||
			    seg->end > phys_avail[i + 1])
				continue;

			m = seg->first_page;
			pagecount = (u_long)atop(seg->end - seg->start);

			vmd = VM_DOMAIN(seg->domain);
			vm_domain_free_lock(vmd);
			vm_phys_enqueue_contig(m, pagecount);
			vm_domain_free_unlock(vmd);
			vm_domain_freecnt_inc(vmd, pagecount);
			vm_cnt.v_page_count += (u_int)pagecount;

			vmd = VM_DOMAIN(seg->domain);
			vmd->vmd_page_count += (u_int)pagecount;
			vmd->vmd_segs |= 1UL << m->segind;
			break;
		}
	}

	/*
	 * Remove blacklisted pages from the physical memory allocator.
	 */
	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif

	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 *	vm_page_trybusy
 *
 *	Helper routine for grab functions to trylock busy.
 *
 *	Returns true on success and false on failure.
 */
static bool
vm_page_trybusy(vm_page_t m, int allocflags)
{

	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
		return (vm_page_trysbusy(m));
	else
		return (vm_page_tryxbusy(m));
}

/*
 *	vm_page_tryacquire
 *
 *	Helper routine for grab functions to trylock busy and wire.
 *
 *	Returns true on success and false on failure.
 */
static inline bool
vm_page_tryacquire(vm_page_t m, int allocflags)
{
	bool locked;

	locked = vm_page_trybusy(m, allocflags);
	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_wire(m);
	return (locked);
}

/*
 *	vm_page_busy_acquire:
 *
 *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
 *	and drop the object lock if necessary.
 */
bool
vm_page_busy_acquire(vm_page_t m, int allocflags)
{
	vm_object_t obj;
	bool locked;

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = m->object;
	for (;;) {
		if (vm_page_tryacquire(m, allocflags))
			return (true);
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (false);
		if (obj != NULL)
			locked = VM_OBJECT_WOWNED(obj);
		else
			locked = false;
		MPASS(locked || vm_page_wired(m));
		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
		    locked) && locked)
			VM_OBJECT_WLOCK(obj);
		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
			return (false);
		KASSERT(m->object == obj || m->object == NULL,
		    ("vm_page_busy_acquire: page %p does not belong to %p",
		    m, obj));
	}
}

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;

	vm_page_assert_xbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (atomic_fcmpset_rel_int(&m->busy_lock,
		    &x, VPB_SHARERS_WORD(1)))
			break;
	}
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}
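
/*
 * Illustrative sketch (not part of the original file): a typical
 * vm_page_busy_acquire() caller holds the object lock, looks the page
 * up, and then busies it; the helper may drop and retake the object
 * lock while it sleeps, so with VM_ALLOC_WAITFAIL the caller must be
 * prepared to retry ("obj", "pindex" and the retry label are
 * hypothetical):
 *
 *	VM_OBJECT_WLOCK(obj);
 *	m = vm_page_lookup(obj, pindex);
 *	if (m != NULL && !vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL)) {
 *		VM_OBJECT_WUNLOCK(obj);
 *		goto retry;
 *	}
 */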

/*
 *	vm_page_busy_tryupgrade:
 *
 *	Attempt to upgrade a single shared busy into an exclusive busy.
 */
int
vm_page_busy_tryupgrade(vm_page_t m)
{
	u_int ce, x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	ce = VPB_CURTHREAD_EXCLUSIVE;
	for (;;) {
		if (VPB_SHARERS(x) > 1)
			return (0);
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_busy_tryupgrade: invalid lock state"));
		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    ce | (x & VPB_BIT_WAITERS)))
			continue;
		return (1);
	}
}

/*
 *	vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}

/*
 *	vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		KASSERT(x != VPB_FREED,
		    ("vm_page_sunbusy: Unlocking freed page."));
		if (VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_sunbusy: invalid lock state"));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) == 0)
			break;
		wakeup(m);
		break;
	}
}

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	If nonshared is true, sleep only if the page is xbusy.
 *
 *	The object lock must be held on entry and will be released on exit.
 */
void
vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
{
	vm_object_t obj;

	obj = m->object;
	VM_OBJECT_ASSERT_LOCKED(obj);
	vm_page_lock_assert(m, MA_NOTOWNED);

	if (!_vm_page_busy_sleep(obj, m, m->pindex, wmesg,
	    nonshared ? VM_ALLOC_SBUSY : 0, true))
		VM_OBJECT_DROP(obj);
}

/*
 *	vm_page_busy_sleep_unlocked:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	If nonshared is true, sleep only if the page is xbusy.
 *
 *	The object lock must not be held on entry.  The operation will
 *	return if the page changes identity.
 */
void
vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, bool nonshared)
{

	VM_OBJECT_ASSERT_UNLOCKED(obj);
	vm_page_lock_assert(m, MA_NOTOWNED);

	_vm_page_busy_sleep(obj, m, pindex, wmesg,
	    nonshared ? VM_ALLOC_SBUSY : 0, false);
}

/*
 *	_vm_page_busy_sleep:
 *
 *	Internal busy sleep function.  Verifies the page identity and
 *	lockstate against parameters.  Returns true if it sleeps and
 *	false otherwise.
 *
 *	If locked is true the lock will be dropped for any true returns
 *	and held for any false returns.
 */
static bool
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)
{
	bool xsleep;
	u_int x;

	/*
	 * If the object is busy we must wait for that to drain to zero
	 * before trying the page again.
	 */
	if (obj != NULL && vm_object_busied(obj)) {
		if (locked)
			VM_OBJECT_DROP(obj);
		vm_object_busy_wait(obj, wmesg);
		return (true);
	}

	if (!vm_page_busied(m))
		return (false);

	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
	sleepq_lock(m);
	x = vm_page_busy_fetch(m);
	do {
		/*
		 * If the page changes objects or becomes unlocked we can
		 * simply return.
		 */
		if (x == VPB_UNBUSIED ||
		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
		    m->object != obj || m->pindex != pindex) {
			sleepq_release(m);
			return (false);
		}
		if ((x & VPB_BIT_WAITERS) != 0)
			break;
	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
	if (locked)
		VM_OBJECT_DROP(obj);
	DROP_GIANT();
	sleepq_add(m, NULL, wmesg, 0, 0);
	sleepq_wait(m, PVM);
	PICKUP_GIANT();
	return (true);
}

/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	vm_object_t obj;
	u_int x;

	obj = m->object;
	x = vm_page_busy_fetch(m);
	for (;;) {
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		/*
		 * Reduce the window for transient busies that will trigger
		 * false negatives in vm_page_ps_test().
		 */
		if (obj != NULL && vm_object_busied(obj))
			return (0);
		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    x + VPB_ONE_SHARER))
			break;
	}

	/* Refetch the object now that we're guaranteed that it is stable. */
	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_sunbusy(m);
		return (0);
	}
	return (1);
}

/*
 *	vm_page_tryxbusy:
 *
 *	Try to exclusive busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_tryxbusy(vm_page_t m)
{
	vm_object_t obj;

	if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
	    VPB_CURTHREAD_EXCLUSIVE) == 0)
		return (0);

	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_xunbusy(m);
		return (0);
	}
	return (1);
}

static void
vm_page_xunbusy_hard_tail(vm_page_t m)
{
	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* Wake the waiter. */
	wakeup(m);
}

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called when unbusy has failed because there is a waiter.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{
	vm_page_assert_xbusied(m);
	vm_page_xunbusy_hard_tail(m);
}

void
vm_page_xunbusy_hard_unchecked(vm_page_t m)
{
	vm_page_assert_xbusied_unchecked(m);
	vm_page_xunbusy_hard_tail(m);
}

static void
vm_page_busy_free(vm_page_t m)
{
	u_int x;

	atomic_thread_fence_rel();
	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}
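
/*
 * Illustrative sketch (not part of the original file): lockless code
 * commonly pairs the try-busy primitives above with
 * vm_page_busy_sleep_unlocked(), rechecking page identity after each
 * sleep ("obj" and "pindex" are hypothetical):
 *
 *	while ((m = vm_page_lookup_unlocked(obj, pindex)) != NULL) {
 *		if (vm_page_tryxbusy(m)) {
 *			...use the page...
 *			vm_page_xunbusy(m);
 *			break;
 *		}
 *		vm_page_busy_sleep_unlocked(obj, m, pindex, "pgbusy", false);
 *	}
 */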

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{

	for (; count != 0; count--) {
		vm_page_unwire(*ma, PQ_ACTIVE);
		ma++;
	}
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->a.queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	/* Fictitious pages are unevictable. */
	m->ref_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	vm_page_assert_xbusied(m);
	vm_page_busy_free(m);
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}
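
/*
 * Illustrative sketch (not part of the original file): a device pager
 * or driver might wrap a device memory frame in a fictitious page so
 * it can be handled like ordinary memory; "devpa" and the memory
 * attribute are hypothetical here.  The page is returned xbusied by
 * vm_page_getfake() and must still be xbusied when released:
 *
 *	vm_page_t fp;
 *
 *	fp = vm_page_getfake(devpa, VM_MEMATTR_UNCACHEABLE);
 *	...hand fp to the fault handler or mapping code...
 *	vm_page_putfake(fp);
 */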

/*
 * Unbusy and handle the page queueing for a page from a getpages request that
 * was optionally read ahead or behind.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	/* We shouldn't put invalid pages on queues. */
	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);
	vm_page_xunbusy_unchecked(m);
}

/*
 * Destroy the identity of an invalid page and free it if possible.
 * This is intended to be used when reading a page from backing store fails.
 */
void
vm_page_free_invalid(vm_page_t m)
{

	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
	KASSERT(m->object != NULL, ("page %p has no object", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * We may be attempting to free the page as part of the handling for an
	 * I/O error, in which case the page was xbusied by a different thread.
	 */
	vm_page_xbusy_claim(m);

	/*
	 * If someone has wired this page while the object lock
	 * was not held, then the thread that unwires is responsible
	 * for freeing the page.  Otherwise just free the page now.
	 * The wire count of this unmapped page cannot change while
	 * we have the page xbusy and the page's object wlocked.
	 */
	if (vm_page_remove(m))
		vm_page_free(m);
}

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the object lock if the page is busied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and the object containing it must
 *	be locked.
 */
int
vm_page_sleep_if_busy(vm_page_t m, const char *wmesg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = m->object;
	if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, 0, true)) {
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}

/*
 *	vm_page_sleep_if_xbusy:
 *
 *	Sleep and release the object lock if the page is xbusied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and the object containing it must
 *	be locked.
 */
int
vm_page_sleep_if_xbusy(vm_page_t m, const char *wmesg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the callers.
	 */
	obj = m->object;
	if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, VM_ALLOC_SBUSY,
	    true)) {
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}
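
/*
 * Illustrative sketch (not part of the original file): the sleep-if-busy
 * helpers above are typically used inside a lookup loop, because the
 * page can change identity while the object lock is dropped ("obj" and
 * "pindex" are hypothetical):
 *
 *	VM_OBJECT_WLOCK(obj);
 *	for (;;) {
 *		m = vm_page_lookup(obj, pindex);
 *		if (m == NULL || !vm_page_sleep_if_busy(m, "pwait"))
 *			break;
 *	}
 */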

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* Refer to this operation by its public name. */
	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));
}

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	vm_page_t msucc;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page.
	 */
	m->object = object;
	m->pindex = pindex;
	m->ref_count |= VPRC_OBJREF;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {
		m->object = NULL;
		m->pindex = 0;
		m->ref_count &= ~VPRC_OBJREF;
		return (1);
	}
	vm_page_insert_radixdone(m, object, mpred);
	return (0);
}
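
/*
 * Illustrative note (not part of the original file): vm_radix_insert()
 * can fail to allocate trie nodes, so insertion is fallible and callers
 * of vm_page_insert() must be prepared to undo their work, along the
 * lines of (hypothetical error path):
 *
 *	if (vm_page_insert(m, object, pindex) != 0) {
 *		vm_page_free(m);
 *		return (ENOMEM);
 *	}
 */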

/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's generation count.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 * Do the work to remove a page from its object.  The caller is responsible for
 * updating the page's fields to reflect this removal.
 */
static void
vm_page_object_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mrem;

	vm_page_assert_xbusied(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("page %p is missing its object ref", m));

	/* Deferred free of swap space. */
	if ((m->a.flags & PGA_SWAP_FREE) != 0)
		vm_pager_page_unswapped(m);

	m->object = NULL;
	mrem = vm_radix_remove(&object->rtree, m->pindex);
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);
}

/*
 *	vm_page_remove:
 *
 *	Removes the specified page from its containing object, but does not
 *	invalidate any backing storage.  Returns true if the object's reference
 *	was the last reference to the page, and false otherwise.
 *
 *	The object must be locked and the page must be exclusively busied.
 *	The exclusive busy will be released on return.  If this is not the
 *	final ref and the caller does not hold a wire reference it may not
 *	continue to access the page.
 */
bool
vm_page_remove(vm_page_t m)
{
	bool dropped;

	dropped = vm_page_remove_xbusy(m);
	vm_page_xunbusy(m);

	return (dropped);
}
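
/*
 * Illustrative sketch (not part of the original file): a caller that
 * has a page xbusied under the object lock frees it only when the
 * object held the last reference, exactly as vm_page_free_invalid()
 * does above:
 *
 *	if (vm_page_remove(m))
 *		vm_page_free(m);
 */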

/*
 *	vm_page_remove_xbusy
 *
 *	Removes the page but leaves the xbusy held.  Returns true if this
 *	removed the final ref and false otherwise.
 */
bool
vm_page_remove_xbusy(vm_page_t m)
{

	vm_page_object_remove(m);
	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 *	vm_page_lookup_unlocked:
 *
 *	Returns the page associated with the object/offset pair specified;
 *	if none is found, NULL is returned.  The page may no longer be
 *	present in the object at the time that this function returns.  Only
 *	useful for opportunistic checks such as inmem().
 */
vm_page_t
vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
{

	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
}

/*
 *	vm_page_relookup:
 *
 *	Returns a page that must already have been busied by
 *	the caller.  Used for bogus page replacement.
 */
vm_page_t
vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
	    m->object == object && m->pindex == pindex,
	    ("vm_page_relookup: Invalid page %p", m));
	return (m);
}

/*
 * This should only be used by lockless functions for releasing transient
 * incorrect acquires.  The page may have been freed after we acquired a
 * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
 * further to do.
 */
static void
vm_page_busy_release(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (x == VPB_FREED)
			break;
		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & VPB_BIT_SHARED) != 0 ||
		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
		    ("vm_page_busy_release: %p xbusy not owned.", m));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) != 0)
			wakeup(m);
		break;
	}
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}
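
/*
 * Illustrative sketch (not part of the original file): scanning all
 * resident pages in a pindex range combines vm_page_find_least() with
 * the sorted memq list ("start" and "end" are hypothetical bounds):
 *
 *	VM_OBJECT_RLOCK(object);
 *	for (m = vm_page_find_least(object, start);
 *	    m != NULL && m->pindex < end; m = TAILQ_NEXT(m, listq)) {
 *		...examine m...
 *	}
 *	VM_OBJECT_RUNLOCK(object);
 */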

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * Both pages must be exclusively busied on enter.  The old page is
 * unbusied on exit.
 *
 * A return value of true means mold is now free.  If this is not the
 * final ref and the caller does not hold a wire reference it may not
 * continue to access the page.
 */
static bool
vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;
	bool dropped;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_assert_xbusied(mold);
	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
	    ("vm_page_replace: page %p already in object", mnew));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
	mret = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
	KASSERT((mold->oflags & VPO_UNMANAGED) ==
	    (mnew->oflags & VPO_UNMANAGED),
	    ("vm_page_replace: mismatched VPO_UNMANAGED"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);
	mold->object = NULL;

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but the generation count should
	 * change if the page is dirty.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
	vm_page_xunbusy(mold);

	return (dropped);
}

void
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{

	vm_page_assert_xbusied(mnew);

	if (vm_page_replace_hold(mnew, object, pindex, mold))
		vm_page_free(mold);
}
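
/*
 * Illustrative sketch (not part of the original file): a pager that
 * substituted bogus_page into an I/O buffer can recover the real,
 * still-busied page with vm_page_relookup() once the I/O completes
 * ("ma", "obj" and "pindex" are hypothetical):
 *
 *	if (ma[i] == bogus_page)
 *		ma[i] = vm_page_relookup(obj, pindex + i);
 */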

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.
 *	We have to do this for several reasons:  (1) we aren't freeing the
 *	page, (2) we are dirtying the page, (3) the VM system is probably
 *	moving the page from object A to B, and will then later move
 *	the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	fact that we moved it, and because we may be invalidating
 *	swap.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));
	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on m_prev and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_object_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;

	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	return (0);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{

	return (vm_page_alloc_after(object, pindex, req, object != NULL ?
	    vm_radix_lookup_le(&object->rtree, pindex) : NULL));
}

vm_page_t
vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
    int req)
{

	return (vm_page_alloc_domain_after(object, pindex, domain, req,
	    object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) :
	    NULL));
}
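
/*
 * Illustrative sketch (not part of the original file): a kernel
 * consumer that needs a wired page with no backing object can request
 * one much as the module init code above does for bogus_page; since
 * VM_ALLOC_ZERO only expresses a preference, the caller must check
 * PG_ZERO itself:
 *
 *	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */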
1998 */ 1999 vm_page_t 2000 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, 2001 int req, vm_page_t mpred) 2002 { 2003 struct vm_domainset_iter di; 2004 vm_page_t m; 2005 int domain; 2006 2007 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2008 do { 2009 m = vm_page_alloc_domain_after(object, pindex, domain, req, 2010 mpred); 2011 if (m != NULL) 2012 break; 2013 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2014 2015 return (m); 2016 } 2017 2018 /* 2019 * Returns true if the number of free pages exceeds the minimum 2020 * for the request class and false otherwise. 2021 */ 2022 static int 2023 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages) 2024 { 2025 u_int limit, old, new; 2026 2027 if (req_class == VM_ALLOC_INTERRUPT) 2028 limit = 0; 2029 else if (req_class == VM_ALLOC_SYSTEM) 2030 limit = vmd->vmd_interrupt_free_min; 2031 else 2032 limit = vmd->vmd_free_reserved; 2033 2034 /* 2035 * Attempt to reserve the pages. Fail if we're below the limit. 2036 */ 2037 limit += npages; 2038 old = vmd->vmd_free_count; 2039 do { 2040 if (old < limit) 2041 return (0); 2042 new = old - npages; 2043 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0); 2044 2045 /* Wake the page daemon if we've crossed the threshold. */ 2046 if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old)) 2047 pagedaemon_wakeup(vmd->vmd_domain); 2048 2049 /* Only update bitsets on transitions. */ 2050 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) || 2051 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe)) 2052 vm_domain_set(vmd); 2053 2054 return (1); 2055 } 2056 2057 int 2058 vm_domain_allocate(struct vm_domain *vmd, int req, int npages) 2059 { 2060 int req_class; 2061 2062 /* 2063 * The page daemon is allowed to dig deeper into the free page list. 2064 */ 2065 req_class = req & VM_ALLOC_CLASS_MASK; 2066 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2067 req_class = VM_ALLOC_SYSTEM; 2068 return (_vm_domain_allocate(vmd, req_class, npages)); 2069 } 2070 2071 vm_page_t 2072 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, 2073 int req, vm_page_t mpred) 2074 { 2075 struct vm_domain *vmd; 2076 vm_page_t m; 2077 int flags, pool; 2078 2079 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && 2080 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && 2081 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2082 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2083 ("inconsistent object(%p)/req(%x)", object, req)); 2084 KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, 2085 ("Can't sleep and retry object insertion.")); 2086 KASSERT(mpred == NULL || mpred->pindex < pindex, 2087 ("mpred %p doesn't precede pindex 0x%jx", mpred, 2088 (uintmax_t)pindex)); 2089 if (object != NULL) 2090 VM_OBJECT_ASSERT_WLOCKED(object); 2091 2092 flags = 0; 2093 m = NULL; 2094 pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT; 2095 again: 2096 #if VM_NRESERVLEVEL > 0 2097 /* 2098 * Can we allocate the page from a reservation? 
2099 */ 2100 if (vm_object_reserv(object) && 2101 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != 2102 NULL) { 2103 goto found; 2104 } 2105 #endif 2106 vmd = VM_DOMAIN(domain); 2107 if (vmd->vmd_pgcache[pool].zone != NULL) { 2108 m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT | M_NOVM); 2109 if (m != NULL) { 2110 flags |= PG_PCPU_CACHE; 2111 goto found; 2112 } 2113 } 2114 if (vm_domain_allocate(vmd, req, 1)) { 2115 /* 2116 * If not, allocate it from the free page queues. 2117 */ 2118 vm_domain_free_lock(vmd); 2119 m = vm_phys_alloc_pages(domain, pool, 0); 2120 vm_domain_free_unlock(vmd); 2121 if (m == NULL) { 2122 vm_domain_freecnt_inc(vmd, 1); 2123 #if VM_NRESERVLEVEL > 0 2124 if (vm_reserv_reclaim_inactive(domain)) 2125 goto again; 2126 #endif 2127 } 2128 } 2129 if (m == NULL) { 2130 /* 2131 * Not allocatable, give up. 2132 */ 2133 if (vm_domain_alloc_fail(vmd, object, req)) 2134 goto again; 2135 return (NULL); 2136 } 2137 2138 /* 2139 * At this point we had better have found a good page. 2140 */ 2141 found: 2142 vm_page_dequeue(m); 2143 vm_page_alloc_check(m); 2144 2145 /* 2146 * Initialize the page. Only the PG_ZERO flag is inherited. 2147 */ 2148 if ((req & VM_ALLOC_ZERO) != 0) 2149 flags |= (m->flags & PG_ZERO); 2150 if ((req & VM_ALLOC_NODUMP) != 0) 2151 flags |= PG_NODUMP; 2152 m->flags = flags; 2153 m->a.flags = 0; 2154 m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? 2155 VPO_UNMANAGED : 0; 2156 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) 2157 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2158 else if ((req & VM_ALLOC_SBUSY) != 0) 2159 m->busy_lock = VPB_SHARERS_WORD(1); 2160 else 2161 m->busy_lock = VPB_UNBUSIED; 2162 if (req & VM_ALLOC_WIRED) { 2163 vm_wire_add(1); 2164 m->ref_count = 1; 2165 } 2166 m->a.act_count = 0; 2167 2168 if (object != NULL) { 2169 if (vm_page_insert_after(m, object, pindex, mpred)) { 2170 if (req & VM_ALLOC_WIRED) { 2171 vm_wire_sub(1); 2172 m->ref_count = 0; 2173 } 2174 KASSERT(m->object == NULL, ("page %p has object", m)); 2175 m->oflags = VPO_UNMANAGED; 2176 m->busy_lock = VPB_UNBUSIED; 2177 /* Don't change PG_ZERO. */ 2178 vm_page_free_toq(m); 2179 if (req & VM_ALLOC_WAITFAIL) { 2180 VM_OBJECT_WUNLOCK(object); 2181 vm_radix_wait(); 2182 VM_OBJECT_WLOCK(object); 2183 } 2184 return (NULL); 2185 } 2186 2187 /* Ignore device objects; the pager sets "memattr" for them. */ 2188 if (object->memattr != VM_MEMATTR_DEFAULT && 2189 (object->flags & OBJ_FICTITIOUS) == 0) 2190 pmap_page_set_memattr(m, object->memattr); 2191 } else 2192 m->pindex = pindex; 2193 2194 return (m); 2195 } 2196 2197 /* 2198 * vm_page_alloc_contig: 2199 * 2200 * Allocate a contiguous set of physical pages of the given size "npages" 2201 * from the free lists. All of the physical pages must be at or above 2202 * the given physical address "low" and below the given physical address 2203 * "high". The given value "alignment" determines the alignment of the 2204 * first physical page in the set. If the given value "boundary" is 2205 * non-zero, then the set of physical pages cannot cross any physical 2206 * address boundary that is a multiple of that value. Both "alignment" 2207 * and "boundary" must be a power of two. 2208 * 2209 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT, 2210 * then the memory attribute setting for the physical pages is configured 2211 * to the object's memory attribute setting. 
Otherwise, the memory 2212 * attribute setting for the physical pages is configured to "memattr", 2213 * overriding the object's memory attribute setting. However, if the 2214 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the 2215 * memory attribute setting for the physical pages cannot be configured 2216 * to VM_MEMATTR_DEFAULT. 2217 * 2218 * The specified object may not contain fictitious pages. 2219 * 2220 * The caller must always specify an allocation class. 2221 * 2222 * allocation classes: 2223 * VM_ALLOC_NORMAL normal process request 2224 * VM_ALLOC_SYSTEM system *really* needs a page 2225 * VM_ALLOC_INTERRUPT interrupt time request 2226 * 2227 * optional allocation flags: 2228 * VM_ALLOC_NOBUSY do not exclusive busy the page 2229 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 2230 * VM_ALLOC_NOOBJ page is not associated with an object and 2231 * should not be exclusive busy 2232 * VM_ALLOC_SBUSY shared busy the allocated page 2233 * VM_ALLOC_WIRED wire the allocated page 2234 * VM_ALLOC_ZERO prefer a zeroed page 2235 */ 2236 vm_page_t 2237 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, 2238 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2239 vm_paddr_t boundary, vm_memattr_t memattr) 2240 { 2241 struct vm_domainset_iter di; 2242 vm_page_t m; 2243 int domain; 2244 2245 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2246 do { 2247 m = vm_page_alloc_contig_domain(object, pindex, domain, req, 2248 npages, low, high, alignment, boundary, memattr); 2249 if (m != NULL) 2250 break; 2251 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2252 2253 return (m); 2254 } 2255 2256 vm_page_t 2257 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2258 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2259 vm_paddr_t boundary, vm_memattr_t memattr) 2260 { 2261 struct vm_domain *vmd; 2262 vm_page_t m, m_ret, mpred; 2263 u_int busy_lock, flags, oflags; 2264 2265 mpred = NULL; /* XXX: pacify gcc */ 2266 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && 2267 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && 2268 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2269 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2270 ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object, 2271 req)); 2272 KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, 2273 ("Can't sleep and retry object insertion.")); 2274 if (object != NULL) { 2275 VM_OBJECT_ASSERT_WLOCKED(object); 2276 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, 2277 ("vm_page_alloc_contig: object %p has fictitious pages", 2278 object)); 2279 } 2280 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2281 2282 if (object != NULL) { 2283 mpred = vm_radix_lookup_le(&object->rtree, pindex); 2284 KASSERT(mpred == NULL || mpred->pindex != pindex, 2285 ("vm_page_alloc_contig: pindex already allocated")); 2286 } 2287 2288 /* 2289 * Can we allocate the pages without the number of free pages falling 2290 * below the lower bound for the allocation class? 2291 */ 2292 m_ret = NULL; 2293 again: 2294 #if VM_NRESERVLEVEL > 0 2295 /* 2296 * Can we allocate the pages from a reservation? 
2297 */ 2298 if (vm_object_reserv(object) && 2299 (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req, 2300 mpred, npages, low, high, alignment, boundary)) != NULL) { 2301 goto found; 2302 } 2303 #endif 2304 vmd = VM_DOMAIN(domain); 2305 if (vm_domain_allocate(vmd, req, npages)) { 2306 /* 2307 * allocate them from the free page queues. 2308 */ 2309 vm_domain_free_lock(vmd); 2310 m_ret = vm_phys_alloc_contig(domain, npages, low, high, 2311 alignment, boundary); 2312 vm_domain_free_unlock(vmd); 2313 if (m_ret == NULL) { 2314 vm_domain_freecnt_inc(vmd, npages); 2315 #if VM_NRESERVLEVEL > 0 2316 if (vm_reserv_reclaim_contig(domain, npages, low, 2317 high, alignment, boundary)) 2318 goto again; 2319 #endif 2320 } 2321 } 2322 if (m_ret == NULL) { 2323 if (vm_domain_alloc_fail(vmd, object, req)) 2324 goto again; 2325 return (NULL); 2326 } 2327 #if VM_NRESERVLEVEL > 0 2328 found: 2329 #endif 2330 for (m = m_ret; m < &m_ret[npages]; m++) { 2331 vm_page_dequeue(m); 2332 vm_page_alloc_check(m); 2333 } 2334 2335 /* 2336 * Initialize the pages. Only the PG_ZERO flag is inherited. 2337 */ 2338 flags = 0; 2339 if ((req & VM_ALLOC_ZERO) != 0) 2340 flags = PG_ZERO; 2341 if ((req & VM_ALLOC_NODUMP) != 0) 2342 flags |= PG_NODUMP; 2343 oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? 2344 VPO_UNMANAGED : 0; 2345 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) 2346 busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2347 else if ((req & VM_ALLOC_SBUSY) != 0) 2348 busy_lock = VPB_SHARERS_WORD(1); 2349 else 2350 busy_lock = VPB_UNBUSIED; 2351 if ((req & VM_ALLOC_WIRED) != 0) 2352 vm_wire_add(npages); 2353 if (object != NULL) { 2354 if (object->memattr != VM_MEMATTR_DEFAULT && 2355 memattr == VM_MEMATTR_DEFAULT) 2356 memattr = object->memattr; 2357 } 2358 for (m = m_ret; m < &m_ret[npages]; m++) { 2359 m->a.flags = 0; 2360 m->flags = (m->flags | PG_NODUMP) & flags; 2361 m->busy_lock = busy_lock; 2362 if ((req & VM_ALLOC_WIRED) != 0) 2363 m->ref_count = 1; 2364 m->a.act_count = 0; 2365 m->oflags = oflags; 2366 if (object != NULL) { 2367 if (vm_page_insert_after(m, object, pindex, mpred)) { 2368 if ((req & VM_ALLOC_WIRED) != 0) 2369 vm_wire_sub(npages); 2370 KASSERT(m->object == NULL, 2371 ("page %p has object", m)); 2372 mpred = m; 2373 for (m = m_ret; m < &m_ret[npages]; m++) { 2374 if (m <= mpred && 2375 (req & VM_ALLOC_WIRED) != 0) 2376 m->ref_count = 0; 2377 m->oflags = VPO_UNMANAGED; 2378 m->busy_lock = VPB_UNBUSIED; 2379 /* Don't change PG_ZERO. */ 2380 vm_page_free_toq(m); 2381 } 2382 if (req & VM_ALLOC_WAITFAIL) { 2383 VM_OBJECT_WUNLOCK(object); 2384 vm_radix_wait(); 2385 VM_OBJECT_WLOCK(object); 2386 } 2387 return (NULL); 2388 } 2389 mpred = m; 2390 } else 2391 m->pindex = pindex; 2392 if (memattr != VM_MEMATTR_DEFAULT) 2393 pmap_page_set_memattr(m, memattr); 2394 pindex++; 2395 } 2396 return (m_ret); 2397 } 2398 2399 /* 2400 * Check a page that has been freshly dequeued from a freelist. 
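* These checks are bare KASSERTs: in kernels built without INVARIANTS they 
* compile away entirely, so this function diagnoses state corruption but 
* takes no corrective action.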
2401 */ 2402 static void 2403 vm_page_alloc_check(vm_page_t m) 2404 { 2405 2406 KASSERT(m->object == NULL, ("page %p has object", m)); 2407 KASSERT(m->a.queue == PQ_NONE && 2408 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 2409 ("page %p has unexpected queue %d, flags %#x", 2410 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK))); 2411 KASSERT(m->ref_count == 0, ("page %p has references", m)); 2412 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m)); 2413 KASSERT(m->dirty == 0, ("page %p is dirty", m)); 2414 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 2415 ("page %p has unexpected memattr %d", 2416 m, pmap_page_get_memattr(m))); 2417 KASSERT(m->valid == 0, ("free page %p is valid", m)); 2418 } 2419 2420 /* 2421 * vm_page_alloc_freelist: 2422 * 2423 * Allocate a physical page from the specified free page list. 2424 * 2425 * The caller must always specify an allocation class. 2426 * 2427 * allocation classes: 2428 * VM_ALLOC_NORMAL normal process request 2429 * VM_ALLOC_SYSTEM system *really* needs a page 2430 * VM_ALLOC_INTERRUPT interrupt time request 2431 * 2432 * optional allocation flags: 2433 * VM_ALLOC_COUNT(number) the number of additional pages that the caller 2434 * intends to allocate 2435 * VM_ALLOC_WIRED wire the allocated page 2436 * VM_ALLOC_ZERO prefer a zeroed page 2437 */ 2438 vm_page_t 2439 vm_page_alloc_freelist(int freelist, int req) 2440 { 2441 struct vm_domainset_iter di; 2442 vm_page_t m; 2443 int domain; 2444 2445 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2446 do { 2447 m = vm_page_alloc_freelist_domain(domain, freelist, req); 2448 if (m != NULL) 2449 break; 2450 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2451 2452 return (m); 2453 } 2454 2455 vm_page_t 2456 vm_page_alloc_freelist_domain(int domain, int freelist, int req) 2457 { 2458 struct vm_domain *vmd; 2459 vm_page_t m; 2460 u_int flags; 2461 2462 m = NULL; 2463 vmd = VM_DOMAIN(domain); 2464 again: 2465 if (vm_domain_allocate(vmd, req, 1)) { 2466 vm_domain_free_lock(vmd); 2467 m = vm_phys_alloc_freelist_pages(domain, freelist, 2468 VM_FREEPOOL_DIRECT, 0); 2469 vm_domain_free_unlock(vmd); 2470 if (m == NULL) 2471 vm_domain_freecnt_inc(vmd, 1); 2472 } 2473 if (m == NULL) { 2474 if (vm_domain_alloc_fail(vmd, NULL, req)) 2475 goto again; 2476 return (NULL); 2477 } 2478 vm_page_dequeue(m); 2479 vm_page_alloc_check(m); 2480 2481 /* 2482 * Initialize the page. Only the PG_ZERO flag is inherited. 2483 */ 2484 m->a.flags = 0; 2485 flags = 0; 2486 if ((req & VM_ALLOC_ZERO) != 0) 2487 flags = PG_ZERO; 2488 m->flags &= flags; 2489 if ((req & VM_ALLOC_WIRED) != 0) { 2490 vm_wire_add(1); 2491 m->ref_count = 1; 2492 } 2493 /* Unmanaged pages don't use "act_count". */ 2494 m->oflags = VPO_UNMANAGED; 2495 return (m); 2496 } 2497 2498 static int 2499 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags) 2500 { 2501 struct vm_domain *vmd; 2502 struct vm_pgcache *pgcache; 2503 int i; 2504 2505 pgcache = arg; 2506 vmd = VM_DOMAIN(pgcache->domain); 2507 2508 /* 2509 * The page daemon should avoid creating extra memory pressure since its 2510 * main purpose is to replenish the store of free pages. 
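* Accordingly, the import below is refused outright when this domain is in 
* the severe-shortage set, when the importing thread is the page daemon 
* itself, or when the domain cannot satisfy a VM_ALLOC_NORMAL request.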
2511 */ 2512 if (vmd->vmd_severeset || curproc == pageproc || 2513 !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt)) 2514 return (0); 2515 domain = vmd->vmd_domain; 2516 vm_domain_free_lock(vmd); 2517 i = vm_phys_alloc_npages(domain, pgcache->pool, cnt, 2518 (vm_page_t *)store); 2519 vm_domain_free_unlock(vmd); 2520 if (cnt != i) 2521 vm_domain_freecnt_inc(vmd, cnt - i); 2522 2523 return (i); 2524 } 2525 2526 static void 2527 vm_page_zone_release(void *arg, void **store, int cnt) 2528 { 2529 struct vm_domain *vmd; 2530 struct vm_pgcache *pgcache; 2531 vm_page_t m; 2532 int i; 2533 2534 pgcache = arg; 2535 vmd = VM_DOMAIN(pgcache->domain); 2536 vm_domain_free_lock(vmd); 2537 for (i = 0; i < cnt; i++) { 2538 m = (vm_page_t)store[i]; 2539 vm_phys_free_pages(m, 0); 2540 } 2541 vm_domain_free_unlock(vmd); 2542 vm_domain_freecnt_inc(vmd, cnt); 2543 } 2544 2545 #define VPSC_ANY 0 /* No restrictions. */ 2546 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ 2547 #define VPSC_NOSUPER 2 /* Skip superpages. */ 2548 2549 /* 2550 * vm_page_scan_contig: 2551 * 2552 * Scan vm_page_array[] between the specified entries "m_start" and 2553 * "m_end" for a run of contiguous physical pages that satisfy the 2554 * specified conditions, and return the lowest page in the run. The 2555 * specified "alignment" determines the alignment of the lowest physical 2556 * page in the run. If the specified "boundary" is non-zero, then the 2557 * run of physical pages cannot span a physical address that is a 2558 * multiple of "boundary". 2559 * 2560 * "m_end" is never dereferenced, so it need not point to a vm_page 2561 * structure within vm_page_array[]. 2562 * 2563 * "npages" must be greater than zero. "m_start" and "m_end" must not 2564 * span a hole (or discontiguity) in the physical address space. Both 2565 * "alignment" and "boundary" must be a power of two. 2566 */ 2567 vm_page_t 2568 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, 2569 u_long alignment, vm_paddr_t boundary, int options) 2570 { 2571 vm_object_t object; 2572 vm_paddr_t pa; 2573 vm_page_t m, m_run; 2574 #if VM_NRESERVLEVEL > 0 2575 int level; 2576 #endif 2577 int m_inc, order, run_ext, run_len; 2578 2579 KASSERT(npages > 0, ("npages is 0")); 2580 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2581 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2582 m_run = NULL; 2583 run_len = 0; 2584 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { 2585 KASSERT((m->flags & PG_MARKER) == 0, 2586 ("page %p is PG_MARKER", m)); 2587 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1, 2588 ("fictitious page %p has invalid ref count", m)); 2589 2590 /* 2591 * If the current page would be the start of a run, check its 2592 * physical address against the end, alignment, and boundary 2593 * conditions. If it doesn't satisfy these conditions, either 2594 * terminate the scan or advance to the next page that 2595 * satisfies the failed condition. 
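* As an illustrative example (hypothetical numbers, assuming 4KB pages): if 
* "alignment" is 2MB and the candidate page lies at physical address 
* 0x3fe000, the scan advances by atop(0x400000 - 0x3fe000), i.e., two 
* pages, so that the next candidate is 2MB-aligned.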
2596 */ 2597 if (run_len == 0) { 2598 KASSERT(m_run == NULL, ("m_run != NULL")); 2599 if (m + npages > m_end) 2600 break; 2601 pa = VM_PAGE_TO_PHYS(m); 2602 if ((pa & (alignment - 1)) != 0) { 2603 m_inc = atop(roundup2(pa, alignment) - pa); 2604 continue; 2605 } 2606 if (rounddown2(pa ^ (pa + ptoa(npages) - 1), 2607 boundary) != 0) { 2608 m_inc = atop(roundup2(pa, boundary) - pa); 2609 continue; 2610 } 2611 } else 2612 KASSERT(m_run != NULL, ("m_run == NULL")); 2613 2614 retry: 2615 m_inc = 1; 2616 if (vm_page_wired(m)) 2617 run_ext = 0; 2618 #if VM_NRESERVLEVEL > 0 2619 else if ((level = vm_reserv_level(m)) >= 0 && 2620 (options & VPSC_NORESERV) != 0) { 2621 run_ext = 0; 2622 /* Advance to the end of the reservation. */ 2623 pa = VM_PAGE_TO_PHYS(m); 2624 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2625 pa); 2626 } 2627 #endif 2628 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2629 /* 2630 * The page is considered eligible for relocation if 2631 * and only if it could be laundered or reclaimed by 2632 * the page daemon. 2633 */ 2634 VM_OBJECT_RLOCK(object); 2635 if (object != m->object) { 2636 VM_OBJECT_RUNLOCK(object); 2637 goto retry; 2638 } 2639 /* Don't care: PG_NODUMP, PG_ZERO. */ 2640 if (object->type != OBJT_DEFAULT && 2641 object->type != OBJT_SWAP && 2642 object->type != OBJT_VNODE) { 2643 run_ext = 0; 2644 #if VM_NRESERVLEVEL > 0 2645 } else if ((options & VPSC_NOSUPER) != 0 && 2646 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2647 run_ext = 0; 2648 /* Advance to the end of the superpage. */ 2649 pa = VM_PAGE_TO_PHYS(m); 2650 m_inc = atop(roundup2(pa + 1, 2651 vm_reserv_size(level)) - pa); 2652 #endif 2653 } else if (object->memattr == VM_MEMATTR_DEFAULT && 2654 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { 2655 /* 2656 * The page is allocated but eligible for 2657 * relocation. Extend the current run by one 2658 * page. 2659 */ 2660 KASSERT(pmap_page_get_memattr(m) == 2661 VM_MEMATTR_DEFAULT, 2662 ("page %p has an unexpected memattr", m)); 2663 KASSERT((m->oflags & (VPO_SWAPINPROG | 2664 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2665 ("page %p has unexpected oflags", m)); 2666 /* Don't care: PGA_NOSYNC. */ 2667 run_ext = 1; 2668 } else 2669 run_ext = 0; 2670 VM_OBJECT_RUNLOCK(object); 2671 #if VM_NRESERVLEVEL > 0 2672 } else if (level >= 0) { 2673 /* 2674 * The page is reserved but not yet allocated. In 2675 * other words, it is still free. Extend the current 2676 * run by one page. 2677 */ 2678 run_ext = 1; 2679 #endif 2680 } else if ((order = m->order) < VM_NFREEORDER) { 2681 /* 2682 * The page is enqueued in the physical memory 2683 * allocator's free page queues. Moreover, it is the 2684 * first page in a power-of-two-sized run of 2685 * contiguous free pages. Add these pages to the end 2686 * of the current run, and jump ahead. 2687 */ 2688 run_ext = 1 << order; 2689 m_inc = 1 << order; 2690 } else { 2691 /* 2692 * Skip the page for one of the following reasons: (1) 2693 * It is enqueued in the physical memory allocator's 2694 * free page queues. However, it is not the first 2695 * page in a run of contiguous free pages. (This case 2696 * rarely occurs because the scan is performed in 2697 * ascending order.) (2) It is not reserved, and it is 2698 * transitioning from free to allocated. (Conversely, 2699 * the transition from allocated to free for managed 2700 * pages is blocked by the page busy lock.) (3) It is 2701 * allocated but not contained by an object and not 2702 * wired, e.g., allocated by Xen's balloon driver. 
2703 */ 2704 run_ext = 0; 2705 } 2706 2707 /* 2708 * Extend or reset the current run of pages. 2709 */ 2710 if (run_ext > 0) { 2711 if (run_len == 0) 2712 m_run = m; 2713 run_len += run_ext; 2714 } else { 2715 if (run_len > 0) { 2716 m_run = NULL; 2717 run_len = 0; 2718 } 2719 } 2720 } 2721 if (run_len >= npages) 2722 return (m_run); 2723 return (NULL); 2724 } 2725 2726 /* 2727 * vm_page_reclaim_run: 2728 * 2729 * Try to relocate each of the allocated virtual pages within the 2730 * specified run of physical pages to a new physical address. Free the 2731 * physical pages underlying the relocated virtual pages. A virtual page 2732 * is relocatable if and only if it could be laundered or reclaimed by 2733 * the page daemon. Whenever possible, a virtual page is relocated to a 2734 * physical address above "high". 2735 * 2736 * Returns 0 if every physical page within the run was already free or 2737 * just freed by a successful relocation. Otherwise, returns a non-zero 2738 * value indicating why the last attempt to relocate a virtual page was 2739 * unsuccessful. 2740 * 2741 * "req_class" must be an allocation class. 2742 */ 2743 static int 2744 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run, 2745 vm_paddr_t high) 2746 { 2747 struct vm_domain *vmd; 2748 struct spglist free; 2749 vm_object_t object; 2750 vm_paddr_t pa; 2751 vm_page_t m, m_end, m_new; 2752 int error, order, req; 2753 2754 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 2755 ("req_class is not an allocation class")); 2756 SLIST_INIT(&free); 2757 error = 0; 2758 m = m_run; 2759 m_end = m_run + npages; 2760 for (; error == 0 && m < m_end; m++) { 2761 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 2762 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 2763 2764 /* 2765 * Racily check for wirings. Races are handled once the object 2766 * lock is held and the page is unmapped. 2767 */ 2768 if (vm_page_wired(m)) 2769 error = EBUSY; 2770 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2771 /* 2772 * The page is relocated if and only if it could be 2773 * laundered or reclaimed by the page daemon. 2774 */ 2775 VM_OBJECT_WLOCK(object); 2776 /* Don't care: PG_NODUMP, PG_ZERO. */ 2777 if (m->object != object || 2778 (object->type != OBJT_DEFAULT && 2779 object->type != OBJT_SWAP && 2780 object->type != OBJT_VNODE)) 2781 error = EINVAL; 2782 else if (object->memattr != VM_MEMATTR_DEFAULT) 2783 error = EINVAL; 2784 else if (vm_page_queue(m) != PQ_NONE && 2785 vm_page_tryxbusy(m) != 0) { 2786 if (vm_page_wired(m)) { 2787 vm_page_xunbusy(m); 2788 error = EBUSY; 2789 goto unlock; 2790 } 2791 KASSERT(pmap_page_get_memattr(m) == 2792 VM_MEMATTR_DEFAULT, 2793 ("page %p has an unexpected memattr", m)); 2794 KASSERT(m->oflags == 0, 2795 ("page %p has unexpected oflags", m)); 2796 /* Don't care: PGA_NOSYNC. */ 2797 if (!vm_page_none_valid(m)) { 2798 /* 2799 * First, try to allocate a new page 2800 * that is above "high". Failing 2801 * that, try to allocate a new page 2802 * that is below "m_run". Allocate 2803 * the new page between the end of 2804 * "m_run" and "high" only as a last 2805 * resort. 
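* (Preferring a replacement page outside of the run, and ideally above 
* "high", keeps the relocated data from itself obstructing this or a later 
* reclamation of the same range.)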
2806 */ 2807 req = req_class | VM_ALLOC_NOOBJ; 2808 if ((m->flags & PG_NODUMP) != 0) 2809 req |= VM_ALLOC_NODUMP; 2810 if (trunc_page(high) != 2811 ~(vm_paddr_t)PAGE_MASK) { 2812 m_new = vm_page_alloc_contig( 2813 NULL, 0, req, 1, 2814 round_page(high), 2815 ~(vm_paddr_t)0, 2816 PAGE_SIZE, 0, 2817 VM_MEMATTR_DEFAULT); 2818 } else 2819 m_new = NULL; 2820 if (m_new == NULL) { 2821 pa = VM_PAGE_TO_PHYS(m_run); 2822 m_new = vm_page_alloc_contig( 2823 NULL, 0, req, 1, 2824 0, pa - 1, PAGE_SIZE, 0, 2825 VM_MEMATTR_DEFAULT); 2826 } 2827 if (m_new == NULL) { 2828 pa += ptoa(npages); 2829 m_new = vm_page_alloc_contig( 2830 NULL, 0, req, 1, 2831 pa, high, PAGE_SIZE, 0, 2832 VM_MEMATTR_DEFAULT); 2833 } 2834 if (m_new == NULL) { 2835 vm_page_xunbusy(m); 2836 error = ENOMEM; 2837 goto unlock; 2838 } 2839 2840 /* 2841 * Unmap the page and check for new 2842 * wirings that may have been acquired 2843 * through a pmap lookup. 2844 */ 2845 if (object->ref_count != 0 && 2846 !vm_page_try_remove_all(m)) { 2847 vm_page_xunbusy(m); 2848 vm_page_free(m_new); 2849 error = EBUSY; 2850 goto unlock; 2851 } 2852 2853 /* 2854 * Replace "m" with the new page. For 2855 * vm_page_replace(), "m" must be busy 2856 * and dequeued. Finally, change "m" 2857 * as if vm_page_free() was called. 2858 */ 2859 m_new->a.flags = m->a.flags & 2860 ~PGA_QUEUE_STATE_MASK; 2861 KASSERT(m_new->oflags == VPO_UNMANAGED, 2862 ("page %p is managed", m_new)); 2863 m_new->oflags = 0; 2864 pmap_copy_page(m, m_new); 2865 m_new->valid = m->valid; 2866 m_new->dirty = m->dirty; 2867 m->flags &= ~PG_ZERO; 2868 vm_page_dequeue(m); 2869 if (vm_page_replace_hold(m_new, object, 2870 m->pindex, m) && 2871 vm_page_free_prep(m)) 2872 SLIST_INSERT_HEAD(&free, m, 2873 plinks.s.ss); 2874 2875 /* 2876 * The new page must be deactivated 2877 * before the object is unlocked. 2878 */ 2879 vm_page_deactivate(m_new); 2880 } else { 2881 m->flags &= ~PG_ZERO; 2882 vm_page_dequeue(m); 2883 if (vm_page_free_prep(m)) 2884 SLIST_INSERT_HEAD(&free, m, 2885 plinks.s.ss); 2886 KASSERT(m->dirty == 0, 2887 ("page %p is dirty", m)); 2888 } 2889 } else 2890 error = EBUSY; 2891 unlock: 2892 VM_OBJECT_WUNLOCK(object); 2893 } else { 2894 MPASS(vm_phys_domain(m) == domain); 2895 vmd = VM_DOMAIN(domain); 2896 vm_domain_free_lock(vmd); 2897 order = m->order; 2898 if (order < VM_NFREEORDER) { 2899 /* 2900 * The page is enqueued in the physical memory 2901 * allocator's free page queues. Moreover, it 2902 * is the first page in a power-of-two-sized 2903 * run of contiguous free pages. Jump ahead 2904 * to the last page within that run, and 2905 * continue from there. 
2906 */ 2907 m += (1 << order) - 1; 2908 } 2909 #if VM_NRESERVLEVEL > 0 2910 else if (vm_reserv_is_page_free(m)) 2911 order = 0; 2912 #endif 2913 vm_domain_free_unlock(vmd); 2914 if (order == VM_NFREEORDER) 2915 error = EINVAL; 2916 } 2917 } 2918 if ((m = SLIST_FIRST(&free)) != NULL) { 2919 int cnt; 2920 2921 vmd = VM_DOMAIN(domain); 2922 cnt = 0; 2923 vm_domain_free_lock(vmd); 2924 do { 2925 MPASS(vm_phys_domain(m) == domain); 2926 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2927 vm_phys_free_pages(m, 0); 2928 cnt++; 2929 } while ((m = SLIST_FIRST(&free)) != NULL); 2930 vm_domain_free_unlock(vmd); 2931 vm_domain_freecnt_inc(vmd, cnt); 2932 } 2933 return (error); 2934 } 2935 2936 #define NRUNS 16 2937 2938 CTASSERT(powerof2(NRUNS)); 2939 2940 #define RUN_INDEX(count) ((count) & (NRUNS - 1)) 2941 2942 #define MIN_RECLAIM 8 2943 2944 /* 2945 * vm_page_reclaim_contig: 2946 * 2947 * Reclaim allocated, contiguous physical memory satisfying the specified 2948 * conditions by relocating the virtual pages using that physical memory. 2949 * Returns true if reclamation is successful and false otherwise. Since 2950 * relocation requires the allocation of physical pages, reclamation may 2951 * fail due to a shortage of free pages. When reclamation fails, callers 2952 * are expected to perform vm_wait() before retrying a failed allocation 2953 * operation, e.g., vm_page_alloc_contig(). 2954 * 2955 * The caller must always specify an allocation class through "req". 2956 * 2957 * allocation classes: 2958 * VM_ALLOC_NORMAL normal process request 2959 * VM_ALLOC_SYSTEM system *really* needs a page 2960 * VM_ALLOC_INTERRUPT interrupt time request 2961 * 2962 * The optional allocation flags are ignored. 2963 * 2964 * "npages" must be greater than zero. Both "alignment" and "boundary" 2965 * must be a power of two. 2966 */ 2967 bool 2968 vm_page_reclaim_contig_domain(int domain, int req, u_long npages, 2969 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 2970 { 2971 struct vm_domain *vmd; 2972 vm_paddr_t curr_low; 2973 vm_page_t m_run, m_runs[NRUNS]; 2974 u_long count, reclaimed; 2975 int error, i, options, req_class; 2976 2977 KASSERT(npages > 0, ("npages is 0")); 2978 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2979 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2980 req_class = req & VM_ALLOC_CLASS_MASK; 2981 2982 /* 2983 * The page daemon is allowed to dig deeper into the free page list. 2984 */ 2985 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2986 req_class = VM_ALLOC_SYSTEM; 2987 2988 /* 2989 * Return if the number of free pages cannot satisfy the requested 2990 * allocation. 2991 */ 2992 vmd = VM_DOMAIN(domain); 2993 count = vmd->vmd_free_count; 2994 if (count < npages + vmd->vmd_free_reserved || (count < npages + 2995 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 2996 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 2997 return (false); 2998 2999 /* 3000 * Scan up to three times, relaxing the restrictions ("options") on 3001 * the reclamation of reservations and superpages each time. 3002 */ 3003 for (options = VPSC_NORESERV;;) { 3004 /* 3005 * Find the highest runs that satisfy the given constraints 3006 * and restrictions, and record them in "m_runs". 
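* Note that "m_runs" is used as a ring buffer via RUN_INDEX(), so only the 
* NRUNS highest (most recently found) runs are retained from each scan.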
3007 */ 3008 curr_low = low; 3009 count = 0; 3010 for (;;) { 3011 m_run = vm_phys_scan_contig(domain, npages, curr_low, 3012 high, alignment, boundary, options); 3013 if (m_run == NULL) 3014 break; 3015 curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages); 3016 m_runs[RUN_INDEX(count)] = m_run; 3017 count++; 3018 } 3019 3020 /* 3021 * Reclaim the highest runs in LIFO (descending) order until 3022 * the number of reclaimed pages, "reclaimed", is at least 3023 * MIN_RECLAIM. Reset "reclaimed" each time because each 3024 * reclamation is idempotent, and runs will (likely) recur 3025 * from one scan to the next as restrictions are relaxed. 3026 */ 3027 reclaimed = 0; 3028 for (i = 0; count > 0 && i < NRUNS; i++) { 3029 count--; 3030 m_run = m_runs[RUN_INDEX(count)]; 3031 error = vm_page_reclaim_run(req_class, domain, npages, 3032 m_run, high); 3033 if (error == 0) { 3034 reclaimed += npages; 3035 if (reclaimed >= MIN_RECLAIM) 3036 return (true); 3037 } 3038 } 3039 3040 /* 3041 * Either relax the restrictions on the next scan or return if 3042 * the last scan had no restrictions. 3043 */ 3044 if (options == VPSC_NORESERV) 3045 options = VPSC_NOSUPER; 3046 else if (options == VPSC_NOSUPER) 3047 options = VPSC_ANY; 3048 else if (options == VPSC_ANY) 3049 return (reclaimed != 0); 3050 } 3051 } 3052 3053 bool 3054 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 3055 u_long alignment, vm_paddr_t boundary) 3056 { 3057 struct vm_domainset_iter di; 3058 int domain; 3059 bool ret; 3060 3061 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 3062 do { 3063 ret = vm_page_reclaim_contig_domain(domain, req, npages, low, 3064 high, alignment, boundary); 3065 if (ret) 3066 break; 3067 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 3068 3069 return (ret); 3070 } 3071 3072 /* 3073 * Set the domain in the appropriate page level domainset. 3074 */ 3075 void 3076 vm_domain_set(struct vm_domain *vmd) 3077 { 3078 3079 mtx_lock(&vm_domainset_lock); 3080 if (!vmd->vmd_minset && vm_paging_min(vmd)) { 3081 vmd->vmd_minset = 1; 3082 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); 3083 } 3084 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { 3085 vmd->vmd_severeset = 1; 3086 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); 3087 } 3088 mtx_unlock(&vm_domainset_lock); 3089 } 3090 3091 /* 3092 * Clear the domain from the appropriate page level domainset. 3093 */ 3094 void 3095 vm_domain_clear(struct vm_domain *vmd) 3096 { 3097 3098 mtx_lock(&vm_domainset_lock); 3099 if (vmd->vmd_minset && !vm_paging_min(vmd)) { 3100 vmd->vmd_minset = 0; 3101 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); 3102 if (vm_min_waiters != 0) { 3103 vm_min_waiters = 0; 3104 wakeup(&vm_min_domains); 3105 } 3106 } 3107 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { 3108 vmd->vmd_severeset = 0; 3109 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); 3110 if (vm_severe_waiters != 0) { 3111 vm_severe_waiters = 0; 3112 wakeup(&vm_severe_domains); 3113 } 3114 } 3115 3116 /* 3117 * If pageout daemon needs pages, then tell it that there are 3118 * some free. 3119 */ 3120 if (vmd->vmd_pageout_pages_needed && 3121 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { 3122 wakeup(&vmd->vmd_pageout_pages_needed); 3123 vmd->vmd_pageout_pages_needed = 0; 3124 } 3125 3126 /* See comments in vm_wait_doms(). 
*/ 3127 if (vm_pageproc_waiters) { 3128 vm_pageproc_waiters = 0; 3129 wakeup(&vm_pageproc_waiters); 3130 } 3131 mtx_unlock(&vm_domainset_lock); 3132 } 3133 3134 /* 3135 * Wait for free pages to exceed the min threshold globally. 3136 */ 3137 void 3138 vm_wait_min(void) 3139 { 3140 3141 mtx_lock(&vm_domainset_lock); 3142 while (vm_page_count_min()) { 3143 vm_min_waiters++; 3144 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); 3145 } 3146 mtx_unlock(&vm_domainset_lock); 3147 } 3148 3149 /* 3150 * Wait for free pages to exceed the severe threshold globally. 3151 */ 3152 void 3153 vm_wait_severe(void) 3154 { 3155 3156 mtx_lock(&vm_domainset_lock); 3157 while (vm_page_count_severe()) { 3158 vm_severe_waiters++; 3159 msleep(&vm_severe_domains, &vm_domainset_lock, PVM, 3160 "vmwait", 0); 3161 } 3162 mtx_unlock(&vm_domainset_lock); 3163 } 3164 3165 u_int 3166 vm_wait_count(void) 3167 { 3168 3169 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); 3170 } 3171 3172 int 3173 vm_wait_doms(const domainset_t *wdoms, int mflags) 3174 { 3175 int error; 3176 3177 error = 0; 3178 3179 /* 3180 * We use racy wakeup synchronization to avoid expensive global 3181 * locking for the pageproc when sleeping with a non-specific vm_wait. 3182 * To handle this, we only sleep for one tick in this instance. It 3183 * is expected that most allocations for the pageproc will come from 3184 * kmem or vm_page_grab* which will use the more specific and 3185 * race-free vm_wait_domain(). 3186 */ 3187 if (curproc == pageproc) { 3188 mtx_lock(&vm_domainset_lock); 3189 vm_pageproc_waiters++; 3190 error = msleep(&vm_pageproc_waiters, &vm_domainset_lock, 3191 PVM | PDROP | mflags, "pageprocwait", 1); 3192 } else { 3193 /* 3194 * XXX Ideally we would wait only until the allocation could 3195 * be satisfied. This condition can cause new allocators to 3196 * consume all freed pages while old allocators wait. 3197 */ 3198 mtx_lock(&vm_domainset_lock); 3199 if (vm_page_count_min_set(wdoms)) { 3200 vm_min_waiters++; 3201 error = msleep(&vm_min_domains, &vm_domainset_lock, 3202 PVM | PDROP | mflags, "vmwait", 0); 3203 } else 3204 mtx_unlock(&vm_domainset_lock); 3205 } 3206 return (error); 3207 } 3208 3209 /* 3210 * vm_wait_domain: 3211 * 3212 * Sleep until free pages are available for allocation. 3213 * - Called in various places after failed memory allocations. 3214 */ 3215 void 3216 vm_wait_domain(int domain) 3217 { 3218 struct vm_domain *vmd; 3219 domainset_t wdom; 3220 3221 vmd = VM_DOMAIN(domain); 3222 vm_domain_free_assert_unlocked(vmd); 3223 3224 if (curproc == pageproc) { 3225 mtx_lock(&vm_domainset_lock); 3226 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { 3227 vmd->vmd_pageout_pages_needed = 1; 3228 msleep(&vmd->vmd_pageout_pages_needed, 3229 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); 3230 } else 3231 mtx_unlock(&vm_domainset_lock); 3232 } else { 3233 if (pageproc == NULL) 3234 panic("vm_wait in early boot"); 3235 DOMAINSET_ZERO(&wdom); 3236 DOMAINSET_SET(vmd->vmd_domain, &wdom); 3237 vm_wait_doms(&wdom, 0); 3238 } 3239 } 3240 3241 static int 3242 vm_wait_flags(vm_object_t obj, int mflags) 3243 { 3244 struct domainset *d; 3245 3246 d = NULL; 3247 3248 /* 3249 * Carefully fetch pointers only once: the struct domainset 3250 * itself is immutable but the pointer might change.
3251 */ 3252 if (obj != NULL) 3253 d = obj->domain.dr_policy; 3254 if (d == NULL) 3255 d = curthread->td_domain.dr_policy; 3256 3257 return (vm_wait_doms(&d->ds_mask, mflags)); 3258 } 3259 3260 /* 3261 * vm_wait: 3262 * 3263 * Sleep until free pages are available for allocation in the 3264 * affinity domains of obj. If obj is NULL, the domain set 3265 * for the calling thread is used. 3266 * Called in various places after failed memory allocations. 3267 */ 3268 void 3269 vm_wait(vm_object_t obj) 3270 { 3271 (void)vm_wait_flags(obj, 0); 3272 } 3273 3274 int 3275 vm_wait_intr(vm_object_t obj) 3276 { 3277 return (vm_wait_flags(obj, PCATCH)); 3278 } 3279 3280 /* 3281 * vm_domain_alloc_fail: 3282 * 3283 * Called when a page allocation function fails. Informs the 3284 * pagedaemon and performs the requested wait. Requires the object 3285 * lock on entry; the domain free lock must not be held. Returns 3286 * with the object lock held. Returns an error when a retry is 3287 * necessary. 3288 * 3289 */ 3290 static int 3291 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) 3292 { 3293 3294 vm_domain_free_assert_unlocked(vmd); 3295 3296 atomic_add_int(&vmd->vmd_pageout_deficit, 3297 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 3298 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { 3299 if (object != NULL) 3300 VM_OBJECT_WUNLOCK(object); 3301 vm_wait_domain(vmd->vmd_domain); 3302 if (object != NULL) 3303 VM_OBJECT_WLOCK(object); 3304 if (req & VM_ALLOC_WAITOK) 3305 return (EAGAIN); 3306 } 3307 3308 return (0); 3309 } 3310 3311 /* 3312 * vm_waitpfault: 3313 * 3314 * Sleep until free pages are available for allocation. 3315 * - Called only in vm_fault so that processes page faulting 3316 * can be easily tracked. 3317 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 3318 * processes will be able to grab memory first. Do not change 3319 * this balance without careful testing first. 3320 */ 3321 void 3322 vm_waitpfault(struct domainset *dset, int timo) 3323 { 3324 3325 /* 3326 * XXX Ideally we would wait only until the allocation could 3327 * be satisfied. This condition can cause new allocators to 3328 * consume all freed pages while old allocators wait. 3329 */ 3330 mtx_lock(&vm_domainset_lock); 3331 if (vm_page_count_min_set(&dset->ds_mask)) { 3332 vm_min_waiters++; 3333 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP, 3334 "pfault", timo); 3335 } else 3336 mtx_unlock(&vm_domainset_lock); 3337 } 3338 3339 static struct vm_pagequeue * 3340 _vm_page_pagequeue(vm_page_t m, uint8_t queue) 3341 { 3342 3343 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]); 3344 } 3345 3346 #ifdef INVARIANTS 3347 static struct vm_pagequeue * 3348 vm_page_pagequeue(vm_page_t m) 3349 { 3350 3351 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue)); 3352 } 3353 #endif 3354 3355 static __always_inline bool 3356 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3357 { 3358 vm_page_astate_t tmp; 3359 3360 tmp = *old; 3361 do { 3362 if (__predict_true(vm_page_astate_fcmpset(m, old, new))) 3363 return (true); 3364 counter_u64_add(pqstate_commit_retries, 1); 3365 } while (old->_bits == tmp._bits); 3366 3367 return (false); 3368 } 3369 3370 /* 3371 * Do the work of committing a queue state update that moves the page out of 3372 * its current queue.
3373 */ 3374 static bool 3375 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m, 3376 vm_page_astate_t *old, vm_page_astate_t new) 3377 { 3378 vm_page_t next; 3379 3380 vm_pagequeue_assert_locked(pq); 3381 KASSERT(vm_page_pagequeue(m) == pq, 3382 ("%s: queue %p does not match page %p", __func__, pq, m)); 3383 KASSERT(old->queue != PQ_NONE && new.queue != old->queue, 3384 ("%s: invalid queue indices %d %d", 3385 __func__, old->queue, new.queue)); 3386 3387 /* 3388 * Once the queue index of the page changes there is nothing 3389 * synchronizing with further updates to the page's physical 3390 * queue state. Therefore we must speculatively remove the page 3391 * from the queue now and be prepared to roll back if the queue 3392 * state update fails. If the page is not physically enqueued then 3393 * we just update its queue index. 3394 */ 3395 if ((old->flags & PGA_ENQUEUED) != 0) { 3396 new.flags &= ~PGA_ENQUEUED; 3397 next = TAILQ_NEXT(m, plinks.q); 3398 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3399 vm_pagequeue_cnt_dec(pq); 3400 if (!vm_page_pqstate_fcmpset(m, old, new)) { 3401 if (next == NULL) 3402 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3403 else 3404 TAILQ_INSERT_BEFORE(next, m, plinks.q); 3405 vm_pagequeue_cnt_inc(pq); 3406 return (false); 3407 } else { 3408 return (true); 3409 } 3410 } else { 3411 return (vm_page_pqstate_fcmpset(m, old, new)); 3412 } 3413 } 3414 3415 static bool 3416 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old, 3417 vm_page_astate_t new) 3418 { 3419 struct vm_pagequeue *pq; 3420 vm_page_astate_t as; 3421 bool ret; 3422 3423 pq = _vm_page_pagequeue(m, old->queue); 3424 3425 /* 3426 * The queue field and PGA_ENQUEUED flag are stable only so long as the 3427 * corresponding page queue lock is held. 3428 */ 3429 vm_pagequeue_lock(pq); 3430 as = vm_page_astate_load(m); 3431 if (__predict_false(as._bits != old->_bits)) { 3432 *old = as; 3433 ret = false; 3434 } else { 3435 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new); 3436 } 3437 vm_pagequeue_unlock(pq); 3438 return (ret); 3439 } 3440 3441 /* 3442 * Commit a queue state update that enqueues or requeues a page. 3443 */ 3444 static bool 3445 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m, 3446 vm_page_astate_t *old, vm_page_astate_t new) 3447 { 3448 struct vm_domain *vmd; 3449 3450 vm_pagequeue_assert_locked(pq); 3451 KASSERT(old->queue != PQ_NONE && new.queue == old->queue, 3452 ("%s: invalid queue indices %d %d", 3453 __func__, old->queue, new.queue)); 3454 3455 new.flags |= PGA_ENQUEUED; 3456 if (!vm_page_pqstate_fcmpset(m, old, new)) 3457 return (false); 3458 3459 if ((old->flags & PGA_ENQUEUED) != 0) 3460 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3461 else 3462 vm_pagequeue_cnt_inc(pq); 3463 3464 /* 3465 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE. In particular, if 3466 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be 3467 * applied, even if it was set first. 3468 */ 3469 if ((old->flags & PGA_REQUEUE_HEAD) != 0) { 3470 vmd = vm_pagequeue_domain(m); 3471 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE], 3472 ("%s: invalid page queue for page %p", __func__, m)); 3473 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 3474 } else { 3475 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3476 } 3477 return (true); 3478 } 3479 3480 /* 3481 * Commit a queue state update that encodes a request for a deferred queue 3482 * operation. 
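* For example, vm_page_dequeue_deferred() below uses this path to publish a 
* PGA_DEQUEUE request and hand the page to the batch queues.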
3483 */ 3484 static bool 3485 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old, 3486 vm_page_astate_t new) 3487 { 3488 3489 KASSERT(old->queue == new.queue || new.queue != PQ_NONE, 3490 ("%s: invalid state, queue %d flags %x", 3491 __func__, new.queue, new.flags)); 3492 3493 if (old->_bits != new._bits && 3494 !vm_page_pqstate_fcmpset(m, old, new)) 3495 return (false); 3496 vm_page_pqbatch_submit(m, new.queue); 3497 return (true); 3498 } 3499 3500 /* 3501 * A generic queue state update function. This handles more cases than the 3502 * specialized functions above. 3503 */ 3504 bool 3505 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3506 { 3507 3508 if (old->_bits == new._bits) 3509 return (true); 3510 3511 if (old->queue != PQ_NONE && new.queue != old->queue) { 3512 if (!vm_page_pqstate_commit_dequeue(m, old, new)) 3513 return (false); 3514 if (new.queue != PQ_NONE) 3515 vm_page_pqbatch_submit(m, new.queue); 3516 } else { 3517 if (!vm_page_pqstate_fcmpset(m, old, new)) 3518 return (false); 3519 if (new.queue != PQ_NONE && 3520 ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0) 3521 vm_page_pqbatch_submit(m, new.queue); 3522 } 3523 return (true); 3524 } 3525 3526 /* 3527 * Apply deferred queue state updates to a page. 3528 */ 3529 static inline void 3530 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue) 3531 { 3532 vm_page_astate_t new, old; 3533 3534 CRITICAL_ASSERT(curthread); 3535 vm_pagequeue_assert_locked(pq); 3536 KASSERT(queue < PQ_COUNT, 3537 ("%s: invalid queue index %d", __func__, queue)); 3538 KASSERT(pq == _vm_page_pagequeue(m, queue), 3539 ("%s: page %p does not belong to queue %p", __func__, m, pq)); 3540 3541 for (old = vm_page_astate_load(m);;) { 3542 if (__predict_false(old.queue != queue || 3543 (old.flags & PGA_QUEUE_OP_MASK) == 0)) { 3544 counter_u64_add(queue_nops, 1); 3545 break; 3546 } 3547 KASSERT(old.queue != PQ_NONE || (old.flags & PGA_QUEUE_STATE_MASK) == 0, 3548 ("%s: page %p has unexpected queue state", __func__, m)); 3549 3550 new = old; 3551 if ((old.flags & PGA_DEQUEUE) != 0) { 3552 new.flags &= ~PGA_QUEUE_OP_MASK; 3553 new.queue = PQ_NONE; 3554 if (__predict_true(_vm_page_pqstate_commit_dequeue(pq, 3555 m, &old, new))) { 3556 counter_u64_add(queue_ops, 1); 3557 break; 3558 } 3559 } else { 3560 new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD); 3561 if (__predict_true(_vm_page_pqstate_commit_requeue(pq, 3562 m, &old, new))) { 3563 counter_u64_add(queue_ops, 1); 3564 break; 3565 } 3566 } 3567 } 3568 } 3569 3570 static void 3571 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq, 3572 uint8_t queue) 3573 { 3574 int i; 3575 3576 for (i = 0; i < bq->bq_cnt; i++) 3577 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue); 3578 vm_batchqueue_init(bq); 3579 } 3580 3581 /* 3582 * vm_page_pqbatch_submit: [ internal use only ] 3583 * 3584 * Enqueue a page in the specified page queue's batched work queue. 3585 * The caller must have encoded the requested operation in the page 3586 * structure's a.flags field. 
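* 
* An illustrative caller pattern, mirroring vm_page_enqueue() below: 
* 
*	m->a.queue = queue; 
*	vm_page_aflag_set(m, PGA_REQUEUE); 
*	vm_page_pqbatch_submit(m, queue);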
3587 */ 3588 void 3589 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue) 3590 { 3591 struct vm_batchqueue *bq; 3592 struct vm_pagequeue *pq; 3593 int domain; 3594 3595 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3596 ("page %p is unmanaged", m)); 3597 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); 3598 3599 domain = vm_phys_domain(m); 3600 pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue]; 3601 3602 critical_enter(); 3603 bq = DPCPU_PTR(pqbatch[domain][queue]); 3604 if (vm_batchqueue_insert(bq, m)) { 3605 critical_exit(); 3606 return; 3607 } 3608 critical_exit(); 3609 vm_pagequeue_lock(pq); 3610 critical_enter(); 3611 bq = DPCPU_PTR(pqbatch[domain][queue]); 3612 vm_pqbatch_process(pq, bq, queue); 3613 vm_pqbatch_process_page(pq, m, queue); 3614 vm_pagequeue_unlock(pq); 3615 critical_exit(); 3616 } 3617 3618 /* 3619 * vm_page_pqbatch_drain: [ internal use only ] 3620 * 3621 * Force all per-CPU page queue batch queues to be drained. This is 3622 * intended for use in severe memory shortages, to ensure that pages 3623 * do not remain stuck in the batch queues. 3624 */ 3625 void 3626 vm_page_pqbatch_drain(void) 3627 { 3628 struct thread *td; 3629 struct vm_domain *vmd; 3630 struct vm_pagequeue *pq; 3631 int cpu, domain, queue; 3632 3633 td = curthread; 3634 CPU_FOREACH(cpu) { 3635 thread_lock(td); 3636 sched_bind(td, cpu); 3637 thread_unlock(td); 3638 3639 for (domain = 0; domain < vm_ndomains; domain++) { 3640 vmd = VM_DOMAIN(domain); 3641 for (queue = 0; queue < PQ_COUNT; queue++) { 3642 pq = &vmd->vmd_pagequeues[queue]; 3643 vm_pagequeue_lock(pq); 3644 critical_enter(); 3645 vm_pqbatch_process(pq, 3646 DPCPU_PTR(pqbatch[domain][queue]), queue); 3647 critical_exit(); 3648 vm_pagequeue_unlock(pq); 3649 } 3650 } 3651 } 3652 thread_lock(td); 3653 sched_unbind(td); 3654 thread_unlock(td); 3655 } 3656 3657 /* 3658 * vm_page_dequeue_deferred: [ internal use only ] 3659 * 3660 * Request removal of the given page from its current page 3661 * queue. Physical removal from the queue may be deferred 3662 * indefinitely. 3663 */ 3664 void 3665 vm_page_dequeue_deferred(vm_page_t m) 3666 { 3667 vm_page_astate_t new, old; 3668 3669 old = vm_page_astate_load(m); 3670 do { 3671 if (old.queue == PQ_NONE) { 3672 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 3673 ("%s: page %p has unexpected queue state", 3674 __func__, m)); 3675 break; 3676 } 3677 new = old; 3678 new.flags |= PGA_DEQUEUE; 3679 } while (!vm_page_pqstate_commit_request(m, &old, new)); 3680 } 3681 3682 /* 3683 * vm_page_dequeue: 3684 * 3685 * Remove the page from whichever page queue it's in, if any, before 3686 * returning. 3687 */ 3688 void 3689 vm_page_dequeue(vm_page_t m) 3690 { 3691 vm_page_astate_t new, old; 3692 3693 old = vm_page_astate_load(m); 3694 do { 3695 if (old.queue == PQ_NONE) { 3696 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 3697 ("%s: page %p has unexpected queue state", 3698 __func__, m)); 3699 break; 3700 } 3701 new = old; 3702 new.flags &= ~PGA_QUEUE_OP_MASK; 3703 new.queue = PQ_NONE; 3704 } while (!vm_page_pqstate_commit_dequeue(m, &old, new)); 3705 3706 } 3707 3708 /* 3709 * Schedule the given page for insertion into the specified page queue. 3710 * Physical insertion of the page may be deferred indefinitely. 
3711 */ 3712 static void 3713 vm_page_enqueue(vm_page_t m, uint8_t queue) 3714 { 3715 3716 KASSERT(m->a.queue == PQ_NONE && 3717 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 3718 ("%s: page %p is already enqueued", __func__, m)); 3719 KASSERT(m->ref_count > 0, 3720 ("%s: page %p does not carry any references", __func__, m)); 3721 3722 m->a.queue = queue; 3723 if ((m->a.flags & PGA_REQUEUE) == 0) 3724 vm_page_aflag_set(m, PGA_REQUEUE); 3725 vm_page_pqbatch_submit(m, queue); 3726 } 3727 3728 /* 3729 * vm_page_free_prep: 3730 * 3731 * Prepares the given page to be put on the free list, 3732 * disassociating it from any VM object. The caller may return 3733 * the page to the free list only if this function returns true. 3734 * 3735 * The object, if it exists, must be locked, and then the page must 3736 * be xbusy. Otherwise the page must not be busied. A managed 3737 * page must be unmapped. 3738 */ 3739 static bool 3740 vm_page_free_prep(vm_page_t m) 3741 { 3742 3743 /* 3744 * Synchronize with threads that have dropped a reference to this 3745 * page. 3746 */ 3747 atomic_thread_fence_acq(); 3748 3749 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) 3750 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) { 3751 uint64_t *p; 3752 int i; 3753 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 3754 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) 3755 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", 3756 m, i, (uintmax_t)*p)); 3757 } 3758 #endif 3759 if ((m->oflags & VPO_UNMANAGED) == 0) { 3760 KASSERT(!pmap_page_is_mapped(m), 3761 ("vm_page_free_prep: freeing mapped page %p", m)); 3762 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0, 3763 ("vm_page_free_prep: mapping flags set in page %p", m)); 3764 } else { 3765 KASSERT(m->a.queue == PQ_NONE, 3766 ("vm_page_free_prep: unmanaged page %p is queued", m)); 3767 } 3768 VM_CNT_INC(v_tfree); 3769 3770 if (m->object != NULL) { 3771 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) == 3772 ((m->object->flags & OBJ_UNMANAGED) != 0), 3773 ("vm_page_free_prep: managed flag mismatch for page %p", 3774 m)); 3775 vm_page_assert_xbusied(m); 3776 3777 /* 3778 * The object reference can be released without an atomic 3779 * operation. 3780 */ 3781 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 3782 m->ref_count == VPRC_OBJREF, 3783 ("vm_page_free_prep: page %p has unexpected ref_count %u", 3784 m, m->ref_count)); 3785 vm_page_object_remove(m); 3786 m->ref_count -= VPRC_OBJREF; 3787 } else 3788 vm_page_assert_unbusied(m); 3789 3790 vm_page_busy_free(m); 3791 3792 /* 3793 * A fictitious page is never placed in the free lists; its object 3794 * association, if any, was dropped above, so we are done. 3795 */ 3796 if ((m->flags & PG_FICTITIOUS) != 0) { 3797 KASSERT(m->ref_count == 1, 3798 ("fictitious page %p is referenced", m)); 3799 KASSERT(m->a.queue == PQ_NONE, 3800 ("fictitious page %p is queued", m)); 3801 return (false); 3802 } 3803 3804 /* 3805 * Pages need not be dequeued before they are returned to the physical 3806 * memory allocator, but they must at least be marked for a deferred 3807 * dequeue. 3808 */ 3809 if ((m->oflags & VPO_UNMANAGED) == 0) 3810 vm_page_dequeue_deferred(m); 3811 3812 m->valid = 0; 3813 vm_page_undirty(m); 3814 3815 if (m->ref_count != 0) 3816 panic("vm_page_free_prep: page %p has references", m); 3817 3818 /* 3819 * Restore the default memory attribute to the page. 3820 */ 3821 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 3822 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 3823 3824 #if VM_NRESERVLEVEL > 0 3825 /* 3826 * Determine whether the page belongs to a reservation.
If the page was 3827 * allocated from a per-CPU cache, it cannot belong to a reservation, so 3828 * as an optimization, we avoid the check in that case. 3829 */ 3830 if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m)) 3831 return (false); 3832 #endif 3833 3834 return (true); 3835 } 3836 3837 /* 3838 * vm_page_free_toq: 3839 * 3840 * Returns the given page to the free list, disassociating it 3841 * from any VM object. 3842 * 3843 * The object must be locked. The page must be exclusively busied if it 3844 * belongs to an object. 3845 */ 3846 static void 3847 vm_page_free_toq(vm_page_t m) 3848 { 3849 struct vm_domain *vmd; 3850 uma_zone_t zone; 3851 3852 if (!vm_page_free_prep(m)) 3853 return; 3854 3855 vmd = vm_pagequeue_domain(m); 3856 zone = vmd->vmd_pgcache[m->pool].zone; 3857 if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) { 3858 uma_zfree(zone, m); 3859 return; 3860 } 3861 vm_domain_free_lock(vmd); 3862 vm_phys_free_pages(m, 0); 3863 vm_domain_free_unlock(vmd); 3864 vm_domain_freecnt_inc(vmd, 1); 3865 } 3866 3867 /* 3868 * vm_page_free_pages_toq: 3869 * 3870 * Returns a list of pages to the free list, disassociating them 3871 * from any VM object. In other words, this is equivalent to 3872 * calling vm_page_free_toq() for each page in the list. 3873 */ 3874 void 3875 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count) 3876 { 3877 vm_page_t m; 3878 int count; 3879 3880 if (SLIST_EMPTY(free)) 3881 return; 3882 3883 count = 0; 3884 while ((m = SLIST_FIRST(free)) != NULL) { 3885 count++; 3886 SLIST_REMOVE_HEAD(free, plinks.s.ss); 3887 vm_page_free_toq(m); 3888 } 3889 3890 if (update_wire_count) 3891 vm_wire_sub(count); 3892 } 3893 3894 /* 3895 * Mark this page as wired down. For managed pages, this prevents the page 3896 * from being reclaimed by the page daemon or freed when the containing object, if any, is destroyed. 3897 */ 3898 void 3899 vm_page_wire(vm_page_t m) 3900 { 3901 u_int old; 3902 3903 #ifdef INVARIANTS 3904 if (m->object != NULL && !vm_page_busied(m) && 3905 !vm_object_busied(m->object)) 3906 VM_OBJECT_ASSERT_LOCKED(m->object); 3907 #endif 3908 KASSERT((m->flags & PG_FICTITIOUS) == 0 || 3909 VPRC_WIRE_COUNT(m->ref_count) >= 1, 3910 ("vm_page_wire: fictitious page %p has zero wirings", m)); 3911 3912 old = atomic_fetchadd_int(&m->ref_count, 1); 3913 KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX, 3914 ("vm_page_wire: counter overflow for page %p", m)); 3915 if (VPRC_WIRE_COUNT(old) == 0) { 3916 if ((m->oflags & VPO_UNMANAGED) == 0) 3917 vm_page_aflag_set(m, PGA_DEQUEUE); 3918 vm_wire_add(1); 3919 } 3920 } 3921 3922 /* 3923 * Attempt to wire a mapped page following a pmap lookup of that page. 3924 * This may fail if a thread is concurrently tearing down mappings of the page. 3925 * The transient failure is acceptable because it translates to a 3926 * failure of the caller, typically pmap_extract_and_hold(), which is then 3927 * followed by the vm_fault() fallback; see e.g. vm_fault_quick_hold_pages().
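* 
* A minimal, hypothetical sketch of the intended use from a pmap, where 
* "pa" is an assumed physical address taken from a page table entry: 
* 
*	m = PHYS_TO_VM_PAGE(pa); 
*	if (!vm_page_wire_mapped(m)) 
*		return (NULL);	(mappings being torn down; the caller retries)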
/*
 * Mark this page as wired down.  For managed pages, this prevents reclamation
 * by the page daemon or freeing when the containing object, if any, is
 * destroyed.
 */
void
vm_page_wire(vm_page_t m)
{
	u_int old;

#ifdef INVARIANTS
	if (m->object != NULL && !vm_page_busied(m) &&
	    !vm_object_busied(m->object))
		VM_OBJECT_ASSERT_LOCKED(m->object);
#endif
	KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
	    VPRC_WIRE_COUNT(m->ref_count) >= 1,
	    ("vm_page_wire: fictitious page %p has zero wirings", m));

	old = atomic_fetchadd_int(&m->ref_count, 1);
	KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
	    ("vm_page_wire: counter overflow for page %p", m));
	if (VPRC_WIRE_COUNT(old) == 0) {
		if ((m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_DEQUEUE);
		vm_wire_add(1);
	}
}

/*
 * Attempt to wire a mapped page following a pmap lookup of that page.
 * This may fail if a thread is concurrently tearing down mappings of the page.
 * The transient failure is acceptable because it translates to the failure of
 * the caller, pmap_extract_and_hold(), which should then be followed by the
 * vm_fault() fallback, see e.g. vm_fault_quick_hold_pages().
 */
bool
vm_page_wire_mapped(vm_page_t m)
{
	u_int old;

	old = m->ref_count;
	do {
		KASSERT(old > 0,
		    ("vm_page_wire_mapped: wiring unreferenced page %p", m));
		if ((old & VPRC_BLOCKED) != 0)
			return (false);
	} while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));

	if (VPRC_WIRE_COUNT(old) == 0) {
		if ((m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_DEQUEUE);
		vm_wire_add(1);
	}
	return (true);
}

/*
 * Release a wiring reference to a managed page.  If the page still belongs to
 * an object, update its position in the page queues to reflect the reference.
 * If the wiring was the last reference to the page, free the page.
 */
static void
vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
{
	u_int old;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("%s: page %p is unmanaged", __func__, m));

	/*
	 * Update LRU state before releasing the wiring reference.
	 * Use a release store when updating the reference count to
	 * synchronize with vm_page_free_prep().
	 */
	old = m->ref_count;
	do {
		KASSERT(VPRC_WIRE_COUNT(old) > 0,
		    ("vm_page_unwire: wire count underflow for page %p", m));

		if (old > VPRC_OBJREF + 1) {
			/*
			 * The page has at least one other wiring reference.  An
			 * earlier iteration of this loop may have called
			 * vm_page_release_toq() and cleared PGA_DEQUEUE, so
			 * re-set it if necessary.
			 */
			if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
				vm_page_aflag_set(m, PGA_DEQUEUE);
		} else if (old == VPRC_OBJREF + 1) {
			/*
			 * This is the last wiring.  Clear PGA_DEQUEUE and
			 * update the page's queue state to reflect the
			 * reference.  If the page does not belong to an object
			 * (i.e., the VPRC_OBJREF bit is clear), we only need to
			 * clear leftover queue state.
			 */
			vm_page_release_toq(m, nqueue, noreuse);
		} else if (old == 1) {
			vm_page_aflag_clear(m, PGA_DEQUEUE);
		}
	} while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));

	if (VPRC_WIRE_COUNT(old) == 1) {
		vm_wire_sub(1);
		if (old == 1)
			vm_page_free(m);
	}
}

/*
 * Release one wiring of the specified page, potentially allowing it to be
 * paged out.
 *
 * Only managed pages belonging to an object can be paged out.  If the number
 * of wirings transitions to zero and the page is eligible for page out, then
 * the page is added to the specified paging queue.  If the released wiring
 * represented the last reference to the page, the page is freed.
 */
void
vm_page_unwire(vm_page_t m, uint8_t nqueue)
{

	KASSERT(nqueue < PQ_COUNT,
	    ("vm_page_unwire: invalid queue %u request for page %p",
	    nqueue, m));

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		if (vm_page_unwire_noq(m) && m->ref_count == 0)
			vm_page_free(m);
		return;
	}
	vm_page_unwire_managed(m, nqueue, false);
}
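#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch of the usual transient-hold pattern: wire the page
 * so that it survives reclamation while the caller operates on it without
 * holding it busy, then drop the wiring, letting the page settle on the
 * inactive queue.  The guard macro and "example_hold_across_op" are
 * hypothetical, shown only to illustrate vm_page_wire()/vm_page_unwire().
 */
static void
example_hold_across_op(vm_page_t m, void (*op)(vm_page_t))
{

	vm_page_wire(m);
	(*op)(m);
	vm_page_unwire(m, PQ_INACTIVE);
}
#endif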
/*
 * Unwire a page without (re-)inserting it into a page queue.  It is up
 * to the caller to enqueue, requeue, or free the page as appropriate.
 * In most cases involving managed pages, vm_page_unwire() should be used
 * instead.
 */
bool
vm_page_unwire_noq(vm_page_t m)
{
	u_int old;

	old = vm_page_drop(m, 1);
	KASSERT(VPRC_WIRE_COUNT(old) != 0,
	    ("vm_page_unref: counter underflow for page %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
	    ("vm_page_unref: missing ref on fictitious page %p", m));

	if (VPRC_WIRE_COUNT(old) > 1)
		return (false);
	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_aflag_clear(m, PGA_DEQUEUE);
	vm_wire_sub(1);
	return (true);
}

/*
 * Ensure that the page ends up in the specified page queue.  If the page is
 * active or being moved to the active queue, ensure that its act_count is
 * at least ACT_INIT but do not otherwise mess with it.
 */
static __always_inline void
vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
{
	vm_page_astate_t old, new;

	KASSERT(m->ref_count > 0,
	    ("%s: page %p does not carry any references", __func__, m));
	KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
	    ("%s: invalid flags %x", __func__, nflag));

	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
		return;

	old = vm_page_astate_load(m);
	do {
		if ((old.flags & PGA_DEQUEUE) != 0)
			break;
		new = old;
		new.flags &= ~PGA_QUEUE_OP_MASK;
		if (nqueue == PQ_ACTIVE)
			new.act_count = max(old.act_count, ACT_INIT);
		if (old.queue == nqueue) {
			if (nqueue != PQ_ACTIVE)
				new.flags |= nflag;
		} else {
			new.flags |= nflag;
			new.queue = nqueue;
		}
	} while (!vm_page_pqstate_commit(m, &old, new));
}

/*
 * Put the specified page on the active list (if appropriate).
 */
void
vm_page_activate(vm_page_t m)
{

	vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
}

/*
 * Move the specified page to the tail of the inactive queue, or requeue
 * the page if it is already in the inactive queue.
 */
void
vm_page_deactivate(vm_page_t m)
{

	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
}

void
vm_page_deactivate_noreuse(vm_page_t m)
{

	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
}

/*
 * Put a page in the laundry, or requeue it if it is already there.
 */
void
vm_page_launder(vm_page_t m)
{

	vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
}

/*
 * Put a page in the PQ_UNSWAPPABLE holding queue.
 */
void
vm_page_unswappable(vm_page_t m)
{

	KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0,
	    ("page %p already unswappable", m));

	vm_page_dequeue(m);
	vm_page_enqueue(m, PQ_UNSWAPPABLE);
}
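#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch showing how the queue-movement wrappers above are
 * typically combined: clean pages go to the inactive queue, while dirty
 * pages go to the laundry so that they are written before reclamation.
 * The guard macro and "example_retire_page" are hypothetical; the policy
 * mirrors the one used by vm_page_advise() below.
 */
static void
example_retire_page(vm_page_t m)
{

	if (m->dirty == 0)
		vm_page_deactivate(m);
	else if (!vm_page_in_laundry(m))
		vm_page_launder(m);
}
#endif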
/*
 * Release a page back to the page queues in preparation for unwiring.
 */
static void
vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
{
	vm_page_astate_t old, new;
	uint16_t nflag;

	/*
	 * Use a check of the valid bits to determine whether we should
	 * accelerate reclamation of the page.  The object lock might not be
	 * held here, in which case the check is racy.  At worst we will either
	 * accelerate reclamation of a valid page and violate LRU, or
	 * unnecessarily defer reclamation of an invalid page.
	 *
	 * If we were asked to not cache the page, place it near the head of
	 * the inactive queue so that it is reclaimed sooner.
	 */
	if (noreuse || m->valid == 0) {
		nqueue = PQ_INACTIVE;
		nflag = PGA_REQUEUE_HEAD;
	} else {
		nflag = PGA_REQUEUE;
	}

	old = vm_page_astate_load(m);
	do {
		new = old;

		/*
		 * If the page is already in the active queue and we are not
		 * trying to accelerate reclamation, simply mark it as
		 * referenced and avoid any queue operations.
		 */
		new.flags &= ~PGA_QUEUE_OP_MASK;
		if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE)
			new.flags |= PGA_REFERENCED;
		else {
			new.flags |= nflag;
			new.queue = nqueue;
		}
	} while (!vm_page_pqstate_commit(m, &old, new));
}

/*
 * Unwire a page and either attempt to free it or re-add it to the page queues.
 */
void
vm_page_release(vm_page_t m, int flags)
{
	vm_object_t object;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_release: page %p is unmanaged", m));

	if ((flags & VPR_TRYFREE) != 0) {
		for (;;) {
			object = atomic_load_ptr(&m->object);
			if (object == NULL)
				break;
			/* Depends on type-stability. */
			if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
				break;
			if (object == m->object) {
				vm_page_release_locked(m, flags);
				VM_OBJECT_WUNLOCK(object);
				return;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
}

/* See vm_page_release(). */
void
vm_page_release_locked(vm_page_t m, int flags)
{

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_release_locked: page %p is unmanaged", m));

	if (vm_page_unwire_noq(m)) {
		if ((flags & VPR_TRYFREE) != 0 &&
		    (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
		    m->dirty == 0 && vm_page_tryxbusy(m)) {
			/*
			 * An unlocked lookup may have wired the page before the
			 * busy lock was acquired, in which case the page must
			 * not be freed.
			 */
			if (__predict_true(!vm_page_wired(m))) {
				vm_page_free(m);
				return;
			}
			vm_page_xunbusy(m);
		} else {
			vm_page_release_toq(m, PQ_INACTIVE, flags != 0);
		}
	}
}
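#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch of dropping a transient wiring once I/O completes:
 * VPR_TRYFREE asks vm_page_release() to free the page immediately when it
 * is clean and otherwise unreferenced, instead of cycling it through the
 * inactive queue.  The guard macro and "example_io_done" are hypothetical.
 */
static void
example_io_done(vm_page_t m)
{

	vm_page_release(m, VPR_TRYFREE);
}
#endif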
static bool
vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
{
	u_int old;

	KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_try_blocked_op: page %p has no object", m));
	KASSERT(vm_page_busied(m),
	    ("vm_page_try_blocked_op: page %p is not busy", m));
	VM_OBJECT_ASSERT_LOCKED(m->object);

	old = m->ref_count;
	do {
		KASSERT(old != 0,
		    ("vm_page_try_blocked_op: page %p has no references", m));
		if (VPRC_WIRE_COUNT(old) != 0)
			return (false);
	} while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));

	(op)(m);

	/*
	 * If the object is read-locked, new wirings may be created via an
	 * object lookup.
	 */
	old = vm_page_drop(m, VPRC_BLOCKED);
	KASSERT(!VM_OBJECT_WOWNED(m->object) ||
	    old == (VPRC_BLOCKED | VPRC_OBJREF),
	    ("vm_page_try_blocked_op: unexpected refcount value %u for %p",
	    old, m));
	return (true);
}

/*
 * Atomically check for wirings and remove all mappings of the page.
 */
bool
vm_page_try_remove_all(vm_page_t m)
{

	return (vm_page_try_blocked_op(m, pmap_remove_all));
}

/*
 * Atomically check for wirings and remove all writeable mappings of the page.
 */
bool
vm_page_try_remove_write(vm_page_t m)
{

	return (vm_page_try_blocked_op(m, pmap_remove_write));
}

/*
 *	vm_page_advise
 *
 *	Apply the specified advice to the given page.
 */
void
vm_page_advise(vm_page_t m, int advice)
{

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	vm_page_assert_xbusied(m);

	if (advice == MADV_FREE)
		/*
		 * Mark the page clean.  This will allow the page to be freed
		 * without first paging it out.  MADV_FREE pages are often
		 * quickly reused by malloc(3), so we do not do anything that
		 * would result in a page fault on a later access.
		 */
		vm_page_undirty(m);
	else if (advice != MADV_DONTNEED) {
		if (advice == MADV_WILLNEED)
			vm_page_activate(m);
		return;
	}

	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
		vm_page_dirty(m);

	/*
	 * Clear any references to the page.  Otherwise, the page daemon will
	 * immediately reactivate the page.
	 */
	vm_page_aflag_clear(m, PGA_REFERENCED);

	/*
	 * Place clean pages near the head of the inactive queue rather than
	 * the tail, thus defeating the queue's LRU operation and ensuring that
	 * the page will be reused quickly.  Dirty pages not already in the
	 * laundry are moved there.
	 */
	if (m->dirty == 0)
		vm_page_deactivate_noreuse(m);
	else if (!vm_page_in_laundry(m))
		vm_page_launder(m);
}

/*
 *	vm_page_grab_release
 *
 *	Helper routine for grab functions to release busy on return.
 */
static inline void
vm_page_grab_release(vm_page_t m, int allocflags)
{

	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
			vm_page_sunbusy(m);
		else
			vm_page_xunbusy(m);
	}
}

/*
 *	vm_page_grab_sleep
 *
 *	Sleep for busy according to VM_ALLOC_ parameters.  Returns true
 *	if the caller should retry and false otherwise.
 *
 *	If the object is locked on entry, the object will be unlocked when
 *	this function returns false; when it returns true, the object is
 *	still locked, though the lock may have been dropped and reacquired
 *	in the meantime.
 */
static bool
vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)
{

	if ((allocflags & VM_ALLOC_NOWAIT) != 0)
		return (false);

	/*
	 * Reference the page before unlocking and sleeping so that
	 * the page daemon is less likely to reclaim it.
	 */
	if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
		vm_page_reference(m);

	if (_vm_page_busy_sleep(object, m, m->pindex, wmesg, allocflags,
	    locked) && locked)
		VM_OBJECT_WLOCK(object);
	if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
		return (false);

	return (true);
}
/*
 * Assert that the grab flags are valid.
 */
static inline void
vm_page_grab_check(int allocflags)
{

	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
	    (allocflags & VM_ALLOC_WIRED) != 0,
	    ("vm_page_grab*: the pages must be busied or wired"));

	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
}

/*
 * Calculate the page allocation flags for grab.
 */
static inline int
vm_page_grab_pflags(int allocflags)
{
	int pflags;

	pflags = allocflags &
	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
	    VM_ALLOC_NOBUSY);
	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
		pflags |= VM_ALLOC_WAITFAIL;
	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
		pflags |= VM_ALLOC_SBUSY;

	return (pflags);
}

/*
 * Grab a page, waiting until we are woken up due to the page changing state.
 * We keep on waiting as long as the page continues to exist in the object.
 * If the page doesn't exist, first allocate it and then conditionally zero
 * it.
 *
 * This routine may sleep.
 *
 * The object must be locked on entry.  The lock will, however, be released
 * and reacquired if the routine sleeps.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_grab_check(allocflags);

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (!vm_page_tryacquire(m, allocflags)) {
			if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
			    allocflags, true))
				goto retrylookup;
			return (NULL);
		}
		goto out;
	}
	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
		return (NULL);
	m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
	if (m == NULL) {
		if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
			return (NULL);
		goto retrylookup;
	}
	if ((allocflags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

out:
	vm_page_grab_release(m, allocflags);

	return (m);
}
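#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch of a common vm_page_grab() call: find or allocate
 * the page at pindex and return it wired and unbusied, zeroed if it was
 * newly allocated.  The guard macro and "example_grab_zeroed" are
 * hypothetical.
 */
static vm_page_t
example_grab_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	m = vm_page_grab(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY |
	    VM_ALLOC_ZERO);
	VM_OBJECT_WUNLOCK(object);
	return (m);
}
#endif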
/*
 * Locklessly attempt to acquire a page given a (object, pindex) tuple
 * and an optional previous page to avoid the radix lookup.  The resulting
 * page will be validated against the identity tuple and busied or wired
 * as requested.  A NULL *mp return guarantees that the page was not in
 * radix at the time of the call, but callers must perform higher level
 * synchronization or retry the operation under a lock if they require
 * an atomic answer.  This is the only lock-free validation routine;
 * other routines can depend on the resulting page state.
 *
 * The return value indicates whether the operation failed due to caller
 * flags.  The return is tri-state with *mp:
 *
 *	(true, *mp != NULL) - The operation was successful.
 *	(true, *mp == NULL) - The page was not found in tree.
 *	(false, *mp == NULL) - WAITFAIL or NOWAIT prevented acquisition.
 */
static bool
vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex,
    vm_page_t prev, vm_page_t *mp, int allocflags)
{
	vm_page_t m;

	vm_page_grab_check(allocflags);
	MPASS(prev == NULL || vm_page_busied(prev) || vm_page_wired(prev));

	*mp = NULL;
	for (;;) {
		/*
		 * We may see a false NULL here because the previous page
		 * has been removed or just inserted and the list is loaded
		 * without barriers.  Switch to radix to verify.
		 */
		if (prev == NULL || (m = TAILQ_NEXT(prev, listq)) == NULL ||
		    QMD_IS_TRASHED(m) || m->pindex != pindex ||
		    atomic_load_ptr(&m->object) != object) {
			prev = NULL;
			/*
			 * This guarantees the result is instantaneously
			 * correct.
			 */
			m = vm_radix_lookup_unlocked(&object->rtree, pindex);
		}
		if (m == NULL)
			return (true);
		if (vm_page_trybusy(m, allocflags)) {
			if (m->object == object && m->pindex == pindex)
				break;
			/* relookup. */
			vm_page_busy_release(m);
			cpu_spinwait();
			continue;
		}
		if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
		    allocflags, false))
			return (false);
	}
	if ((allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_wire(m);
	vm_page_grab_release(m, allocflags);
	*mp = m;
	return (true);
}

/*
 * Try to locklessly grab a page and fall back to the object lock if NOCREAT
 * is not set.
 */
vm_page_t
vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;

	vm_page_grab_check(allocflags);

	if (!vm_page_acquire_unlocked(object, pindex, NULL, &m, allocflags))
		return (NULL);
	if (m != NULL)
		return (m);

	/*
	 * The radix lockless lookup should never return false negatives.  If
	 * the user specifies NOCREAT they are guaranteed there was no page
	 * present at the instant of the call.  A NOCREAT caller must handle
	 * create races gracefully.
	 */
	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
		return (NULL);

	VM_OBJECT_WLOCK(object);
	m = vm_page_grab(object, pindex, allocflags);
	VM_OBJECT_WUNLOCK(object);

	return (m);
}
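#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch of the lockless fast path: look the page up and
 * wire it without taking the object lock; vm_page_grab_unlocked() falls
 * back to the locked vm_page_grab() internally only if allocation is
 * needed.  The guard macro and "example_lookup_wired" are hypothetical.
 */
static vm_page_t
example_lookup_wired(vm_object_t object, vm_pindex_t pindex)
{

	return (vm_page_grab_unlocked(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY));
}
#endif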
/*
 * Grab a page and make it valid, paging in if necessary.  Pages missing from
 * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
 * and the page is not valid, as many as VM_INITIAL_PAGEIN pages can be
 * brought in simultaneously.  Additional pages will be left on a paging queue
 * but will neither be wired nor busy regardless of allocflags.
 */
int
vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags)
{
	vm_page_t m;
	vm_page_t ma[VM_INITIAL_PAGEIN];
	int after, i, pflags, rv;

	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
	KASSERT((allocflags &
	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
	VM_OBJECT_ASSERT_WLOCKED(object);
	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
	    VM_ALLOC_WIRED);
	pflags |= VM_ALLOC_WAITFAIL;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		/*
		 * If the page is fully valid it can only become invalid
		 * with the object lock held.  If it is not valid it can
		 * become valid with the busy lock held.  Therefore, we
		 * may unnecessarily lock the exclusive busy here if we
		 * race with I/O completion not using the object lock.
		 * However, we will not end up with an invalid page and a
		 * shared lock.
		 */
		if (!vm_page_trybusy(m,
		    vm_page_all_valid(m) ? allocflags : 0)) {
			(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
			    allocflags, true);
			goto retrylookup;
		}
		if (vm_page_all_valid(m))
			goto out;
		if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
			vm_page_busy_release(m);
			*mp = NULL;
			return (VM_PAGER_FAIL);
		}
	} else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
		*mp = NULL;
		return (VM_PAGER_FAIL);
	} else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
		goto retrylookup;
	}

	vm_page_assert_xbusied(m);
	if (vm_pager_has_page(object, pindex, NULL, &after)) {
		after = MIN(after, VM_INITIAL_PAGEIN);
		after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
		after = MAX(after, 1);
		ma[0] = m;
		for (i = 1; i < after; i++) {
			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
				if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
					break;
			} else {
				ma[i] = vm_page_alloc(object, m->pindex + i,
				    VM_ALLOC_NORMAL);
				if (ma[i] == NULL)
					break;
			}
		}
		after = i;
		vm_object_pip_add(object, after);
		VM_OBJECT_WUNLOCK(object);
		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeupn(object, after);
		/* Pager may have replaced a page. */
		m = ma[0];
		if (rv != VM_PAGER_OK) {
			for (i = 0; i < after; i++) {
				if (!vm_page_wired(ma[i]))
					vm_page_free(ma[i]);
				else
					vm_page_xunbusy(ma[i]);
			}
			*mp = NULL;
			return (rv);
		}
		for (i = 1; i < after; i++)
			vm_page_readahead_finish(ma[i]);
		MPASS(vm_page_all_valid(m));
	} else {
		vm_page_zero_invalid(m, TRUE);
	}
out:
	if ((allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_wire(m);
	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
		vm_page_busy_downgrade(m);
	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
		vm_page_busy_release(m);
	*mp = m;
	return (VM_PAGER_OK);
}
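#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch of paging in a single page with
 * vm_page_grab_valid(): on VM_PAGER_OK the page is returned valid, wired
 * and unbusied; any other status means it could not be made valid.  The
 * guard macro and "example_read_page" are hypothetical.
 */
static int
example_read_page(vm_object_t object, vm_pindex_t pindex, vm_page_t *mp)
{
	int rv;

	VM_OBJECT_WLOCK(object);
	rv = vm_page_grab_valid(mp, object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
	VM_OBJECT_WUNLOCK(object);
	return (rv);
}
#endif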
/*
 * Locklessly grab a valid page.  If the page is not valid or not yet
 * allocated this will fall back to the object lock method.
 */
int
vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int flags;
	int error;

	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
	    "mismatch"));
	KASSERT((allocflags &
	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
	    ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));

	/*
	 * Attempt a lockless lookup and busy.  We need at least an sbusy
	 * before we can inspect the valid field and return a wired page.
	 */
	flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
	if (!vm_page_acquire_unlocked(object, pindex, NULL, mp, flags))
		return (VM_PAGER_FAIL);
	if ((m = *mp) != NULL) {
		if (vm_page_all_valid(m)) {
			if ((allocflags & VM_ALLOC_WIRED) != 0)
				vm_page_wire(m);
			vm_page_grab_release(m, allocflags);
			return (VM_PAGER_OK);
		}
		vm_page_busy_release(m);
	}
	if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
		*mp = NULL;
		return (VM_PAGER_FAIL);
	}
	VM_OBJECT_WLOCK(object);
	error = vm_page_grab_valid(mp, object, pindex, allocflags);
	VM_OBJECT_WUNLOCK(object);

	return (error);
}

/*
 * Return the specified range of pages from the given object.  For each
 * page offset within the range, if a page already exists within the object
 * at that offset and it is busy, then wait for it to change state.  If,
 * instead, the page doesn't exist, then allocate it.
 *
 * The caller must always specify an allocation class.
 *
 * allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs the pages
 *
 * The caller must always specify that the pages are to be busied and/or
 * wired.
 *
 * optional allocation flags:
 *	VM_ALLOC_IGN_SBUSY	do not sleep on soft busy pages
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NOWAIT		do not sleep
 *	VM_ALLOC_SBUSY		set page to sbusy state
 *	VM_ALLOC_WIRED		wire the pages
 *	VM_ALLOC_ZERO		zero and validate any invalid pages
 *
 * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
 * may return a partial prefix of the requested range.
 */
int
vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count)
{
	vm_page_t m, mpred;
	int pflags;
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
	    ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
	KASSERT(count > 0,
	    ("vm_page_grab_pages: invalid page count %d", count));
	vm_page_grab_check(allocflags);

	pflags = vm_page_grab_pflags(allocflags);
	i = 0;
retrylookup:
	m = vm_radix_lookup_le(&object->rtree, pindex + i);
	if (m == NULL || m->pindex != pindex + i) {
		mpred = m;
		m = NULL;
	} else
		mpred = TAILQ_PREV(m, pglist, listq);
	for (; i < count; i++) {
		if (m != NULL) {
			if (!vm_page_tryacquire(m, allocflags)) {
				if (vm_page_grab_sleep(object, m, pindex,
				    "grbmaw", allocflags, true))
					goto retrylookup;
				break;
			}
		} else {
			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
				break;
			m = vm_page_alloc_after(object, pindex + i,
			    pflags | VM_ALLOC_COUNT(count - i), mpred);
			if (m == NULL) {
				if ((allocflags & (VM_ALLOC_NOWAIT |
				    VM_ALLOC_WAITFAIL)) != 0)
					break;
				goto retrylookup;
			}
		}
		if (vm_page_none_valid(m) &&
		    (allocflags & VM_ALLOC_ZERO) != 0) {
			if ((m->flags & PG_ZERO) == 0)
				pmap_zero_page(m);
			vm_page_valid(m);
		}
		vm_page_grab_release(m, allocflags);
		ma[i] = mpred = m;
		m = vm_page_next(m);
	}
	return (i);
}
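#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch of grabbing a contiguous range with
 * vm_page_grab_pages(): each page comes back wired and unbusied,
 * zero-filled and validated if it was invalid.  The return value may be a
 * partial prefix when VM_ALLOC_NOWAIT is used; here we allow sleeping, so
 * all "count" pages are returned.  The guard macro and
 * "example_grab_range" are hypothetical.
 */
static void
example_grab_range(vm_object_t object, vm_pindex_t pindex, vm_page_t *ma,
    int count)
{

	VM_OBJECT_WLOCK(object);
	(void)vm_page_grab_pages(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY |
	    VM_ALLOC_ZERO, ma, count);
	VM_OBJECT_WUNLOCK(object);
}
#endif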
/*
 * Unlocked variant of vm_page_grab_pages().  This accepts the same flags
 * and will fall back to the locked variant to handle allocation.
 */
int
vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count)
{
	vm_page_t m, pred;
	int flags;
	int i;

	KASSERT(count > 0,
	    ("vm_page_grab_pages_unlocked: invalid page count %d", count));
	vm_page_grab_check(allocflags);

	/*
	 * Modify flags for lockless acquire to hold the page until we
	 * set it valid if necessary.
	 */
	flags = allocflags & ~VM_ALLOC_NOBUSY;
	pred = NULL;
	for (i = 0; i < count; i++, pindex++) {
		if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags))
			return (i);
		if (m == NULL)
			break;
		if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
			if ((m->flags & PG_ZERO) == 0)
				pmap_zero_page(m);
			vm_page_valid(m);
		}
		/* m will still be wired or busy according to flags. */
		vm_page_grab_release(m, allocflags);
		pred = ma[i] = m;
	}
	if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0)
		return (i);
	count -= i;
	VM_OBJECT_WLOCK(object);
	i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
	VM_OBJECT_WUNLOCK(object);

	return (i);
}

/*
 * Mapping function for valid or dirty bits in a page.
 *
 * Inputs are required to range within a page.
 */
vm_page_bits_t
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return (0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
}
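/*
 * For illustration: with DEV_BSIZE == 512 (DEV_BSHIFT == 9), a request
 * covering bytes [512, 1536) gives first_bit == 1 and last_bit == 2, so
 * vm_page_bits(512, 1024) returns
 * ((vm_page_bits_t)2 << 2) - ((vm_page_bits_t)1 << 1) == 0x6,
 * i.e., the bits for the second and third 512-byte chunks of the page.
 */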
void
vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
{

#if PAGE_SIZE == 32768
	atomic_set_64((uint64_t *)bits, set);
#elif PAGE_SIZE == 16384
	atomic_set_32((uint32_t *)bits, set);
#elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
	atomic_set_16((uint16_t *)bits, set);
#elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
	atomic_set_8((uint8_t *)bits, set);
#else		/* PAGE_SIZE <= 8192 */
	uintptr_t addr;
	int shift;

	addr = (uintptr_t)bits;
	/*
	 * Use a trick to perform a 32-bit atomic on the
	 * containing aligned word, to not depend on the existence
	 * of atomic_{set, clear}_{8, 16}.
	 */
	shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	addr &= ~(sizeof(uint32_t) - 1);
	atomic_set_32((uint32_t *)addr, set << shift);
#endif		/* PAGE_SIZE */
}

static inline void
vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
{

#if PAGE_SIZE == 32768
	atomic_clear_64((uint64_t *)bits, clear);
#elif PAGE_SIZE == 16384
	atomic_clear_32((uint32_t *)bits, clear);
#elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
	atomic_clear_16((uint16_t *)bits, clear);
#elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
	atomic_clear_8((uint8_t *)bits, clear);
#else		/* PAGE_SIZE <= 8192 */
	uintptr_t addr;
	int shift;

	addr = (uintptr_t)bits;
	/*
	 * Use a trick to perform a 32-bit atomic on the
	 * containing aligned word, to not depend on the existence
	 * of atomic_{set, clear}_{8, 16}.
	 */
	shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	addr &= ~(sizeof(uint32_t) - 1);
	atomic_clear_32((uint32_t *)addr, clear << shift);
#endif		/* PAGE_SIZE */
}

static inline vm_page_bits_t
vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
{
#if PAGE_SIZE == 32768
	uint64_t old;

	old = *bits;
	while (atomic_fcmpset_64(bits, &old, newbits) == 0);
	return (old);
#elif PAGE_SIZE == 16384
	uint32_t old;

	old = *bits;
	while (atomic_fcmpset_32(bits, &old, newbits) == 0);
	return (old);
#elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
	uint16_t old;

	old = *bits;
	while (atomic_fcmpset_16(bits, &old, newbits) == 0);
	return (old);
#elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
	uint8_t old;

	old = *bits;
	while (atomic_fcmpset_8(bits, &old, newbits) == 0);
	return (old);
#else		/* PAGE_SIZE <= 8192 */
	uintptr_t addr;
	uint32_t old, new, mask;
	int shift;

	addr = (uintptr_t)bits;
	/*
	 * Use a trick to perform a 32-bit atomic on the
	 * containing aligned word, to not depend on the existence
	 * of atomic_{set, swap, clear}_{8, 16}.
	 */
	shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	addr &= ~(sizeof(uint32_t) - 1);
	mask = VM_PAGE_BITS_ALL << shift;

	old = *bits;
	do {
		new = old & ~mask;
		new |= newbits << shift;
	} while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
	return (old >> shift);
#endif		/* PAGE_SIZE */
}
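/*
 * For illustration of the containing-word trick above: with PAGE_SIZE ==
 * 4096, vm_page_bits_t is a single byte.  On a little-endian machine a
 * valid/dirty field at byte offset 3 within its aligned 32-bit word gets
 * shift == 3 * NBBY == 24, so the byte-sized operand is shifted into the
 * top byte of the word before the 32-bit atomic is applied.  On a
 * big-endian machine that same byte occupies the low-order lane, so the
 * shift is computed from the opposite end of the word and comes out 0.
 */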
/*
 *	vm_page_set_valid_range:
 *
 *	Sets portions of a page valid.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_valid_range(vm_page_t m, int base, int size)
{
	int endoff, frag;
	vm_page_bits_t pagebits;

	vm_page_assert_busied(m);
	if (size == 0)		/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Assert that no previously invalid block that is now being validated
	 * is already dirty.
	 */
	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
	    ("vm_page_set_valid_range: page %p is dirty", m));

	/*
	 * Set valid bits inclusive of any overlap.
	 */
	pagebits = vm_page_bits(base, size);
	if (vm_page_xbusied(m))
		m->valid |= pagebits;
	else
		vm_page_bits_set(m, &m->valid, pagebits);
}

/*
 * Set the page dirty bits and free the invalid swap space if
 * present.  Returns the previous dirty bits.
 */
vm_page_bits_t
vm_page_set_dirty(vm_page_t m)
{
	vm_page_bits_t old;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
		old = m->dirty;
		m->dirty = VM_PAGE_BITS_ALL;
	} else
		old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
	if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
		vm_pager_page_unswapped(m);

	return (old);
}

/*
 * Clear the given bits from the specified page's dirty field.
 */
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
{

	vm_page_assert_busied(m);

	/*
	 * If the page is xbusied and not write mapped we are the
	 * only thread that can modify dirty bits.  Otherwise, the pmap
	 * layer can call vm_page_dirty() without holding a distinguished
	 * lock.  The combination of page busy and atomic operations
	 * suffice to guarantee consistency of the page dirty field.
	 */
	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
		m->dirty &= ~pagebits;
	else
		vm_page_bits_clear(m, &m->dirty, pagebits);
}
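#ifdef VM_PAGE_EXAMPLES
/*
 * A compiled-out sketch for vm_page_set_valid_range() above: after a
 * device read fills only the first "resid" bytes of a busied, invalid
 * page, mark just those bytes valid; the routine zeroes any trailing
 * partial DEV_BSIZE chunk as needed.  The guard macro and
 * "example_short_read_done" are hypothetical.
 */
static void
example_short_read_done(vm_page_t m, int resid)
{

	/* Validate only the bytes actually read. */
	vm_page_set_valid_range(m, 0, resid);
}
#endif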
5130 */ 5131 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 5132 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) 5133 pmap_zero_page_area(m, frag, base - frag); 5134 5135 /* 5136 * If the ending offset is not DEV_BSIZE aligned and the 5137 * valid bit is clear, we have to zero out a portion of 5138 * the last block. 5139 */ 5140 endoff = base + size; 5141 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 5142 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) 5143 pmap_zero_page_area(m, endoff, 5144 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 5145 5146 /* 5147 * Set valid, clear dirty bits. If validating the entire 5148 * page we can safely clear the pmap modify bit. We also 5149 * use this opportunity to clear the PGA_NOSYNC flag. If a process 5150 * takes a write fault on a MAP_NOSYNC memory area the flag will 5151 * be set again. 5152 * 5153 * We set valid bits inclusive of any overlap, but we can only 5154 * clear dirty bits for DEV_BSIZE chunks that are fully within 5155 * the range. 5156 */ 5157 oldvalid = m->valid; 5158 pagebits = vm_page_bits(base, size); 5159 if (vm_page_xbusied(m)) 5160 m->valid |= pagebits; 5161 else 5162 vm_page_bits_set(m, &m->valid, pagebits); 5163 #if 0 /* NOT YET */ 5164 if ((frag = base & (DEV_BSIZE - 1)) != 0) { 5165 frag = DEV_BSIZE - frag; 5166 base += frag; 5167 size -= frag; 5168 if (size < 0) 5169 size = 0; 5170 } 5171 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 5172 #endif 5173 if (base == 0 && size == PAGE_SIZE) { 5174 /* 5175 * The page can only be modified within the pmap if it is 5176 * mapped, and it can only be mapped if it was previously 5177 * fully valid. 5178 */ 5179 if (oldvalid == VM_PAGE_BITS_ALL) 5180 /* 5181 * Perform the pmap_clear_modify() first. Otherwise, 5182 * a concurrent pmap operation, such as 5183 * pmap_protect(), could clear a modification in the 5184 * pmap and set the dirty field on the page before 5185 * pmap_clear_modify() had begun and after the dirty 5186 * field was cleared here. 5187 */ 5188 pmap_clear_modify(m); 5189 m->dirty = 0; 5190 vm_page_aflag_clear(m, PGA_NOSYNC); 5191 } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m)) 5192 m->dirty &= ~pagebits; 5193 else 5194 vm_page_clear_dirty_mask(m, pagebits); 5195 } 5196 5197 void 5198 vm_page_clear_dirty(vm_page_t m, int base, int size) 5199 { 5200 5201 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 5202 } 5203 5204 /* 5205 * vm_page_set_invalid: 5206 * 5207 * Invalidates DEV_BSIZE'd chunks within a page. Both the 5208 * valid and dirty bits for the effected areas are cleared. 5209 */ 5210 void 5211 vm_page_set_invalid(vm_page_t m, int base, int size) 5212 { 5213 vm_page_bits_t bits; 5214 vm_object_t object; 5215 5216 /* 5217 * The object lock is required so that pages can't be mapped 5218 * read-only while we're in the process of invalidating them. 
/*
 *	vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	vm_page_bits_t bits;
	vm_object_t object;

	/*
	 * The object lock is required so that pages can't be mapped
	 * read-only while we're in the process of invalidating them.
	 */
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_assert_busied(m);

	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
	    size >= object->un_pager.vnp.vnp_size)
		bits = VM_PAGE_BITS_ALL;
	else
		bits = vm_page_bits(base, size);
	if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
		pmap_remove_all(m);
	KASSERT((bits == 0 && vm_page_all_valid(m)) ||
	    !pmap_page_is_mapped(m),
	    ("vm_page_set_invalid: page %p is mapped", m));
	if (vm_page_xbusied(m)) {
		m->valid &= ~bits;
		m->dirty &= ~bits;
	} else {
		vm_page_bits_clear(m, &m->valid, bits);
		vm_page_bits_clear(m, &m->dirty, bits);
	}
}

/*
 *	vm_page_invalid:
 *
 *	Invalidates the entire page.  The page must be busy, unmapped, and
 *	the enclosing object must be locked.  The object lock protects
 *	against concurrent read-only pmap enter which is done without
 *	busy.
 */
void
vm_page_invalid(vm_page_t m)
{

	vm_page_assert_busied(m);
	VM_OBJECT_ASSERT_LOCKED(m->object);
	MPASS(!pmap_page_is_mapped(m));

	if (vm_page_xbusied(m))
		m->valid = 0;
	else
		vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
}

/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
	 * valid bit may be set ) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & ((vm_page_bits_t)1 << i))) {
			if (i > b) {
				pmap_zero_page_area(m,
				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		vm_page_valid(m);
}

/*
 *	vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 *
 *	Some callers invoke this routine without the busy lock held and
 *	handle races via higher level locks.  Typical callers should
 *	hold a busy lock to prevent invalidation.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	vm_page_bits_t bits;

	bits = vm_page_bits(base, size);
	return (m->valid != 0 && (m->valid & bits) == bits);
}
/*
 * Returns true if all of the specified predicates are true for the entire
 * (super)page and false otherwise.
 */
bool
vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
{
	vm_object_t object;
	int i, npages;

	object = m->object;
	if (skip_m != NULL && skip_m->object != object)
		return (false);
	VM_OBJECT_ASSERT_LOCKED(object);
	npages = atop(pagesizes[m->psind]);

	/*
	 * The physically contiguous pages that make up a superpage, i.e., a
	 * page with a page size index ("psind") greater than zero, will
	 * occupy adjacent entries in vm_page_array[].
	 */
	for (i = 0; i < npages; i++) {
		/* Always test object consistency, including "skip_m". */
		if (m[i].object != object)
			return (false);
		if (&m[i] == skip_m)
			continue;
		if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
			return (false);
		if ((flags & PS_ALL_DIRTY) != 0) {
			/*
			 * Calling vm_page_test_dirty() or pmap_is_modified()
			 * might stop this case from spuriously returning
			 * "false".  However, that would require a write lock
			 * on the object containing "m[i]".
			 */
			if (m[i].dirty != VM_PAGE_BITS_ALL)
				return (false);
		}
		if ((flags & PS_ALL_VALID) != 0 &&
		    m[i].valid != VM_PAGE_BITS_ALL)
			return (false);
	}
	return (true);
}

/*
 * Set the page's dirty bits if the page is modified.
 */
void
vm_page_test_dirty(vm_page_t m)
{

	vm_page_assert_busied(m);
	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
		vm_page_dirty(m);
}

void
vm_page_valid(vm_page_t m)
{

	vm_page_assert_busied(m);
	if (vm_page_xbusied(m))
		m->valid = VM_PAGE_BITS_ALL;
	else
		vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
}

void
vm_page_lock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
}

void
vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
}

int
vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
{

	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
}

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void
vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
{

	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
}

void
vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
{

	mtx_assert_(vm_page_lockptr(m), a, file, line);
}
#endif

#ifdef INVARIANTS
void
vm_page_object_busy_assert(vm_page_t m)
{

	/*
	 * Certain of the page's fields may only be modified by the
	 * holder of a page or object busy.
	 */
	if (m->object != NULL && !vm_page_busied(m))
		VM_OBJECT_ASSERT_BUSY(m->object);
}
void
vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
{

	if ((bits & PGA_WRITEABLE) == 0)
		return;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is
	 * managed, is exclusively busied or the object is locked.
	 * Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("PGA_WRITEABLE on unmanaged page"));
	if (!vm_page_xbusied(m))
		VM_OBJECT_ASSERT_BUSY(m->object);
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{

	db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
	db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
	db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
	db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
	db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int dom;

	db_printf("pq_free %d\n", vm_free_count());
	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf(
    "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
		    dom,
		    vm_dom[dom].vmd_page_count,
		    vm_dom[dom].vmd_free_count,
		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
	}
}

DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
{
	vm_page_t m;
	boolean_t phys, virt;

	if (!have_addr) {
		db_printf("show pginfo addr\n");
		return;
	}

	phys = strchr(modif, 'p') != NULL;
	virt = strchr(modif, 'v') != NULL;
	if (virt)
		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
	else if (phys)
		m = PHYS_TO_VM_PAGE(addr);
	else
		m = (vm_page_t)addr;
	db_printf(
    "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n"
    "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
	    m->a.queue, m->ref_count, m->a.flags, m->oflags,
	    m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
}
#endif /* DDB */
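/*
 * Example ddb usage of the commands above (illustrative, not output from
 * a real session; the modifier syntax is believed to be as follows):
 *
 *	show page		- print the global page counters
 *	show pageq		- print per-domain queue and free counts
 *	show pginfo/p <paddr>	- decode the vm_page for a physical address
 *	show pginfo/v <vaddr>	- decode the vm_page backing a kernel VA
 */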