/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sleepqueue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

struct vm_domain vm_dom[MAXMEMDOM];

DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);

struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];

struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
/* The following fields are protected by the domainset lock. */
domainset_t __exclusive_cache_line vm_min_domains;
domainset_t __exclusive_cache_line vm_severe_domains;
static int vm_min_waiters;
static int vm_severe_waiters;
static int vm_pageproc_waiters;

static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM page statistics");

static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
    CTLFLAG_RD, &pqstate_commit_retries,
    "Number of failed per-page atomic queue state updates");

static COUNTER_U64_DEFINE_EARLY(queue_ops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
    CTLFLAG_RD, &queue_ops,
    "Number of batched queue operations");

static COUNTER_U64_DEFINE_EARLY(queue_nops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
    CTLFLAG_RD, &queue_nops,
    "Number of batched queue operations with no effects");

/*
 * bogus page -- for I/O to/from partially complete buffers,
 * or for paging into sparsely invalid regions.
 */
vm_page_t bogus_page;

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;

struct bitset *vm_page_dump;
long vm_page_dump_pages;

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

static void vm_page_alloc_check(vm_page_t m);
static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
static bool vm_page_free_prep(vm_page_t m);
static void vm_page_free_toq(vm_page_t m);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
    const uint16_t nflag);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
    int req);
static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
    int flags);
static void vm_page_zone_release(void *arg, void **store, int cnt);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

static void
vm_page_init(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED);
}

/*
 * The cache page zone is initialized later since we need to be able to allocate
 * pages before UMA is fully initialized.
 */
static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	struct vm_pgcache *pgcache;
	int cache, domain, maxcache, pool;

	maxcache = 0;
	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &maxcache);
	maxcache *= mp_ncpus;
	for (domain = 0; domain < vm_ndomains; domain++) {
		vmd = VM_DOMAIN(domain);
		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
			pgcache = &vmd->vmd_pgcache[pool];
			pgcache->domain = domain;
			pgcache->pool = pool;
			pgcache->zone = uma_zcache_create("vm pgcache",
			    PAGE_SIZE, NULL, NULL, NULL, NULL,
			    vm_page_zone_import, vm_page_zone_release, pgcache,
			    UMA_ZONE_VM);

			/*
			 * Limit each pool's zone to 0.1% of the pages in the
			 * domain.
			 */
			cache = maxcache != 0 ? maxcache :
			    vmd->vmd_page_count / 1000;
			uma_zone_set_maxcache(pgcache->zone, cache);
		}
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

bool
vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
{
	struct vm_domain *vmd;
	vm_page_t m;
	bool found;

	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		return (true); /* page does not exist, no failure */

	vmd = vm_pagequeue_domain(m);
	vm_domain_free_lock(vmd);
	found = vm_phys_unfree_page(m);
	vm_domain_free_unlock(vmd);
	if (found) {
		vm_domain_freecnt_inc(vmd, -1);
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		if (verbose)
			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
	}
	return (found);
}

/*
 * vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	char *next;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		vm_page_blacklist_add(pa, bootverbose);
	}
}
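
/*
 * Example (hypothetical loader.conf value, not taken from this file):
 * setting
 *
 *	vm.blacklist="0x7dea4000,0x7dea6000 0x7deb1000"
 *
 * withholds the pages containing those three physical addresses from
 * the free lists at boot.  Entries may be separated by spaces, commas,
 * or newlines, and each parsed address is truncated to a page boundary
 * by vm_page_blacklist_next() above.
 */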

/*
 * vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Initialize a dummy page for use in scans of the specified paging queue.
 * In principle, this function only needs to set the flag PG_MARKER.
 * Nonetheless, it write busies the page as a safety precaution.
 */
void
vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->a.flags = aflags;
	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	marker->a.queue = queue;
}
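
/*
 * Illustrative sketch of how such markers are used (modeled on the page
 * daemon's queue scans; the surrounding locking is assumed, not shown):
 *
 *	TAILQ_INSERT_AFTER(&pq->pq_pl, m, marker, plinks.q);
 *	vm_pagequeue_unlock(pq);
 *	...				// work on m without the queue lock
 *	vm_pagequeue_lock(pq);
 *	m = TAILQ_NEXT(marker, plinks.q);
 *	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
 *
 * Because scans skip pages carrying PG_MARKER, the queue lock can be
 * dropped mid-scan without losing the scan position.
 */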

static void
vm_page_domain_init(int domain)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int i;

	vmd = VM_DOMAIN(domain);
	bzero(vmd, sizeof(*vmd));
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(const char **,
	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	vmd->vmd_domain = domain;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
		pq->pq_pdpages = 0;
		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
	}
	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);

	/*
	 * inacthead is used to provide FIFO ordering for LRU-bypassing
	 * insertions.
	 */
	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
	    &vmd->vmd_inacthead, plinks.q);

	/*
	 * The clock pages are used to implement active queue scanning without
	 * requeues.  Scans start at clock[0], which is advanced after the scan
	 * ends.  When the two clock hands meet, they are reset and scanning
	 * resumes from the head of the queue.
	 */
	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[0], plinks.q);
	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[1], plinks.q);
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
void
vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
{

	m->object = NULL;
	m->ref_count = 0;
	m->busy_lock = VPB_FREED;
	m->flags = m->a.flags = 0;
	m->phys_addr = pa;
	m->a.queue = PQ_NONE;
	m->psind = 0;
	m->segind = segind;
	m->order = VM_NFREEORDER;
	m->pool = VM_FREEPOOL_DEFAULT;
	m->valid = m->dirty = 0;
	pmap_page_init(m);
}

#ifndef PMAP_HAS_PAGE_ARRAY
static vm_paddr_t
vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
{
	vm_paddr_t new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 * However, because this page is allocated from KVM, out-of-bounds
	 * accesses using the direct map will not be trapped.
	 */
	*vaddr += PAGE_SIZE;

	/*
	 * Allocate physical memory for the page structures, and map it.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array_size = page_range;

	return (new_end);
}
#endif

/*
 * vm_page_startup:
 *
 *	Initializes the resident memory module.  Allocates physical memory for
 *	bootstrapping UMA and some data structures that are used to manage
 *	physical pages.  Initializes these structures, and populates the free
 *	page queues.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	struct vm_phys_seg *seg;
	struct vm_domain *vmd;
	vm_page_t m;
	char *list, *listend;
	vm_paddr_t end, high_avail, low_avail, new_end, size;
	vm_paddr_t page_range __unused;
	vm_paddr_t last_pa, pa, startp, endp;
	u_long pagecount;
#if MINIDUMP_PAGE_TRACKING
	u_long vm_page_dump_size;
#endif
	int biggestone, i, segind;
#ifdef WITNESS
	vm_offset_t mapped;
	int witness_size;
#endif
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	long ii;
#endif

	vaddr = round_page(vaddr);

	vm_phys_early_startup();
	biggestone = vm_phys_avail_largest();
	end = phys_avail[biggestone + 1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(i);

	new_end = end;
#ifdef WITNESS
	witness_size = round_page(witness_startup_count());
	new_end -= witness_size;
	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, witness_size);
	witness_startup((void *)mapped);
#endif

#if MINIDUMP_PAGE_TRACKING
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, it is not needed on i386, but it is
	 * included in case the sf_buf code decides to use it.
	 */
	last_pa = 0;
	vm_page_dump_pages = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
		    dump_avail[i] / PAGE_SIZE;
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	}
	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#else
	(void)last_pa;
#endif
#if defined(__aarch64__) || defined(__amd64__) || \
    defined(__riscv) || defined(__powerpc64__)
	/*
	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
	 * in a crash dump.  When pmap_map() uses the direct map, they are
	 * not automatically included.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use, taking into account the overhead of a page structure per page.
	 * In other words, solve
	 *	"available physical memory" - round_page(page_range *
	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE
	 * for page_range.
	 */
	low_avail = phys_avail[0];
	high_avail = phys_avail[1];
	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_avail)
			low_avail = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_avail)
			high_avail = vm_phys_segs[i].end;
	}
	/* Skip the first chunk.  It is already accounted for. */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i] < low_avail)
			low_avail = phys_avail[i];
		if (phys_avail[i + 1] > high_avail)
			high_avail = phys_avail[i + 1];
	}
	first_page = low_avail / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	size = 0;
	for (i = 0; i < vm_phys_nsegs; i++)
		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		size += phys_avail[i + 1] - phys_avail[i];
#elif defined(VM_PHYSSEG_DENSE)
	size = high_avail - low_avail;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

#ifdef PMAP_HAS_PAGE_ARRAY
	pmap_page_array_startup(size / PAGE_SIZE);
	biggestone = vm_phys_avail_largest();
	end = new_end = phys_avail[biggestone + 1];
#else
#ifdef VM_PHYSSEG_DENSE
	/*
	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
	 * the overhead of a page structure per page only if vm_page_array is
	 * allocated from the last physical memory chunk.  Otherwise, we must
	 * allocate page structures representing the physical memory
	 * underlying vm_page_array, even though they will not be used.
	 */
	if (new_end != high_avail)
		page_range = size / PAGE_SIZE;
	else
#endif
	{
		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));

		/*
		 * If the partial bytes remaining are large enough for
		 * a page (PAGE_SIZE) without a corresponding
		 * 'struct vm_page', then new_end will contain an
		 * extra page after subtracting the length of the VM
		 * page array.  Compensate by subtracting an extra
		 * page from new_end.
		 */
		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
			if (new_end == high_avail)
				high_avail -= PAGE_SIZE;
			new_end -= PAGE_SIZE;
		}
	}
	end = new_end;
	new_end = vm_page_array_alloc(&vaddr, end, page_range);
#endif

#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate physical memory for the reservation management system's
	 * data structures, and map it.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end);
#endif
#if defined(__aarch64__) || defined(__amd64__) || \
    defined(__riscv) || defined(__powerpc64__)
	/*
	 * Include vm_page_array and vm_reserv_array in a crash dump.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		if (vm_phys_avail_size(i) != 0)
			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Initialize the page structures and add every available page to the
	 * physical memory allocator's free lists.
	 */
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	for (ii = 0; ii < vm_page_array_size; ii++) {
		m = &vm_page_array[ii];
		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
		m->flags = PG_FICTITIOUS;
	}
#endif
	vm_cnt.v_page_count = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		for (m = seg->first_page, pa = seg->start; pa < seg->end;
		    m++, pa += PAGE_SIZE)
			vm_page_init_page(m, pa, segind);

		/*
		 * Add the segment's pages that are covered by one of
		 * phys_avail's ranges to the free lists.
		 */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			if (seg->end <= phys_avail[i] ||
			    seg->start >= phys_avail[i + 1])
				continue;

			startp = MAX(seg->start, phys_avail[i]);
			endp = MIN(seg->end, phys_avail[i + 1]);
			pagecount = (u_long)atop(endp - startp);
			if (pagecount == 0)
				continue;

			m = seg->first_page + atop(startp - seg->start);
			vmd = VM_DOMAIN(seg->domain);
			vm_domain_free_lock(vmd);
			vm_phys_enqueue_contig(m, pagecount);
			vm_domain_free_unlock(vmd);
			vm_domain_freecnt_inc(vmd, pagecount);
			vm_cnt.v_page_count += (u_int)pagecount;
			vmd->vmd_page_count += (u_int)pagecount;
			vmd->vmd_segs |= 1UL << segind;
		}
	}

	/*
	 * Remove blacklisted pages from the physical memory allocator.
	 */
	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif

	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 * vm_page_trybusy
 *
 *	Helper routine for grab functions to trylock busy.
 *
 *	Returns true on success and false on failure.
 */
static bool
vm_page_trybusy(vm_page_t m, int allocflags)
{

	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
		return (vm_page_trysbusy(m));
	else
		return (vm_page_tryxbusy(m));
}

/*
 * vm_page_tryacquire
 *
 *	Helper routine for grab functions to trylock busy and wire.
 *
 *	Returns true on success and false on failure.
 */
static inline bool
vm_page_tryacquire(vm_page_t m, int allocflags)
{
	bool locked;

	locked = vm_page_trybusy(m, allocflags);
	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_wire(m);
	return (locked);
}

/*
 * vm_page_busy_acquire:
 *
 *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
 *	and drop the object lock if necessary.
 */
bool
vm_page_busy_acquire(vm_page_t m, int allocflags)
{
	vm_object_t obj;
	bool locked;

	/*
	 * The page-specific object must be cached because page
	 * identity can change during the sleep, causing the
	 * re-lock of a different object.
	 * It is assumed that a reference to the object is already
	 * held by the caller.
	 */
	obj = atomic_load_ptr(&m->object);
	for (;;) {
		if (vm_page_tryacquire(m, allocflags))
			return (true);
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (false);
		if (obj != NULL)
			locked = VM_OBJECT_WOWNED(obj);
		else
			locked = false;
		MPASS(locked || vm_page_wired(m));
		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
		    locked) && locked)
			VM_OBJECT_WLOCK(obj);
		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
			return (false);
		KASSERT(m->object == obj || m->object == NULL,
		    ("vm_page_busy_acquire: page %p does not belong to %p",
		    m, obj));
	}
}

/*
 * vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;

	vm_page_assert_xbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (atomic_fcmpset_rel_int(&m->busy_lock,
		    &x, VPB_SHARERS_WORD(1)))
			break;
	}
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}

/*
 * vm_page_busy_tryupgrade:
 *
 *	Attempt to upgrade a single shared busy into an exclusive busy.
 */
int
vm_page_busy_tryupgrade(vm_page_t m)
{
	u_int ce, x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	ce = VPB_CURTHREAD_EXCLUSIVE;
	for (;;) {
		if (VPB_SHARERS(x) > 1)
			return (0);
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_busy_tryupgrade: invalid lock state"));
		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    ce | (x & VPB_BIT_WAITERS)))
			continue;
		return (1);
	}
}

/*
 * vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}
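
/*
 * Illustrative note on the busy_lock encoding (inferred from the VPB_*
 * macros used here): the shared-busy state is a counter, so a page
 * shared busied by two threads holds VPB_SHARERS_WORD(2), and dropping
 * one shared busy in vm_page_sunbusy() below is a single atomic
 * subtraction of VPB_ONE_SHARER.  VPB_BIT_WAITERS lives in the same
 * word, which is how the final unbusy knows whether to call wakeup(m).
 */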

/*
 * vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	x = vm_page_busy_fetch(m);
	for (;;) {
		KASSERT(x != VPB_FREED,
		    ("vm_page_sunbusy: Unlocking freed page."));
		if (VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_sunbusy: invalid lock state"));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) == 0)
			break;
		wakeup(m);
		break;
	}
}

/*
 * vm_page_busy_sleep:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
 *	will not sleep if the page is shared-busy.
 *
 *	The object lock must be held on entry.
 *
 *	Returns true if it slept and dropped the object lock, or false
 *	if there was no sleep and the lock is still held.
 */
bool
vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
{
	vm_object_t obj;

	obj = m->object;
	VM_OBJECT_ASSERT_LOCKED(obj);

	return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
	    true));
}

/*
 * vm_page_busy_sleep_unlocked:
 *
 *	Sleep if the page is busy, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
 *	will not sleep if the page is shared-busy.
 *
 *	The object lock must not be held on entry.  The operation will
 *	return if the page changes identity.
 */
void
vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags)
{
	VM_OBJECT_ASSERT_UNLOCKED(obj);

	(void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
}
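
/*
 * Sketch of a typical lockless retry loop (hypothetical caller, not
 * code from this file):
 *
 *	for (;;) {
 *		m = vm_page_lookup_unlocked(obj, pindex);
 *		if (m == NULL || vm_page_tryxbusy(m))
 *			break;
 *		vm_page_busy_sleep_unlocked(obj, m, pindex, "vmwait", 0);
 *	}
 *
 * After a successful busy, a real caller must still re-verify
 * m->object and m->pindex, as vm_page_relookup() does, since the page
 * may have changed identity before the busy lock was acquired.
 */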

/*
 * _vm_page_busy_sleep:
 *
 *	Internal busy sleep function.  Verifies the page identity and
 *	lockstate against parameters.  Returns true if it sleeps and
 *	false otherwise.
 *
 *	allocflags uses VM_ALLOC_* flags to specify the lock required.
 *
 *	If locked is true the lock will be dropped for any true returns
 *	and held for any false returns.
 */
static bool
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)
{
	bool xsleep;
	u_int x;

	/*
	 * If the object is busy we must wait for that to drain to zero
	 * before trying the page again.
	 */
	if (obj != NULL && vm_object_busied(obj)) {
		if (locked)
			VM_OBJECT_DROP(obj);
		vm_object_busy_wait(obj, wmesg);
		return (true);
	}

	if (!vm_page_busied(m))
		return (false);

	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
	sleepq_lock(m);
	x = vm_page_busy_fetch(m);
	do {
		/*
		 * If the page changes objects or becomes unlocked we can
		 * simply return.
		 */
		if (x == VPB_UNBUSIED ||
		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
		    m->object != obj || m->pindex != pindex) {
			sleepq_release(m);
			return (false);
		}
		if ((x & VPB_BIT_WAITERS) != 0)
			break;
	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
	if (locked)
		VM_OBJECT_DROP(obj);
	DROP_GIANT();
	sleepq_add(m, NULL, wmesg, 0, 0);
	sleepq_wait(m, PVM);
	PICKUP_GIANT();
	return (true);
}

/*
 * vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	vm_object_t obj;
	u_int x;

	obj = m->object;
	x = vm_page_busy_fetch(m);
	for (;;) {
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		/*
		 * Reduce the window for transient busies that will trigger
		 * false negatives in vm_page_ps_test().
		 */
		if (obj != NULL && vm_object_busied(obj))
			return (0);
		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    x + VPB_ONE_SHARER))
			break;
	}

	/* Refetch the object now that we're guaranteed that it is stable. */
	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_sunbusy(m);
		return (0);
	}
	return (1);
}

/*
 * vm_page_tryxbusy:
 *
 *	Try to exclusive busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_tryxbusy(vm_page_t m)
{
	vm_object_t obj;

	if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
	    VPB_CURTHREAD_EXCLUSIVE) == 0)
		return (0);

	obj = m->object;
	if (obj != NULL && vm_object_busied(obj)) {
		vm_page_xunbusy(m);
		return (0);
	}
	return (1);
}

static void
vm_page_xunbusy_hard_tail(vm_page_t m)
{
	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* Wake the waiter. */
	wakeup(m);
}

/*
 * vm_page_xunbusy_hard:
 *
 *	Called when unbusy has failed because there is a waiter.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{
	vm_page_assert_xbusied(m);
	vm_page_xunbusy_hard_tail(m);
}

void
vm_page_xunbusy_hard_unchecked(vm_page_t m)
{
	vm_page_assert_xbusied_unchecked(m);
	vm_page_xunbusy_hard_tail(m);
}

static void
vm_page_busy_free(vm_page_t m)
{
	u_int x;

	atomic_thread_fence_rel();
	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);
}

/*
 * vm_page_unhold_pages:
 *
 *	Unhold each of the pages that are referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{

	for (; count != 0; count--) {
		vm_page_unwire(*ma, PQ_ACTIVE);
		ma++;
	}
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}
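
/*
 * Worked example for the VM_PHYSSEG_DENSE case (illustrative numbers
 * only): with 4KB pages and first_page == 0x100, the physical address
 * 0x123000 has page index atop(0x123000) == 0x123, so it resolves to
 * &vm_page_array[0x123 - 0x100], i.e. vm_page_array entry 0x23.
 */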

/*
 * vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->a.queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	/* Fictitious pages are unevictable. */
	m->ref_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 * vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	vm_page_assert_xbusied(m);
	vm_page_busy_free(m);
	uma_zfree(fakepg_zone, m);
}

/*
 * vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}
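
/*
 * Sketch of the fictitious-page lifecycle (hypothetical device-pager
 * style usage, not code from this file):
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
 *	...				// map and use the page
 *	vm_page_updatefake(m, newpaddr, VM_MEMATTR_UNCACHEABLE);
 *	...
 *	vm_page_putfake(m);		// page must still be xbusied, as
 *					// vm_page_putfake() asserts
 */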

/*
 * vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from a getpages request that
 * was optionally read ahead or behind.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	/* We shouldn't put invalid pages on queues. */
	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);
	vm_page_xunbusy_unchecked(m);
}

/*
 * Destroy the identity of an invalid page and free it if possible.
 * This is intended to be used when reading a page from backing store fails.
 */
void
vm_page_free_invalid(vm_page_t m)
{

	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
	KASSERT(m->object != NULL, ("page %p has no object", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * We may be attempting to free the page as part of the handling for an
	 * I/O error, in which case the page was xbusied by a different thread.
	 */
	vm_page_xbusy_claim(m);

	/*
	 * If someone has wired this page while the object lock
	 * was not held, then the thread that unwires is responsible
	 * for freeing the page.  Otherwise just free the page now.
	 * The wire count of this unmapped page cannot change while
	 * we have the page xbusy and the page's object wlocked.
	 */
	if (vm_page_remove(m))
		vm_page_free(m);
}

/*
 * vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* Refer to this operation by its public name. */
	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));
}

/*
 * vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	vm_page_t msucc;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page.
	 */
	m->object = object;
	m->pindex = pindex;
	m->ref_count |= VPRC_OBJREF;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {
		m->object = NULL;
		m->pindex = 0;
		m->ref_count &= ~VPRC_OBJREF;
		return (1);
	}
	vm_page_insert_radixdone(m, object, mpred);
	vm_pager_page_inserted(object, m);
	return (0);
}

/*
 * vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's generation count.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 * Do the work to remove a page from its object.  The caller is responsible for
 * updating the page's fields to reflect this removal.
 */
static void
vm_page_object_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mrem __diagused;

	vm_page_assert_xbusied(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("page %p is missing its object ref", m));

	/* Deferred free of swap space. */
	if ((m->a.flags & PGA_SWAP_FREE) != 0)
		vm_pager_page_unswapped(m);

	vm_pager_page_removed(object, m);

	m->object = NULL;
	mrem = vm_radix_remove(&object->rtree, m->pindex);
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);
}

/*
 * vm_page_remove:
 *
 *	Removes the specified page from its containing object, but does not
 *	invalidate any backing storage.  Returns true if the object's reference
 *	was the last reference to the page, and false otherwise.
 *
 *	The object must be locked and the page must be exclusively busied.
 *	The exclusive busy will be released on return.  If this is not the
 *	final ref and the caller does not hold a wire reference it may not
 *	continue to access the page.
 */
bool
vm_page_remove(vm_page_t m)
{
	bool dropped;

	dropped = vm_page_remove_xbusy(m);
	vm_page_xunbusy(m);

	return (dropped);
}

/*
 * vm_page_remove_xbusy
 *
 *	Removes the page but leaves the xbusy held.  Returns true if this
 *	removed the final ref and false otherwise.
 */
bool
vm_page_remove_xbusy(vm_page_t m)
{

	vm_page_object_remove(m);
	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
}

/*
 * vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 * vm_page_lookup_unlocked:
 *
 *	Returns the page associated with the object/offset pair specified;
 *	if none is found, NULL is returned.  The page may no longer be
 *	present in the object at the time that this function returns.  Only
 *	useful for opportunistic checks such as inmem().
 */
vm_page_t
vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
{

	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
}

/*
 * vm_page_relookup:
 *
 *	Returns a page that must already have been busied by
 *	the caller.  Used for bogus page replacement.
 */
vm_page_t
vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
	    m->object == object && m->pindex == pindex,
	    ("vm_page_relookup: Invalid page %p", m));
	return (m);
}

/*
 * This should only be used by lockless functions for releasing transient
 * incorrect acquires.  The page may have been freed after we acquired a
 * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
 * further to do.
 */
static void
vm_page_busy_release(vm_page_t m)
{
	u_int x;

	x = vm_page_busy_fetch(m);
	for (;;) {
		if (x == VPB_FREED)
			break;
		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & VPB_BIT_SHARED) != 0 ||
		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
		    ("vm_page_busy_release: %p xbusy not owned.", m));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
		if ((x & VPB_BIT_WAITERS) != 0)
			wakeup(m);
		break;
	}
}

/*
 * vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * Both pages must be exclusively busied on enter.  The old page is
 * unbusied on exit.
 *
 * A return value of true means mold is now free.  If this is not the
 * final ref and the caller does not hold a wire reference it may not
 * continue to access the page.
 */
static bool
vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret __diagused;
	bool dropped;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_assert_xbusied(mold);
	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
	    ("vm_page_replace: page %p already in object", mnew));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  See those functions for more detailed comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
	mret = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
	KASSERT((mold->oflags & VPO_UNMANAGED) ==
	    (mnew->oflags & VPO_UNMANAGED),
	    ("vm_page_replace: mismatched VPO_UNMANAGED"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);
	mold->object = NULL;

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but the generation count should
	 * change if the page is dirty.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
	vm_page_xunbusy(mold);

	return (dropped);
}

void
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{

	vm_page_assert_xbusied(mnew);

	if (vm_page_replace_hold(mnew, object, pindex, mold))
		vm_page_free(mold);
}

/*
 * vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.
 *	We have to do this for several reasons: (1) we aren't freeing the
 *	page, (2) we are dirtying the page, (3) the VM system is probably
 *	moving the page from object A to B, and will then later move
 *	the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	fact that we moved it, and because we may be invalidating
 *	swap.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));
	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on mpred and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_object_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;

	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	vm_pager_page_inserted(new_object, m);
	return (0);
}

/*
 * vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{

	return (vm_page_alloc_after(object, pindex, req,
	    vm_radix_lookup_le(&object->rtree, pindex)));
}

vm_page_t
vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
    int req)
{

	return (vm_page_alloc_domain_after(object, pindex, domain, req,
	    vm_radix_lookup_le(&object->rtree, pindex)));
}

/*
 * Allocate a page in the specified object with the given page index.  To
 * optimize insertion of the page into the object, the caller must also
 * specify the resident page in the object with largest index smaller than
 * the given page index, or NULL if no such page exists.
 */
vm_page_t
vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
    int req, vm_page_t mpred)
{
	struct vm_domainset_iter di;
	vm_page_t m;
	int domain;

	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
	do {
		m = vm_page_alloc_domain_after(object, pindex, domain, req,
		    mpred);
		if (m != NULL)
			break;
	} while (vm_domainset_iter_page(&di, object, &domain) == 0);

	return (m);
}
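
/*
 * Typical usage (hypothetical caller, not code from this file):
 * allocate a wired page, preferably already zeroed, at the first
 * index of a write-locked object:
 *
 *	VM_OBJECT_WLOCK(obj);
 *	m = vm_page_alloc(obj, 0,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	VM_OBJECT_WUNLOCK(obj);
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 *
 * VM_ALLOC_ZERO is only a preference; callers must check PG_ZERO
 * before relying on zeroed contents.
 */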
1997 */ 1998 req_class = req & VM_ALLOC_CLASS_MASK; 1999 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2000 req_class = VM_ALLOC_SYSTEM; 2001 return (_vm_domain_allocate(vmd, req_class, npages)); 2002 } 2003 2004 vm_page_t 2005 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, 2006 int req, vm_page_t mpred) 2007 { 2008 struct vm_domain *vmd; 2009 vm_page_t m; 2010 int flags; 2011 2012 #define VPA_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \ 2013 VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY | \ 2014 VM_ALLOC_SBUSY | VM_ALLOC_WIRED | \ 2015 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | VM_ALLOC_COUNT_MASK) 2016 KASSERT((req & ~VPA_FLAGS) == 0, 2017 ("invalid request %#x", req)); 2018 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2019 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2020 ("invalid request %#x", req)); 2021 KASSERT(mpred == NULL || mpred->pindex < pindex, 2022 ("mpred %p doesn't precede pindex 0x%jx", mpred, 2023 (uintmax_t)pindex)); 2024 VM_OBJECT_ASSERT_WLOCKED(object); 2025 2026 flags = 0; 2027 m = NULL; 2028 if (!vm_pager_can_alloc_page(object, pindex)) 2029 return (NULL); 2030 again: 2031 #if VM_NRESERVLEVEL > 0 2032 /* 2033 * Can we allocate the page from a reservation? 2034 */ 2035 if (vm_object_reserv(object) && 2036 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != 2037 NULL) { 2038 goto found; 2039 } 2040 #endif 2041 vmd = VM_DOMAIN(domain); 2042 if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) { 2043 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone, 2044 M_NOWAIT | M_NOVM); 2045 if (m != NULL) { 2046 flags |= PG_PCPU_CACHE; 2047 goto found; 2048 } 2049 } 2050 if (vm_domain_allocate(vmd, req, 1)) { 2051 /* 2052 * If not, allocate it from the free page queues. 2053 */ 2054 vm_domain_free_lock(vmd); 2055 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0); 2056 vm_domain_free_unlock(vmd); 2057 if (m == NULL) { 2058 vm_domain_freecnt_inc(vmd, 1); 2059 #if VM_NRESERVLEVEL > 0 2060 if (vm_reserv_reclaim_inactive(domain)) 2061 goto again; 2062 #endif 2063 } 2064 } 2065 if (m == NULL) { 2066 /* 2067 * Not allocatable, give up. 2068 */ 2069 if (vm_domain_alloc_fail(vmd, object, req)) 2070 goto again; 2071 return (NULL); 2072 } 2073 2074 /* 2075 * At this point we had better have found a good page. 2076 */ 2077 found: 2078 vm_page_dequeue(m); 2079 vm_page_alloc_check(m); 2080 2081 /* 2082 * Initialize the page. Only the PG_ZERO flag is inherited. 2083 */ 2084 flags |= m->flags & PG_ZERO; 2085 if ((req & VM_ALLOC_NODUMP) != 0) 2086 flags |= PG_NODUMP; 2087 m->flags = flags; 2088 m->a.flags = 0; 2089 m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; 2090 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) 2091 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2092 else if ((req & VM_ALLOC_SBUSY) != 0) 2093 m->busy_lock = VPB_SHARERS_WORD(1); 2094 else 2095 m->busy_lock = VPB_UNBUSIED; 2096 if (req & VM_ALLOC_WIRED) { 2097 vm_wire_add(1); 2098 m->ref_count = 1; 2099 } 2100 m->a.act_count = 0; 2101 2102 if (vm_page_insert_after(m, object, pindex, mpred)) { 2103 if (req & VM_ALLOC_WIRED) { 2104 vm_wire_sub(1); 2105 m->ref_count = 0; 2106 } 2107 KASSERT(m->object == NULL, ("page %p has object", m)); 2108 m->oflags = VPO_UNMANAGED; 2109 m->busy_lock = VPB_UNBUSIED; 2110 /* Don't change PG_ZERO. 
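A page that was zeroed goes back to the free pool still zeroed, so the flag remains a valid hint for the next allocation.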
*/ 2111 vm_page_free_toq(m); 2112 if (req & VM_ALLOC_WAITFAIL) { 2113 VM_OBJECT_WUNLOCK(object); 2114 vm_radix_wait(); 2115 VM_OBJECT_WLOCK(object); 2116 } 2117 return (NULL); 2118 } 2119 2120 /* Ignore device objects; the pager sets "memattr" for them. */ 2121 if (object->memattr != VM_MEMATTR_DEFAULT && 2122 (object->flags & OBJ_FICTITIOUS) == 0) 2123 pmap_page_set_memattr(m, object->memattr); 2124 2125 return (m); 2126 } 2127 2128 /* 2129 * vm_page_alloc_contig: 2130 * 2131 * Allocate a contiguous set of physical pages of the given size "npages" 2132 * from the free lists. All of the physical pages must be at or above 2133 * the given physical address "low" and below the given physical address 2134 * "high". The given value "alignment" determines the alignment of the 2135 * first physical page in the set. If the given value "boundary" is 2136 * non-zero, then the set of physical pages cannot cross any physical 2137 * address boundary that is a multiple of that value. Both "alignment" 2138 * and "boundary" must be a power of two. 2139 * 2140 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT, 2141 * then the memory attribute setting for the physical pages is configured 2142 * to the object's memory attribute setting. Otherwise, the memory 2143 * attribute setting for the physical pages is configured to "memattr", 2144 * overriding the object's memory attribute setting. However, if the 2145 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the 2146 * memory attribute setting for the physical pages cannot be configured 2147 * to VM_MEMATTR_DEFAULT. 2148 * 2149 * The specified object may not contain fictitious pages. 2150 * 2151 * The caller must always specify an allocation class. 2152 * 2153 * allocation classes: 2154 * VM_ALLOC_NORMAL normal process request 2155 * VM_ALLOC_SYSTEM system *really* needs a page 2156 * VM_ALLOC_INTERRUPT interrupt time request 2157 * 2158 * optional allocation flags: 2159 * VM_ALLOC_NOBUSY do not exclusive busy the page 2160 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 2161 * VM_ALLOC_SBUSY shared busy the allocated page 2162 * VM_ALLOC_WIRED wire the allocated page 2163 * VM_ALLOC_ZERO prefer a zeroed page 2164 */ 2165 vm_page_t 2166 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, 2167 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2168 vm_paddr_t boundary, vm_memattr_t memattr) 2169 { 2170 struct vm_domainset_iter di; 2171 vm_page_t m; 2172 int domain; 2173 2174 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 2175 do { 2176 m = vm_page_alloc_contig_domain(object, pindex, domain, req, 2177 npages, low, high, alignment, boundary, memattr); 2178 if (m != NULL) 2179 break; 2180 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 2181 2182 return (m); 2183 } 2184 2185 static vm_page_t 2186 vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low, 2187 vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 2188 { 2189 struct vm_domain *vmd; 2190 vm_page_t m_ret; 2191 2192 /* 2193 * Can we allocate the pages without the number of free pages falling 2194 * below the lower bound for the allocation class? 2195 */ 2196 vmd = VM_DOMAIN(domain); 2197 if (!vm_domain_allocate(vmd, req, npages)) 2198 return (NULL); 2199 /* 2200 * Try to allocate the pages from the free page queues. 
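 * If that fails and VM_ALLOC_NORECLAIM was not specified, fall back to
 * breaking a reservation; otherwise undo the free-count reservation
 * and give up.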
2201 */ 2202 vm_domain_free_lock(vmd); 2203 m_ret = vm_phys_alloc_contig(domain, npages, low, high, 2204 alignment, boundary); 2205 vm_domain_free_unlock(vmd); 2206 if (m_ret != NULL) 2207 return (m_ret); 2208 #if VM_NRESERVLEVEL > 0 2209 /* 2210 * Try to break a reservation to allocate the pages. 2211 */ 2212 if ((req & VM_ALLOC_NORECLAIM) == 0) { 2213 m_ret = vm_reserv_reclaim_contig(domain, npages, low, 2214 high, alignment, boundary); 2215 if (m_ret != NULL) 2216 return (m_ret); 2217 } 2218 #endif 2219 vm_domain_freecnt_inc(vmd, npages); 2220 return (NULL); 2221 } 2222 2223 vm_page_t 2224 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2225 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2226 vm_paddr_t boundary, vm_memattr_t memattr) 2227 { 2228 vm_page_t m, m_ret, mpred; 2229 u_int busy_lock, flags, oflags; 2230 2231 #define VPAC_FLAGS (VPA_FLAGS | VM_ALLOC_NORECLAIM) 2232 KASSERT((req & ~VPAC_FLAGS) == 0, 2233 ("invalid request %#x", req)); 2234 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2235 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2236 ("invalid request %#x", req)); 2237 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) != 2238 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM), 2239 ("invalid request %#x", req)); 2240 VM_OBJECT_ASSERT_WLOCKED(object); 2241 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, 2242 ("vm_page_alloc_contig: object %p has fictitious pages", 2243 object)); 2244 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2245 2246 mpred = vm_radix_lookup_le(&object->rtree, pindex); 2247 KASSERT(mpred == NULL || mpred->pindex != pindex, 2248 ("vm_page_alloc_contig: pindex already allocated")); 2249 for (;;) { 2250 #if VM_NRESERVLEVEL > 0 2251 /* 2252 * Can we allocate the pages from a reservation? 2253 */ 2254 if (vm_object_reserv(object) && 2255 (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req, 2256 mpred, npages, low, high, alignment, boundary)) != NULL) { 2257 break; 2258 } 2259 #endif 2260 if ((m_ret = vm_page_find_contig_domain(domain, req, npages, 2261 low, high, alignment, boundary)) != NULL) 2262 break; 2263 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), object, req)) 2264 return (NULL); 2265 } 2266 for (m = m_ret; m < &m_ret[npages]; m++) { 2267 vm_page_dequeue(m); 2268 vm_page_alloc_check(m); 2269 } 2270 2271 /* 2272 * Initialize the pages. Only the PG_ZERO flag is inherited. 2273 */ 2274 flags = PG_ZERO; 2275 if ((req & VM_ALLOC_NODUMP) != 0) 2276 flags |= PG_NODUMP; 2277 oflags = (object->flags & OBJ_UNMANAGED) != 0 ? 
VPO_UNMANAGED : 0; 2278 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) 2279 busy_lock = VPB_CURTHREAD_EXCLUSIVE; 2280 else if ((req & VM_ALLOC_SBUSY) != 0) 2281 busy_lock = VPB_SHARERS_WORD(1); 2282 else 2283 busy_lock = VPB_UNBUSIED; 2284 if ((req & VM_ALLOC_WIRED) != 0) 2285 vm_wire_add(npages); 2286 if (object->memattr != VM_MEMATTR_DEFAULT && 2287 memattr == VM_MEMATTR_DEFAULT) 2288 memattr = object->memattr; 2289 for (m = m_ret; m < &m_ret[npages]; m++) { 2290 m->a.flags = 0; 2291 m->flags = (m->flags | PG_NODUMP) & flags; 2292 m->busy_lock = busy_lock; 2293 if ((req & VM_ALLOC_WIRED) != 0) 2294 m->ref_count = 1; 2295 m->a.act_count = 0; 2296 m->oflags = oflags; 2297 if (vm_page_insert_after(m, object, pindex, mpred)) { 2298 if ((req & VM_ALLOC_WIRED) != 0) 2299 vm_wire_sub(npages); 2300 KASSERT(m->object == NULL, 2301 ("page %p has object", m)); 2302 mpred = m; 2303 for (m = m_ret; m < &m_ret[npages]; m++) { 2304 if (m <= mpred && 2305 (req & VM_ALLOC_WIRED) != 0) 2306 m->ref_count = 0; 2307 m->oflags = VPO_UNMANAGED; 2308 m->busy_lock = VPB_UNBUSIED; 2309 /* Don't change PG_ZERO. */ 2310 vm_page_free_toq(m); 2311 } 2312 if (req & VM_ALLOC_WAITFAIL) { 2313 VM_OBJECT_WUNLOCK(object); 2314 vm_radix_wait(); 2315 VM_OBJECT_WLOCK(object); 2316 } 2317 return (NULL); 2318 } 2319 mpred = m; 2320 if (memattr != VM_MEMATTR_DEFAULT) 2321 pmap_page_set_memattr(m, memattr); 2322 pindex++; 2323 } 2324 return (m_ret); 2325 } 2326 2327 /* 2328 * Allocate a physical page that is not intended to be inserted into a VM 2329 * object. If the "freelist" parameter is not equal to VM_NFREELIST, then only 2330 * pages from the specified vm_phys freelist will be returned. 2331 */ 2332 static __always_inline vm_page_t 2333 _vm_page_alloc_noobj_domain(int domain, const int freelist, int req) 2334 { 2335 struct vm_domain *vmd; 2336 vm_page_t m; 2337 int flags; 2338 2339 #define VPAN_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \ 2340 VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | \ 2341 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | \ 2342 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | VM_ALLOC_COUNT_MASK) 2343 KASSERT((req & ~VPAN_FLAGS) == 0, 2344 ("invalid request %#x", req)); 2345 2346 flags = (req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0; 2347 vmd = VM_DOMAIN(domain); 2348 again: 2349 if (freelist == VM_NFREELIST && 2350 vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) { 2351 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone, 2352 M_NOWAIT | M_NOVM); 2353 if (m != NULL) { 2354 flags |= PG_PCPU_CACHE; 2355 goto found; 2356 } 2357 } 2358 2359 if (vm_domain_allocate(vmd, req, 1)) { 2360 vm_domain_free_lock(vmd); 2361 if (freelist == VM_NFREELIST) 2362 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0); 2363 else 2364 m = vm_phys_alloc_freelist_pages(domain, freelist, 2365 VM_FREEPOOL_DIRECT, 0); 2366 vm_domain_free_unlock(vmd); 2367 if (m == NULL) { 2368 vm_domain_freecnt_inc(vmd, 1); 2369 #if VM_NRESERVLEVEL > 0 2370 if (freelist == VM_NFREELIST && 2371 vm_reserv_reclaim_inactive(domain)) 2372 goto again; 2373 #endif 2374 } 2375 } 2376 if (m == NULL) { 2377 if (vm_domain_alloc_fail(vmd, NULL, req)) 2378 goto again; 2379 return (NULL); 2380 } 2381 2382 found: 2383 vm_page_dequeue(m); 2384 vm_page_alloc_check(m); 2385 2386 /* 2387 * Consumers should not rely on a useful default pindex value. 
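 * The poison pattern stored below makes any such reliance fail loudly
 * rather than silently.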
2388 */ 2389 m->pindex = 0xdeadc0dedeadc0de; 2390 m->flags = (m->flags & PG_ZERO) | flags; 2391 m->a.flags = 0; 2392 m->oflags = VPO_UNMANAGED; 2393 m->busy_lock = VPB_UNBUSIED; 2394 if ((req & VM_ALLOC_WIRED) != 0) { 2395 vm_wire_add(1); 2396 m->ref_count = 1; 2397 } 2398 2399 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) 2400 pmap_zero_page(m); 2401 2402 return (m); 2403 } 2404 2405 vm_page_t 2406 vm_page_alloc_freelist(int freelist, int req) 2407 { 2408 struct vm_domainset_iter di; 2409 vm_page_t m; 2410 int domain; 2411 2412 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2413 do { 2414 m = vm_page_alloc_freelist_domain(domain, freelist, req); 2415 if (m != NULL) 2416 break; 2417 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2418 2419 return (m); 2420 } 2421 2422 vm_page_t 2423 vm_page_alloc_freelist_domain(int domain, int freelist, int req) 2424 { 2425 KASSERT(freelist >= 0 && freelist < VM_NFREELIST, 2426 ("%s: invalid freelist %d", __func__, freelist)); 2427 2428 return (_vm_page_alloc_noobj_domain(domain, freelist, req)); 2429 } 2430 2431 vm_page_t 2432 vm_page_alloc_noobj(int req) 2433 { 2434 struct vm_domainset_iter di; 2435 vm_page_t m; 2436 int domain; 2437 2438 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2439 do { 2440 m = vm_page_alloc_noobj_domain(domain, req); 2441 if (m != NULL) 2442 break; 2443 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2444 2445 return (m); 2446 } 2447 2448 vm_page_t 2449 vm_page_alloc_noobj_domain(int domain, int req) 2450 { 2451 return (_vm_page_alloc_noobj_domain(domain, VM_NFREELIST, req)); 2452 } 2453 2454 vm_page_t 2455 vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low, 2456 vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 2457 vm_memattr_t memattr) 2458 { 2459 struct vm_domainset_iter di; 2460 vm_page_t m; 2461 int domain; 2462 2463 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2464 do { 2465 m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low, 2466 high, alignment, boundary, memattr); 2467 if (m != NULL) 2468 break; 2469 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2470 2471 return (m); 2472 } 2473 2474 vm_page_t 2475 vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages, 2476 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 2477 vm_memattr_t memattr) 2478 { 2479 vm_page_t m, m_ret; 2480 u_int flags; 2481 2482 #define VPANC_FLAGS (VPAN_FLAGS | VM_ALLOC_NORECLAIM) 2483 KASSERT((req & ~VPANC_FLAGS) == 0, 2484 ("invalid request %#x", req)); 2485 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) != 2486 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM), 2487 ("invalid request %#x", req)); 2488 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2489 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2490 ("invalid request %#x", req)); 2491 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2492 2493 while ((m_ret = vm_page_find_contig_domain(domain, req, npages, 2494 low, high, alignment, boundary)) == NULL) { 2495 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req)) 2496 return (NULL); 2497 } 2498 2499 /* 2500 * Initialize the pages. Only the PG_ZERO flag is inherited. 
2501 */ 2502 flags = PG_ZERO; 2503 if ((req & VM_ALLOC_NODUMP) != 0) 2504 flags |= PG_NODUMP; 2505 if ((req & VM_ALLOC_WIRED) != 0) 2506 vm_wire_add(npages); 2507 for (m = m_ret; m < &m_ret[npages]; m++) { 2508 vm_page_dequeue(m); 2509 vm_page_alloc_check(m); 2510 2511 /* 2512 * Consumers should not rely on a useful default pindex value. 2513 */ 2514 m->pindex = 0xdeadc0dedeadc0de; 2515 m->a.flags = 0; 2516 m->flags = (m->flags | PG_NODUMP) & flags; 2517 m->busy_lock = VPB_UNBUSIED; 2518 if ((req & VM_ALLOC_WIRED) != 0) 2519 m->ref_count = 1; 2520 m->a.act_count = 0; 2521 m->oflags = VPO_UNMANAGED; 2522 2523 /* 2524 * Zero the page before updating any mappings since the page is 2525 * not yet shared with any devices which might require the 2526 * non-default memory attribute. pmap_page_set_memattr() 2527 * flushes data caches before returning. 2528 */ 2529 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) 2530 pmap_zero_page(m); 2531 if (memattr != VM_MEMATTR_DEFAULT) 2532 pmap_page_set_memattr(m, memattr); 2533 } 2534 return (m_ret); 2535 } 2536 2537 /* 2538 * Check a page that has been freshly dequeued from a freelist. 2539 */ 2540 static void 2541 vm_page_alloc_check(vm_page_t m) 2542 { 2543 2544 KASSERT(m->object == NULL, ("page %p has object", m)); 2545 KASSERT(m->a.queue == PQ_NONE && 2546 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 2547 ("page %p has unexpected queue %d, flags %#x", 2548 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK))); 2549 KASSERT(m->ref_count == 0, ("page %p has references", m)); 2550 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m)); 2551 KASSERT(m->dirty == 0, ("page %p is dirty", m)); 2552 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 2553 ("page %p has unexpected memattr %d", 2554 m, pmap_page_get_memattr(m))); 2555 KASSERT(vm_page_none_valid(m), ("free page %p is valid", m)); 2556 pmap_vm_page_alloc_check(m); 2557 } 2558 2559 static int 2560 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags) 2561 { 2562 struct vm_domain *vmd; 2563 struct vm_pgcache *pgcache; 2564 int i; 2565 2566 pgcache = arg; 2567 vmd = VM_DOMAIN(pgcache->domain); 2568 2569 /* 2570 * The page daemon should avoid creating extra memory pressure since its 2571 * main purpose is to replenish the store of free pages. 2572 */ 2573 if (vmd->vmd_severeset || curproc == pageproc || 2574 !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt)) 2575 return (0); 2576 domain = vmd->vmd_domain; 2577 vm_domain_free_lock(vmd); 2578 i = vm_phys_alloc_npages(domain, pgcache->pool, cnt, 2579 (vm_page_t *)store); 2580 vm_domain_free_unlock(vmd); 2581 if (cnt != i) 2582 vm_domain_freecnt_inc(vmd, cnt - i); 2583 2584 return (i); 2585 } 2586 2587 static void 2588 vm_page_zone_release(void *arg, void **store, int cnt) 2589 { 2590 struct vm_domain *vmd; 2591 struct vm_pgcache *pgcache; 2592 vm_page_t m; 2593 int i; 2594 2595 pgcache = arg; 2596 vmd = VM_DOMAIN(pgcache->domain); 2597 vm_domain_free_lock(vmd); 2598 for (i = 0; i < cnt; i++) { 2599 m = (vm_page_t)store[i]; 2600 vm_phys_free_pages(m, 0); 2601 } 2602 vm_domain_free_unlock(vmd); 2603 vm_domain_freecnt_inc(vmd, cnt); 2604 } 2605 2606 #define VPSC_ANY 0 /* No restrictions. */ 2607 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ 2608 #define VPSC_NOSUPER 2 /* Skip superpages. 
*/ 2609 2610 /* 2611 * vm_page_scan_contig: 2612 * 2613 * Scan vm_page_array[] between the specified entries "m_start" and 2614 * "m_end" for a run of contiguous physical pages that satisfy the 2615 * specified conditions, and return the lowest page in the run. The 2616 * specified "alignment" determines the alignment of the lowest physical 2617 * page in the run. If the specified "boundary" is non-zero, then the 2618 * run of physical pages cannot span a physical address that is a 2619 * multiple of "boundary". 2620 * 2621 * "m_end" is never dereferenced, so it need not point to a vm_page 2622 * structure within vm_page_array[]. 2623 * 2624 * "npages" must be greater than zero. "m_start" and "m_end" must not 2625 * span a hole (or discontiguity) in the physical address space. Both 2626 * "alignment" and "boundary" must be a power of two. 2627 */ 2628 static vm_page_t 2629 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, 2630 u_long alignment, vm_paddr_t boundary, int options) 2631 { 2632 vm_object_t object; 2633 vm_paddr_t pa; 2634 vm_page_t m, m_run; 2635 #if VM_NRESERVLEVEL > 0 2636 int level; 2637 #endif 2638 int m_inc, order, run_ext, run_len; 2639 2640 KASSERT(npages > 0, ("npages is 0")); 2641 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2642 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2643 m_run = NULL; 2644 run_len = 0; 2645 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { 2646 KASSERT((m->flags & PG_MARKER) == 0, 2647 ("page %p is PG_MARKER", m)); 2648 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1, 2649 ("fictitious page %p has invalid ref count", m)); 2650 2651 /* 2652 * If the current page would be the start of a run, check its 2653 * physical address against the end, alignment, and boundary 2654 * conditions. If it doesn't satisfy these conditions, either 2655 * terminate the scan or advance to the next page that 2656 * satisfies the failed condition. 2657 */ 2658 if (run_len == 0) { 2659 KASSERT(m_run == NULL, ("m_run != NULL")); 2660 if (m + npages > m_end) 2661 break; 2662 pa = VM_PAGE_TO_PHYS(m); 2663 if (!vm_addr_align_ok(pa, alignment)) { 2664 m_inc = atop(roundup2(pa, alignment) - pa); 2665 continue; 2666 } 2667 if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) { 2668 m_inc = atop(roundup2(pa, boundary) - pa); 2669 continue; 2670 } 2671 } else 2672 KASSERT(m_run != NULL, ("m_run == NULL")); 2673 2674 retry: 2675 m_inc = 1; 2676 if (vm_page_wired(m)) 2677 run_ext = 0; 2678 #if VM_NRESERVLEVEL > 0 2679 else if ((level = vm_reserv_level(m)) >= 0 && 2680 (options & VPSC_NORESERV) != 0) { 2681 run_ext = 0; 2682 /* Advance to the end of the reservation. */ 2683 pa = VM_PAGE_TO_PHYS(m); 2684 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2685 pa); 2686 } 2687 #endif 2688 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2689 /* 2690 * The page is considered eligible for relocation if 2691 * and only if it could be laundered or reclaimed by 2692 * the page daemon. 2693 */ 2694 VM_OBJECT_RLOCK(object); 2695 if (object != m->object) { 2696 VM_OBJECT_RUNLOCK(object); 2697 goto retry; 2698 } 2699 /* Don't care: PG_NODUMP, PG_ZERO. */ 2700 if ((object->flags & OBJ_SWAP) == 0 && 2701 object->type != OBJT_VNODE) { 2702 run_ext = 0; 2703 #if VM_NRESERVLEVEL > 0 2704 } else if ((options & VPSC_NOSUPER) != 0 && 2705 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2706 run_ext = 0; 2707 /* Advance to the end of the superpage. 
*/ 2708 pa = VM_PAGE_TO_PHYS(m); 2709 m_inc = atop(roundup2(pa + 1, 2710 vm_reserv_size(level)) - pa); 2711 #endif 2712 } else if (object->memattr == VM_MEMATTR_DEFAULT && 2713 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { 2714 /* 2715 * The page is allocated but eligible for 2716 * relocation. Extend the current run by one 2717 * page. 2718 */ 2719 KASSERT(pmap_page_get_memattr(m) == 2720 VM_MEMATTR_DEFAULT, 2721 ("page %p has an unexpected memattr", m)); 2722 KASSERT((m->oflags & (VPO_SWAPINPROG | 2723 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2724 ("page %p has unexpected oflags", m)); 2725 /* Don't care: PGA_NOSYNC. */ 2726 run_ext = 1; 2727 } else 2728 run_ext = 0; 2729 VM_OBJECT_RUNLOCK(object); 2730 #if VM_NRESERVLEVEL > 0 2731 } else if (level >= 0) { 2732 /* 2733 * The page is reserved but not yet allocated. In 2734 * other words, it is still free. Extend the current 2735 * run by one page. 2736 */ 2737 run_ext = 1; 2738 #endif 2739 } else if ((order = m->order) < VM_NFREEORDER) { 2740 /* 2741 * The page is enqueued in the physical memory 2742 * allocator's free page queues. Moreover, it is the 2743 * first page in a power-of-two-sized run of 2744 * contiguous free pages. Add these pages to the end 2745 * of the current run, and jump ahead. 2746 */ 2747 run_ext = 1 << order; 2748 m_inc = 1 << order; 2749 } else { 2750 /* 2751 * Skip the page for one of the following reasons: (1) 2752 * It is enqueued in the physical memory allocator's 2753 * free page queues. However, it is not the first 2754 * page in a run of contiguous free pages. (This case 2755 * rarely occurs because the scan is performed in 2756 * ascending order.) (2) It is not reserved, and it is 2757 * transitioning from free to allocated. (Conversely, 2758 * the transition from allocated to free for managed 2759 * pages is blocked by the page busy lock.) (3) It is 2760 * allocated but not contained by an object and not 2761 * wired, e.g., allocated by Xen's balloon driver. 2762 */ 2763 run_ext = 0; 2764 } 2765 2766 /* 2767 * Extend or reset the current run of pages. 2768 */ 2769 if (run_ext > 0) { 2770 if (run_len == 0) 2771 m_run = m; 2772 run_len += run_ext; 2773 } else { 2774 if (run_len > 0) { 2775 m_run = NULL; 2776 run_len = 0; 2777 } 2778 } 2779 } 2780 if (run_len >= npages) 2781 return (m_run); 2782 return (NULL); 2783 } 2784 2785 /* 2786 * vm_page_reclaim_run: 2787 * 2788 * Try to relocate each of the allocated virtual pages within the 2789 * specified run of physical pages to a new physical address. Free the 2790 * physical pages underlying the relocated virtual pages. A virtual page 2791 * is relocatable if and only if it could be laundered or reclaimed by 2792 * the page daemon. Whenever possible, a virtual page is relocated to a 2793 * physical address above "high". 2794 * 2795 * Returns 0 if every physical page within the run was already free or 2796 * just freed by a successful relocation. Otherwise, returns a non-zero 2797 * value indicating why the last attempt to relocate a virtual page was 2798 * unsuccessful. 2799 * 2800 * "req_class" must be an allocation class. 
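 *
 * In the body below, EBUSY denotes a wired or busy page, EINVAL a page
 * that cannot be relocated (e.g., an unsuitable object type or memory
 * attribute), and ENOMEM a failure to allocate a replacement page.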
2801 */ 2802 static int 2803 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run, 2804 vm_paddr_t high) 2805 { 2806 struct vm_domain *vmd; 2807 struct spglist free; 2808 vm_object_t object; 2809 vm_paddr_t pa; 2810 vm_page_t m, m_end, m_new; 2811 int error, order, req; 2812 2813 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 2814 ("req_class is not an allocation class")); 2815 SLIST_INIT(&free); 2816 error = 0; 2817 m = m_run; 2818 m_end = m_run + npages; 2819 for (; error == 0 && m < m_end; m++) { 2820 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 2821 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 2822 2823 /* 2824 * Racily check for wirings. Races are handled once the object 2825 * lock is held and the page is unmapped. 2826 */ 2827 if (vm_page_wired(m)) 2828 error = EBUSY; 2829 else if ((object = atomic_load_ptr(&m->object)) != NULL) { 2830 /* 2831 * The page is relocated if and only if it could be 2832 * laundered or reclaimed by the page daemon. 2833 */ 2834 VM_OBJECT_WLOCK(object); 2835 /* Don't care: PG_NODUMP, PG_ZERO. */ 2836 if (m->object != object || 2837 ((object->flags & OBJ_SWAP) == 0 && 2838 object->type != OBJT_VNODE)) 2839 error = EINVAL; 2840 else if (object->memattr != VM_MEMATTR_DEFAULT) 2841 error = EINVAL; 2842 else if (vm_page_queue(m) != PQ_NONE && 2843 vm_page_tryxbusy(m) != 0) { 2844 if (vm_page_wired(m)) { 2845 vm_page_xunbusy(m); 2846 error = EBUSY; 2847 goto unlock; 2848 } 2849 KASSERT(pmap_page_get_memattr(m) == 2850 VM_MEMATTR_DEFAULT, 2851 ("page %p has an unexpected memattr", m)); 2852 KASSERT(m->oflags == 0, 2853 ("page %p has unexpected oflags", m)); 2854 /* Don't care: PGA_NOSYNC. */ 2855 if (!vm_page_none_valid(m)) { 2856 /* 2857 * First, try to allocate a new page 2858 * that is above "high". Failing 2859 * that, try to allocate a new page 2860 * that is below "m_run". Allocate 2861 * the new page between the end of 2862 * "m_run" and "high" only as a last 2863 * resort. 2864 */ 2865 req = req_class; 2866 if ((m->flags & PG_NODUMP) != 0) 2867 req |= VM_ALLOC_NODUMP; 2868 if (trunc_page(high) != 2869 ~(vm_paddr_t)PAGE_MASK) { 2870 m_new = 2871 vm_page_alloc_noobj_contig( 2872 req, 1, round_page(high), 2873 ~(vm_paddr_t)0, PAGE_SIZE, 2874 0, VM_MEMATTR_DEFAULT); 2875 } else 2876 m_new = NULL; 2877 if (m_new == NULL) { 2878 pa = VM_PAGE_TO_PHYS(m_run); 2879 m_new = 2880 vm_page_alloc_noobj_contig( 2881 req, 1, 0, pa - 1, 2882 PAGE_SIZE, 0, 2883 VM_MEMATTR_DEFAULT); 2884 } 2885 if (m_new == NULL) { 2886 pa += ptoa(npages); 2887 m_new = 2888 vm_page_alloc_noobj_contig( 2889 req, 1, pa, high, PAGE_SIZE, 2890 0, VM_MEMATTR_DEFAULT); 2891 } 2892 if (m_new == NULL) { 2893 vm_page_xunbusy(m); 2894 error = ENOMEM; 2895 goto unlock; 2896 } 2897 2898 /* 2899 * Unmap the page and check for new 2900 * wirings that may have been acquired 2901 * through a pmap lookup. 2902 */ 2903 if (object->ref_count != 0 && 2904 !vm_page_try_remove_all(m)) { 2905 vm_page_xunbusy(m); 2906 vm_page_free(m_new); 2907 error = EBUSY; 2908 goto unlock; 2909 } 2910 2911 /* 2912 * Replace "m" with the new page. For 2913 * vm_page_replace(), "m" must be busy 2914 * and dequeued. Finally, change "m" 2915 * as if vm_page_free() was called. 
2916 */ 2917 m_new->a.flags = m->a.flags & 2918 ~PGA_QUEUE_STATE_MASK; 2919 KASSERT(m_new->oflags == VPO_UNMANAGED, 2920 ("page %p is managed", m_new)); 2921 m_new->oflags = 0; 2922 pmap_copy_page(m, m_new); 2923 m_new->valid = m->valid; 2924 m_new->dirty = m->dirty; 2925 m->flags &= ~PG_ZERO; 2926 vm_page_dequeue(m); 2927 if (vm_page_replace_hold(m_new, object, 2928 m->pindex, m) && 2929 vm_page_free_prep(m)) 2930 SLIST_INSERT_HEAD(&free, m, 2931 plinks.s.ss); 2932 2933 /* 2934 * The new page must be deactivated 2935 * before the object is unlocked. 2936 */ 2937 vm_page_deactivate(m_new); 2938 } else { 2939 m->flags &= ~PG_ZERO; 2940 vm_page_dequeue(m); 2941 if (vm_page_free_prep(m)) 2942 SLIST_INSERT_HEAD(&free, m, 2943 plinks.s.ss); 2944 KASSERT(m->dirty == 0, 2945 ("page %p is dirty", m)); 2946 } 2947 } else 2948 error = EBUSY; 2949 unlock: 2950 VM_OBJECT_WUNLOCK(object); 2951 } else { 2952 MPASS(vm_page_domain(m) == domain); 2953 vmd = VM_DOMAIN(domain); 2954 vm_domain_free_lock(vmd); 2955 order = m->order; 2956 if (order < VM_NFREEORDER) { 2957 /* 2958 * The page is enqueued in the physical memory 2959 * allocator's free page queues. Moreover, it 2960 * is the first page in a power-of-two-sized 2961 * run of contiguous free pages. Jump ahead 2962 * to the last page within that run, and 2963 * continue from there. 2964 */ 2965 m += (1 << order) - 1; 2966 } 2967 #if VM_NRESERVLEVEL > 0 2968 else if (vm_reserv_is_page_free(m)) 2969 order = 0; 2970 #endif 2971 vm_domain_free_unlock(vmd); 2972 if (order == VM_NFREEORDER) 2973 error = EINVAL; 2974 } 2975 } 2976 if ((m = SLIST_FIRST(&free)) != NULL) { 2977 int cnt; 2978 2979 vmd = VM_DOMAIN(domain); 2980 cnt = 0; 2981 vm_domain_free_lock(vmd); 2982 do { 2983 MPASS(vm_page_domain(m) == domain); 2984 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2985 vm_phys_free_pages(m, 0); 2986 cnt++; 2987 } while ((m = SLIST_FIRST(&free)) != NULL); 2988 vm_domain_free_unlock(vmd); 2989 vm_domain_freecnt_inc(vmd, cnt); 2990 } 2991 return (error); 2992 } 2993 2994 #define NRUNS 16 2995 2996 #define RUN_INDEX(count, nruns) ((count) % (nruns)) 2997 2998 #define MIN_RECLAIM 8 2999 3000 /* 3001 * vm_page_reclaim_contig: 3002 * 3003 * Reclaim allocated, contiguous physical memory satisfying the specified 3004 * conditions by relocating the virtual pages using that physical memory. 3005 * Returns true if reclamation is successful and false otherwise. Since 3006 * relocation requires the allocation of physical pages, reclamation may 3007 * fail due to a shortage of free pages. When reclamation fails, callers 3008 * are expected to perform vm_wait() before retrying a failed allocation 3009 * operation, e.g., vm_page_alloc_contig(). 3010 * 3011 * The caller must always specify an allocation class through "req". 3012 * 3013 * allocation classes: 3014 * VM_ALLOC_NORMAL normal process request 3015 * VM_ALLOC_SYSTEM system *really* needs a page 3016 * VM_ALLOC_INTERRUPT interrupt time request 3017 * 3018 * The optional allocation flags are ignored. 3019 * 3020 * "npages" must be greater than zero. Both "alignment" and "boundary" 3021 * must be a power of two. 
3022 */ 3023 bool 3024 vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages, 3025 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, 3026 int desired_runs) 3027 { 3028 struct vm_domain *vmd; 3029 vm_page_t bounds[2], m_run, _m_runs[NRUNS], *m_runs; 3030 u_long count, minalign, reclaimed; 3031 int error, i, min_reclaim, nruns, options, req_class, segind; 3032 bool ret; 3033 3034 KASSERT(npages > 0, ("npages is 0")); 3035 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 3036 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 3037 3038 ret = false; 3039 3040 /* 3041 * If the caller wants to reclaim multiple runs, try to allocate 3042 * space to store the runs. If that fails, fall back to the old 3043 * behavior of just reclaiming MIN_RECLAIM pages. 3044 */ 3045 if (desired_runs > 1) 3046 m_runs = malloc((NRUNS + desired_runs) * sizeof(*m_runs), 3047 M_TEMP, M_NOWAIT); 3048 else 3049 m_runs = NULL; 3050 3051 if (m_runs == NULL) { 3052 m_runs = _m_runs; 3053 nruns = NRUNS; 3054 } else { 3055 nruns = NRUNS + desired_runs - 1; 3056 } 3057 min_reclaim = MAX(desired_runs * npages, MIN_RECLAIM); 3058 3059 /* 3060 * The caller will attempt an allocation after some runs have been 3061 * reclaimed and added to the vm_phys buddy lists. Due to limitations 3062 * of vm_phys_alloc_contig(), round up the requested length to the next 3063 * power of two or maximum chunk size, and ensure that each run is 3064 * suitably aligned. 3065 */ 3066 minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1); 3067 npages = roundup2(npages, minalign); 3068 if (alignment < ptoa(minalign)) 3069 alignment = ptoa(minalign); 3070 3071 /* 3072 * The page daemon is allowed to dig deeper into the free page list. 3073 */ 3074 req_class = req & VM_ALLOC_CLASS_MASK; 3075 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 3076 req_class = VM_ALLOC_SYSTEM; 3077 3078 /* 3079 * Return if the number of free pages cannot satisfy the requested 3080 * allocation. 3081 */ 3082 vmd = VM_DOMAIN(domain); 3083 count = vmd->vmd_free_count; 3084 if (count < npages + vmd->vmd_free_reserved || (count < npages + 3085 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 3086 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 3087 goto done; 3088 3089 /* 3090 * Scan up to three times, relaxing the restrictions ("options") on 3091 * the reclamation of reservations and superpages each time. 3092 */ 3093 for (options = VPSC_NORESERV;;) { 3094 /* 3095 * Find the highest runs that satisfy the given constraints 3096 * and restrictions, and record them in "m_runs". 3097 */ 3098 count = 0; 3099 segind = vm_phys_lookup_segind(low); 3100 while ((segind = vm_phys_find_range(bounds, segind, domain, 3101 npages, low, high)) != -1) { 3102 while ((m_run = vm_page_scan_contig(npages, bounds[0], 3103 bounds[1], alignment, boundary, options))) { 3104 bounds[0] = m_run + npages; 3105 m_runs[RUN_INDEX(count, nruns)] = m_run; 3106 count++; 3107 } 3108 segind++; 3109 } 3110 3111 /* 3112 * Reclaim the highest runs in LIFO (descending) order until 3113 * the number of reclaimed pages, "reclaimed", is at least 3114 * "min_reclaim". Reset "reclaimed" each time because each 3115 * reclamation is idempotent, and runs will (likely) recur 3116 * from one scan to the next as restrictions are relaxed. 
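 * Note that RUN_INDEX() treats "m_runs" as a ring buffer: if more than
 * "nruns" runs are found, only the "nruns" highest-addressed (most
 * recently recorded) runs are kept.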
3117 */ 3118 reclaimed = 0; 3119 for (i = 0; count > 0 && i < nruns; i++) { 3120 count--; 3121 m_run = m_runs[RUN_INDEX(count, nruns)]; 3122 error = vm_page_reclaim_run(req_class, domain, npages, 3123 m_run, high); 3124 if (error == 0) { 3125 reclaimed += npages; 3126 if (reclaimed >= min_reclaim) { 3127 ret = true; 3128 goto done; 3129 } 3130 } 3131 } 3132 3133 /* 3134 * Either relax the restrictions on the next scan or return if 3135 * the last scan had no restrictions. 3136 */ 3137 if (options == VPSC_NORESERV) 3138 options = VPSC_NOSUPER; 3139 else if (options == VPSC_NOSUPER) 3140 options = VPSC_ANY; 3141 else if (options == VPSC_ANY) { 3142 ret = reclaimed != 0; 3143 goto done; 3144 } 3145 } 3146 done: 3147 if (m_runs != _m_runs) 3148 free(m_runs, M_TEMP); 3149 return (ret); 3150 } 3151 3152 bool 3153 vm_page_reclaim_contig_domain(int domain, int req, u_long npages, 3154 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 3155 { 3156 return (vm_page_reclaim_contig_domain_ext(domain, req, npages, low, high, 3157 alignment, boundary, 1)); 3158 } 3159 3160 bool 3161 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 3162 u_long alignment, vm_paddr_t boundary) 3163 { 3164 struct vm_domainset_iter di; 3165 int domain; 3166 bool ret; 3167 3168 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 3169 do { 3170 ret = vm_page_reclaim_contig_domain(domain, req, npages, low, 3171 high, alignment, boundary); 3172 if (ret) 3173 break; 3174 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 3175 3176 return (ret); 3177 } 3178 3179 /* 3180 * Set the domain in the appropriate page level domainset. 3181 */ 3182 void 3183 vm_domain_set(struct vm_domain *vmd) 3184 { 3185 3186 mtx_lock(&vm_domainset_lock); 3187 if (!vmd->vmd_minset && vm_paging_min(vmd)) { 3188 vmd->vmd_minset = 1; 3189 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); 3190 } 3191 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { 3192 vmd->vmd_severeset = 1; 3193 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); 3194 } 3195 mtx_unlock(&vm_domainset_lock); 3196 } 3197 3198 /* 3199 * Clear the domain from the appropriate page level domainset. 3200 */ 3201 void 3202 vm_domain_clear(struct vm_domain *vmd) 3203 { 3204 3205 mtx_lock(&vm_domainset_lock); 3206 if (vmd->vmd_minset && !vm_paging_min(vmd)) { 3207 vmd->vmd_minset = 0; 3208 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); 3209 if (vm_min_waiters != 0) { 3210 vm_min_waiters = 0; 3211 wakeup(&vm_min_domains); 3212 } 3213 } 3214 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { 3215 vmd->vmd_severeset = 0; 3216 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); 3217 if (vm_severe_waiters != 0) { 3218 vm_severe_waiters = 0; 3219 wakeup(&vm_severe_domains); 3220 } 3221 } 3222 3223 /* 3224 * If pageout daemon needs pages, then tell it that there are 3225 * some free. 3226 */ 3227 if (vmd->vmd_pageout_pages_needed && 3228 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { 3229 wakeup(&vmd->vmd_pageout_pages_needed); 3230 vmd->vmd_pageout_pages_needed = 0; 3231 } 3232 3233 /* See comments in vm_wait_doms(). */ 3234 if (vm_pageproc_waiters) { 3235 vm_pageproc_waiters = 0; 3236 wakeup(&vm_pageproc_waiters); 3237 } 3238 mtx_unlock(&vm_domainset_lock); 3239 } 3240 3241 /* 3242 * Wait for free pages to exceed the min threshold globally. 
3243 */ 3244 void 3245 vm_wait_min(void) 3246 { 3247 3248 mtx_lock(&vm_domainset_lock); 3249 while (vm_page_count_min()) { 3250 vm_min_waiters++; 3251 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); 3252 } 3253 mtx_unlock(&vm_domainset_lock); 3254 } 3255 3256 /* 3257 * Wait for free pages to exceed the severe threshold globally. 3258 */ 3259 void 3260 vm_wait_severe(void) 3261 { 3262 3263 mtx_lock(&vm_domainset_lock); 3264 while (vm_page_count_severe()) { 3265 vm_severe_waiters++; 3266 msleep(&vm_severe_domains, &vm_domainset_lock, PVM, 3267 "vmwait", 0); 3268 } 3269 mtx_unlock(&vm_domainset_lock); 3270 } 3271 3272 u_int 3273 vm_wait_count(void) 3274 { 3275 3276 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); 3277 } 3278 3279 int 3280 vm_wait_doms(const domainset_t *wdoms, int mflags) 3281 { 3282 int error; 3283 3284 error = 0; 3285 3286 /* 3287 * We use racy wakeup synchronization to avoid expensive global 3288 * locking for the pageproc when sleeping with a non-specific vm_wait. 3289 * To handle this, we only sleep for one tick in this instance. It 3290 * is expected that most allocations for the pageproc will come from 3291 * kmem or vm_page_grab* which will use the more specific and 3292 * race-free vm_wait_domain(). 3293 */ 3294 if (curproc == pageproc) { 3295 mtx_lock(&vm_domainset_lock); 3296 vm_pageproc_waiters++; 3297 error = msleep(&vm_pageproc_waiters, &vm_domainset_lock, 3298 PVM | PDROP | mflags, "pageprocwait", 1); 3299 } else { 3300 /* 3301 * XXX Ideally we would wait only until the allocation could 3302 * be satisfied. This condition can cause new allocators to 3303 * consume all freed pages while old allocators wait. 3304 */ 3305 mtx_lock(&vm_domainset_lock); 3306 if (vm_page_count_min_set(wdoms)) { 3307 if (pageproc == NULL) 3308 panic("vm_wait in early boot"); 3309 vm_min_waiters++; 3310 error = msleep(&vm_min_domains, &vm_domainset_lock, 3311 PVM | PDROP | mflags, "vmwait", 0); 3312 } else 3313 mtx_unlock(&vm_domainset_lock); 3314 } 3315 return (error); 3316 } 3317 3318 /* 3319 * vm_wait_domain: 3320 * 3321 * Sleep until free pages are available for allocation. 3322 * - Called in various places after failed memory allocations. 3323 */ 3324 void 3325 vm_wait_domain(int domain) 3326 { 3327 struct vm_domain *vmd; 3328 domainset_t wdom; 3329 3330 vmd = VM_DOMAIN(domain); 3331 vm_domain_free_assert_unlocked(vmd); 3332 3333 if (curproc == pageproc) { 3334 mtx_lock(&vm_domainset_lock); 3335 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { 3336 vmd->vmd_pageout_pages_needed = 1; 3337 msleep(&vmd->vmd_pageout_pages_needed, 3338 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); 3339 } else 3340 mtx_unlock(&vm_domainset_lock); 3341 } else { 3342 DOMAINSET_ZERO(&wdom); 3343 DOMAINSET_SET(vmd->vmd_domain, &wdom); 3344 vm_wait_doms(&wdom, 0); 3345 } 3346 } 3347 3348 static int 3349 vm_wait_flags(vm_object_t obj, int mflags) 3350 { 3351 struct domainset *d; 3352 3353 d = NULL; 3354 3355 /* 3356 * Carefully fetch pointers only once: the struct domainset 3357 * itself is immutable but the pointer might change. 3358 */ 3359 if (obj != NULL) 3360 d = obj->domain.dr_policy; 3361 if (d == NULL) 3362 d = curthread->td_domain.dr_policy; 3363 3364 return (vm_wait_doms(&d->ds_mask, mflags)); 3365 } 3366 3367 /* 3368 * vm_wait: 3369 * 3370 * Sleep until free pages are available for allocation in the 3371 * affinity domains of the obj. If obj is NULL, the domain set 3372 * for the calling thread is used.
3373 * Called in various places after failed memory allocations. 3374 */ 3375 void 3376 vm_wait(vm_object_t obj) 3377 { 3378 (void)vm_wait_flags(obj, 0); 3379 } 3380 3381 int 3382 vm_wait_intr(vm_object_t obj) 3383 { 3384 return (vm_wait_flags(obj, PCATCH)); 3385 } 3386 3387 /* 3388 * vm_domain_alloc_fail: 3389 * 3390 * Called when a page allocation function fails. Informs the 3391 * pagedaemon and performs the requested wait. Requires the 3392 * domain_free and object lock on entry. Returns with the 3393 * object lock held and free lock released. Returns an error when 3394 * retry is necessary. 3395 * 3396 */ 3397 static int 3398 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) 3399 { 3400 3401 vm_domain_free_assert_unlocked(vmd); 3402 3403 atomic_add_int(&vmd->vmd_pageout_deficit, 3404 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 3405 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { 3406 if (object != NULL) 3407 VM_OBJECT_WUNLOCK(object); 3408 vm_wait_domain(vmd->vmd_domain); 3409 if (object != NULL) 3410 VM_OBJECT_WLOCK(object); 3411 if (req & VM_ALLOC_WAITOK) 3412 return (EAGAIN); 3413 } 3414 3415 return (0); 3416 } 3417 3418 /* 3419 * vm_waitpfault: 3420 * 3421 * Sleep until free pages are available for allocation. 3422 * - Called only in vm_fault so that processes page faulting 3423 * can be easily tracked. 3424 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 3425 * processes will be able to grab memory first. Do not change 3426 * this balance without careful testing first. 3427 */ 3428 void 3429 vm_waitpfault(struct domainset *dset, int timo) 3430 { 3431 3432 /* 3433 * XXX Ideally we would wait only until the allocation could 3434 * be satisfied. This condition can cause new allocators to 3435 * consume all freed pages while old allocators wait. 3436 */ 3437 mtx_lock(&vm_domainset_lock); 3438 if (vm_page_count_min_set(&dset->ds_mask)) { 3439 vm_min_waiters++; 3440 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP, 3441 "pfault", timo); 3442 } else 3443 mtx_unlock(&vm_domainset_lock); 3444 } 3445 3446 static struct vm_pagequeue * 3447 _vm_page_pagequeue(vm_page_t m, uint8_t queue) 3448 { 3449 3450 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]); 3451 } 3452 3453 #ifdef INVARIANTS 3454 static struct vm_pagequeue * 3455 vm_page_pagequeue(vm_page_t m) 3456 { 3457 3458 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue)); 3459 } 3460 #endif 3461 3462 static __always_inline bool 3463 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3464 { 3465 vm_page_astate_t tmp; 3466 3467 tmp = *old; 3468 do { 3469 if (__predict_true(vm_page_astate_fcmpset(m, old, new))) 3470 return (true); 3471 counter_u64_add(pqstate_commit_retries, 1); 3472 } while (old->_bits == tmp._bits); 3473 3474 return (false); 3475 } 3476 3477 /* 3478 * Do the work of committing a queue state update that moves the page out of 3479 * its current queue. 
3480 */ 3481 static bool 3482 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m, 3483 vm_page_astate_t *old, vm_page_astate_t new) 3484 { 3485 vm_page_t next; 3486 3487 vm_pagequeue_assert_locked(pq); 3488 KASSERT(vm_page_pagequeue(m) == pq, 3489 ("%s: queue %p does not match page %p", __func__, pq, m)); 3490 KASSERT(old->queue != PQ_NONE && new.queue != old->queue, 3491 ("%s: invalid queue indices %d %d", 3492 __func__, old->queue, new.queue)); 3493 3494 /* 3495 * Once the queue index of the page changes there is nothing 3496 * synchronizing with further updates to the page's physical 3497 * queue state. Therefore we must speculatively remove the page 3498 * from the queue now and be prepared to roll back if the queue 3499 * state update fails. If the page is not physically enqueued then 3500 * we just update its queue index. 3501 */ 3502 if ((old->flags & PGA_ENQUEUED) != 0) { 3503 new.flags &= ~PGA_ENQUEUED; 3504 next = TAILQ_NEXT(m, plinks.q); 3505 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3506 vm_pagequeue_cnt_dec(pq); 3507 if (!vm_page_pqstate_fcmpset(m, old, new)) { 3508 if (next == NULL) 3509 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3510 else 3511 TAILQ_INSERT_BEFORE(next, m, plinks.q); 3512 vm_pagequeue_cnt_inc(pq); 3513 return (false); 3514 } else { 3515 return (true); 3516 } 3517 } else { 3518 return (vm_page_pqstate_fcmpset(m, old, new)); 3519 } 3520 } 3521 3522 static bool 3523 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old, 3524 vm_page_astate_t new) 3525 { 3526 struct vm_pagequeue *pq; 3527 vm_page_astate_t as; 3528 bool ret; 3529 3530 pq = _vm_page_pagequeue(m, old->queue); 3531 3532 /* 3533 * The queue field and PGA_ENQUEUED flag are stable only so long as the 3534 * corresponding page queue lock is held. 3535 */ 3536 vm_pagequeue_lock(pq); 3537 as = vm_page_astate_load(m); 3538 if (__predict_false(as._bits != old->_bits)) { 3539 *old = as; 3540 ret = false; 3541 } else { 3542 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new); 3543 } 3544 vm_pagequeue_unlock(pq); 3545 return (ret); 3546 } 3547 3548 /* 3549 * Commit a queue state update that enqueues or requeues a page. 3550 */ 3551 static bool 3552 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m, 3553 vm_page_astate_t *old, vm_page_astate_t new) 3554 { 3555 struct vm_domain *vmd; 3556 3557 vm_pagequeue_assert_locked(pq); 3558 KASSERT(old->queue != PQ_NONE && new.queue == old->queue, 3559 ("%s: invalid queue indices %d %d", 3560 __func__, old->queue, new.queue)); 3561 3562 new.flags |= PGA_ENQUEUED; 3563 if (!vm_page_pqstate_fcmpset(m, old, new)) 3564 return (false); 3565 3566 if ((old->flags & PGA_ENQUEUED) != 0) 3567 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3568 else 3569 vm_pagequeue_cnt_inc(pq); 3570 3571 /* 3572 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE. In particular, if 3573 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be 3574 * applied, even if it was set first. 3575 */ 3576 if ((old->flags & PGA_REQUEUE_HEAD) != 0) { 3577 vmd = vm_pagequeue_domain(m); 3578 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE], 3579 ("%s: invalid page queue for page %p", __func__, m)); 3580 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 3581 } else { 3582 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3583 } 3584 return (true); 3585 } 3586 3587 /* 3588 * Commit a queue state update that encodes a request for a deferred queue 3589 * operation. 
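 *
 * Callers typically drive the pqstate commit functions with a
 * load/retry loop; a minimal sketch:
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		new = old;
 *		new.flags |= PGA_DEQUEUE;
 *	} while (!vm_page_pqstate_commit_request(m, &old, new));
 *
 * (vm_page_dequeue_deferred() below is a concrete instance.)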
3590 */ 3591 static bool 3592 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old, 3593 vm_page_astate_t new) 3594 { 3595 3596 KASSERT(old->queue == new.queue || new.queue != PQ_NONE, 3597 ("%s: invalid state, queue %d flags %x", 3598 __func__, new.queue, new.flags)); 3599 3600 if (old->_bits != new._bits && 3601 !vm_page_pqstate_fcmpset(m, old, new)) 3602 return (false); 3603 vm_page_pqbatch_submit(m, new.queue); 3604 return (true); 3605 } 3606 3607 /* 3608 * A generic queue state update function. This handles more cases than the 3609 * specialized functions above. 3610 */ 3611 bool 3612 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) 3613 { 3614 3615 if (old->_bits == new._bits) 3616 return (true); 3617 3618 if (old->queue != PQ_NONE && new.queue != old->queue) { 3619 if (!vm_page_pqstate_commit_dequeue(m, old, new)) 3620 return (false); 3621 if (new.queue != PQ_NONE) 3622 vm_page_pqbatch_submit(m, new.queue); 3623 } else { 3624 if (!vm_page_pqstate_fcmpset(m, old, new)) 3625 return (false); 3626 if (new.queue != PQ_NONE && 3627 ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0) 3628 vm_page_pqbatch_submit(m, new.queue); 3629 } 3630 return (true); 3631 } 3632 3633 /* 3634 * Apply deferred queue state updates to a page. 3635 */ 3636 static inline void 3637 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue) 3638 { 3639 vm_page_astate_t new, old; 3640 3641 CRITICAL_ASSERT(curthread); 3642 vm_pagequeue_assert_locked(pq); 3643 KASSERT(queue < PQ_COUNT, 3644 ("%s: invalid queue index %d", __func__, queue)); 3645 KASSERT(pq == _vm_page_pagequeue(m, queue), 3646 ("%s: page %p does not belong to queue %p", __func__, m, pq)); 3647 3648 for (old = vm_page_astate_load(m);;) { 3649 if (__predict_false(old.queue != queue || 3650 (old.flags & PGA_QUEUE_OP_MASK) == 0)) { 3651 counter_u64_add(queue_nops, 1); 3652 break; 3653 } 3654 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3655 ("%s: page %p is unmanaged", __func__, m)); 3656 3657 new = old; 3658 if ((old.flags & PGA_DEQUEUE) != 0) { 3659 new.flags &= ~PGA_QUEUE_OP_MASK; 3660 new.queue = PQ_NONE; 3661 if (__predict_true(_vm_page_pqstate_commit_dequeue(pq, 3662 m, &old, new))) { 3663 counter_u64_add(queue_ops, 1); 3664 break; 3665 } 3666 } else { 3667 new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD); 3668 if (__predict_true(_vm_page_pqstate_commit_requeue(pq, 3669 m, &old, new))) { 3670 counter_u64_add(queue_ops, 1); 3671 break; 3672 } 3673 } 3674 } 3675 } 3676 3677 static void 3678 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq, 3679 uint8_t queue) 3680 { 3681 int i; 3682 3683 for (i = 0; i < bq->bq_cnt; i++) 3684 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue); 3685 vm_batchqueue_init(bq); 3686 } 3687 3688 /* 3689 * vm_page_pqbatch_submit: [ internal use only ] 3690 * 3691 * Enqueue a page in the specified page queue's batched work queue. 3692 * The caller must have encoded the requested operation in the page 3693 * structure's a.flags field. 
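 *
 * For example, vm_page_enqueue() below sets PGA_REQUEUE in a.flags
 * before submitting the page to its target queue's batch.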
3694 */ 3695 void 3696 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue) 3697 { 3698 struct vm_batchqueue *bq; 3699 struct vm_pagequeue *pq; 3700 int domain, slots_remaining; 3701 3702 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); 3703 3704 domain = vm_page_domain(m); 3705 critical_enter(); 3706 bq = DPCPU_PTR(pqbatch[domain][queue]); 3707 slots_remaining = vm_batchqueue_insert(bq, m); 3708 if (slots_remaining > (VM_BATCHQUEUE_SIZE >> 1)) { 3709 /* keep building the bq */ 3710 critical_exit(); 3711 return; 3712 } else if (slots_remaining > 0 ) { 3713 /* Try to process the bq if we can get the lock */ 3714 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; 3715 if (vm_pagequeue_trylock(pq)) { 3716 vm_pqbatch_process(pq, bq, queue); 3717 vm_pagequeue_unlock(pq); 3718 } 3719 critical_exit(); 3720 return; 3721 } 3722 critical_exit(); 3723 3724 /* if we make it here, the bq is full so wait for the lock */ 3725 3726 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; 3727 vm_pagequeue_lock(pq); 3728 critical_enter(); 3729 bq = DPCPU_PTR(pqbatch[domain][queue]); 3730 vm_pqbatch_process(pq, bq, queue); 3731 vm_pqbatch_process_page(pq, m, queue); 3732 vm_pagequeue_unlock(pq); 3733 critical_exit(); 3734 } 3735 3736 /* 3737 * vm_page_pqbatch_drain: [ internal use only ] 3738 * 3739 * Force all per-CPU page queue batch queues to be drained. This is 3740 * intended for use in severe memory shortages, to ensure that pages 3741 * do not remain stuck in the batch queues. 3742 */ 3743 void 3744 vm_page_pqbatch_drain(void) 3745 { 3746 struct thread *td; 3747 struct vm_domain *vmd; 3748 struct vm_pagequeue *pq; 3749 int cpu, domain, queue; 3750 3751 td = curthread; 3752 CPU_FOREACH(cpu) { 3753 thread_lock(td); 3754 sched_bind(td, cpu); 3755 thread_unlock(td); 3756 3757 for (domain = 0; domain < vm_ndomains; domain++) { 3758 vmd = VM_DOMAIN(domain); 3759 for (queue = 0; queue < PQ_COUNT; queue++) { 3760 pq = &vmd->vmd_pagequeues[queue]; 3761 vm_pagequeue_lock(pq); 3762 critical_enter(); 3763 vm_pqbatch_process(pq, 3764 DPCPU_PTR(pqbatch[domain][queue]), queue); 3765 critical_exit(); 3766 vm_pagequeue_unlock(pq); 3767 } 3768 } 3769 } 3770 thread_lock(td); 3771 sched_unbind(td); 3772 thread_unlock(td); 3773 } 3774 3775 /* 3776 * vm_page_dequeue_deferred: [ internal use only ] 3777 * 3778 * Request removal of the given page from its current page 3779 * queue. Physical removal from the queue may be deferred 3780 * indefinitely. 3781 */ 3782 void 3783 vm_page_dequeue_deferred(vm_page_t m) 3784 { 3785 vm_page_astate_t new, old; 3786 3787 old = vm_page_astate_load(m); 3788 do { 3789 if (old.queue == PQ_NONE) { 3790 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 3791 ("%s: page %p has unexpected queue state", 3792 __func__, m)); 3793 break; 3794 } 3795 new = old; 3796 new.flags |= PGA_DEQUEUE; 3797 } while (!vm_page_pqstate_commit_request(m, &old, new)); 3798 } 3799 3800 /* 3801 * vm_page_dequeue: 3802 * 3803 * Remove the page from whichever page queue it's in, if any, before 3804 * returning. 
3805 */ 3806 void 3807 vm_page_dequeue(vm_page_t m) 3808 { 3809 vm_page_astate_t new, old; 3810 3811 old = vm_page_astate_load(m); 3812 do { 3813 if (old.queue == PQ_NONE) { 3814 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0, 3815 ("%s: page %p has unexpected queue state", 3816 __func__, m)); 3817 break; 3818 } 3819 new = old; 3820 new.flags &= ~PGA_QUEUE_OP_MASK; 3821 new.queue = PQ_NONE; 3822 } while (!vm_page_pqstate_commit_dequeue(m, &old, new)); 3823 3824 } 3825 3826 /* 3827 * Schedule the given page for insertion into the specified page queue. 3828 * Physical insertion of the page may be deferred indefinitely. 3829 */ 3830 static void 3831 vm_page_enqueue(vm_page_t m, uint8_t queue) 3832 { 3833 3834 KASSERT(m->a.queue == PQ_NONE && 3835 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, 3836 ("%s: page %p is already enqueued", __func__, m)); 3837 KASSERT(m->ref_count > 0, 3838 ("%s: page %p does not carry any references", __func__, m)); 3839 3840 m->a.queue = queue; 3841 if ((m->a.flags & PGA_REQUEUE) == 0) 3842 vm_page_aflag_set(m, PGA_REQUEUE); 3843 vm_page_pqbatch_submit(m, queue); 3844 } 3845 3846 /* 3847 * vm_page_free_prep: 3848 * 3849 * Prepares the given page to be put on the free list, 3850 * disassociating it from any VM object. The caller may return 3851 * the page to the free list only if this function returns true. 3852 * 3853 * The object, if it exists, must be locked, and then the page must 3854 * be xbusy. Otherwise the page must be not busied. A managed 3855 * page must be unmapped. 3856 */ 3857 static bool 3858 vm_page_free_prep(vm_page_t m) 3859 { 3860 3861 /* 3862 * Synchronize with threads that have dropped a reference to this 3863 * page. 3864 */ 3865 atomic_thread_fence_acq(); 3866 3867 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) 3868 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) { 3869 uint64_t *p; 3870 int i; 3871 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 3872 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) 3873 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", 3874 m, i, (uintmax_t)*p)); 3875 } 3876 #endif 3877 if ((m->oflags & VPO_UNMANAGED) == 0) { 3878 KASSERT(!pmap_page_is_mapped(m), 3879 ("vm_page_free_prep: freeing mapped page %p", m)); 3880 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0, 3881 ("vm_page_free_prep: mapping flags set in page %p", m)); 3882 } else { 3883 KASSERT(m->a.queue == PQ_NONE, 3884 ("vm_page_free_prep: unmanaged page %p is queued", m)); 3885 } 3886 VM_CNT_INC(v_tfree); 3887 3888 if (m->object != NULL) { 3889 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) == 3890 ((m->object->flags & OBJ_UNMANAGED) != 0), 3891 ("vm_page_free_prep: managed flag mismatch for page %p", 3892 m)); 3893 vm_page_assert_xbusied(m); 3894 3895 /* 3896 * The object reference can be released without an atomic 3897 * operation. 3898 */ 3899 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 3900 m->ref_count == VPRC_OBJREF, 3901 ("vm_page_free_prep: page %p has unexpected ref_count %u", 3902 m, m->ref_count)); 3903 vm_page_object_remove(m); 3904 m->ref_count -= VPRC_OBJREF; 3905 } else 3906 vm_page_assert_unbusied(m); 3907 3908 vm_page_busy_free(m); 3909 3910 /* 3911 * If fictitious remove object association and 3912 * return. 
3913          */
3914         if ((m->flags & PG_FICTITIOUS) != 0) {
3915                 KASSERT(m->ref_count == 1,
3916                     ("fictitious page %p is referenced", m));
3917                 KASSERT(m->a.queue == PQ_NONE,
3918                     ("fictitious page %p is queued", m));
3919                 return (false);
3920         }
3921 
3922         /*
3923          * Pages need not be dequeued before they are returned to the physical
3924          * memory allocator, but they must at least be marked for a deferred
3925          * dequeue.
3926          */
3927         if ((m->oflags & VPO_UNMANAGED) == 0)
3928                 vm_page_dequeue_deferred(m);
3929 
3930         m->valid = 0;
3931         vm_page_undirty(m);
3932 
3933         if (m->ref_count != 0)
3934                 panic("vm_page_free_prep: page %p has references", m);
3935 
3936         /*
3937          * Restore the default memory attribute to the page.
3938          */
3939         if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
3940                 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
3941 
3942 #if VM_NRESERVLEVEL > 0
3943         /*
3944          * Determine whether the page belongs to a reservation.  If the page was
3945          * allocated from a per-CPU cache, it cannot belong to a reservation, so
3946          * as an optimization, we avoid the check in that case.
3947          */
3948         if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
3949                 return (false);
3950 #endif
3951 
3952         return (true);
3953 }
3954 
3955 /*
3956  * vm_page_free_toq:
3957  *
3958  *	Returns the given page to the free list, disassociating it
3959  *	from any VM object.
3960  *
3961  *	The object must be locked.  The page must be exclusively busied if it
3962  *	belongs to an object.
3963  */
3964 static void
3965 vm_page_free_toq(vm_page_t m)
3966 {
3967         struct vm_domain *vmd;
3968         uma_zone_t zone;
3969 
3970         if (!vm_page_free_prep(m))
3971                 return;
3972 
3973         vmd = vm_pagequeue_domain(m);
3974         zone = vmd->vmd_pgcache[m->pool].zone;
3975         if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
3976                 uma_zfree(zone, m);
3977                 return;
3978         }
3979         vm_domain_free_lock(vmd);
3980         vm_phys_free_pages(m, 0);
3981         vm_domain_free_unlock(vmd);
3982         vm_domain_freecnt_inc(vmd, 1);
3983 }
3984 
3985 /*
3986  * vm_page_free_pages_toq:
3987  *
3988  *	Returns a list of pages to the free list, disassociating each
3989  *	page from any VM object.  This is equivalent to calling
3990  *	vm_page_free_toq() for each page on the list.
3991  */
3992 void
3993 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
3994 {
3995         vm_page_t m;
3996         int count;
3997 
3998         if (SLIST_EMPTY(free))
3999                 return;
4000 
4001         count = 0;
4002         while ((m = SLIST_FIRST(free)) != NULL) {
4003                 count++;
4004                 SLIST_REMOVE_HEAD(free, plinks.s.ss);
4005                 vm_page_free_toq(m);
4006         }
4007 
4008         if (update_wire_count)
4009                 vm_wire_sub(count);
4010 }
4011 
4012 /*
4013  * Mark this page as wired down.  For managed pages, this prevents reclamation
4014  * by the page daemon and prevents the page from being freed when the containing object, if any, is destroyed.
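 *
 * As an illustration (hypothetical caller, not a prescription), code that
 * must pin a page across an operation that drops its locks might do:
 *
 *	vm_page_wire(m);
 *	... the page cannot be reclaimed while the wiring is held ...
 *	vm_page_unwire(m, PQ_ACTIVE);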
4015 */
4016 void
4017 vm_page_wire(vm_page_t m)
4018 {
4019         u_int old;
4020 
4021 #ifdef INVARIANTS
4022         if (m->object != NULL && !vm_page_busied(m) &&
4023             !vm_object_busied(m->object))
4024                 VM_OBJECT_ASSERT_LOCKED(m->object);
4025 #endif
4026         KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
4027             VPRC_WIRE_COUNT(m->ref_count) >= 1,
4028             ("vm_page_wire: fictitious page %p has zero wirings", m));
4029 
4030         old = atomic_fetchadd_int(&m->ref_count, 1);
4031         KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
4032             ("vm_page_wire: counter overflow for page %p", m));
4033         if (VPRC_WIRE_COUNT(old) == 0) {
4034                 if ((m->oflags & VPO_UNMANAGED) == 0)
4035                         vm_page_aflag_set(m, PGA_DEQUEUE);
4036                 vm_wire_add(1);
4037         }
4038 }
4039 
4040 /*
4041  * Attempt to wire a mapped page following a pmap lookup of that page.
4042  * This may fail if a thread is concurrently tearing down mappings of the page.
4043  * The transient failure is acceptable because it translates to a failure of
4044  * the calling pmap_extract_and_hold(), which should then be followed by the
4045  * vm_fault() fallback; see e.g. vm_fault_quick_hold_pages().
4046  */
4047 bool
4048 vm_page_wire_mapped(vm_page_t m)
4049 {
4050         u_int old;
4051 
4052         old = m->ref_count;
4053         do {
4054                 KASSERT(old > 0,
4055                     ("vm_page_wire_mapped: wiring unreferenced page %p", m));
4056                 if ((old & VPRC_BLOCKED) != 0)
4057                         return (false);
4058         } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
4059 
4060         if (VPRC_WIRE_COUNT(old) == 0) {
4061                 if ((m->oflags & VPO_UNMANAGED) == 0)
4062                         vm_page_aflag_set(m, PGA_DEQUEUE);
4063                 vm_wire_add(1);
4064         }
4065         return (true);
4066 }
4067 
4068 /*
4069  * Release a wiring reference to a managed page.  If the page still belongs to
4070  * an object, update its position in the page queues to reflect the reference.
4071  * If the wiring was the last reference to the page, free the page.
4072  */
4073 static void
4074 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
4075 {
4076         u_int old;
4077 
4078         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4079             ("%s: page %p is unmanaged", __func__, m));
4080 
4081         /*
4082          * Update LRU state before releasing the wiring reference.
4083          * Use a release store when updating the reference count to
4084          * synchronize with vm_page_free_prep().
4085          */
4086         old = m->ref_count;
4087         do {
4088                 KASSERT(VPRC_WIRE_COUNT(old) > 0,
4089                     ("vm_page_unwire: wire count underflow for page %p", m));
4090 
4091                 if (old > VPRC_OBJREF + 1) {
4092                         /*
4093                          * The page has at least one other wiring reference.  An
4094                          * earlier iteration of this loop may have called
4095                          * vm_page_release_toq() and cleared PGA_DEQUEUE, so
4096                          * re-set it if necessary.
4097                          */
4098                         if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
4099                                 vm_page_aflag_set(m, PGA_DEQUEUE);
4100                 } else if (old == VPRC_OBJREF + 1) {
4101                         /*
4102                          * This is the last wiring.  Clear PGA_DEQUEUE and
4103                          * update the page's queue state to reflect the
4104                          * reference.  If the page does not belong to an object
4105                          * (i.e., the VPRC_OBJREF bit is clear), we only need to
4106                          * clear leftover queue state.
4107                          */
4108                         vm_page_release_toq(m, nqueue, noreuse);
4109                 } else if (old == 1) {
4110                         vm_page_aflag_clear(m, PGA_DEQUEUE);
4111                 }
4112         } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));
4113 
4114         if (VPRC_WIRE_COUNT(old) == 1) {
4115                 vm_wire_sub(1);
4116                 if (old == 1)
4117                         vm_page_free(m);
4118         }
4119 }
4120 
4121 /*
4122  * Release one wiring of the specified page, potentially allowing it to be
4123  * paged out.
4124 * 4125 * Only managed pages belonging to an object can be paged out. If the number 4126 * of wirings transitions to zero and the page is eligible for page out, then 4127 * the page is added to the specified paging queue. If the released wiring 4128 * represented the last reference to the page, the page is freed. 4129 */ 4130 void 4131 vm_page_unwire(vm_page_t m, uint8_t nqueue) 4132 { 4133 4134 KASSERT(nqueue < PQ_COUNT, 4135 ("vm_page_unwire: invalid queue %u request for page %p", 4136 nqueue, m)); 4137 4138 if ((m->oflags & VPO_UNMANAGED) != 0) { 4139 if (vm_page_unwire_noq(m) && m->ref_count == 0) 4140 vm_page_free(m); 4141 return; 4142 } 4143 vm_page_unwire_managed(m, nqueue, false); 4144 } 4145 4146 /* 4147 * Unwire a page without (re-)inserting it into a page queue. It is up 4148 * to the caller to enqueue, requeue, or free the page as appropriate. 4149 * In most cases involving managed pages, vm_page_unwire() should be used 4150 * instead. 4151 */ 4152 bool 4153 vm_page_unwire_noq(vm_page_t m) 4154 { 4155 u_int old; 4156 4157 old = vm_page_drop(m, 1); 4158 KASSERT(VPRC_WIRE_COUNT(old) != 0, 4159 ("%s: counter underflow for page %p", __func__, m)); 4160 KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1, 4161 ("%s: missing ref on fictitious page %p", __func__, m)); 4162 4163 if (VPRC_WIRE_COUNT(old) > 1) 4164 return (false); 4165 if ((m->oflags & VPO_UNMANAGED) == 0) 4166 vm_page_aflag_clear(m, PGA_DEQUEUE); 4167 vm_wire_sub(1); 4168 return (true); 4169 } 4170 4171 /* 4172 * Ensure that the page ends up in the specified page queue. If the page is 4173 * active or being moved to the active queue, ensure that its act_count is 4174 * at least ACT_INIT but do not otherwise mess with it. 4175 */ 4176 static __always_inline void 4177 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag) 4178 { 4179 vm_page_astate_t old, new; 4180 4181 KASSERT(m->ref_count > 0, 4182 ("%s: page %p does not carry any references", __func__, m)); 4183 KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD, 4184 ("%s: invalid flags %x", __func__, nflag)); 4185 4186 if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m)) 4187 return; 4188 4189 old = vm_page_astate_load(m); 4190 do { 4191 if ((old.flags & PGA_DEQUEUE) != 0) 4192 break; 4193 new = old; 4194 new.flags &= ~PGA_QUEUE_OP_MASK; 4195 if (nqueue == PQ_ACTIVE) 4196 new.act_count = max(old.act_count, ACT_INIT); 4197 if (old.queue == nqueue) { 4198 /* 4199 * There is no need to requeue pages already in the 4200 * active queue. 4201 */ 4202 if (nqueue != PQ_ACTIVE || 4203 (old.flags & PGA_ENQUEUED) == 0) 4204 new.flags |= nflag; 4205 } else { 4206 new.flags |= nflag; 4207 new.queue = nqueue; 4208 } 4209 } while (!vm_page_pqstate_commit(m, &old, new)); 4210 } 4211 4212 /* 4213 * Put the specified page on the active list (if appropriate). 4214 */ 4215 void 4216 vm_page_activate(vm_page_t m) 4217 { 4218 4219 vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE); 4220 } 4221 4222 /* 4223 * Move the specified page to the tail of the inactive queue, or requeue 4224 * the page if it is already in the inactive queue. 4225 */ 4226 void 4227 vm_page_deactivate(vm_page_t m) 4228 { 4229 4230 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE); 4231 } 4232 4233 void 4234 vm_page_deactivate_noreuse(vm_page_t m) 4235 { 4236 4237 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD); 4238 } 4239 4240 /* 4241 * Put a page in the laundry, or requeue it if it is already there. 
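 *
 * This wraps vm_page_mvqueue() just like vm_page_activate() and
 * vm_page_deactivate() above.  For instance, a (hypothetical) write-back
 * path that encounters a dirty page can defer it to the laundry with:
 *
 *	if (m->dirty != 0 && !vm_page_in_laundry(m))
 *		vm_page_launder(m);
 *
 * which mirrors the handling in vm_page_advise() below.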
4242 */
4243 void
4244 vm_page_launder(vm_page_t m)
4245 {
4246 
4247         vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
4248 }
4249 
4250 /*
4251  * Put a page in the PQ_UNSWAPPABLE holding queue.
4252  */
4253 void
4254 vm_page_unswappable(vm_page_t m)
4255 {
4256 
4257         VM_OBJECT_ASSERT_LOCKED(m->object);
4258         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4259             ("page %p already unswappable", m));
4260 
4261         vm_page_dequeue(m);
4262         vm_page_enqueue(m, PQ_UNSWAPPABLE);
4263 }
4264 
4265 /*
4266  * Release a page back to the page queues in preparation for unwiring.
4267  */
4268 static void
4269 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
4270 {
4271         vm_page_astate_t old, new;
4272         uint16_t nflag;
4273 
4274         /*
4275          * Use a check of the valid bits to determine whether we should
4276          * accelerate reclamation of the page.  The object lock might not be
4277          * held here, in which case the check is racy.  At worst we will either
4278          * accelerate reclamation of a valid page and violate LRU, or
4279          * unnecessarily defer reclamation of an invalid page.
4280          *
4281          * If we were asked not to cache the page, place it near the head of
4282          * the inactive queue so that it is reclaimed sooner.
4283          */
4284         if (noreuse || vm_page_none_valid(m)) {
4285                 nqueue = PQ_INACTIVE;
4286                 nflag = PGA_REQUEUE_HEAD;
4287         } else {
4288                 nflag = PGA_REQUEUE;
4289         }
4290 
4291         old = vm_page_astate_load(m);
4292         do {
4293                 new = old;
4294 
4295                 /*
4296                  * If the page is already in the active queue and we are not
4297                  * trying to accelerate reclamation, simply mark it as
4298                  * referenced and avoid any queue operations.
4299                  */
4300                 new.flags &= ~PGA_QUEUE_OP_MASK;
4301                 if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE &&
4302                     (old.flags & PGA_ENQUEUED) != 0)
4303                         new.flags |= PGA_REFERENCED;
4304                 else {
4305                         new.flags |= nflag;
4306                         new.queue = nqueue;
4307                 }
4308         } while (!vm_page_pqstate_commit(m, &old, new));
4309 }
4310 
4311 /*
4312  * Unwire a page and either attempt to free it or re-add it to the page queues.
4313  */
4314 void
4315 vm_page_release(vm_page_t m, int flags)
4316 {
4317         vm_object_t object;
4318 
4319         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4320             ("vm_page_release: page %p is unmanaged", m));
4321 
4322         if ((flags & VPR_TRYFREE) != 0) {
4323                 for (;;) {
4324                         object = atomic_load_ptr(&m->object);
4325                         if (object == NULL)
4326                                 break;
4327                         /* Depends on type-stability. */
4328                         if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
4329                                 break;
4330                         if (object == m->object) {
4331                                 vm_page_release_locked(m, flags);
4332                                 VM_OBJECT_WUNLOCK(object);
4333                                 return;
4334                         }
4335                         VM_OBJECT_WUNLOCK(object);
4336                 }
4337         }
4338         vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
4339 }
4340 
4341 /* See vm_page_release(). */
4342 void
4343 vm_page_release_locked(vm_page_t m, int flags)
4344 {
4345 
4346         VM_OBJECT_ASSERT_WLOCKED(m->object);
4347         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4348             ("vm_page_release_locked: page %p is unmanaged", m));
4349 
4350         if (vm_page_unwire_noq(m)) {
4351                 if ((flags & VPR_TRYFREE) != 0 &&
4352                     (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
4353                     m->dirty == 0 && vm_page_tryxbusy(m)) {
4354                         /*
4355                          * An unlocked lookup may have wired the page before the
4356                          * busy lock was acquired, in which case the page must
4357                          * not be freed.
4358 */ 4359 if (__predict_true(!vm_page_wired(m))) { 4360 vm_page_free(m); 4361 return; 4362 } 4363 vm_page_xunbusy(m); 4364 } else { 4365 vm_page_release_toq(m, PQ_INACTIVE, flags != 0); 4366 } 4367 } 4368 } 4369 4370 static bool 4371 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t)) 4372 { 4373 u_int old; 4374 4375 KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0, 4376 ("vm_page_try_blocked_op: page %p has no object", m)); 4377 KASSERT(vm_page_busied(m), 4378 ("vm_page_try_blocked_op: page %p is not busy", m)); 4379 VM_OBJECT_ASSERT_LOCKED(m->object); 4380 4381 old = m->ref_count; 4382 do { 4383 KASSERT(old != 0, 4384 ("vm_page_try_blocked_op: page %p has no references", m)); 4385 if (VPRC_WIRE_COUNT(old) != 0) 4386 return (false); 4387 } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED)); 4388 4389 (op)(m); 4390 4391 /* 4392 * If the object is read-locked, new wirings may be created via an 4393 * object lookup. 4394 */ 4395 old = vm_page_drop(m, VPRC_BLOCKED); 4396 KASSERT(!VM_OBJECT_WOWNED(m->object) || 4397 old == (VPRC_BLOCKED | VPRC_OBJREF), 4398 ("vm_page_try_blocked_op: unexpected refcount value %u for %p", 4399 old, m)); 4400 return (true); 4401 } 4402 4403 /* 4404 * Atomically check for wirings and remove all mappings of the page. 4405 */ 4406 bool 4407 vm_page_try_remove_all(vm_page_t m) 4408 { 4409 4410 return (vm_page_try_blocked_op(m, pmap_remove_all)); 4411 } 4412 4413 /* 4414 * Atomically check for wirings and remove all writeable mappings of the page. 4415 */ 4416 bool 4417 vm_page_try_remove_write(vm_page_t m) 4418 { 4419 4420 return (vm_page_try_blocked_op(m, pmap_remove_write)); 4421 } 4422 4423 /* 4424 * vm_page_advise 4425 * 4426 * Apply the specified advice to the given page. 4427 */ 4428 void 4429 vm_page_advise(vm_page_t m, int advice) 4430 { 4431 4432 VM_OBJECT_ASSERT_WLOCKED(m->object); 4433 vm_page_assert_xbusied(m); 4434 4435 if (advice == MADV_FREE) 4436 /* 4437 * Mark the page clean. This will allow the page to be freed 4438 * without first paging it out. MADV_FREE pages are often 4439 * quickly reused by malloc(3), so we do not do anything that 4440 * would result in a page fault on a later access. 4441 */ 4442 vm_page_undirty(m); 4443 else if (advice != MADV_DONTNEED) { 4444 if (advice == MADV_WILLNEED) 4445 vm_page_activate(m); 4446 return; 4447 } 4448 4449 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) 4450 vm_page_dirty(m); 4451 4452 /* 4453 * Clear any references to the page. Otherwise, the page daemon will 4454 * immediately reactivate the page. 4455 */ 4456 vm_page_aflag_clear(m, PGA_REFERENCED); 4457 4458 /* 4459 * Place clean pages near the head of the inactive queue rather than 4460 * the tail, thus defeating the queue's LRU operation and ensuring that 4461 * the page will be reused quickly. Dirty pages not already in the 4462 * laundry are moved there. 4463 */ 4464 if (m->dirty == 0) 4465 vm_page_deactivate_noreuse(m); 4466 else if (!vm_page_in_laundry(m)) 4467 vm_page_launder(m); 4468 } 4469 4470 /* 4471 * vm_page_grab_release 4472 * 4473 * Helper routine for grab functions to release busy on return. 4474 */ 4475 static inline void 4476 vm_page_grab_release(vm_page_t m, int allocflags) 4477 { 4478 4479 if ((allocflags & VM_ALLOC_NOBUSY) != 0) { 4480 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0) 4481 vm_page_sunbusy(m); 4482 else 4483 vm_page_xunbusy(m); 4484 } 4485 } 4486 4487 /* 4488 * vm_page_grab_sleep 4489 * 4490 * Sleep for busy according to VM_ALLOC_ parameters. 
Returns true
4491  * if the caller should retry and false otherwise.
4492  *
4493  * If the object is locked on entry, it is unlocked when this function
4494  * returns false; when true is returned, the object is still locked,
4495  * although the lock may have been dropped and reacquired in the interim.
4496  */
4497 static bool
4498 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
4499     const char *wmesg, int allocflags, bool locked)
4500 {
4501 
4502         if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4503                 return (false);
4504 
4505         /*
4506          * Reference the page before unlocking and sleeping so that
4507          * the page daemon is less likely to reclaim it.
4508          */
4509         if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
4510                 vm_page_reference(m);
4511 
4512         if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
4513             locked)
4514                 VM_OBJECT_WLOCK(object);
4515         if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
4516                 return (false);
4517 
4518         return (true);
4519 }
4520 
4521 /*
4522  * Assert that the grab flags are valid.
4523  */
4524 static inline void
4525 vm_page_grab_check(int allocflags)
4526 {
4527 
4528         KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
4529             (allocflags & VM_ALLOC_WIRED) != 0,
4530             ("vm_page_grab*: the pages must be busied or wired"));
4531 
4532         KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4533             (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4534             ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4535 }
4536 
4537 /*
4538  * Calculate the page allocation flags for grab.
4539  */
4540 static inline int
4541 vm_page_grab_pflags(int allocflags)
4542 {
4543         int pflags;
4544 
4545         pflags = allocflags &
4546             ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
4547             VM_ALLOC_NOBUSY | VM_ALLOC_IGN_SBUSY);
4548         if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4549                 pflags |= VM_ALLOC_WAITFAIL;
4550         if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4551                 pflags |= VM_ALLOC_SBUSY;
4552 
4553         return (pflags);
4554 }
4555 
4556 /*
4557  * Grab a page, waiting until we are woken up due to the page
4558  * changing state.  We keep on waiting as long as the page continues
4559  * to exist in the object.  If the page does not exist, it is first
4560  * allocated and then conditionally zeroed.
4561  *
4562  * This routine may sleep.
4563  *
4564  * The object must be locked on entry.  The lock will, however, be released
4565  * and reacquired if the routine sleeps.
4566  */
4567 vm_page_t
4568 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
4569 {
4570         vm_page_t m;
4571 
4572         VM_OBJECT_ASSERT_WLOCKED(object);
4573         vm_page_grab_check(allocflags);
4574 
4575 retrylookup:
4576         if ((m = vm_page_lookup(object, pindex)) != NULL) {
4577                 if (!vm_page_tryacquire(m, allocflags)) {
4578                         if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
4579                             allocflags, true))
4580                                 goto retrylookup;
4581                         return (NULL);
4582                 }
4583                 goto out;
4584         }
4585         if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4586                 return (NULL);
4587         m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
4588         if (m == NULL) {
4589                 if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
4590                         return (NULL);
4591                 goto retrylookup;
4592         }
4593         if ((allocflags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
4594                 pmap_zero_page(m);
4595 
4596 out:
4597         vm_page_grab_release(m, allocflags);
4598 
4599         return (m);
4600 }
4601 
4602 /*
4603  * Locklessly attempt to acquire a page given a (object, pindex) tuple
4604  * and an optional previous page to avoid the radix lookup.  The resulting
4605  * page will be validated against the identity tuple and busied or wired
4606  * as requested.
A NULL *mp return guarantees that the page was not present in the
4607  * radix tree at the time of the call, but callers must perform higher-level
4608  * synchronization or retry the operation under a lock if they require
4609  * an atomic answer.  This is the only lock-free validation routine;
4610  * other routines can depend on the resulting page state.
4611  *
4612  * The return value indicates whether the operation failed due to caller
4613  * flags.  The return is tri-state with *mp:
4614  *
4615  * (true, *mp != NULL) - The operation was successful.
4616  * (true, *mp == NULL) - The page was not found in the tree.
4617  * (false, *mp == NULL) - WAITFAIL or NOWAIT prevented acquisition.
4618  */
4619 static bool
4620 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex,
4621     vm_page_t prev, vm_page_t *mp, int allocflags)
4622 {
4623         vm_page_t m;
4624 
4625         vm_page_grab_check(allocflags);
4626         MPASS(prev == NULL || vm_page_busied(prev) || vm_page_wired(prev));
4627 
4628         *mp = NULL;
4629         for (;;) {
4630                 /*
4631                  * We may see a false NULL here because the previous page
4632                  * has been removed or just inserted and the list is loaded
4633                  * without barriers.  Switch to radix to verify.
4634                  */
4635                 if (prev == NULL || (m = TAILQ_NEXT(prev, listq)) == NULL ||
4636                     QMD_IS_TRASHED(m) || m->pindex != pindex ||
4637                     atomic_load_ptr(&m->object) != object) {
4638                         prev = NULL;
4639                         /*
4640                          * This guarantees the result is instantaneously
4641                          * correct.
4642                          */
4643                         m = vm_radix_lookup_unlocked(&object->rtree, pindex);
4644                 }
4645                 if (m == NULL)
4646                         return (true);
4647                 if (vm_page_trybusy(m, allocflags)) {
4648                         if (m->object == object && m->pindex == pindex)
4649                                 break;
4650                         /* relookup. */
4651                         vm_page_busy_release(m);
4652                         cpu_spinwait();
4653                         continue;
4654                 }
4655                 if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
4656                     allocflags, false))
4657                         return (false);
4658         }
4659         if ((allocflags & VM_ALLOC_WIRED) != 0)
4660                 vm_page_wire(m);
4661         vm_page_grab_release(m, allocflags);
4662         *mp = m;
4663         return (true);
4664 }
4665 
4666 /*
4667  * Try to locklessly grab a page and fall back to the object lock if NOCREAT
4668  * is not set.
4669  */
4670 vm_page_t
4671 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
4672 {
4673         vm_page_t m;
4674 
4675         vm_page_grab_check(allocflags);
4676 
4677         if (!vm_page_acquire_unlocked(object, pindex, NULL, &m, allocflags))
4678                 return (NULL);
4679         if (m != NULL)
4680                 return (m);
4681 
4682         /*
4683          * The lockless radix lookup should never return a false negative.
4684          * If the user specifies NOCREAT, they are guaranteed that there
4685          * was no page present at the instant of the call.  A NOCREAT caller
4686          * must handle create races gracefully.
4687          */
4688         if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4689                 return (NULL);
4690 
4691         VM_OBJECT_WLOCK(object);
4692         m = vm_page_grab(object, pindex, allocflags);
4693         VM_OBJECT_WUNLOCK(object);
4694 
4695         return (m);
4696 }
4697 
4698 /*
4699  * Grab a page and make it valid, paging in if necessary.  Pages missing from
4700  * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
4701  * and the page is not valid, as many as VM_INITIAL_PAGEIN pages can be brought
4702  * in simultaneously.  Additional pages will be left on a paging queue but
4703  * will neither be wired nor busy regardless of allocflags.
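 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	vm_page_t m;
 *
 *	VM_OBJECT_WLOCK(object);
 *	if (vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL) ==
 *	    VM_PAGER_OK) {
 *		... m is valid and exclusive busied ...
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);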
4704 */ 4705 int 4706 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags) 4707 { 4708 vm_page_t m; 4709 vm_page_t ma[VM_INITIAL_PAGEIN]; 4710 int after, i, pflags, rv; 4711 4712 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 4713 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 4714 ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch")); 4715 KASSERT((allocflags & 4716 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0, 4717 ("vm_page_grab_valid: Invalid flags 0x%X", allocflags)); 4718 VM_OBJECT_ASSERT_WLOCKED(object); 4719 pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY | 4720 VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY); 4721 pflags |= VM_ALLOC_WAITFAIL; 4722 4723 retrylookup: 4724 if ((m = vm_page_lookup(object, pindex)) != NULL) { 4725 /* 4726 * If the page is fully valid it can only become invalid 4727 * with the object lock held. If it is not valid it can 4728 * become valid with the busy lock held. Therefore, we 4729 * may unnecessarily lock the exclusive busy here if we 4730 * race with I/O completion not using the object lock. 4731 * However, we will not end up with an invalid page and a 4732 * shared lock. 4733 */ 4734 if (!vm_page_trybusy(m, 4735 vm_page_all_valid(m) ? allocflags : 0)) { 4736 (void)vm_page_grab_sleep(object, m, pindex, "pgrbwt", 4737 allocflags, true); 4738 goto retrylookup; 4739 } 4740 if (vm_page_all_valid(m)) 4741 goto out; 4742 if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 4743 vm_page_busy_release(m); 4744 *mp = NULL; 4745 return (VM_PAGER_FAIL); 4746 } 4747 } else if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 4748 *mp = NULL; 4749 return (VM_PAGER_FAIL); 4750 } else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) { 4751 if (!vm_pager_can_alloc_page(object, pindex)) { 4752 *mp = NULL; 4753 return (VM_PAGER_AGAIN); 4754 } 4755 goto retrylookup; 4756 } 4757 4758 vm_page_assert_xbusied(m); 4759 if (vm_pager_has_page(object, pindex, NULL, &after)) { 4760 after = MIN(after, VM_INITIAL_PAGEIN); 4761 after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT); 4762 after = MAX(after, 1); 4763 ma[0] = m; 4764 for (i = 1; i < after; i++) { 4765 if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) { 4766 if (vm_page_any_valid(ma[i]) || 4767 !vm_page_tryxbusy(ma[i])) 4768 break; 4769 } else { 4770 ma[i] = vm_page_alloc(object, m->pindex + i, 4771 VM_ALLOC_NORMAL); 4772 if (ma[i] == NULL) 4773 break; 4774 } 4775 } 4776 after = i; 4777 vm_object_pip_add(object, after); 4778 VM_OBJECT_WUNLOCK(object); 4779 rv = vm_pager_get_pages(object, ma, after, NULL, NULL); 4780 VM_OBJECT_WLOCK(object); 4781 vm_object_pip_wakeupn(object, after); 4782 /* Pager may have replaced a page. */ 4783 m = ma[0]; 4784 if (rv != VM_PAGER_OK) { 4785 for (i = 0; i < after; i++) { 4786 if (!vm_page_wired(ma[i])) 4787 vm_page_free(ma[i]); 4788 else 4789 vm_page_xunbusy(ma[i]); 4790 } 4791 *mp = NULL; 4792 return (rv); 4793 } 4794 for (i = 1; i < after; i++) 4795 vm_page_readahead_finish(ma[i]); 4796 MPASS(vm_page_all_valid(m)); 4797 } else { 4798 vm_page_zero_invalid(m, TRUE); 4799 } 4800 out: 4801 if ((allocflags & VM_ALLOC_WIRED) != 0) 4802 vm_page_wire(m); 4803 if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m)) 4804 vm_page_busy_downgrade(m); 4805 else if ((allocflags & VM_ALLOC_NOBUSY) != 0) 4806 vm_page_busy_release(m); 4807 *mp = m; 4808 return (VM_PAGER_OK); 4809 } 4810 4811 /* 4812 * Locklessly grab a valid page. If the page is not valid or not yet 4813 * allocated this will fall back to the object lock method. 
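 *
 * For example, a caller that wants a wired, validated page without holding
 * the object lock might (hypothetically) use:
 *
 *	if (vm_page_grab_valid_unlocked(&m, object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY) ==
 *	    VM_PAGER_OK)
 *		... m is valid, wired, and unbusied ...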
4814 */ 4815 int 4816 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object, 4817 vm_pindex_t pindex, int allocflags) 4818 { 4819 vm_page_t m; 4820 int flags; 4821 int error; 4822 4823 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 4824 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 4825 ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY " 4826 "mismatch")); 4827 KASSERT((allocflags & 4828 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0, 4829 ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags)); 4830 4831 /* 4832 * Attempt a lockless lookup and busy. We need at least an sbusy 4833 * before we can inspect the valid field and return a wired page. 4834 */ 4835 flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED); 4836 if (!vm_page_acquire_unlocked(object, pindex, NULL, mp, flags)) 4837 return (VM_PAGER_FAIL); 4838 if ((m = *mp) != NULL) { 4839 if (vm_page_all_valid(m)) { 4840 if ((allocflags & VM_ALLOC_WIRED) != 0) 4841 vm_page_wire(m); 4842 vm_page_grab_release(m, allocflags); 4843 return (VM_PAGER_OK); 4844 } 4845 vm_page_busy_release(m); 4846 } 4847 if ((allocflags & VM_ALLOC_NOCREAT) != 0) { 4848 *mp = NULL; 4849 return (VM_PAGER_FAIL); 4850 } 4851 VM_OBJECT_WLOCK(object); 4852 error = vm_page_grab_valid(mp, object, pindex, allocflags); 4853 VM_OBJECT_WUNLOCK(object); 4854 4855 return (error); 4856 } 4857 4858 /* 4859 * Return the specified range of pages from the given object. For each 4860 * page offset within the range, if a page already exists within the object 4861 * at that offset and it is busy, then wait for it to change state. If, 4862 * instead, the page doesn't exist, then allocate it. 4863 * 4864 * The caller must always specify an allocation class. 4865 * 4866 * allocation classes: 4867 * VM_ALLOC_NORMAL normal process request 4868 * VM_ALLOC_SYSTEM system *really* needs the pages 4869 * 4870 * The caller must always specify that the pages are to be busied and/or 4871 * wired. 4872 * 4873 * optional allocation flags: 4874 * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages 4875 * VM_ALLOC_NOBUSY do not exclusive busy the page 4876 * VM_ALLOC_NOWAIT do not sleep 4877 * VM_ALLOC_SBUSY set page to sbusy state 4878 * VM_ALLOC_WIRED wire the pages 4879 * VM_ALLOC_ZERO zero and validate any invalid pages 4880 * 4881 * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it 4882 * may return a partial prefix of the requested range. 
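 *
 * A sketch of a (hypothetical) caller collecting a run of busied,
 * zero-filled pages:
 *
 *	vm_page_t ma[8];
 *	int got;
 *
 *	VM_OBJECT_WLOCK(object);
 *	got = vm_page_grab_pages(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, ma, 8);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * Here "got" may be less than 8 because VM_ALLOC_NOWAIT is specified;
 * pages ma[0] through ma[got - 1] are returned exclusive busied.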
4883 */
4884 int
4885 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
4886     vm_page_t *ma, int count)
4887 {
4888         vm_page_t m, mpred;
4889         int pflags;
4890         int i;
4891 
4892         VM_OBJECT_ASSERT_WLOCKED(object);
4893         KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
4894             ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
4895         KASSERT(count > 0,
4896             ("vm_page_grab_pages: invalid page count %d", count));
4897         vm_page_grab_check(allocflags);
4898 
4899         pflags = vm_page_grab_pflags(allocflags);
4900         i = 0;
4901 retrylookup:
4902         m = vm_radix_lookup_le(&object->rtree, pindex + i);
4903         if (m == NULL || m->pindex != pindex + i) {
4904                 mpred = m;
4905                 m = NULL;
4906         } else
4907                 mpred = TAILQ_PREV(m, pglist, listq);
4908         for (; i < count; i++) {
4909                 if (m != NULL) {
4910                         if (!vm_page_tryacquire(m, allocflags)) {
4911                                 if (vm_page_grab_sleep(object, m, pindex + i,
4912                                     "grbmaw", allocflags, true))
4913                                         goto retrylookup;
4914                                 break;
4915                         }
4916                 } else {
4917                         if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4918                                 break;
4919                         m = vm_page_alloc_after(object, pindex + i,
4920                             pflags | VM_ALLOC_COUNT(count - i), mpred);
4921                         if (m == NULL) {
4922                                 if ((allocflags & (VM_ALLOC_NOWAIT |
4923                                     VM_ALLOC_WAITFAIL)) != 0)
4924                                         break;
4925                                 goto retrylookup;
4926                         }
4927                 }
4928                 if (vm_page_none_valid(m) &&
4929                     (allocflags & VM_ALLOC_ZERO) != 0) {
4930                         if ((m->flags & PG_ZERO) == 0)
4931                                 pmap_zero_page(m);
4932                         vm_page_valid(m);
4933                 }
4934                 vm_page_grab_release(m, allocflags);
4935                 ma[i] = mpred = m;
4936                 m = vm_page_next(m);
4937         }
4938         return (i);
4939 }
4940 
4941 /*
4942  * Unlocked variant of vm_page_grab_pages().  This accepts the same flags
4943  * and will fall back to the locked variant to handle allocation.
4944  */
4945 int
4946 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
4947     int allocflags, vm_page_t *ma, int count)
4948 {
4949         vm_page_t m, pred;
4950         int flags;
4951         int i;
4952 
4953         KASSERT(count > 0,
4954             ("vm_page_grab_pages_unlocked: invalid page count %d", count));
4955         vm_page_grab_check(allocflags);
4956 
4957         /*
4958          * Modify flags for lockless acquire to hold the page until we
4959          * set it valid if necessary.
4960          */
4961         flags = allocflags & ~VM_ALLOC_NOBUSY;
4962         pred = NULL;
4963         for (i = 0; i < count; i++, pindex++) {
4964                 if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags))
4965                         return (i);
4966                 if (m == NULL)
4967                         break;
4968                 if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
4969                         if ((m->flags & PG_ZERO) == 0)
4970                                 pmap_zero_page(m);
4971                         vm_page_valid(m);
4972                 }
4973                 /* m will still be wired or busy according to flags. */
4974                 vm_page_grab_release(m, allocflags);
4975                 pred = ma[i] = m;
4976         }
4977         if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0)
4978                 return (i);
4979         count -= i;
4980         VM_OBJECT_WLOCK(object);
4981         i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
4982         VM_OBJECT_WUNLOCK(object);
4983 
4984         return (i);
4985 }
4986 
4987 /*
4988  * Mapping function for valid or dirty bits in a page.
4989  *
4990  * Inputs are required to range within a page.
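 *
 * For example, with the usual 512-byte DEV_BSIZE, vm_page_bits(0, 1024)
 * computes first_bit = 0 and last_bit = 1, so the result is
 * ((vm_page_bits_t)2 << 1) - ((vm_page_bits_t)1 << 0) == 0x3, i.e., the
 * bits covering the first two DEV_BSIZE chunks of the page.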
4991 */ 4992 vm_page_bits_t 4993 vm_page_bits(int base, int size) 4994 { 4995 int first_bit; 4996 int last_bit; 4997 4998 KASSERT( 4999 base + size <= PAGE_SIZE, 5000 ("vm_page_bits: illegal base/size %d/%d", base, size) 5001 ); 5002 5003 if (size == 0) /* handle degenerate case */ 5004 return (0); 5005 5006 first_bit = base >> DEV_BSHIFT; 5007 last_bit = (base + size - 1) >> DEV_BSHIFT; 5008 5009 return (((vm_page_bits_t)2 << last_bit) - 5010 ((vm_page_bits_t)1 << first_bit)); 5011 } 5012 5013 void 5014 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set) 5015 { 5016 5017 #if PAGE_SIZE == 32768 5018 atomic_set_64((uint64_t *)bits, set); 5019 #elif PAGE_SIZE == 16384 5020 atomic_set_32((uint32_t *)bits, set); 5021 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16) 5022 atomic_set_16((uint16_t *)bits, set); 5023 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8) 5024 atomic_set_8((uint8_t *)bits, set); 5025 #else /* PAGE_SIZE <= 8192 */ 5026 uintptr_t addr; 5027 int shift; 5028 5029 addr = (uintptr_t)bits; 5030 /* 5031 * Use a trick to perform a 32-bit atomic on the 5032 * containing aligned word, to not depend on the existence 5033 * of atomic_{set, clear}_{8, 16}. 5034 */ 5035 shift = addr & (sizeof(uint32_t) - 1); 5036 #if BYTE_ORDER == BIG_ENDIAN 5037 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; 5038 #else 5039 shift *= NBBY; 5040 #endif 5041 addr &= ~(sizeof(uint32_t) - 1); 5042 atomic_set_32((uint32_t *)addr, set << shift); 5043 #endif /* PAGE_SIZE */ 5044 } 5045 5046 static inline void 5047 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear) 5048 { 5049 5050 #if PAGE_SIZE == 32768 5051 atomic_clear_64((uint64_t *)bits, clear); 5052 #elif PAGE_SIZE == 16384 5053 atomic_clear_32((uint32_t *)bits, clear); 5054 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16) 5055 atomic_clear_16((uint16_t *)bits, clear); 5056 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8) 5057 atomic_clear_8((uint8_t *)bits, clear); 5058 #else /* PAGE_SIZE <= 8192 */ 5059 uintptr_t addr; 5060 int shift; 5061 5062 addr = (uintptr_t)bits; 5063 /* 5064 * Use a trick to perform a 32-bit atomic on the 5065 * containing aligned word, to not depend on the existence 5066 * of atomic_{set, clear}_{8, 16}. 
5067          */
5068         shift = addr & (sizeof(uint32_t) - 1);
5069 #if BYTE_ORDER == BIG_ENDIAN
5070         shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5071 #else
5072         shift *= NBBY;
5073 #endif
5074         addr &= ~(sizeof(uint32_t) - 1);
5075         atomic_clear_32((uint32_t *)addr, clear << shift);
5076 #endif /* PAGE_SIZE */
5077 }
5078 
5079 static inline vm_page_bits_t
5080 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
5081 {
5082 #if PAGE_SIZE == 32768
5083         uint64_t old;
5084 
5085         old = *bits;
5086         while (atomic_fcmpset_64(bits, &old, newbits) == 0);
5087         return (old);
5088 #elif PAGE_SIZE == 16384
5089         uint32_t old;
5090 
5091         old = *bits;
5092         while (atomic_fcmpset_32(bits, &old, newbits) == 0);
5093         return (old);
5094 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
5095         uint16_t old;
5096 
5097         old = *bits;
5098         while (atomic_fcmpset_16(bits, &old, newbits) == 0);
5099         return (old);
5100 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
5101         uint8_t old;
5102 
5103         old = *bits;
5104         while (atomic_fcmpset_8(bits, &old, newbits) == 0);
5105         return (old);
5106 #else /* PAGE_SIZE <= 4096 */
5107         uintptr_t addr;
5108         uint32_t old, new, mask;
5109         int shift;
5110 
5111         addr = (uintptr_t)bits;
5112         /*
5113          * Use a trick to perform a 32-bit atomic on the
5114          * containing aligned word, to not depend on the existence
5115          * of atomic_{set, swap, clear}_{8, 16}.
5116          */
5117         shift = addr & (sizeof(uint32_t) - 1);
5118 #if BYTE_ORDER == BIG_ENDIAN
5119         shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5120 #else
5121         shift *= NBBY;
5122 #endif
5123         addr &= ~(sizeof(uint32_t) - 1);
5124         mask = VM_PAGE_BITS_ALL << shift;
5125 
5126         old = *bits;
5127         do {
5128                 new = old & ~mask;
5129                 new |= newbits << shift;
5130         } while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
5131         return (old >> shift);
5132 #endif /* PAGE_SIZE */
5133 }
5134 
5135 /*
5136  * vm_page_set_valid_range:
5137  *
5138  *	Sets portions of a page valid.  The arguments are expected
5139  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
5140  *	of any partial chunks touched by the range.  The invalid portion of
5141  *	such chunks will be zeroed.
5142  *
5143  *	(base + size) must be less than or equal to PAGE_SIZE.
5144  */
5145 void
5146 vm_page_set_valid_range(vm_page_t m, int base, int size)
5147 {
5148         int endoff, frag;
5149         vm_page_bits_t pagebits;
5150 
5151         vm_page_assert_busied(m);
5152         if (size == 0)          /* handle degenerate case */
5153                 return;
5154 
5155         /*
5156          * If the base is not DEV_BSIZE aligned and the valid
5157          * bit is clear, we have to zero out a portion of the
5158          * first block.
5159          */
5160         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5161             (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5162                 pmap_zero_page_area(m, frag, base - frag);
5163 
5164         /*
5165          * If the ending offset is not DEV_BSIZE aligned and the
5166          * valid bit is clear, we have to zero out a portion of
5167          * the last block.
5168          */
5169         endoff = base + size;
5170         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5171             (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5172                 pmap_zero_page_area(m, endoff,
5173                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5174 
5175         /*
5176          * Assert that no previously invalid block that is now being validated
5177          * is already dirty.
5178          */
5179         KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
5180             ("vm_page_set_valid_range: page %p is dirty", m));
5181 
5182         /*
5183          * Set valid bits inclusive of any overlap.
5184          */
5185         pagebits = vm_page_bits(base, size);
5186         if (vm_page_xbusied(m))
5187                 m->valid |= pagebits;
5188         else
5189                 vm_page_bits_set(m, &m->valid, pagebits);
5190 }
5191 
5192 /*
5193  * Set the page dirty bits and free the invalid swap space if
5194  * present.  Returns the previous dirty bits.
5195  */
5196 vm_page_bits_t
5197 vm_page_set_dirty(vm_page_t m)
5198 {
5199         vm_page_bits_t old;
5200 
5201         VM_PAGE_OBJECT_BUSY_ASSERT(m);
5202 
5203         if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
5204                 old = m->dirty;
5205                 m->dirty = VM_PAGE_BITS_ALL;
5206         } else
5207                 old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
5208         if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
5209                 vm_pager_page_unswapped(m);
5210 
5211         return (old);
5212 }
5213 
5214 /*
5215  * Clear the given bits from the specified page's dirty field.
5216  */
5217 static __inline void
5218 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
5219 {
5220 
5221         vm_page_assert_busied(m);
5222 
5223         /*
5224          * If the page is xbusied and not write mapped we are the
5225          * only thread that can modify dirty bits.  Otherwise, the pmap
5226          * layer can call vm_page_dirty() without holding a distinguished
5227          * lock.  The combination of page busy and atomic operations
5228          * suffices to guarantee consistency of the page dirty field.
5229          */
5230         if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
5231                 m->dirty &= ~pagebits;
5232         else
5233                 vm_page_bits_clear(m, &m->dirty, pagebits);
5234 }
5235 
5236 /*
5237  * vm_page_set_validclean:
5238  *
5239  *	Sets portions of a page valid and clean.  The arguments are expected
5240  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
5241  *	of any partial chunks touched by the range.  The invalid portion of
5242  *	such chunks will be zero'd.
5243  *
5244  *	(base + size) must be less than or equal to PAGE_SIZE.
5245  */
5246 void
5247 vm_page_set_validclean(vm_page_t m, int base, int size)
5248 {
5249         vm_page_bits_t oldvalid, pagebits;
5250         int endoff, frag;
5251 
5252         vm_page_assert_busied(m);
5253         if (size == 0)          /* handle degenerate case */
5254                 return;
5255 
5256         /*
5257          * If the base is not DEV_BSIZE aligned and the valid
5258          * bit is clear, we have to zero out a portion of the
5259          * first block.
5260          */
5261         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5262             (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5263                 pmap_zero_page_area(m, frag, base - frag);
5264 
5265         /*
5266          * If the ending offset is not DEV_BSIZE aligned and the
5267          * valid bit is clear, we have to zero out a portion of
5268          * the last block.
5269          */
5270         endoff = base + size;
5271         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5272             (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5273                 pmap_zero_page_area(m, endoff,
5274                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5275 
5276         /*
5277          * Set valid, clear dirty bits.  If validating the entire
5278          * page we can safely clear the pmap modify bit.  We also
5279          * use this opportunity to clear the PGA_NOSYNC flag.  If a process
5280          * takes a write fault on a MAP_NOSYNC memory area the flag will
5281          * be set again.
5282          *
5283          * We set valid bits inclusive of any overlap, but we can only
5284          * clear dirty bits for DEV_BSIZE chunks that are fully within
5285          * the range.
5286          */
5287         oldvalid = m->valid;
5288         pagebits = vm_page_bits(base, size);
5289         if (vm_page_xbusied(m))
5290                 m->valid |= pagebits;
5291         else
5292                 vm_page_bits_set(m, &m->valid, pagebits);
5293 #if 0   /* NOT YET */
5294         if ((frag = base & (DEV_BSIZE - 1)) != 0) {
5295                 frag = DEV_BSIZE - frag;
5296                 base += frag;
5297                 size -= frag;
5298                 if (size < 0)
5299                         size = 0;
5300         }
5301         pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
5302 #endif
5303         if (base == 0 && size == PAGE_SIZE) {
5304                 /*
5305                  * The page can only be modified within the pmap if it is
5306                  * mapped, and it can only be mapped if it was previously
5307                  * fully valid.
5308                  */
5309                 if (oldvalid == VM_PAGE_BITS_ALL)
5310                         /*
5311                          * Perform the pmap_clear_modify() first.  Otherwise,
5312                          * a concurrent pmap operation, such as
5313                          * pmap_protect(), could clear a modification in the
5314                          * pmap and set the dirty field on the page before
5315                          * pmap_clear_modify() had begun and after the dirty
5316                          * field was cleared here.
5317                          */
5318                         pmap_clear_modify(m);
5319                 m->dirty = 0;
5320                 vm_page_aflag_clear(m, PGA_NOSYNC);
5321         } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
5322                 m->dirty &= ~pagebits;
5323         else
5324                 vm_page_clear_dirty_mask(m, pagebits);
5325 }
5326 
5327 void
5328 vm_page_clear_dirty(vm_page_t m, int base, int size)
5329 {
5330 
5331         vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
5332 }
5333 
5334 /*
5335  * vm_page_set_invalid:
5336  *
5337  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
5338  *	valid and dirty bits for the affected areas are cleared.
5339  */
5340 void
5341 vm_page_set_invalid(vm_page_t m, int base, int size)
5342 {
5343         vm_page_bits_t bits;
5344         vm_object_t object;
5345 
5346         /*
5347          * The object lock is required so that pages can't be mapped
5348          * read-only while we're in the process of invalidating them.
5349          */
5350         object = m->object;
5351         VM_OBJECT_ASSERT_WLOCKED(object);
5352         vm_page_assert_busied(m);
5353 
5354         if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
5355             size >= object->un_pager.vnp.vnp_size)
5356                 bits = VM_PAGE_BITS_ALL;
5357         else
5358                 bits = vm_page_bits(base, size);
5359         if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
5360                 pmap_remove_all(m);
5361         KASSERT((bits == 0 && vm_page_all_valid(m)) ||
5362             !pmap_page_is_mapped(m),
5363             ("vm_page_set_invalid: page %p is mapped", m));
5364         if (vm_page_xbusied(m)) {
5365                 m->valid &= ~bits;
5366                 m->dirty &= ~bits;
5367         } else {
5368                 vm_page_bits_clear(m, &m->valid, bits);
5369                 vm_page_bits_clear(m, &m->dirty, bits);
5370         }
5371 }
5372 
5373 /*
5374  * vm_page_invalid:
5375  *
5376  *	Invalidates the entire page.  The page must be busy, unmapped, and
5377  *	the enclosing object must be locked.  The object lock protects
5378  *	against a concurrent read-only pmap enter, which is done without
5379  *	busying the page.
5380  */
5381 void
5382 vm_page_invalid(vm_page_t m)
5383 {
5384 
5385         vm_page_assert_busied(m);
5386         VM_OBJECT_ASSERT_WLOCKED(m->object);
5387         MPASS(!pmap_page_is_mapped(m));
5388 
5389         if (vm_page_xbusied(m))
5390                 m->valid = 0;
5391         else
5392                 vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
5393 }
5394 
5395 /*
5396  * vm_page_zero_invalid()
5397  *
5398  *	The kernel assumes that the invalid portions of a page contain
5399  *	garbage, but such pages can be mapped into memory by user code.
5400  *	When this occurs, we must zero out the non-valid portions of the
5401  *	page so user code sees what it expects.
5402  *
5403  *	Pages are most often semi-valid when the end of a file is mapped
5404  *	into memory and the file's size is not page aligned.
5405  */
5406 void
5407 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
5408 {
5409         int b;
5410         int i;
5411 
5412         /*
5413          * Scan the valid bits looking for invalid sections that
5414          * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
5415          * valid bit may be set) have already been zeroed by
5416          * vm_page_set_validclean().
5417          */
5418         for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
5419                 if (i == (PAGE_SIZE / DEV_BSIZE) ||
5420                     (m->valid & ((vm_page_bits_t)1 << i))) {
5421                         if (i > b) {
5422                                 pmap_zero_page_area(m,
5423                                     b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
5424                         }
5425                         b = i + 1;
5426                 }
5427         }
5428 
5429         /*
5430          * setvalid is TRUE when we can safely set the zero'd areas
5431          * as being valid.  We can do this if there are no cache consistency
5432          * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
5433          */
5434         if (setvalid)
5435                 vm_page_valid(m);
5436 }
5437 
5438 /*
5439  * vm_page_is_valid:
5440  *
5441  *	Is (partial) page valid?  In the degenerate case where size == 0,
5442  *	this returns FALSE if the page is entirely invalid, and TRUE
5443  *	otherwise.
5444  *
5445  *	Some callers invoke this routine without the busy lock held and
5446  *	handle races via higher level locks.  Typical callers should
5447  *	hold a busy lock to prevent invalidation.
5448  */
5449 int
5450 vm_page_is_valid(vm_page_t m, int base, int size)
5451 {
5452         vm_page_bits_t bits;
5453 
5454         bits = vm_page_bits(base, size);
5455         return (vm_page_any_valid(m) && (m->valid & bits) == bits);
5456 }
5457 
5458 /*
5459  * Returns true if all of the specified predicates are true for the entire
5460  * (super)page and false otherwise.
5461  */
5462 bool
5463 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
5464 {
5465         vm_object_t object;
5466         int i, npages;
5467 
5468         object = m->object;
5469         if (skip_m != NULL && skip_m->object != object)
5470                 return (false);
5471         VM_OBJECT_ASSERT_LOCKED(object);
5472         npages = atop(pagesizes[m->psind]);
5473 
5474         /*
5475          * The physically contiguous pages that make up a superpage, i.e., a
5476          * page with a page size index ("psind") greater than zero, will
5477          * occupy adjacent entries in vm_page_array[].
5478          */
5479         for (i = 0; i < npages; i++) {
5480                 /* Always test object consistency, including "skip_m". */
5481                 if (m[i].object != object)
5482                         return (false);
5483                 if (&m[i] == skip_m)
5484                         continue;
5485                 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
5486                         return (false);
5487                 if ((flags & PS_ALL_DIRTY) != 0) {
5488                         /*
5489                          * Calling vm_page_test_dirty() or pmap_is_modified()
5490                          * might stop this case from spuriously returning
5491                          * "false".  However, that would require a write lock
5492                          * on the object containing "m[i]".
5493                          */
5494                         if (m[i].dirty != VM_PAGE_BITS_ALL)
5495                                 return (false);
5496                 }
5497                 if ((flags & PS_ALL_VALID) != 0 &&
5498                     m[i].valid != VM_PAGE_BITS_ALL)
5499                         return (false);
5500         }
5501         return (true);
5502 }
5503 
5504 /*
5505  * Set the page's dirty bits if the page is modified.
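 *
 * A (hypothetical) write-back path would call this before consulting
 * m->dirty so that pmap-level modifications are accounted for:
 *
 *	vm_page_test_dirty(m);
 *	if (m->dirty != 0)
 *		... the page needs laundering ...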
5506 */ 5507 void 5508 vm_page_test_dirty(vm_page_t m) 5509 { 5510 5511 vm_page_assert_busied(m); 5512 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 5513 vm_page_dirty(m); 5514 } 5515 5516 void 5517 vm_page_valid(vm_page_t m) 5518 { 5519 5520 vm_page_assert_busied(m); 5521 if (vm_page_xbusied(m)) 5522 m->valid = VM_PAGE_BITS_ALL; 5523 else 5524 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL); 5525 } 5526 5527 void 5528 vm_page_lock_KBI(vm_page_t m, const char *file, int line) 5529 { 5530 5531 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); 5532 } 5533 5534 void 5535 vm_page_unlock_KBI(vm_page_t m, const char *file, int line) 5536 { 5537 5538 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line); 5539 } 5540 5541 int 5542 vm_page_trylock_KBI(vm_page_t m, const char *file, int line) 5543 { 5544 5545 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line)); 5546 } 5547 5548 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 5549 void 5550 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line) 5551 { 5552 5553 vm_page_lock_assert_KBI(m, MA_OWNED, file, line); 5554 } 5555 5556 void 5557 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line) 5558 { 5559 5560 mtx_assert_(vm_page_lockptr(m), a, file, line); 5561 } 5562 #endif 5563 5564 #ifdef INVARIANTS 5565 void 5566 vm_page_object_busy_assert(vm_page_t m) 5567 { 5568 5569 /* 5570 * Certain of the page's fields may only be modified by the 5571 * holder of a page or object busy. 5572 */ 5573 if (m->object != NULL && !vm_page_busied(m)) 5574 VM_OBJECT_ASSERT_BUSY(m->object); 5575 } 5576 5577 void 5578 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits) 5579 { 5580 5581 if ((bits & PGA_WRITEABLE) == 0) 5582 return; 5583 5584 /* 5585 * The PGA_WRITEABLE flag can only be set if the page is 5586 * managed, is exclusively busied or the object is locked. 5587 * Currently, this flag is only set by pmap_enter(). 
5588 */ 5589 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5590 ("PGA_WRITEABLE on unmanaged page")); 5591 if (!vm_page_xbusied(m)) 5592 VM_OBJECT_ASSERT_BUSY(m->object); 5593 } 5594 #endif 5595 5596 #include "opt_ddb.h" 5597 #ifdef DDB 5598 #include <sys/kernel.h> 5599 5600 #include <ddb/ddb.h> 5601 5602 DB_SHOW_COMMAND_FLAGS(page, vm_page_print_page_info, DB_CMD_MEMSAFE) 5603 { 5604 5605 db_printf("vm_cnt.v_free_count: %d\n", vm_free_count()); 5606 db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count()); 5607 db_printf("vm_cnt.v_active_count: %d\n", vm_active_count()); 5608 db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count()); 5609 db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count()); 5610 db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved); 5611 db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min); 5612 db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target); 5613 db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target); 5614 } 5615 5616 DB_SHOW_COMMAND_FLAGS(pageq, vm_page_print_pageq_info, DB_CMD_MEMSAFE) 5617 { 5618 int dom; 5619 5620 db_printf("pq_free %d\n", vm_free_count()); 5621 for (dom = 0; dom < vm_ndomains; dom++) { 5622 db_printf( 5623 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n", 5624 dom, 5625 vm_dom[dom].vmd_page_count, 5626 vm_dom[dom].vmd_free_count, 5627 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt, 5628 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt, 5629 vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt, 5630 vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt); 5631 } 5632 } 5633 5634 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) 5635 { 5636 vm_page_t m; 5637 boolean_t phys, virt; 5638 5639 if (!have_addr) { 5640 db_printf("show pginfo addr\n"); 5641 return; 5642 } 5643 5644 phys = strchr(modif, 'p') != NULL; 5645 virt = strchr(modif, 'v') != NULL; 5646 if (virt) 5647 m = PHYS_TO_VM_PAGE(pmap_kextract(addr)); 5648 else if (phys) 5649 m = PHYS_TO_VM_PAGE(addr); 5650 else 5651 m = (vm_page_t)addr; 5652 db_printf( 5653 "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n" 5654 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", 5655 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, 5656 m->a.queue, m->ref_count, m->a.flags, m->oflags, 5657 m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty); 5658 } 5659 #endif /* DDB */ 5660