/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- A page queue lock is required when adding or removing a page from a
 *	  page queue regardless of other locks or the busy state of a page.
 *
 *		* In general, no thread besides the page daemon can acquire or
 *		  hold more than one page queue lock at a time.
 *
 *		* The page daemon can acquire and hold any pair of page queue
 *		  locks in any order.
 *
 *	- The object lock is required when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 *
 */
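
/*
 * Example (illustrative, not part of the original rules): a direct consequence
 * of the above is that an ordinary thread moving a page from one page queue to
 * another must release the first queue's lock before acquiring the second
 * queue's lock; only the page daemon may hold both queue locks across such a
 * move.
 */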

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

extern int	uma_startup_count(int);
extern void	uma_startup(void *, int);
extern int	vmem_startup_count(void);

struct vm_domain vm_dom[MAXMEMDOM];

DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);

struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];

struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
/* The following fields are protected by the domainset lock. */
domainset_t __exclusive_cache_line vm_min_domains;
domainset_t __exclusive_cache_line vm_severe_domains;
static int vm_min_waiters;
static int vm_severe_waiters;
static int vm_pageproc_waiters;

/*
 *	bogus page -- for I/O to/from partially complete buffers,
 *	or for paging into sparsely invalid regions.
 */
vm_page_t bogus_page;

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;

static int boot_pages;
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &boot_pages, 0,
    "number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

static void vm_page_alloc_check(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_dequeue_complete(vm_page_t m);
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
    int req);
static int vm_page_import(void *arg, void **store, int cnt, int domain,
    int flags);
static void vm_page_release(void *arg, void **store, int cnt);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

static void
vm_page_init(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
}

/*
 * The cache page zone is initialized later since we need to be able to allocate
 * pages before UMA is fully initialized.
 */
static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	int i;

	for (i = 0; i < vm_ndomains; i++) {
		vmd = VM_DOMAIN(i);
		/*
		 * Don't allow the page cache to take up more than .25% of
		 * memory.
		 */
		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
			continue;
		vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
		    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
		    vm_page_import, vm_page_release, vmd,
		    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
		(void)uma_zone_set_maxcache(vmd->vmd_pgcache, 0);
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif
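
/*
 * Illustrative usage sketch for vm_page_pa_tryrelock() below (an assumption,
 * not copied from an in-tree caller): pmap code that holds the pmap lock while
 * resolving a mapping typically wraps the lookup in a retry loop, because a
 * nonzero return means the pmap lock was dropped and the virtual-to-physical
 * translation must be re-derived:
 *
 *	retry:
 *		pa = pmap_extract(pmap, va);
 *		if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *			goto retry;
 *		... operate on the page at "pa" with its PA lock held ...
 *		PA_UNLOCK_COND(locked_pa);
 */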

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

bool
vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
{
	struct vm_domain *vmd;
	vm_page_t m;
	int ret;

	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		return (true); /* page does not exist, no failure */

	vmd = vm_pagequeue_domain(m);
	vm_domain_free_lock(vmd);
	ret = vm_phys_unfree_page(m);
	vm_domain_free_unlock(vmd);
	if (ret != 0) {
		vm_domain_freecnt_inc(vmd, -1);
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		if (verbose)
			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
	}
	return (ret);
}
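
/*
 * Example input (illustrative; the addresses are made up): the same format is
 * accepted both from the preloaded "ram_blacklist" file and from the
 * vm.blacklist kenv variable, e.g. in loader.conf:
 *
 *	vm.blacklist="0x12345000,0x23456000 0x34567000"
 *
 * Each entry is parsed by strtoq() with base 0, so decimal, octal, and hex
 * forms are accepted, and entries may be separated by spaces, commas, or
 * newlines.
 */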

/*
 *	vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	char *next;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		vm_page_blacklist_add(pa, bootverbose);
	}
}

/*
 *	vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
	return;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Initialize a dummy page for use in scans of the specified paging queue.
 * In principle, this function only needs to set the flag PG_MARKER.
 * Nonetheless, it write busies and initializes the hold count to one as
 * safety precautions.
 */
static void
vm_page_init_marker(vm_page_t marker, int queue, uint8_t aflags)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->aflags = aflags;
	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
	marker->queue = queue;
	marker->hold_count = 1;
}

static void
vm_page_domain_init(int domain)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int i;

	vmd = VM_DOMAIN(domain);
	bzero(vmd, sizeof(*vmd));
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	vmd->vmd_domain = domain;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
		pq->pq_pdpages = 0;
		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
	}
	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);

	/*
	 * inacthead is used to provide FIFO ordering for LRU-bypassing
	 * insertions.
	 */
	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
	    &vmd->vmd_inacthead, plinks.q);

	/*
	 * The clock pages are used to implement active queue scanning without
	 * requeues.  Scans start at clock[0], which is advanced after the scan
	 * ends.  When the two clock hands meet, they are reset and scanning
	 * resumes from the head of the queue.
	 */
	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[0], plinks.q);
	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
	    &vmd->vmd_clock[1], plinks.q);
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
static void
vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
{

	m->object = NULL;
	m->wire_count = 0;
	m->busy_lock = VPB_UNBUSIED;
	m->hold_count = 0;
	m->flags = m->aflags = 0;
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->psind = 0;
	m->segind = segind;
	m->order = VM_NFREEORDER;
	m->pool = VM_FREEPOOL_DEFAULT;
	m->valid = m->dirty = 0;
	pmap_page_init(m);
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.  Allocates physical memory for
 *	bootstrapping UMA and some data structures that are used to manage
 *	physical pages.  Initializes these structures, and populates the free
 *	page queues.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	struct vm_phys_seg *seg;
	vm_page_t m;
	char *list, *listend;
	vm_offset_t mapped;
	vm_paddr_t end, high_avail, low_avail, new_end, page_range, size;
	vm_paddr_t biggestsize, last_pa, pa;
	u_long pagecount;
	int biggestone, i, segind;
#ifdef WITNESS
	int witness_size;
#endif
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	long ii;
#endif

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}
	for (i = 0; phys_avail[i + 1]; i += 2) {
		size = phys_avail[i + 1] - phys_avail[i];
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(i);

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.  Tell UMA how many zones we are going to create
	 * before going fully functional.  UMA will add its zones.
	 *
	 * VM startup zones: vmem, vmem_btag, VM OBJECT, RADIX NODE, MAP,
	 * KMAP ENTRY, MAP ENTRY, VMSPACE.
	 */
	boot_pages = uma_startup_count(8);

#ifndef UMA_MD_SMALL_ALLOC
	/* vmem_startup() calls uma_prealloc(). */
	boot_pages += vmem_startup_count();
	/* vm_map_startup() calls uma_prealloc(). */
	boot_pages += howmany(MAX_KMAP,
	    UMA_SLAB_SPACE / sizeof(struct vm_map));

	/*
	 * Before going fully functional kmem_init() does allocation
	 * from "KMAP ENTRY" and vmem_create() does allocation from "vmem".
	 */
	boot_pages += 2;
#endif
	/*
	 * CTFLAG_RDTUN doesn't work during the early boot process, so we must
	 * manually fetch the value.
	 */
	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#ifdef WITNESS
	witness_size = round_page(witness_startup_count());
	new_end -= witness_size;
	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, witness_size);
	witness_startup((void *)mapped);
#endif

#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
    defined(__i386__) || defined(__mips__) || defined(__riscv)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#else
	(void)last_pa;
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv)
	/*
	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
	 * in a crash dump.  When pmap_map() uses the direct map, they are
	 * not automatically included.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use, taking into account the overhead of a page structure per page.
	 * In other words, solve
	 *	"available physical memory" - round_page(page_range *
	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE
	 * for page_range.
	 */
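	/*
	 * Worked example (illustrative, not part of the original comment):
	 * ignoring the round_page() term, the equation above solves to
	 *
	 *	page_range = "available physical memory" /
	 *	    (PAGE_SIZE + sizeof(struct vm_page))
	 *
	 * which is how page_range is computed below.  With an assumed 4 KB
	 * page and a struct vm_page of roughly 100 bytes, about 2.5% of each
	 * chunk of physical memory is consumed by page structures.
	 */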
	low_avail = phys_avail[0];
	high_avail = phys_avail[1];
	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_avail)
			low_avail = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_avail)
			high_avail = vm_phys_segs[i].end;
	}
	/* Skip the first chunk.  It is already accounted for. */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i] < low_avail)
			low_avail = phys_avail[i];
		if (phys_avail[i + 1] > high_avail)
			high_avail = phys_avail[i + 1];
	}
	first_page = low_avail / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	size = 0;
	for (i = 0; i < vm_phys_nsegs; i++)
		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		size += phys_avail[i + 1] - phys_avail[i];
#elif defined(VM_PHYSSEG_DENSE)
	size = high_avail - low_avail;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

#ifdef VM_PHYSSEG_DENSE
	/*
	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
	 * the overhead of a page structure per page only if vm_page_array is
	 * allocated from the last physical memory chunk.  Otherwise, we must
	 * allocate page structures representing the physical memory
	 * underlying vm_page_array, even though they will not be used.
	 */
	if (new_end != high_avail)
		page_range = size / PAGE_SIZE;
	else
#endif
	{
		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));

		/*
		 * If the partial bytes remaining are large enough for
		 * a page (PAGE_SIZE) without a corresponding
		 * 'struct vm_page', then new_end will contain an
		 * extra page after subtracting the length of the VM
		 * page array.  Compensate by subtracting an extra
		 * page from new_end.
		 */
		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
			if (new_end == high_avail)
				high_avail -= PAGE_SIZE;
			new_end -= PAGE_SIZE;
		}
	}
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 * However, because this page is allocated from KVM, out-of-bounds
	 * accesses using the direct map will not be trapped.
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Allocate physical memory for the page structures, and map it.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t)mapped;
	vm_page_array_size = page_range;

#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate physical memory for the reservation management system's
	 * data structures, and map it.
	 */
	if (high_avail == end)
		high_avail = new_end;
	new_end = vm_reserv_startup(&vaddr, new_end, high_avail);
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv)
	/*
	 * Include vm_page_array and vm_reserv_array in a crash dump.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Initialize the page structures and add every available page to the
	 * physical memory allocator's free lists.
	 */
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	for (ii = 0; ii < vm_page_array_size; ii++) {
		m = &vm_page_array[ii];
		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
		m->flags = PG_FICTITIOUS;
	}
#endif
	vm_cnt.v_page_count = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		for (m = seg->first_page, pa = seg->start; pa < seg->end;
		    m++, pa += PAGE_SIZE)
			vm_page_init_page(m, pa, segind);

		/*
		 * Add the segment to the free lists only if it is covered by
		 * one of the ranges in phys_avail.  Because we've added the
		 * ranges to the vm_phys_segs array, we can assume that each
		 * segment is either entirely contained in one of the ranges,
		 * or doesn't overlap any of them.
		 */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			struct vm_domain *vmd;

			if (seg->start < phys_avail[i] ||
			    seg->end > phys_avail[i + 1])
				continue;

			m = seg->first_page;
			pagecount = (u_long)atop(seg->end - seg->start);

			vmd = VM_DOMAIN(seg->domain);
			vm_domain_free_lock(vmd);
			vm_phys_free_contig(m, pagecount);
			vm_domain_free_unlock(vmd);
			vm_domain_freecnt_inc(vmd, pagecount);
			vm_cnt.v_page_count += (u_int)pagecount;

			vmd = VM_DOMAIN(seg->domain);
			vmd->vmd_page_count += (u_int)pagecount;
			vmd->vmd_segs |= 1UL << m->segind;
			break;
		}
	}

	/*
	 * Remove blacklisted pages from the physical memory allocator.
	 */
	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif

	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;
	bool locked;

	vm_page_assert_xbusied(m);
	locked = mtx_owned(vm_page_lockptr(m));

	for (;;) {
		x = m->busy_lock;
		x &= VPB_BIT_WAITERS;
		if (x != 0 && !locked)
			vm_page_lock(m);
		if (atomic_cmpset_rel_int(&m->busy_lock,
		    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
			break;
		if (x != 0 && !locked)
			vm_page_unlock(m);
	}
	if (x != 0) {
		wakeup(m);
		if (!locked)
			vm_page_unlock(m);
	}
}

/*
 *	vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = m->busy_lock;
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}
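
/*
 * Illustrative note on the busy_lock encoding assumed by the compare-and-swap
 * loops below (see the VPB_* macros in vm_page.h): the word holds either
 * VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED, or VPB_SHARERS_WORD(n) for n shared
 * holders, optionally OR'ed with VPB_BIT_WAITERS.  For example, a page that is
 * shared busied twice with a sleeping waiter has busy_lock equal to
 * VPB_SHARERS_WORD(2) | VPB_BIT_WAITERS, and the final vm_page_sunbusy() of
 * such a page is responsible for issuing the wakeup().
 */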

/*
 *	vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_lock_assert(m, MA_NOTOWNED);
	vm_page_assert_sbusied(m);

	for (;;) {
		x = m->busy_lock;
		if (VPB_SHARERS(x) > 1) {
			if (atomic_cmpset_int(&m->busy_lock, x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		if ((x & VPB_BIT_WAITERS) == 0) {
			KASSERT(x == VPB_SHARERS_WORD(1),
			    ("vm_page_sunbusy: invalid lock state"));
			if (atomic_cmpset_int(&m->busy_lock,
			    VPB_SHARERS_WORD(1), VPB_UNBUSIED))
				break;
			continue;
		}
		KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
		    ("vm_page_sunbusy: invalid lock state for waiters"));

		vm_page_lock(m);
		if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
			vm_page_unlock(m);
			continue;
		}
		wakeup(m);
		vm_page_unlock(m);
		break;
	}
}

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep and release the page lock, using the page pointer as wchan.
 *	This is used to implement the hard-path of busying mechanism.
 *
 *	The given page must be locked.
 *
 *	If nonshared is true, sleep only if the page is xbusy.
 */
void
vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
{
	u_int x;

	vm_page_assert_locked(m);

	x = m->busy_lock;
	if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
	    ((x & VPB_BIT_WAITERS) == 0 &&
	    !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
		vm_page_unlock(m);
		return;
	}
	msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
}

/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	u_int x;

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
			return (1);
	}
}

static void
vm_page_xunbusy_locked(vm_page_t m)
{

	vm_page_assert_xbusied(m);
	vm_page_assert_locked(m);

	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* There is a waiter, do wakeup() instead of vm_page_flash(). */
	wakeup(m);
}

void
vm_page_xunbusy_maybelocked(vm_page_t m)
{
	bool lockacq;

	vm_page_assert_xbusied(m);

	/*
	 * Fast path for unbusy.  If it succeeds, we know that there
	 * are no waiters, so we do not need a wakeup.
	 */
	if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
	    VPB_UNBUSIED))
		return;

	lockacq = !mtx_owned(vm_page_lockptr(m));
	if (lockacq)
		vm_page_lock(m);
	vm_page_xunbusy_locked(m);
	if (lockacq)
		vm_page_unlock(m);
}

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called after the first try the exclusive unbusy of a page failed.
 *	It is assumed that the waiters bit is on.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{

	vm_page_assert_xbusied(m);

	vm_page_lock(m);
	vm_page_xunbusy_locked(m);
	vm_page_unlock(m);
}

/*
 *	vm_page_flash:
 *
 *	Wakeup anyone waiting for the page.
 *	The ownership bits do not change.
 *
 *	The given page must be locked.
 */
void
vm_page_flash(vm_page_t m)
{
	u_int x;

	vm_page_lock_assert(m, MA_OWNED);

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_WAITERS) == 0)
			return;
		if (atomic_cmpset_int(&m->busy_lock, x,
		    x & (~VPB_BIT_WAITERS)))
			break;
	}
	wakeup(m);
}

/*
 * Avoid releasing and reacquiring the same page lock.
 */
void
vm_page_change_lock(vm_page_t m, struct mtx **mtx)
{
	struct mtx *mtx1;

	mtx1 = vm_page_lockptr(m);
	if (*mtx == mtx1)
		return;
	if (*mtx != NULL)
		mtx_unlock(*mtx);
	*mtx = mtx1;
	mtx_lock(mtx1);
}

/*
 * Keep page from being freed by the page daemon; much the same effect as
 * wiring, except much lower overhead and should be used only for *very*
 * temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
	--mem->hold_count;
	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		vm_page_change_lock(*ma, &mtx);
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	m->wire_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from a getpages request that
 * was optionally read ahead or behind.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	/* We shouldn't put invalid pages on queues. */
	KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	vm_page_lock(m);
	if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);
	vm_page_unlock(m);
	vm_page_xunbusy(m);
}

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if the page is busied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and object containing it must
 *	be locked.
 */
int
vm_page_sleep_if_busy(vm_page_t m, const char *msg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	if (vm_page_busied(m)) {
		/*
		 * The page-specific object must be cached because page
		 * identity can change during the sleep, causing the
		 * re-lock of a different object.
		 * It is assumed that a reference to the object is already
		 * held by the callers.
		 */
		obj = m->object;
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(obj);
		vm_page_busy_sleep(m, msg, false);
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* Refer to this operation by its public name. */
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));
}

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	vm_page_t msucc;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {
		m->object = NULL;
		m->pindex = 0;
		return (1);
	}
	vm_page_insert_radixdone(m, object, mpred);
	return (0);
}

/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *
 *	Removes the specified page from its containing object, but does not
 *	invalidate any backing storage.
 *
 *	The object must be locked.  The page must be locked if it is managed.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mrem;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_assert_locked(m);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (vm_page_xbusied(m))
		vm_page_xunbusy_maybelocked(m);
	mrem = vm_radix_remove(&object->rtree, m->pindex);
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * The existing page must not be on a paging queue.
 */
vm_page_t
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mold;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(mnew->object == NULL,
	    ("vm_page_replace: page %p already in object", mnew));
	KASSERT(mnew->queue == PQ_NONE,
	    ("vm_page_replace: new page %p is on a paging queue", mnew));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	mold = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mold->queue == PQ_NONE,
	    ("vm_page_replace: old page %p is on a paging queue", mold));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);

	mold->object = NULL;
	vm_page_xunbusy_maybelocked(mold);

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but OBJ_MIGHTBEDIRTY may need to be
	 * updated.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	return (mold);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on m_prev and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_lock(m);
	vm_page_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;
	vm_page_unlock(m);
	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	return (0);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{

	return (vm_page_alloc_after(object, pindex, req, object != NULL ?
	    vm_radix_lookup_le(&object->rtree, pindex) : NULL));
}

vm_page_t
vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
    int req)
{

	return (vm_page_alloc_domain_after(object, pindex, domain, req,
	    object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) :
	    NULL));
}

/*
 * Allocate a page in the specified object with the given page index.  To
 * optimize insertion of the page into the object, the caller must also specify
 * the resident page in the object with largest index smaller than the given
 * page index, or NULL if no such page exists.
 */
vm_page_t
vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
    int req, vm_page_t mpred)
{
	struct vm_domainset_iter di;
	vm_page_t m;
	int domain;

	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
	do {
		m = vm_page_alloc_domain_after(object, pindex, domain, req,
		    mpred);
		if (m != NULL)
			break;
	} while (vm_domainset_iter_page(&di, object, &domain) == 0);

	return (m);
}

/*
 * Returns true if the number of free pages exceeds the minimum
 * for the request class and false otherwise.
 */
int
vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
{
	u_int limit, old, new;

	req = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
		req = VM_ALLOC_SYSTEM;
	if (req == VM_ALLOC_INTERRUPT)
		limit = 0;
	else if (req == VM_ALLOC_SYSTEM)
		limit = vmd->vmd_interrupt_free_min;
	else
		limit = vmd->vmd_free_reserved;

	/*
	 * Attempt to reserve the pages.  Fail if we're below the limit.
	 */
	limit += npages;
	old = vmd->vmd_free_count;
	do {
		if (old < limit)
			return (0);
		new = old - npages;
	} while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);

	/* Wake the page daemon if we've crossed the threshold. */
	if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
		pagedaemon_wakeup(vmd->vmd_domain);

	/* Only update bitsets on transitions. */
	if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
	    (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
		vm_domain_set(vmd);

	return (1);
}
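
/*
 * Illustrative note (an assumption drawn from the callers in this file): a
 * successful vm_domain_allocate() only reserves "npages" against the domain's
 * free count.  If the subsequent physical allocation fails, the caller is
 * expected to return the reservation with vm_domain_freecnt_inc(vmd, npages),
 * as vm_page_alloc_domain_after() and vm_page_alloc_contig_domain() below do.
 */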

vm_page_t
vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m;
	int flags;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("inconsistent object(%p)/req(%x)", object, req));
	KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
	    ("Can't sleep and retry object insertion."));
	KASSERT(mpred == NULL || mpred->pindex < pindex,
	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
	    (uintmax_t)pindex));
	if (object != NULL)
		VM_OBJECT_ASSERT_WLOCKED(object);

again:
	m = NULL;
#if VM_NRESERVLEVEL > 0
	/*
	 * Can we allocate the page from a reservation?
	 */
	if (vm_object_reserv(object) &&
	    ((m = vm_reserv_extend(req, object, pindex, domain, mpred)) != NULL ||
	    (m = vm_reserv_alloc_page(req, object, pindex, domain, mpred)) != NULL)) {
		domain = vm_phys_domain(m);
		vmd = VM_DOMAIN(domain);
		goto found;
	}
#endif
	vmd = VM_DOMAIN(domain);
	if (object != NULL && vmd->vmd_pgcache != NULL) {
		m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT);
		if (m != NULL)
			goto found;
	}
	if (vm_domain_allocate(vmd, req, 1)) {
		/*
		 * If not, allocate it from the free page queues.
		 */
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, object != NULL ?
		    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
#if VM_NRESERVLEVEL > 0
			if (vm_reserv_reclaim_inactive(domain))
				goto again;
#endif
		}
	}
	if (m == NULL) {
		/*
		 * Not allocatable, give up.
		 */
		if (vm_domain_alloc_fail(vmd, object, req))
			goto again;
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("missing page"));

found:
	vm_page_dequeue(m);
	vm_page_alloc_check(m);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	flags &= m->flags;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	m->flags = flags;
	m->aflags = 0;
	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
	    VPO_UNMANAGED : 0;
	m->busy_lock = VPB_UNBUSIED;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	if ((req & VM_ALLOC_SBUSY) != 0)
		m->busy_lock = VPB_SHARERS_WORD(1);
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		vm_wire_add(1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		if (vm_page_insert_after(m, object, pindex, mpred)) {
			if (req & VM_ALLOC_WIRED) {
				vm_wire_sub(1);
				m->wire_count = 0;
			}
			KASSERT(m->object == NULL, ("page %p has object", m));
			m->oflags = VPO_UNMANAGED;
			m->busy_lock = VPB_UNBUSIED;
			/* Don't change PG_ZERO. */
			vm_page_free_toq(m);
			if (req & VM_ALLOC_WAITFAIL) {
				VM_OBJECT_WUNLOCK(object);
				vm_radix_wait();
				VM_OBJECT_WLOCK(object);
			}
			return (NULL);
		}

		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    (object->flags & OBJ_FICTITIOUS) == 0)
			pmap_page_set_memattr(m, object->memattr);
	} else
		m->pindex = pindex;

	return (m);
}

/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.  Both "alignment"
 *	and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The specified object may not contain fictitious pages.
 *
 *	The caller must always specify an allocation class.
1965 * 1966 * allocation classes: 1967 * VM_ALLOC_NORMAL normal process request 1968 * VM_ALLOC_SYSTEM system *really* needs a page 1969 * VM_ALLOC_INTERRUPT interrupt time request 1970 * 1971 * optional allocation flags: 1972 * VM_ALLOC_NOBUSY do not exclusive busy the page 1973 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 1974 * VM_ALLOC_NOOBJ page is not associated with an object and 1975 * should not be exclusive busy 1976 * VM_ALLOC_SBUSY shared busy the allocated page 1977 * VM_ALLOC_WIRED wire the allocated page 1978 * VM_ALLOC_ZERO prefer a zeroed page 1979 */ 1980 vm_page_t 1981 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, 1982 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 1983 vm_paddr_t boundary, vm_memattr_t memattr) 1984 { 1985 struct vm_domainset_iter di; 1986 vm_page_t m; 1987 int domain; 1988 1989 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 1990 do { 1991 m = vm_page_alloc_contig_domain(object, pindex, domain, req, 1992 npages, low, high, alignment, boundary, memattr); 1993 if (m != NULL) 1994 break; 1995 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 1996 1997 return (m); 1998 } 1999 2000 vm_page_t 2001 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2002 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2003 vm_paddr_t boundary, vm_memattr_t memattr) 2004 { 2005 struct vm_domain *vmd; 2006 vm_page_t m, m_ret, mpred; 2007 u_int busy_lock, flags, oflags; 2008 2009 mpred = NULL; /* XXX: pacify gcc */ 2010 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && 2011 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && 2012 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2013 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2014 ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object, 2015 req)); 2016 KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, 2017 ("Can't sleep and retry object insertion.")); 2018 if (object != NULL) { 2019 VM_OBJECT_ASSERT_WLOCKED(object); 2020 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, 2021 ("vm_page_alloc_contig: object %p has fictitious pages", 2022 object)); 2023 } 2024 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2025 2026 if (object != NULL) { 2027 mpred = vm_radix_lookup_le(&object->rtree, pindex); 2028 KASSERT(mpred == NULL || mpred->pindex != pindex, 2029 ("vm_page_alloc_contig: pindex already allocated")); 2030 } 2031 2032 /* 2033 * Can we allocate the pages without the number of free pages falling 2034 * below the lower bound for the allocation class? 2035 */ 2036 again: 2037 #if VM_NRESERVLEVEL > 0 2038 /* 2039 * Can we allocate the pages from a reservation? 2040 */ 2041 if (vm_object_reserv(object) && 2042 ((m_ret = vm_reserv_extend_contig(req, object, pindex, domain, 2043 npages, low, high, alignment, boundary, mpred)) != NULL || 2044 (m_ret = vm_reserv_alloc_contig(req, object, pindex, domain, 2045 npages, low, high, alignment, boundary, mpred)) != NULL)) { 2046 domain = vm_phys_domain(m_ret); 2047 vmd = VM_DOMAIN(domain); 2048 goto found; 2049 } 2050 #endif 2051 m_ret = NULL; 2052 vmd = VM_DOMAIN(domain); 2053 if (vm_domain_allocate(vmd, req, npages)) { 2054 /* 2055 * allocate them from the free page queues. 
2056 */ 2057 vm_domain_free_lock(vmd); 2058 m_ret = vm_phys_alloc_contig(domain, npages, low, high, 2059 alignment, boundary); 2060 vm_domain_free_unlock(vmd); 2061 if (m_ret == NULL) { 2062 vm_domain_freecnt_inc(vmd, npages); 2063 #if VM_NRESERVLEVEL > 0 2064 if (vm_reserv_reclaim_contig(domain, npages, low, 2065 high, alignment, boundary)) 2066 goto again; 2067 #endif 2068 } 2069 } 2070 if (m_ret == NULL) { 2071 if (vm_domain_alloc_fail(vmd, object, req)) 2072 goto again; 2073 return (NULL); 2074 } 2075 #if VM_NRESERVLEVEL > 0 2076 found: 2077 #endif 2078 for (m = m_ret; m < &m_ret[npages]; m++) { 2079 vm_page_dequeue(m); 2080 vm_page_alloc_check(m); 2081 } 2082 2083 /* 2084 * Initialize the pages. Only the PG_ZERO flag is inherited. 2085 */ 2086 flags = 0; 2087 if ((req & VM_ALLOC_ZERO) != 0) 2088 flags = PG_ZERO; 2089 if ((req & VM_ALLOC_NODUMP) != 0) 2090 flags |= PG_NODUMP; 2091 oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? 2092 VPO_UNMANAGED : 0; 2093 busy_lock = VPB_UNBUSIED; 2094 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) 2095 busy_lock = VPB_SINGLE_EXCLUSIVER; 2096 if ((req & VM_ALLOC_SBUSY) != 0) 2097 busy_lock = VPB_SHARERS_WORD(1); 2098 if ((req & VM_ALLOC_WIRED) != 0) 2099 vm_wire_add(npages); 2100 if (object != NULL) { 2101 if (object->memattr != VM_MEMATTR_DEFAULT && 2102 memattr == VM_MEMATTR_DEFAULT) 2103 memattr = object->memattr; 2104 } 2105 for (m = m_ret; m < &m_ret[npages]; m++) { 2106 m->aflags = 0; 2107 m->flags = (m->flags | PG_NODUMP) & flags; 2108 m->busy_lock = busy_lock; 2109 if ((req & VM_ALLOC_WIRED) != 0) 2110 m->wire_count = 1; 2111 m->act_count = 0; 2112 m->oflags = oflags; 2113 if (object != NULL) { 2114 if (vm_page_insert_after(m, object, pindex, mpred)) { 2115 if ((req & VM_ALLOC_WIRED) != 0) 2116 vm_wire_sub(npages); 2117 KASSERT(m->object == NULL, 2118 ("page %p has object", m)); 2119 mpred = m; 2120 for (m = m_ret; m < &m_ret[npages]; m++) { 2121 if (m <= mpred && 2122 (req & VM_ALLOC_WIRED) != 0) 2123 m->wire_count = 0; 2124 m->oflags = VPO_UNMANAGED; 2125 m->busy_lock = VPB_UNBUSIED; 2126 /* Don't change PG_ZERO. */ 2127 vm_page_free_toq(m); 2128 } 2129 if (req & VM_ALLOC_WAITFAIL) { 2130 VM_OBJECT_WUNLOCK(object); 2131 vm_radix_wait(); 2132 VM_OBJECT_WLOCK(object); 2133 } 2134 return (NULL); 2135 } 2136 mpred = m; 2137 } else 2138 m->pindex = pindex; 2139 if (memattr != VM_MEMATTR_DEFAULT) 2140 pmap_page_set_memattr(m, memattr); 2141 pindex++; 2142 } 2143 return (m_ret); 2144 } 2145 2146 /* 2147 * Check a page that has been freshly dequeued from a freelist. 2148 */ 2149 static void 2150 vm_page_alloc_check(vm_page_t m) 2151 { 2152 2153 KASSERT(m->object == NULL, ("page %p has object", m)); 2154 KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0, 2155 ("page %p has unexpected queue %d, flags %#x", 2156 m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK))); 2157 KASSERT(!vm_page_held(m), ("page %p is held", m)); 2158 KASSERT(!vm_page_busied(m), ("page %p is busy", m)); 2159 KASSERT(m->dirty == 0, ("page %p is dirty", m)); 2160 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 2161 ("page %p has unexpected memattr %d", 2162 m, pmap_page_get_memattr(m))); 2163 KASSERT(m->valid == 0, ("free page %p is valid", m)); 2164 } 2165 2166 /* 2167 * vm_page_alloc_freelist: 2168 * 2169 * Allocate a physical page from the specified free page list. 2170 * 2171 * The caller must always specify an allocation class. 
2172 * 2173 * allocation classes: 2174 * VM_ALLOC_NORMAL normal process request 2175 * VM_ALLOC_SYSTEM system *really* needs a page 2176 * VM_ALLOC_INTERRUPT interrupt time request 2177 * 2178 * optional allocation flags: 2179 * VM_ALLOC_COUNT(number) the number of additional pages that the caller 2180 * intends to allocate 2181 * VM_ALLOC_WIRED wire the allocated page 2182 * VM_ALLOC_ZERO prefer a zeroed page 2183 */ 2184 vm_page_t 2185 vm_page_alloc_freelist(int freelist, int req) 2186 { 2187 struct vm_domainset_iter di; 2188 vm_page_t m; 2189 int domain; 2190 2191 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2192 do { 2193 m = vm_page_alloc_freelist_domain(domain, freelist, req); 2194 if (m != NULL) 2195 break; 2196 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2197 2198 return (m); 2199 } 2200 2201 vm_page_t 2202 vm_page_alloc_freelist_domain(int domain, int freelist, int req) 2203 { 2204 struct vm_domain *vmd; 2205 vm_page_t m; 2206 u_int flags; 2207 2208 m = NULL; 2209 vmd = VM_DOMAIN(domain); 2210 again: 2211 if (vm_domain_allocate(vmd, req, 1)) { 2212 vm_domain_free_lock(vmd); 2213 m = vm_phys_alloc_freelist_pages(domain, freelist, 2214 VM_FREEPOOL_DIRECT, 0); 2215 vm_domain_free_unlock(vmd); 2216 if (m == NULL) 2217 vm_domain_freecnt_inc(vmd, 1); 2218 } 2219 if (m == NULL) { 2220 if (vm_domain_alloc_fail(vmd, NULL, req)) 2221 goto again; 2222 return (NULL); 2223 } 2224 vm_page_dequeue(m); 2225 vm_page_alloc_check(m); 2226 2227 /* 2228 * Initialize the page. Only the PG_ZERO flag is inherited. 2229 */ 2230 m->aflags = 0; 2231 flags = 0; 2232 if ((req & VM_ALLOC_ZERO) != 0) 2233 flags = PG_ZERO; 2234 m->flags &= flags; 2235 if ((req & VM_ALLOC_WIRED) != 0) { 2236 /* 2237 * The page lock is not required for wiring a page that does 2238 * not belong to an object. 2239 */ 2240 vm_wire_add(1); 2241 m->wire_count = 1; 2242 } 2243 /* Unmanaged pages don't use "act_count". */ 2244 m->oflags = VPO_UNMANAGED; 2245 return (m); 2246 } 2247 2248 static int 2249 vm_page_import(void *arg, void **store, int cnt, int domain, int flags) 2250 { 2251 struct vm_domain *vmd; 2252 int i; 2253 2254 vmd = arg; 2255 /* Only import if we can bring in a full bucket. */ 2256 if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt)) 2257 return (0); 2258 domain = vmd->vmd_domain; 2259 vm_domain_free_lock(vmd); 2260 i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt, 2261 (vm_page_t *)store); 2262 vm_domain_free_unlock(vmd); 2263 if (cnt != i) 2264 vm_domain_freecnt_inc(vmd, cnt - i); 2265 2266 return (i); 2267 } 2268 2269 static void 2270 vm_page_release(void *arg, void **store, int cnt) 2271 { 2272 struct vm_domain *vmd; 2273 vm_page_t m; 2274 int i; 2275 2276 vmd = arg; 2277 vm_domain_free_lock(vmd); 2278 for (i = 0; i < cnt; i++) { 2279 m = (vm_page_t)store[i]; 2280 vm_phys_free_pages(m, 0); 2281 } 2282 vm_domain_free_unlock(vmd); 2283 vm_domain_freecnt_inc(vmd, cnt); 2284 } 2285 2286 #define VPSC_ANY 0 /* No restrictions. */ 2287 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ 2288 #define VPSC_NOSUPER 2 /* Skip superpages. */ 2289 2290 /* 2291 * vm_page_scan_contig: 2292 * 2293 * Scan vm_page_array[] between the specified entries "m_start" and 2294 * "m_end" for a run of contiguous physical pages that satisfy the 2295 * specified conditions, and return the lowest page in the run. The 2296 * specified "alignment" determines the alignment of the lowest physical 2297 * page in the run. 
If the specified "boundary" is non-zero, then the 2298 * run of physical pages cannot span a physical address that is a 2299 * multiple of "boundary". 2300 * 2301 * "m_end" is never dereferenced, so it need not point to a vm_page 2302 * structure within vm_page_array[]. 2303 * 2304 * "npages" must be greater than zero. "m_start" and "m_end" must not 2305 * span a hole (or discontiguity) in the physical address space. Both 2306 * "alignment" and "boundary" must be a power of two. 2307 */ 2308 vm_page_t 2309 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, 2310 u_long alignment, vm_paddr_t boundary, int options) 2311 { 2312 struct mtx *m_mtx; 2313 vm_object_t object; 2314 vm_paddr_t pa; 2315 vm_page_t m, m_run; 2316 #if VM_NRESERVLEVEL > 0 2317 int level; 2318 #endif 2319 int m_inc, order, run_ext, run_len; 2320 2321 KASSERT(npages > 0, ("npages is 0")); 2322 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2323 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2324 m_run = NULL; 2325 run_len = 0; 2326 m_mtx = NULL; 2327 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { 2328 KASSERT((m->flags & PG_MARKER) == 0, 2329 ("page %p is PG_MARKER", m)); 2330 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->wire_count == 1, 2331 ("fictitious page %p has invalid wire count", m)); 2332 2333 /* 2334 * If the current page would be the start of a run, check its 2335 * physical address against the end, alignment, and boundary 2336 * conditions. If it doesn't satisfy these conditions, either 2337 * terminate the scan or advance to the next page that 2338 * satisfies the failed condition. 2339 */ 2340 if (run_len == 0) { 2341 KASSERT(m_run == NULL, ("m_run != NULL")); 2342 if (m + npages > m_end) 2343 break; 2344 pa = VM_PAGE_TO_PHYS(m); 2345 if ((pa & (alignment - 1)) != 0) { 2346 m_inc = atop(roundup2(pa, alignment) - pa); 2347 continue; 2348 } 2349 if (rounddown2(pa ^ (pa + ptoa(npages) - 1), 2350 boundary) != 0) { 2351 m_inc = atop(roundup2(pa, boundary) - pa); 2352 continue; 2353 } 2354 } else 2355 KASSERT(m_run != NULL, ("m_run == NULL")); 2356 2357 vm_page_change_lock(m, &m_mtx); 2358 m_inc = 1; 2359 retry: 2360 if (vm_page_held(m)) 2361 run_ext = 0; 2362 #if VM_NRESERVLEVEL > 0 2363 else if ((level = vm_reserv_level(m)) >= 0 && 2364 (options & VPSC_NORESERV) != 0) { 2365 run_ext = 0; 2366 /* Advance to the end of the reservation. */ 2367 pa = VM_PAGE_TO_PHYS(m); 2368 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2369 pa); 2370 } 2371 #endif 2372 else if ((object = m->object) != NULL) { 2373 /* 2374 * The page is considered eligible for relocation if 2375 * and only if it could be laundered or reclaimed by 2376 * the page daemon. 2377 */ 2378 if (!VM_OBJECT_TRYRLOCK(object)) { 2379 mtx_unlock(m_mtx); 2380 VM_OBJECT_RLOCK(object); 2381 mtx_lock(m_mtx); 2382 if (m->object != object) { 2383 /* 2384 * The page may have been freed. 2385 */ 2386 VM_OBJECT_RUNLOCK(object); 2387 goto retry; 2388 } else if (vm_page_held(m)) { 2389 run_ext = 0; 2390 goto unlock; 2391 } 2392 } 2393 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 2394 ("page %p is PG_UNHOLDFREE", m)); 2395 /* Don't care: PG_NODUMP, PG_ZERO. */ 2396 if (object->type != OBJT_DEFAULT && 2397 object->type != OBJT_SWAP && 2398 object->type != OBJT_VNODE) { 2399 run_ext = 0; 2400 #if VM_NRESERVLEVEL > 0 2401 } else if ((options & VPSC_NOSUPER) != 0 && 2402 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2403 run_ext = 0; 2404 /* Advance to the end of the superpage. 
*/ 2405 pa = VM_PAGE_TO_PHYS(m); 2406 m_inc = atop(roundup2(pa + 1, 2407 vm_reserv_size(level)) - pa); 2408 #endif 2409 } else if (object->memattr == VM_MEMATTR_DEFAULT && 2410 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { 2411 /* 2412 * The page is allocated but eligible for 2413 * relocation. Extend the current run by one 2414 * page. 2415 */ 2416 KASSERT(pmap_page_get_memattr(m) == 2417 VM_MEMATTR_DEFAULT, 2418 ("page %p has an unexpected memattr", m)); 2419 KASSERT((m->oflags & (VPO_SWAPINPROG | 2420 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2421 ("page %p has unexpected oflags", m)); 2422 /* Don't care: VPO_NOSYNC. */ 2423 run_ext = 1; 2424 } else 2425 run_ext = 0; 2426 unlock: 2427 VM_OBJECT_RUNLOCK(object); 2428 #if VM_NRESERVLEVEL > 0 2429 } else if (level >= 0) { 2430 /* 2431 * The page is reserved but not yet allocated. In 2432 * other words, it is still free. Extend the current 2433 * run by one page. 2434 */ 2435 run_ext = 1; 2436 #endif 2437 } else if ((order = m->order) < VM_NFREEORDER) { 2438 /* 2439 * The page is enqueued in the physical memory 2440 * allocator's free page queues. Moreover, it is the 2441 * first page in a power-of-two-sized run of 2442 * contiguous free pages. Add these pages to the end 2443 * of the current run, and jump ahead. 2444 */ 2445 run_ext = 1 << order; 2446 m_inc = 1 << order; 2447 } else { 2448 /* 2449 * Skip the page for one of the following reasons: (1) 2450 * It is enqueued in the physical memory allocator's 2451 * free page queues. However, it is not the first 2452 * page in a run of contiguous free pages. (This case 2453 * rarely occurs because the scan is performed in 2454 * ascending order.) (2) It is not reserved, and it is 2455 * transitioning from free to allocated. (Conversely, 2456 * the transition from allocated to free for managed 2457 * pages is blocked by the page lock.) (3) It is 2458 * allocated but not contained by an object and not 2459 * wired, e.g., allocated by Xen's balloon driver. 2460 */ 2461 run_ext = 0; 2462 } 2463 2464 /* 2465 * Extend or reset the current run of pages. 2466 */ 2467 if (run_ext > 0) { 2468 if (run_len == 0) 2469 m_run = m; 2470 run_len += run_ext; 2471 } else { 2472 if (run_len > 0) { 2473 m_run = NULL; 2474 run_len = 0; 2475 } 2476 } 2477 } 2478 if (m_mtx != NULL) 2479 mtx_unlock(m_mtx); 2480 if (run_len >= npages) 2481 return (m_run); 2482 return (NULL); 2483 } 2484 2485 /* 2486 * vm_page_reclaim_run: 2487 * 2488 * Try to relocate each of the allocated virtual pages within the 2489 * specified run of physical pages to a new physical address. Free the 2490 * physical pages underlying the relocated virtual pages. A virtual page 2491 * is relocatable if and only if it could be laundered or reclaimed by 2492 * the page daemon. Whenever possible, a virtual page is relocated to a 2493 * physical address above "high". 2494 * 2495 * Returns 0 if every physical page within the run was already free or 2496 * just freed by a successful relocation. Otherwise, returns a non-zero 2497 * value indicating why the last attempt to relocate a virtual page was 2498 * unsuccessful. 2499 * 2500 * "req_class" must be an allocation class. 
2501 */ 2502 static int 2503 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run, 2504 vm_paddr_t high) 2505 { 2506 struct vm_domain *vmd; 2507 struct mtx *m_mtx; 2508 struct spglist free; 2509 vm_object_t object; 2510 vm_paddr_t pa; 2511 vm_page_t m, m_end, m_new; 2512 int error, order, req; 2513 2514 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 2515 ("req_class is not an allocation class")); 2516 SLIST_INIT(&free); 2517 error = 0; 2518 m = m_run; 2519 m_end = m_run + npages; 2520 m_mtx = NULL; 2521 for (; error == 0 && m < m_end; m++) { 2522 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 2523 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 2524 2525 /* 2526 * Avoid releasing and reacquiring the same page lock. 2527 */ 2528 vm_page_change_lock(m, &m_mtx); 2529 retry: 2530 if (vm_page_held(m)) 2531 error = EBUSY; 2532 else if ((object = m->object) != NULL) { 2533 /* 2534 * The page is relocated if and only if it could be 2535 * laundered or reclaimed by the page daemon. 2536 */ 2537 if (!VM_OBJECT_TRYWLOCK(object)) { 2538 mtx_unlock(m_mtx); 2539 VM_OBJECT_WLOCK(object); 2540 mtx_lock(m_mtx); 2541 if (m->object != object) { 2542 /* 2543 * The page may have been freed. 2544 */ 2545 VM_OBJECT_WUNLOCK(object); 2546 goto retry; 2547 } else if (vm_page_held(m)) { 2548 error = EBUSY; 2549 goto unlock; 2550 } 2551 } 2552 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 2553 ("page %p is PG_UNHOLDFREE", m)); 2554 /* Don't care: PG_NODUMP, PG_ZERO. */ 2555 if (object->type != OBJT_DEFAULT && 2556 object->type != OBJT_SWAP && 2557 object->type != OBJT_VNODE) 2558 error = EINVAL; 2559 else if (object->memattr != VM_MEMATTR_DEFAULT) 2560 error = EINVAL; 2561 else if (vm_page_queue(m) != PQ_NONE && 2562 !vm_page_busied(m)) { 2563 KASSERT(pmap_page_get_memattr(m) == 2564 VM_MEMATTR_DEFAULT, 2565 ("page %p has an unexpected memattr", m)); 2566 KASSERT((m->oflags & (VPO_SWAPINPROG | 2567 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2568 ("page %p has unexpected oflags", m)); 2569 /* Don't care: VPO_NOSYNC. */ 2570 if (m->valid != 0) { 2571 /* 2572 * First, try to allocate a new page 2573 * that is above "high". Failing 2574 * that, try to allocate a new page 2575 * that is below "m_run". Allocate 2576 * the new page between the end of 2577 * "m_run" and "high" only as a last 2578 * resort. 2579 */ 2580 req = req_class | VM_ALLOC_NOOBJ; 2581 if ((m->flags & PG_NODUMP) != 0) 2582 req |= VM_ALLOC_NODUMP; 2583 if (trunc_page(high) != 2584 ~(vm_paddr_t)PAGE_MASK) { 2585 m_new = vm_page_alloc_contig( 2586 NULL, 0, req, 1, 2587 round_page(high), 2588 ~(vm_paddr_t)0, 2589 PAGE_SIZE, 0, 2590 VM_MEMATTR_DEFAULT); 2591 } else 2592 m_new = NULL; 2593 if (m_new == NULL) { 2594 pa = VM_PAGE_TO_PHYS(m_run); 2595 m_new = vm_page_alloc_contig( 2596 NULL, 0, req, 1, 2597 0, pa - 1, PAGE_SIZE, 0, 2598 VM_MEMATTR_DEFAULT); 2599 } 2600 if (m_new == NULL) { 2601 pa += ptoa(npages); 2602 m_new = vm_page_alloc_contig( 2603 NULL, 0, req, 1, 2604 pa, high, PAGE_SIZE, 0, 2605 VM_MEMATTR_DEFAULT); 2606 } 2607 if (m_new == NULL) { 2608 error = ENOMEM; 2609 goto unlock; 2610 } 2611 KASSERT(m_new->wire_count == 0, 2612 ("page %p is wired", m_new)); 2613 2614 /* 2615 * Replace "m" with the new page. For 2616 * vm_page_replace(), "m" must be busy 2617 * and dequeued. Finally, change "m" 2618 * as if vm_page_free() was called. 
2619 */ 2620 if (object->ref_count != 0) 2621 pmap_remove_all(m); 2622 m_new->aflags = m->aflags & 2623 ~PGA_QUEUE_STATE_MASK; 2624 KASSERT(m_new->oflags == VPO_UNMANAGED, 2625 ("page %p is managed", m_new)); 2626 m_new->oflags = m->oflags & VPO_NOSYNC; 2627 pmap_copy_page(m, m_new); 2628 m_new->valid = m->valid; 2629 m_new->dirty = m->dirty; 2630 m->flags &= ~PG_ZERO; 2631 vm_page_xbusy(m); 2632 vm_page_dequeue(m); 2633 vm_page_replace_checked(m_new, object, 2634 m->pindex, m); 2635 if (vm_page_free_prep(m)) 2636 SLIST_INSERT_HEAD(&free, m, 2637 plinks.s.ss); 2638 2639 /* 2640 * The new page must be deactivated 2641 * before the object is unlocked. 2642 */ 2643 vm_page_change_lock(m_new, &m_mtx); 2644 vm_page_deactivate(m_new); 2645 } else { 2646 m->flags &= ~PG_ZERO; 2647 vm_page_dequeue(m); 2648 vm_page_remove(m); 2649 if (vm_page_free_prep(m)) 2650 SLIST_INSERT_HEAD(&free, m, 2651 plinks.s.ss); 2652 KASSERT(m->dirty == 0, 2653 ("page %p is dirty", m)); 2654 } 2655 } else 2656 error = EBUSY; 2657 unlock: 2658 VM_OBJECT_WUNLOCK(object); 2659 } else { 2660 MPASS(vm_phys_domain(m) == domain); 2661 vmd = VM_DOMAIN(domain); 2662 vm_domain_free_lock(vmd); 2663 order = m->order; 2664 if (order < VM_NFREEORDER) { 2665 /* 2666 * The page is enqueued in the physical memory 2667 * allocator's free page queues. Moreover, it 2668 * is the first page in a power-of-two-sized 2669 * run of contiguous free pages. Jump ahead 2670 * to the last page within that run, and 2671 * continue from there. 2672 */ 2673 m += (1 << order) - 1; 2674 } 2675 #if VM_NRESERVLEVEL > 0 2676 else if (vm_reserv_is_page_free(m)) 2677 order = 0; 2678 #endif 2679 vm_domain_free_unlock(vmd); 2680 if (order == VM_NFREEORDER) 2681 error = EINVAL; 2682 } 2683 } 2684 if (m_mtx != NULL) 2685 mtx_unlock(m_mtx); 2686 if ((m = SLIST_FIRST(&free)) != NULL) { 2687 int cnt; 2688 2689 vmd = VM_DOMAIN(domain); 2690 cnt = 0; 2691 vm_domain_free_lock(vmd); 2692 do { 2693 MPASS(vm_phys_domain(m) == domain); 2694 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2695 vm_phys_free_pages(m, 0); 2696 cnt++; 2697 } while ((m = SLIST_FIRST(&free)) != NULL); 2698 vm_domain_free_unlock(vmd); 2699 vm_domain_freecnt_inc(vmd, cnt); 2700 } 2701 return (error); 2702 } 2703 2704 #define NRUNS 16 2705 2706 CTASSERT(powerof2(NRUNS)); 2707 2708 #define RUN_INDEX(count) ((count) & (NRUNS - 1)) 2709 2710 #define MIN_RECLAIM 8 2711 2712 /* 2713 * vm_page_reclaim_contig: 2714 * 2715 * Reclaim allocated, contiguous physical memory satisfying the specified 2716 * conditions by relocating the virtual pages using that physical memory. 2717 * Returns true if reclamation is successful and false otherwise. Since 2718 * relocation requires the allocation of physical pages, reclamation may 2719 * fail due to a shortage of free pages. When reclamation fails, callers 2720 * are expected to perform vm_wait() before retrying a failed allocation 2721 * operation, e.g., vm_page_alloc_contig(). 2722 * 2723 * The caller must always specify an allocation class through "req". 2724 * 2725 * allocation classes: 2726 * VM_ALLOC_NORMAL normal process request 2727 * VM_ALLOC_SYSTEM system *really* needs a page 2728 * VM_ALLOC_INTERRUPT interrupt time request 2729 * 2730 * The optional allocation flags are ignored. 2731 * 2732 * "npages" must be greater than zero. Both "alignment" and "boundary" 2733 * must be a power of two. 
2734 */ 2735 bool 2736 vm_page_reclaim_contig_domain(int domain, int req, u_long npages, 2737 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 2738 { 2739 struct vm_domain *vmd; 2740 vm_paddr_t curr_low; 2741 vm_page_t m_run, m_runs[NRUNS]; 2742 u_long count, reclaimed; 2743 int error, i, options, req_class; 2744 2745 KASSERT(npages > 0, ("npages is 0")); 2746 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2747 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2748 req_class = req & VM_ALLOC_CLASS_MASK; 2749 2750 /* 2751 * The page daemon is allowed to dig deeper into the free page list. 2752 */ 2753 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2754 req_class = VM_ALLOC_SYSTEM; 2755 2756 /* 2757 * Return if the number of free pages cannot satisfy the requested 2758 * allocation. 2759 */ 2760 vmd = VM_DOMAIN(domain); 2761 count = vmd->vmd_free_count; 2762 if (count < npages + vmd->vmd_free_reserved || (count < npages + 2763 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 2764 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 2765 return (false); 2766 2767 /* 2768 * Scan up to three times, relaxing the restrictions ("options") on 2769 * the reclamation of reservations and superpages each time. 2770 */ 2771 for (options = VPSC_NORESERV;;) { 2772 /* 2773 * Find the highest runs that satisfy the given constraints 2774 * and restrictions, and record them in "m_runs". 2775 */ 2776 curr_low = low; 2777 count = 0; 2778 for (;;) { 2779 m_run = vm_phys_scan_contig(domain, npages, curr_low, 2780 high, alignment, boundary, options); 2781 if (m_run == NULL) 2782 break; 2783 curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages); 2784 m_runs[RUN_INDEX(count)] = m_run; 2785 count++; 2786 } 2787 2788 /* 2789 * Reclaim the highest runs in LIFO (descending) order until 2790 * the number of reclaimed pages, "reclaimed", is at least 2791 * MIN_RECLAIM. Reset "reclaimed" each time because each 2792 * reclamation is idempotent, and runs will (likely) recur 2793 * from one scan to the next as restrictions are relaxed. 2794 */ 2795 reclaimed = 0; 2796 for (i = 0; count > 0 && i < NRUNS; i++) { 2797 count--; 2798 m_run = m_runs[RUN_INDEX(count)]; 2799 error = vm_page_reclaim_run(req_class, domain, npages, 2800 m_run, high); 2801 if (error == 0) { 2802 reclaimed += npages; 2803 if (reclaimed >= MIN_RECLAIM) 2804 return (true); 2805 } 2806 } 2807 2808 /* 2809 * Either relax the restrictions on the next scan or return if 2810 * the last scan had no restrictions. 2811 */ 2812 if (options == VPSC_NORESERV) 2813 options = VPSC_NOSUPER; 2814 else if (options == VPSC_NOSUPER) 2815 options = VPSC_ANY; 2816 else if (options == VPSC_ANY) 2817 return (reclaimed != 0); 2818 } 2819 } 2820 2821 bool 2822 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 2823 u_long alignment, vm_paddr_t boundary) 2824 { 2825 struct vm_domainset_iter di; 2826 int domain; 2827 bool ret; 2828 2829 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2830 do { 2831 ret = vm_page_reclaim_contig_domain(domain, req, npages, low, 2832 high, alignment, boundary); 2833 if (ret) 2834 break; 2835 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2836 2837 return (ret); 2838 } 2839 2840 /* 2841 * Set the domain in the appropriate page level domainset. 
2842 */ 2843 void 2844 vm_domain_set(struct vm_domain *vmd) 2845 { 2846 2847 mtx_lock(&vm_domainset_lock); 2848 if (!vmd->vmd_minset && vm_paging_min(vmd)) { 2849 vmd->vmd_minset = 1; 2850 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); 2851 } 2852 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { 2853 vmd->vmd_severeset = 1; 2854 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); 2855 } 2856 mtx_unlock(&vm_domainset_lock); 2857 } 2858 2859 /* 2860 * Clear the domain from the appropriate page level domainset. 2861 */ 2862 void 2863 vm_domain_clear(struct vm_domain *vmd) 2864 { 2865 2866 mtx_lock(&vm_domainset_lock); 2867 if (vmd->vmd_minset && !vm_paging_min(vmd)) { 2868 vmd->vmd_minset = 0; 2869 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); 2870 if (vm_min_waiters != 0) { 2871 vm_min_waiters = 0; 2872 wakeup(&vm_min_domains); 2873 } 2874 } 2875 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { 2876 vmd->vmd_severeset = 0; 2877 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); 2878 if (vm_severe_waiters != 0) { 2879 vm_severe_waiters = 0; 2880 wakeup(&vm_severe_domains); 2881 } 2882 } 2883 2884 /* 2885 * If pageout daemon needs pages, then tell it that there are 2886 * some free. 2887 */ 2888 if (vmd->vmd_pageout_pages_needed && 2889 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { 2890 wakeup(&vmd->vmd_pageout_pages_needed); 2891 vmd->vmd_pageout_pages_needed = 0; 2892 } 2893 2894 /* See comments in vm_wait_doms(). */ 2895 if (vm_pageproc_waiters) { 2896 vm_pageproc_waiters = 0; 2897 wakeup(&vm_pageproc_waiters); 2898 } 2899 mtx_unlock(&vm_domainset_lock); 2900 } 2901 2902 /* 2903 * Wait for free pages to exceed the min threshold globally. 2904 */ 2905 void 2906 vm_wait_min(void) 2907 { 2908 2909 mtx_lock(&vm_domainset_lock); 2910 while (vm_page_count_min()) { 2911 vm_min_waiters++; 2912 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); 2913 } 2914 mtx_unlock(&vm_domainset_lock); 2915 } 2916 2917 /* 2918 * Wait for free pages to exceed the severe threshold globally. 2919 */ 2920 void 2921 vm_wait_severe(void) 2922 { 2923 2924 mtx_lock(&vm_domainset_lock); 2925 while (vm_page_count_severe()) { 2926 vm_severe_waiters++; 2927 msleep(&vm_severe_domains, &vm_domainset_lock, PVM, 2928 "vmwait", 0); 2929 } 2930 mtx_unlock(&vm_domainset_lock); 2931 } 2932 2933 u_int 2934 vm_wait_count(void) 2935 { 2936 2937 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); 2938 } 2939 2940 void 2941 vm_wait_doms(const domainset_t *wdoms) 2942 { 2943 2944 /* 2945 * We use racey wakeup synchronization to avoid expensive global 2946 * locking for the pageproc when sleeping with a non-specific vm_wait. 2947 * To handle this, we only sleep for one tick in this instance. It 2948 * is expected that most allocations for the pageproc will come from 2949 * kmem or vm_page_grab* which will use the more specific and 2950 * race-free vm_wait_domain(). 2951 */ 2952 if (curproc == pageproc) { 2953 mtx_lock(&vm_domainset_lock); 2954 vm_pageproc_waiters++; 2955 msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP, 2956 "pageprocwait", 1); 2957 } else { 2958 /* 2959 * XXX Ideally we would wait only until the allocation could 2960 * be satisfied. This condition can cause new allocators to 2961 * consume all freed pages while old allocators wait. 
2962 */ 2963 mtx_lock(&vm_domainset_lock); 2964 if (vm_page_count_min_set(wdoms)) { 2965 vm_min_waiters++; 2966 msleep(&vm_min_domains, &vm_domainset_lock, 2967 PVM | PDROP, "vmwait", 0); 2968 } else 2969 mtx_unlock(&vm_domainset_lock); 2970 } 2971 } 2972 2973 /* 2974 * vm_wait_domain: 2975 * 2976 * Sleep until free pages are available for allocation. 2977 * - Called in various places after failed memory allocations. 2978 */ 2979 void 2980 vm_wait_domain(int domain) 2981 { 2982 struct vm_domain *vmd; 2983 domainset_t wdom; 2984 2985 vmd = VM_DOMAIN(domain); 2986 vm_domain_free_assert_unlocked(vmd); 2987 2988 if (curproc == pageproc) { 2989 mtx_lock(&vm_domainset_lock); 2990 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { 2991 vmd->vmd_pageout_pages_needed = 1; 2992 msleep(&vmd->vmd_pageout_pages_needed, 2993 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); 2994 } else 2995 mtx_unlock(&vm_domainset_lock); 2996 } else { 2997 if (pageproc == NULL) 2998 panic("vm_wait in early boot"); 2999 DOMAINSET_ZERO(&wdom); 3000 DOMAINSET_SET(vmd->vmd_domain, &wdom); 3001 vm_wait_doms(&wdom); 3002 } 3003 } 3004 3005 /* 3006 * vm_wait: 3007 * 3008 * Sleep until free pages are available for allocation in the 3009 * affinity domains of the obj. If obj is NULL, the domain set 3010 * for the calling thread is used. 3011 * Called in various places after failed memory allocations. 3012 */ 3013 void 3014 vm_wait(vm_object_t obj) 3015 { 3016 struct domainset *d; 3017 3018 d = NULL; 3019 3020 /* 3021 * Carefully fetch pointers only once: the struct domainset 3022 * itself is ummutable but the pointer might change. 3023 */ 3024 if (obj != NULL) 3025 d = obj->domain.dr_policy; 3026 if (d == NULL) 3027 d = curthread->td_domain.dr_policy; 3028 3029 vm_wait_doms(&d->ds_mask); 3030 } 3031 3032 /* 3033 * vm_domain_alloc_fail: 3034 * 3035 * Called when a page allocation function fails. Informs the 3036 * pagedaemon and performs the requested wait. Requires the 3037 * domain_free and object lock on entry. Returns with the 3038 * object lock held and free lock released. Returns an error when 3039 * retry is necessary. 3040 * 3041 */ 3042 static int 3043 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) 3044 { 3045 3046 vm_domain_free_assert_unlocked(vmd); 3047 3048 atomic_add_int(&vmd->vmd_pageout_deficit, 3049 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 3050 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { 3051 if (object != NULL) 3052 VM_OBJECT_WUNLOCK(object); 3053 vm_wait_domain(vmd->vmd_domain); 3054 if (object != NULL) 3055 VM_OBJECT_WLOCK(object); 3056 if (req & VM_ALLOC_WAITOK) 3057 return (EAGAIN); 3058 } 3059 3060 return (0); 3061 } 3062 3063 /* 3064 * vm_waitpfault: 3065 * 3066 * Sleep until free pages are available for allocation. 3067 * - Called only in vm_fault so that processes page faulting 3068 * can be easily tracked. 3069 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 3070 * processes will be able to grab memory first. Do not change 3071 * this balance without careful testing first. 3072 */ 3073 void 3074 vm_waitpfault(struct domainset *dset) 3075 { 3076 3077 /* 3078 * XXX Ideally we would wait only until the allocation could 3079 * be satisfied. This condition can cause new allocators to 3080 * consume all freed pages while old allocators wait. 
3081 */ 3082 mtx_lock(&vm_domainset_lock); 3083 if (vm_page_count_min_set(&dset->ds_mask)) { 3084 vm_min_waiters++; 3085 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP, 3086 "pfault", 0); 3087 } else 3088 mtx_unlock(&vm_domainset_lock); 3089 } 3090 3091 struct vm_pagequeue * 3092 vm_page_pagequeue(vm_page_t m) 3093 { 3094 3095 return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]); 3096 } 3097 3098 static struct mtx * 3099 vm_page_pagequeue_lockptr(vm_page_t m) 3100 { 3101 uint8_t queue; 3102 3103 if ((queue = atomic_load_8(&m->queue)) == PQ_NONE) 3104 return (NULL); 3105 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue].pq_mutex); 3106 } 3107 3108 static inline void 3109 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m) 3110 { 3111 struct vm_domain *vmd; 3112 uint8_t qflags; 3113 3114 CRITICAL_ASSERT(curthread); 3115 vm_pagequeue_assert_locked(pq); 3116 3117 /* 3118 * The page daemon is allowed to set m->queue = PQ_NONE without 3119 * the page queue lock held. In this case it is about to free the page, 3120 * which must not have any queue state. 3121 */ 3122 qflags = atomic_load_8(&m->aflags) & PGA_QUEUE_STATE_MASK; 3123 KASSERT(pq == vm_page_pagequeue(m) || qflags == 0, 3124 ("page %p doesn't belong to queue %p but has queue state %#x", 3125 m, pq, qflags)); 3126 3127 if ((qflags & PGA_DEQUEUE) != 0) { 3128 if (__predict_true((qflags & PGA_ENQUEUED) != 0)) { 3129 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3130 vm_pagequeue_cnt_dec(pq); 3131 } 3132 vm_page_dequeue_complete(m); 3133 } else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) { 3134 if ((qflags & PGA_ENQUEUED) != 0) 3135 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3136 else { 3137 vm_pagequeue_cnt_inc(pq); 3138 vm_page_aflag_set(m, PGA_ENQUEUED); 3139 } 3140 if ((qflags & PGA_REQUEUE_HEAD) != 0) { 3141 KASSERT(m->queue == PQ_INACTIVE, 3142 ("head enqueue not supported for page %p", m)); 3143 vmd = vm_pagequeue_domain(m); 3144 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 3145 } else 3146 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3147 3148 /* 3149 * PGA_REQUEUE and PGA_REQUEUE_HEAD must be cleared after 3150 * setting PGA_ENQUEUED in order to synchronize with the 3151 * page daemon. 
3152 */ 3153 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD); 3154 } 3155 } 3156 3157 static void 3158 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq, 3159 uint8_t queue) 3160 { 3161 vm_page_t m; 3162 int i; 3163 3164 for (i = 0; i < bq->bq_cnt; i++) { 3165 m = bq->bq_pa[i]; 3166 if (__predict_false(m->queue != queue)) 3167 continue; 3168 vm_pqbatch_process_page(pq, m); 3169 } 3170 vm_batchqueue_init(bq); 3171 } 3172 3173 static void 3174 vm_pqbatch_submit_page(vm_page_t m, uint8_t queue) 3175 { 3176 struct vm_batchqueue *bq; 3177 struct vm_pagequeue *pq; 3178 int domain; 3179 3180 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3181 ("page %p is unmanaged", m)); 3182 KASSERT(mtx_owned(vm_page_lockptr(m)) || 3183 (m->object == NULL && (m->aflags & PGA_DEQUEUE) != 0), 3184 ("missing synchronization for page %p", m)); 3185 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); 3186 3187 domain = vm_phys_domain(m); 3188 pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue]; 3189 3190 critical_enter(); 3191 bq = DPCPU_PTR(pqbatch[domain][queue]); 3192 if (vm_batchqueue_insert(bq, m)) { 3193 critical_exit(); 3194 return; 3195 } 3196 if (!vm_pagequeue_trylock(pq)) { 3197 critical_exit(); 3198 vm_pagequeue_lock(pq); 3199 critical_enter(); 3200 bq = DPCPU_PTR(pqbatch[domain][queue]); 3201 } 3202 vm_pqbatch_process(pq, bq, queue); 3203 3204 /* 3205 * The page may have been logically dequeued before we acquired the 3206 * page queue lock. In this case, since we either hold the page lock 3207 * or the page is being freed, a different thread cannot be concurrently 3208 * enqueuing the page. 3209 */ 3210 if (__predict_true(m->queue == queue)) 3211 vm_pqbatch_process_page(pq, m); 3212 else { 3213 KASSERT(m->queue == PQ_NONE, 3214 ("invalid queue transition for page %p", m)); 3215 KASSERT((m->aflags & PGA_ENQUEUED) == 0, 3216 ("page %p is enqueued with invalid queue index", m)); 3217 vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK); 3218 } 3219 vm_pagequeue_unlock(pq); 3220 critical_exit(); 3221 } 3222 3223 /* 3224 * vm_page_drain_pqbatch: [ internal use only ] 3225 * 3226 * Force all per-CPU page queue batch queues to be drained. This is 3227 * intended for use in severe memory shortages, to ensure that pages 3228 * do not remain stuck in the batch queues. 3229 */ 3230 void 3231 vm_page_drain_pqbatch(void) 3232 { 3233 struct thread *td; 3234 struct vm_domain *vmd; 3235 struct vm_pagequeue *pq; 3236 int cpu, domain, queue; 3237 3238 td = curthread; 3239 CPU_FOREACH(cpu) { 3240 thread_lock(td); 3241 sched_bind(td, cpu); 3242 thread_unlock(td); 3243 3244 for (domain = 0; domain < vm_ndomains; domain++) { 3245 vmd = VM_DOMAIN(domain); 3246 for (queue = 0; queue < PQ_COUNT; queue++) { 3247 pq = &vmd->vmd_pagequeues[queue]; 3248 vm_pagequeue_lock(pq); 3249 critical_enter(); 3250 vm_pqbatch_process(pq, 3251 DPCPU_PTR(pqbatch[domain][queue]), queue); 3252 critical_exit(); 3253 vm_pagequeue_unlock(pq); 3254 } 3255 } 3256 } 3257 thread_lock(td); 3258 sched_unbind(td); 3259 thread_unlock(td); 3260 } 3261 3262 /* 3263 * Complete the logical removal of a page from a page queue. We must be 3264 * careful to synchronize with the page daemon, which may be concurrently 3265 * examining the page with only the page lock held. The page must not be 3266 * in a state where it appears to be logically enqueued. 
3267 */ 3268 static void 3269 vm_page_dequeue_complete(vm_page_t m) 3270 { 3271 3272 m->queue = PQ_NONE; 3273 atomic_thread_fence_rel(); 3274 vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK); 3275 } 3276 3277 /* 3278 * vm_page_dequeue_deferred: [ internal use only ] 3279 * 3280 * Request removal of the given page from its current page 3281 * queue. Physical removal from the queue may be deferred 3282 * indefinitely. 3283 * 3284 * The page must be locked. 3285 */ 3286 void 3287 vm_page_dequeue_deferred(vm_page_t m) 3288 { 3289 uint8_t queue; 3290 3291 vm_page_assert_locked(m); 3292 3293 if ((queue = vm_page_queue(m)) == PQ_NONE) 3294 return; 3295 vm_page_aflag_set(m, PGA_DEQUEUE); 3296 vm_pqbatch_submit_page(m, queue); 3297 } 3298 3299 /* 3300 * A variant of vm_page_dequeue_deferred() that does not assert the page 3301 * lock and is only to be called from vm_page_free_prep(). It is just an 3302 * open-coded implementation of vm_page_dequeue_deferred(). Because the 3303 * page is being freed, we can assume that nothing else is scheduling queue 3304 * operations on this page, so we get for free the mutual exclusion that 3305 * is otherwise provided by the page lock. 3306 */ 3307 static void 3308 vm_page_dequeue_deferred_free(vm_page_t m) 3309 { 3310 uint8_t queue; 3311 3312 KASSERT(m->object == NULL, ("page %p has an object reference", m)); 3313 3314 if ((m->aflags & PGA_DEQUEUE) != 0) 3315 return; 3316 atomic_thread_fence_acq(); 3317 if ((queue = m->queue) == PQ_NONE) 3318 return; 3319 vm_page_aflag_set(m, PGA_DEQUEUE); 3320 vm_pqbatch_submit_page(m, queue); 3321 } 3322 3323 /* 3324 * vm_page_dequeue: 3325 * 3326 * Remove the page from whichever page queue it's in, if any. 3327 * The page must either be locked or unallocated. This constraint 3328 * ensures that the queue state of the page will remain consistent 3329 * after this function returns. 3330 */ 3331 void 3332 vm_page_dequeue(vm_page_t m) 3333 { 3334 struct mtx *lock, *lock1; 3335 struct vm_pagequeue *pq; 3336 uint8_t aflags; 3337 3338 KASSERT(mtx_owned(vm_page_lockptr(m)) || m->order == VM_NFREEORDER, 3339 ("page %p is allocated and unlocked", m)); 3340 3341 for (;;) { 3342 lock = vm_page_pagequeue_lockptr(m); 3343 if (lock == NULL) { 3344 /* 3345 * A thread may be concurrently executing 3346 * vm_page_dequeue_complete(). Ensure that all queue 3347 * state is cleared before we return. 3348 */ 3349 aflags = atomic_load_8(&m->aflags); 3350 if ((aflags & PGA_QUEUE_STATE_MASK) == 0) 3351 return; 3352 KASSERT((aflags & PGA_DEQUEUE) != 0, 3353 ("page %p has unexpected queue state flags %#x", 3354 m, aflags)); 3355 3356 /* 3357 * Busy wait until the thread updating queue state is 3358 * finished. Such a thread must be executing in a 3359 * critical section. 3360 */ 3361 cpu_spinwait(); 3362 continue; 3363 } 3364 mtx_lock(lock); 3365 if ((lock1 = vm_page_pagequeue_lockptr(m)) == lock) 3366 break; 3367 mtx_unlock(lock); 3368 lock = lock1; 3369 } 3370 KASSERT(lock == vm_page_pagequeue_lockptr(m), 3371 ("%s: page %p migrated directly between queues", __func__, m)); 3372 KASSERT((m->aflags & PGA_DEQUEUE) != 0 || 3373 mtx_owned(vm_page_lockptr(m)), 3374 ("%s: queued unlocked page %p", __func__, m)); 3375 3376 if ((m->aflags & PGA_ENQUEUED) != 0) { 3377 pq = vm_page_pagequeue(m); 3378 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3379 vm_pagequeue_cnt_dec(pq); 3380 } 3381 vm_page_dequeue_complete(m); 3382 mtx_unlock(lock); 3383 } 3384 3385 /* 3386 * Schedule the given page for insertion into the specified page queue. 
3387 * Physical insertion of the page may be deferred indefinitely. 3388 */ 3389 static void 3390 vm_page_enqueue(vm_page_t m, uint8_t queue) 3391 { 3392 3393 vm_page_assert_locked(m); 3394 KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0, 3395 ("%s: page %p is already enqueued", __func__, m)); 3396 3397 m->queue = queue; 3398 if ((m->aflags & PGA_REQUEUE) == 0) 3399 vm_page_aflag_set(m, PGA_REQUEUE); 3400 vm_pqbatch_submit_page(m, queue); 3401 } 3402 3403 /* 3404 * vm_page_requeue: [ internal use only ] 3405 * 3406 * Schedule a requeue of the given page. 3407 * 3408 * The page must be locked. 3409 */ 3410 void 3411 vm_page_requeue(vm_page_t m) 3412 { 3413 3414 vm_page_assert_locked(m); 3415 KASSERT(vm_page_queue(m) != PQ_NONE, 3416 ("%s: page %p is not logically enqueued", __func__, m)); 3417 3418 if ((m->aflags & PGA_REQUEUE) == 0) 3419 vm_page_aflag_set(m, PGA_REQUEUE); 3420 vm_pqbatch_submit_page(m, atomic_load_8(&m->queue)); 3421 } 3422 3423 /* 3424 * vm_page_activate: 3425 * 3426 * Put the specified page on the active list (if appropriate). 3427 * Ensure that act_count is at least ACT_INIT but do not otherwise 3428 * mess with it. 3429 * 3430 * The page must be locked. 3431 */ 3432 void 3433 vm_page_activate(vm_page_t m) 3434 { 3435 3436 vm_page_assert_locked(m); 3437 3438 if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0) 3439 return; 3440 if (vm_page_queue(m) == PQ_ACTIVE) { 3441 if (m->act_count < ACT_INIT) 3442 m->act_count = ACT_INIT; 3443 return; 3444 } 3445 3446 vm_page_dequeue(m); 3447 if (m->act_count < ACT_INIT) 3448 m->act_count = ACT_INIT; 3449 vm_page_enqueue(m, PQ_ACTIVE); 3450 } 3451 3452 /* 3453 * vm_page_free_prep: 3454 * 3455 * Prepares the given page to be put on the free list, 3456 * disassociating it from any VM object. The caller may return 3457 * the page to the free list only if this function returns true. 3458 * 3459 * The object must be locked. The page must be locked if it is 3460 * managed. 3461 */ 3462 bool 3463 vm_page_free_prep(vm_page_t m) 3464 { 3465 3466 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) 3467 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) { 3468 uint64_t *p; 3469 int i; 3470 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 3471 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) 3472 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", 3473 m, i, (uintmax_t)*p)); 3474 } 3475 #endif 3476 if ((m->oflags & VPO_UNMANAGED) == 0) { 3477 vm_page_lock_assert(m, MA_OWNED); 3478 KASSERT(!pmap_page_is_mapped(m), 3479 ("vm_page_free_prep: freeing mapped page %p", m)); 3480 } else 3481 KASSERT(m->queue == PQ_NONE, 3482 ("vm_page_free_prep: unmanaged page %p is queued", m)); 3483 VM_CNT_INC(v_tfree); 3484 3485 if (vm_page_sbusied(m)) 3486 panic("vm_page_free_prep: freeing busy page %p", m); 3487 3488 vm_page_remove(m); 3489 3490 /* 3491 * If fictitious remove object association and 3492 * return. 3493 */ 3494 if ((m->flags & PG_FICTITIOUS) != 0) { 3495 KASSERT(m->wire_count == 1, 3496 ("fictitious page %p is not wired", m)); 3497 KASSERT(m->queue == PQ_NONE, 3498 ("fictitious page %p is queued", m)); 3499 return (false); 3500 } 3501 3502 /* 3503 * Pages need not be dequeued before they are returned to the physical 3504 * memory allocator, but they must at least be marked for a deferred 3505 * dequeue. 
3506 */ 3507 if ((m->oflags & VPO_UNMANAGED) == 0) 3508 vm_page_dequeue_deferred_free(m); 3509 3510 m->valid = 0; 3511 vm_page_undirty(m); 3512 3513 if (m->wire_count != 0) 3514 panic("vm_page_free_prep: freeing wired page %p", m); 3515 if (m->hold_count != 0) { 3516 m->flags &= ~PG_ZERO; 3517 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 3518 ("vm_page_free_prep: freeing PG_UNHOLDFREE page %p", m)); 3519 m->flags |= PG_UNHOLDFREE; 3520 return (false); 3521 } 3522 3523 /* 3524 * Restore the default memory attribute to the page. 3525 */ 3526 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 3527 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 3528 3529 #if VM_NRESERVLEVEL > 0 3530 if (vm_reserv_free_page(m)) 3531 return (false); 3532 #endif 3533 3534 return (true); 3535 } 3536 3537 /* 3538 * vm_page_free_toq: 3539 * 3540 * Returns the given page to the free list, disassociating it 3541 * from any VM object. 3542 * 3543 * The object must be locked. The page must be locked if it is 3544 * managed. 3545 */ 3546 void 3547 vm_page_free_toq(vm_page_t m) 3548 { 3549 struct vm_domain *vmd; 3550 3551 if (!vm_page_free_prep(m)) 3552 return; 3553 3554 vmd = vm_pagequeue_domain(m); 3555 if (m->pool == VM_FREEPOOL_DEFAULT && vmd->vmd_pgcache != NULL) { 3556 uma_zfree(vmd->vmd_pgcache, m); 3557 return; 3558 } 3559 vm_domain_free_lock(vmd); 3560 vm_phys_free_pages(m, 0); 3561 vm_domain_free_unlock(vmd); 3562 vm_domain_freecnt_inc(vmd, 1); 3563 } 3564 3565 /* 3566 * vm_page_free_pages_toq: 3567 * 3568 * Returns a list of pages to the free list, disassociating it 3569 * from any VM object. In other words, this is equivalent to 3570 * calling vm_page_free_toq() for each page of a list of VM objects. 3571 * 3572 * The objects must be locked. The pages must be locked if it is 3573 * managed. 3574 */ 3575 void 3576 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count) 3577 { 3578 vm_page_t m; 3579 int count; 3580 3581 if (SLIST_EMPTY(free)) 3582 return; 3583 3584 count = 0; 3585 while ((m = SLIST_FIRST(free)) != NULL) { 3586 count++; 3587 SLIST_REMOVE_HEAD(free, plinks.s.ss); 3588 vm_page_free_toq(m); 3589 } 3590 3591 if (update_wire_count) 3592 vm_wire_sub(count); 3593 } 3594 3595 /* 3596 * vm_page_wire: 3597 * 3598 * Mark this page as wired down. If the page is fictitious, then 3599 * its wire count must remain one. 3600 * 3601 * The page must be locked. 3602 */ 3603 void 3604 vm_page_wire(vm_page_t m) 3605 { 3606 3607 vm_page_assert_locked(m); 3608 if ((m->flags & PG_FICTITIOUS) != 0) { 3609 KASSERT(m->wire_count == 1, 3610 ("vm_page_wire: fictitious page %p's wire count isn't one", 3611 m)); 3612 return; 3613 } 3614 if (m->wire_count == 0) { 3615 KASSERT((m->oflags & VPO_UNMANAGED) == 0 || 3616 m->queue == PQ_NONE, 3617 ("vm_page_wire: unmanaged page %p is queued", m)); 3618 vm_wire_add(1); 3619 } 3620 m->wire_count++; 3621 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); 3622 } 3623 3624 /* 3625 * vm_page_unwire: 3626 * 3627 * Release one wiring of the specified page, potentially allowing it to be 3628 * paged out. Returns TRUE if the number of wirings transitions to zero and 3629 * FALSE otherwise. 3630 * 3631 * Only managed pages belonging to an object can be paged out. If the number 3632 * of wirings transitions to zero and the page is eligible for page out, then 3633 * the page is added to the specified paging queue (unless PQ_NONE is 3634 * specified, in which case the page is dequeued if it belongs to a paging 3635 * queue). 
3636 * 3637 * If a page is fictitious, then its wire count must always be one. 3638 * 3639 * A managed page must be locked. 3640 */ 3641 bool 3642 vm_page_unwire(vm_page_t m, uint8_t queue) 3643 { 3644 bool unwired; 3645 3646 KASSERT(queue < PQ_COUNT || queue == PQ_NONE, 3647 ("vm_page_unwire: invalid queue %u request for page %p", 3648 queue, m)); 3649 if ((m->oflags & VPO_UNMANAGED) == 0) 3650 vm_page_assert_locked(m); 3651 3652 unwired = vm_page_unwire_noq(m); 3653 if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL) 3654 return (unwired); 3655 3656 if (vm_page_queue(m) == queue) { 3657 if (queue == PQ_ACTIVE) 3658 vm_page_reference(m); 3659 else if (queue != PQ_NONE) 3660 vm_page_requeue(m); 3661 } else { 3662 vm_page_dequeue(m); 3663 if (queue != PQ_NONE) { 3664 vm_page_enqueue(m, queue); 3665 if (queue == PQ_ACTIVE) 3666 /* Initialize act_count. */ 3667 vm_page_activate(m); 3668 } 3669 } 3670 return (unwired); 3671 } 3672 3673 /* 3674 * 3675 * vm_page_unwire_noq: 3676 * 3677 * Unwire a page without (re-)inserting it into a page queue. It is up 3678 * to the caller to enqueue, requeue, or free the page as appropriate. 3679 * In most cases, vm_page_unwire() should be used instead. 3680 */ 3681 bool 3682 vm_page_unwire_noq(vm_page_t m) 3683 { 3684 3685 if ((m->oflags & VPO_UNMANAGED) == 0) 3686 vm_page_assert_locked(m); 3687 if ((m->flags & PG_FICTITIOUS) != 0) { 3688 KASSERT(m->wire_count == 1, 3689 ("vm_page_unwire: fictitious page %p's wire count isn't one", m)); 3690 return (false); 3691 } 3692 if (m->wire_count == 0) 3693 panic("vm_page_unwire: page %p's wire count is zero", m); 3694 m->wire_count--; 3695 if (m->wire_count == 0) { 3696 vm_wire_sub(1); 3697 return (true); 3698 } else 3699 return (false); 3700 } 3701 3702 /* 3703 * Move the specified page to the tail of the inactive queue, or requeue 3704 * the page if it is already in the inactive queue. 3705 * 3706 * The page must be locked. 3707 */ 3708 void 3709 vm_page_deactivate(vm_page_t m) 3710 { 3711 3712 vm_page_assert_locked(m); 3713 3714 if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0) 3715 return; 3716 3717 if (!vm_page_inactive(m)) { 3718 vm_page_dequeue(m); 3719 vm_page_enqueue(m, PQ_INACTIVE); 3720 } else 3721 vm_page_requeue(m); 3722 } 3723 3724 /* 3725 * Move the specified page close to the head of the inactive queue, 3726 * bypassing LRU. A marker page is used to maintain FIFO ordering. 3727 * As with regular enqueues, we use a per-CPU batch queue to reduce 3728 * contention on the page queue lock. 3729 * 3730 * The page must be locked. 3731 */ 3732 void 3733 vm_page_deactivate_noreuse(vm_page_t m) 3734 { 3735 3736 vm_page_assert_locked(m); 3737 3738 if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0) 3739 return; 3740 3741 if (!vm_page_inactive(m)) { 3742 vm_page_dequeue(m); 3743 m->queue = PQ_INACTIVE; 3744 } 3745 if ((m->aflags & PGA_REQUEUE_HEAD) == 0) 3746 vm_page_aflag_set(m, PGA_REQUEUE_HEAD); 3747 vm_pqbatch_submit_page(m, PQ_INACTIVE); 3748 } 3749 3750 /* 3751 * vm_page_launder 3752 * 3753 * Put a page in the laundry, or requeue it if it is already there. 3754 */ 3755 void 3756 vm_page_launder(vm_page_t m) 3757 { 3758 3759 vm_page_assert_locked(m); 3760 if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0) 3761 return; 3762 3763 if (vm_page_in_laundry(m)) 3764 vm_page_requeue(m); 3765 else { 3766 vm_page_dequeue(m); 3767 vm_page_enqueue(m, PQ_LAUNDRY); 3768 } 3769 } 3770 3771 /* 3772 * vm_page_unswappable 3773 * 3774 * Put a page in the PQ_UNSWAPPABLE holding queue. 
3775 */ 3776 void 3777 vm_page_unswappable(vm_page_t m) 3778 { 3779 3780 vm_page_assert_locked(m); 3781 KASSERT(m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0, 3782 ("page %p already unswappable", m)); 3783 3784 vm_page_dequeue(m); 3785 vm_page_enqueue(m, PQ_UNSWAPPABLE); 3786 } 3787 3788 /* 3789 * Attempt to free the page. If it cannot be freed, do nothing. Returns true 3790 * if the page is freed and false otherwise. 3791 * 3792 * The page must be managed. The page and its containing object must be 3793 * locked. 3794 */ 3795 bool 3796 vm_page_try_to_free(vm_page_t m) 3797 { 3798 3799 vm_page_assert_locked(m); 3800 VM_OBJECT_ASSERT_WLOCKED(m->object); 3801 KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m)); 3802 if (m->dirty != 0 || vm_page_held(m) || vm_page_busied(m)) 3803 return (false); 3804 if (m->object->ref_count != 0) { 3805 pmap_remove_all(m); 3806 if (m->dirty != 0) 3807 return (false); 3808 } 3809 vm_page_free(m); 3810 return (true); 3811 } 3812 3813 /* 3814 * vm_page_advise 3815 * 3816 * Apply the specified advice to the given page. 3817 * 3818 * The object and page must be locked. 3819 */ 3820 void 3821 vm_page_advise(vm_page_t m, int advice) 3822 { 3823 3824 vm_page_assert_locked(m); 3825 VM_OBJECT_ASSERT_WLOCKED(m->object); 3826 if (advice == MADV_FREE) 3827 /* 3828 * Mark the page clean. This will allow the page to be freed 3829 * without first paging it out. MADV_FREE pages are often 3830 * quickly reused by malloc(3), so we do not do anything that 3831 * would result in a page fault on a later access. 3832 */ 3833 vm_page_undirty(m); 3834 else if (advice != MADV_DONTNEED) { 3835 if (advice == MADV_WILLNEED) 3836 vm_page_activate(m); 3837 return; 3838 } 3839 3840 /* 3841 * Clear any references to the page. Otherwise, the page daemon will 3842 * immediately reactivate the page. 3843 */ 3844 vm_page_aflag_clear(m, PGA_REFERENCED); 3845 3846 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) 3847 vm_page_dirty(m); 3848 3849 /* 3850 * Place clean pages near the head of the inactive queue rather than 3851 * the tail, thus defeating the queue's LRU operation and ensuring that 3852 * the page will be reused quickly. Dirty pages not already in the 3853 * laundry are moved there. 3854 */ 3855 if (m->dirty == 0) 3856 vm_page_deactivate_noreuse(m); 3857 else if (!vm_page_in_laundry(m)) 3858 vm_page_launder(m); 3859 } 3860 3861 /* 3862 * Grab a page, waiting until we are waken up due to the page 3863 * changing state. We keep on waiting, if the page continues 3864 * to be in the object. If the page doesn't exist, first allocate it 3865 * and then conditionally zero it. 3866 * 3867 * This routine may sleep. 3868 * 3869 * The object must be locked on entry. The lock will, however, be released 3870 * and reacquired if the routine sleeps. 3871 */ 3872 vm_page_t 3873 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 3874 { 3875 vm_page_t m; 3876 int sleep; 3877 int pflags; 3878 3879 VM_OBJECT_ASSERT_WLOCKED(object); 3880 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 3881 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 3882 ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch")); 3883 pflags = allocflags & 3884 ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); 3885 if ((allocflags & VM_ALLOC_NOWAIT) == 0) 3886 pflags |= VM_ALLOC_WAITFAIL; 3887 retrylookup: 3888 if ((m = vm_page_lookup(object, pindex)) != NULL) { 3889 sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ? 
vm_page_xbusied(m) : vm_page_busied(m);
3891 if (sleep) {
3892 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3893 return (NULL);
3894 /*
3895 * Reference the page before unlocking and
3896 * sleeping so that the page daemon is less
3897 * likely to reclaim it.
3898 */
3899 vm_page_aflag_set(m, PGA_REFERENCED);
3900 vm_page_lock(m);
3901 VM_OBJECT_WUNLOCK(object);
3902 vm_page_busy_sleep(m, "pgrbwt", (allocflags &
3903 VM_ALLOC_IGN_SBUSY) != 0);
3904 VM_OBJECT_WLOCK(object);
3905 goto retrylookup;
3906 } else {
3907 if ((allocflags & VM_ALLOC_WIRED) != 0) {
3908 vm_page_lock(m);
3909 vm_page_wire(m);
3910 vm_page_unlock(m);
3911 }
3912 if ((allocflags &
3913 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
3914 vm_page_xbusy(m);
3915 if ((allocflags & VM_ALLOC_SBUSY) != 0)
3916 vm_page_sbusy(m);
3917 return (m);
3918 }
3919 }
3920 m = vm_page_alloc(object, pindex, pflags);
3921 if (m == NULL) {
3922 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3923 return (NULL);
3924 goto retrylookup;
3925 }
3926 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
3927 pmap_zero_page(m);
3928 return (m);
3929 }
3930
3931 /*
3932 * Return the specified range of pages from the given object. For each
3933 * page offset within the range, if a page already exists within the object
3934 * at that offset and it is busy, then wait for it to change state. If,
3935 * instead, the page doesn't exist, then allocate it.
3936 *
3937 * The caller must always specify an allocation class.
3938 *
3939 * allocation classes:
3940 * VM_ALLOC_NORMAL normal process request
3941 * VM_ALLOC_SYSTEM system *really* needs the pages
3942 *
3943 * The caller must always specify that the pages are to be busied and/or
3944 * wired.
3945 *
3946 * optional allocation flags:
3947 * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages
3948 * VM_ALLOC_NOBUSY do not exclusive busy the page
3949 * VM_ALLOC_NOWAIT do not sleep
3950 * VM_ALLOC_SBUSY set page to sbusy state
3951 * VM_ALLOC_WIRED wire the pages
3952 * VM_ALLOC_ZERO zero and validate any invalid pages
3953 *
3954 * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it
3955 * may return a partial prefix of the requested range.
3956 */
3957 int
3958 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
3959 vm_page_t *ma, int count)
3960 {
3961 vm_page_t m, mpred;
3962 int pflags;
3963 int i;
3964 bool sleep;
3965
3966 VM_OBJECT_ASSERT_WLOCKED(object);
3967 KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
3968 ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
3969 KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
3970 (allocflags & VM_ALLOC_WIRED) != 0,
3971 ("vm_page_grab_pages: the pages must be busied or wired"));
3972 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
3973 (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
3974 ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
3975 if (count == 0)
3976 return (0);
3977 pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |
3978 VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY);
3979 if ((allocflags & VM_ALLOC_NOWAIT) == 0)
3980 pflags |= VM_ALLOC_WAITFAIL;
3981 i = 0;
3982 retrylookup:
3983 m = vm_radix_lookup_le(&object->rtree, pindex + i);
3984 if (m == NULL || m->pindex != pindex + i) {
3985 mpred = m;
3986 m = NULL;
3987 } else
3988 mpred = TAILQ_PREV(m, pglist, listq);
3989 for (; i < count; i++) {
3990 if (m != NULL) {
3991 sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
3992 vm_page_xbusied(m) : vm_page_busied(m); 3993 if (sleep) { 3994 if ((allocflags & VM_ALLOC_NOWAIT) != 0) 3995 break; 3996 /* 3997 * Reference the page before unlocking and 3998 * sleeping so that the page daemon is less 3999 * likely to reclaim it. 4000 */ 4001 vm_page_aflag_set(m, PGA_REFERENCED); 4002 vm_page_lock(m); 4003 VM_OBJECT_WUNLOCK(object); 4004 vm_page_busy_sleep(m, "grbmaw", (allocflags & 4005 VM_ALLOC_IGN_SBUSY) != 0); 4006 VM_OBJECT_WLOCK(object); 4007 goto retrylookup; 4008 } 4009 if ((allocflags & VM_ALLOC_WIRED) != 0) { 4010 vm_page_lock(m); 4011 vm_page_wire(m); 4012 vm_page_unlock(m); 4013 } 4014 if ((allocflags & (VM_ALLOC_NOBUSY | 4015 VM_ALLOC_SBUSY)) == 0) 4016 vm_page_xbusy(m); 4017 if ((allocflags & VM_ALLOC_SBUSY) != 0) 4018 vm_page_sbusy(m); 4019 } else { 4020 m = vm_page_alloc_after(object, pindex + i, 4021 pflags | VM_ALLOC_COUNT(count - i), mpred); 4022 if (m == NULL) { 4023 if ((allocflags & VM_ALLOC_NOWAIT) != 0) 4024 break; 4025 goto retrylookup; 4026 } 4027 } 4028 if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) { 4029 if ((m->flags & PG_ZERO) == 0) 4030 pmap_zero_page(m); 4031 m->valid = VM_PAGE_BITS_ALL; 4032 } 4033 ma[i] = mpred = m; 4034 m = vm_page_next(m); 4035 } 4036 return (i); 4037 } 4038 4039 /* 4040 * Mapping function for valid or dirty bits in a page. 4041 * 4042 * Inputs are required to range within a page. 4043 */ 4044 vm_page_bits_t 4045 vm_page_bits(int base, int size) 4046 { 4047 int first_bit; 4048 int last_bit; 4049 4050 KASSERT( 4051 base + size <= PAGE_SIZE, 4052 ("vm_page_bits: illegal base/size %d/%d", base, size) 4053 ); 4054 4055 if (size == 0) /* handle degenerate case */ 4056 return (0); 4057 4058 first_bit = base >> DEV_BSHIFT; 4059 last_bit = (base + size - 1) >> DEV_BSHIFT; 4060 4061 return (((vm_page_bits_t)2 << last_bit) - 4062 ((vm_page_bits_t)1 << first_bit)); 4063 } 4064 4065 /* 4066 * vm_page_set_valid_range: 4067 * 4068 * Sets portions of a page valid. The arguments are expected 4069 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 4070 * of any partial chunks touched by the range. The invalid portion of 4071 * such chunks will be zeroed. 4072 * 4073 * (base + size) must be less then or equal to PAGE_SIZE. 4074 */ 4075 void 4076 vm_page_set_valid_range(vm_page_t m, int base, int size) 4077 { 4078 int endoff, frag; 4079 4080 VM_OBJECT_ASSERT_WLOCKED(m->object); 4081 if (size == 0) /* handle degenerate case */ 4082 return; 4083 4084 /* 4085 * If the base is not DEV_BSIZE aligned and the valid 4086 * bit is clear, we have to zero out a portion of the 4087 * first block. 4088 */ 4089 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 4090 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 4091 pmap_zero_page_area(m, frag, base - frag); 4092 4093 /* 4094 * If the ending offset is not DEV_BSIZE aligned and the 4095 * valid bit is clear, we have to zero out a portion of 4096 * the last block. 4097 */ 4098 endoff = base + size; 4099 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 4100 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 4101 pmap_zero_page_area(m, endoff, 4102 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 4103 4104 /* 4105 * Assert that no previously invalid block that is now being validated 4106 * is already dirty. 4107 */ 4108 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 4109 ("vm_page_set_valid_range: page %p is dirty", m)); 4110 4111 /* 4112 * Set valid bits inclusive of any overlap. 
4113 */ 4114 m->valid |= vm_page_bits(base, size); 4115 } 4116 4117 /* 4118 * Clear the given bits from the specified page's dirty field. 4119 */ 4120 static __inline void 4121 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) 4122 { 4123 uintptr_t addr; 4124 #if PAGE_SIZE < 16384 4125 int shift; 4126 #endif 4127 4128 /* 4129 * If the object is locked and the page is neither exclusive busy nor 4130 * write mapped, then the page's dirty field cannot possibly be 4131 * set by a concurrent pmap operation. 4132 */ 4133 VM_OBJECT_ASSERT_WLOCKED(m->object); 4134 if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) 4135 m->dirty &= ~pagebits; 4136 else { 4137 /* 4138 * The pmap layer can call vm_page_dirty() without 4139 * holding a distinguished lock. The combination of 4140 * the object's lock and an atomic operation suffice 4141 * to guarantee consistency of the page dirty field. 4142 * 4143 * For PAGE_SIZE == 32768 case, compiler already 4144 * properly aligns the dirty field, so no forcible 4145 * alignment is needed. Only require existence of 4146 * atomic_clear_64 when page size is 32768. 4147 */ 4148 addr = (uintptr_t)&m->dirty; 4149 #if PAGE_SIZE == 32768 4150 atomic_clear_64((uint64_t *)addr, pagebits); 4151 #elif PAGE_SIZE == 16384 4152 atomic_clear_32((uint32_t *)addr, pagebits); 4153 #else /* PAGE_SIZE <= 8192 */ 4154 /* 4155 * Use a trick to perform a 32-bit atomic on the 4156 * containing aligned word, to not depend on the existence 4157 * of atomic_clear_{8, 16}. 4158 */ 4159 shift = addr & (sizeof(uint32_t) - 1); 4160 #if BYTE_ORDER == BIG_ENDIAN 4161 shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY; 4162 #else 4163 shift *= NBBY; 4164 #endif 4165 addr &= ~(sizeof(uint32_t) - 1); 4166 atomic_clear_32((uint32_t *)addr, pagebits << shift); 4167 #endif /* PAGE_SIZE */ 4168 } 4169 } 4170 4171 /* 4172 * vm_page_set_validclean: 4173 * 4174 * Sets portions of a page valid and clean. The arguments are expected 4175 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 4176 * of any partial chunks touched by the range. The invalid portion of 4177 * such chunks will be zero'd. 4178 * 4179 * (base + size) must be less then or equal to PAGE_SIZE. 4180 */ 4181 void 4182 vm_page_set_validclean(vm_page_t m, int base, int size) 4183 { 4184 vm_page_bits_t oldvalid, pagebits; 4185 int endoff, frag; 4186 4187 VM_OBJECT_ASSERT_WLOCKED(m->object); 4188 if (size == 0) /* handle degenerate case */ 4189 return; 4190 4191 /* 4192 * If the base is not DEV_BSIZE aligned and the valid 4193 * bit is clear, we have to zero out a portion of the 4194 * first block. 4195 */ 4196 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 4197 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) 4198 pmap_zero_page_area(m, frag, base - frag); 4199 4200 /* 4201 * If the ending offset is not DEV_BSIZE aligned and the 4202 * valid bit is clear, we have to zero out a portion of 4203 * the last block. 4204 */ 4205 endoff = base + size; 4206 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 4207 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) 4208 pmap_zero_page_area(m, endoff, 4209 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 4210 4211 /* 4212 * Set valid, clear dirty bits. If validating the entire 4213 * page we can safely clear the pmap modify bit. We also 4214 * use this opportunity to clear the VPO_NOSYNC flag. If a process 4215 * takes a write fault on a MAP_NOSYNC memory area the flag will 4216 * be set again. 
4217 * 4218 * We set valid bits inclusive of any overlap, but we can only 4219 * clear dirty bits for DEV_BSIZE chunks that are fully within 4220 * the range. 4221 */ 4222 oldvalid = m->valid; 4223 pagebits = vm_page_bits(base, size); 4224 m->valid |= pagebits; 4225 #if 0 /* NOT YET */ 4226 if ((frag = base & (DEV_BSIZE - 1)) != 0) { 4227 frag = DEV_BSIZE - frag; 4228 base += frag; 4229 size -= frag; 4230 if (size < 0) 4231 size = 0; 4232 } 4233 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 4234 #endif 4235 if (base == 0 && size == PAGE_SIZE) { 4236 /* 4237 * The page can only be modified within the pmap if it is 4238 * mapped, and it can only be mapped if it was previously 4239 * fully valid. 4240 */ 4241 if (oldvalid == VM_PAGE_BITS_ALL) 4242 /* 4243 * Perform the pmap_clear_modify() first. Otherwise, 4244 * a concurrent pmap operation, such as 4245 * pmap_protect(), could clear a modification in the 4246 * pmap and set the dirty field on the page before 4247 * pmap_clear_modify() had begun and after the dirty 4248 * field was cleared here. 4249 */ 4250 pmap_clear_modify(m); 4251 m->dirty = 0; 4252 m->oflags &= ~VPO_NOSYNC; 4253 } else if (oldvalid != VM_PAGE_BITS_ALL) 4254 m->dirty &= ~pagebits; 4255 else 4256 vm_page_clear_dirty_mask(m, pagebits); 4257 } 4258 4259 void 4260 vm_page_clear_dirty(vm_page_t m, int base, int size) 4261 { 4262 4263 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 4264 } 4265 4266 /* 4267 * vm_page_set_invalid: 4268 * 4269 * Invalidates DEV_BSIZE'd chunks within a page. Both the 4270 * valid and dirty bits for the effected areas are cleared. 4271 */ 4272 void 4273 vm_page_set_invalid(vm_page_t m, int base, int size) 4274 { 4275 vm_page_bits_t bits; 4276 vm_object_t object; 4277 4278 object = m->object; 4279 VM_OBJECT_ASSERT_WLOCKED(object); 4280 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + 4281 size >= object->un_pager.vnp.vnp_size) 4282 bits = VM_PAGE_BITS_ALL; 4283 else 4284 bits = vm_page_bits(base, size); 4285 if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL && 4286 bits != 0) 4287 pmap_remove_all(m); 4288 KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) || 4289 !pmap_page_is_mapped(m), 4290 ("vm_page_set_invalid: page %p is mapped", m)); 4291 m->valid &= ~bits; 4292 m->dirty &= ~bits; 4293 } 4294 4295 /* 4296 * vm_page_zero_invalid() 4297 * 4298 * The kernel assumes that the invalid portions of a page contain 4299 * garbage, but such pages can be mapped into memory by user code. 4300 * When this occurs, we must zero out the non-valid portions of the 4301 * page so user code sees what it expects. 4302 * 4303 * Pages are most often semi-valid when the end of a file is mapped 4304 * into memory and the file's size is not page aligned. 4305 */ 4306 void 4307 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 4308 { 4309 int b; 4310 int i; 4311 4312 VM_OBJECT_ASSERT_WLOCKED(m->object); 4313 /* 4314 * Scan the valid bits looking for invalid sections that 4315 * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the 4316 * valid bit may be set ) have already been zeroed by 4317 * vm_page_set_validclean(). 4318 */ 4319 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 4320 if (i == (PAGE_SIZE / DEV_BSIZE) || 4321 (m->valid & ((vm_page_bits_t)1 << i))) { 4322 if (i > b) { 4323 pmap_zero_page_area(m, 4324 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 4325 } 4326 b = i + 1; 4327 } 4328 } 4329 4330 /* 4331 * setvalid is TRUE when we can safely set the zero'd areas 4332 * as being valid. 
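
/*
 * Example (illustrative sketch only, not part of this file's interfaces):
 * a caller wanting "npages" consecutive, zero-filled, exclusive-busied
 * pages backing a hypothetical object "obj" starting at index "pidx" might
 * use vm_page_grab_pages() as below.  The object write lock must be held
 * across the call; because VM_ALLOC_NOWAIT is not specified, the routine
 * sleeps as needed and returns the full count.
 */
#if 0	/* example only */
static void
vm_page_grab_pages_example(vm_object_t obj, vm_pindex_t pidx, int npages)
{
	vm_page_t ma[16];
	int grabbed, i;

	KASSERT(npages <= nitems(ma), ("example: request too large"));
	VM_OBJECT_WLOCK(obj);
	grabbed = vm_page_grab_pages(obj, pidx,
	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO, ma, npages);
	/* Without VM_ALLOC_NOWAIT a partial prefix is never returned. */
	KASSERT(grabbed == npages, ("example: short grab without NOWAIT"));
	for (i = 0; i < grabbed; i++) {
		/* ... fill in or map ma[i] here ... */
		vm_page_xunbusy(ma[i]);
	}
	VM_OBJECT_WUNLOCK(obj);
}
#endif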

/*
 * Mapping function for valid or dirty bits in a page.
 *
 * Inputs are required to range within a page.
 */
vm_page_bits_t
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return (0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
}

/*
 * vm_page_set_valid_range:
 *
 *	Sets portions of a page valid.  The arguments are expected
 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_valid_range(vm_page_t m, int base, int size)
{
	int endoff, frag;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Assert that no previously invalid block that is now being validated
	 * is already dirty.
	 */
	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
	    ("vm_page_set_valid_range: page %p is dirty", m));

	/*
	 * Set valid bits inclusive of any overlap.
	 */
	m->valid |= vm_page_bits(base, size);
}

/*
 * Clear the given bits from the specified page's dirty field.
 */
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
{
	uintptr_t addr;
#if PAGE_SIZE < 16384
	int shift;
#endif

	/*
	 * If the object is locked and the page is neither exclusive busy nor
	 * write mapped, then the page's dirty field cannot possibly be
	 * set by a concurrent pmap operation.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
		m->dirty &= ~pagebits;
	else {
		/*
		 * The pmap layer can call vm_page_dirty() without
		 * holding a distinguished lock.  The combination of
		 * the object's lock and an atomic operation suffices
		 * to guarantee consistency of the page dirty field.
		 *
		 * In the PAGE_SIZE == 32768 case, the compiler already
		 * properly aligns the dirty field, so no forcible
		 * alignment is needed.  We only require the existence of
		 * atomic_clear_64 when the page size is 32768.
		 */
		addr = (uintptr_t)&m->dirty;
#if PAGE_SIZE == 32768
		atomic_clear_64((uint64_t *)addr, pagebits);
#elif PAGE_SIZE == 16384
		atomic_clear_32((uint32_t *)addr, pagebits);
#else		/* PAGE_SIZE <= 8192 */
		/*
		 * Use a trick to perform a 32-bit atomic on the
		 * containing aligned word, to not depend on the existence
		 * of atomic_clear_{8, 16}.
		 */
		shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
#else
		shift *= NBBY;
#endif
		addr &= ~(sizeof(uint32_t) - 1);
		atomic_clear_32((uint32_t *)addr, pagebits << shift);
#endif		/* PAGE_SIZE */
	}
}
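
/*
 * Worked example for the word-aligned atomic above (illustrative; the exact
 * byte offset depends on the architecture and on how struct vm_page is laid
 * out): with 4 KB pages the dirty field is a single byte.  Suppose it happens
 * to sit at byte offset 3 within its 32-bit word.  On a little-endian machine
 * shift = 3 * NBBY = 24, so "pagebits << 24" lines the mask up with the dirty
 * byte inside the containing word; on a big-endian machine
 * shift = (4 - 1 - 3) * NBBY = 0.  The atomic_clear_32() on the aligned word
 * then clears exactly the requested dirty bits without disturbing the
 * neighboring fields.
 */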

/*
 * vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	vm_page_bits_t oldvalid, pagebits;
	int endoff, frag;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	oldvalid = m->valid;
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	if (base == 0 && size == PAGE_SIZE) {
		/*
		 * The page can only be modified within the pmap if it is
		 * mapped, and it can only be mapped if it was previously
		 * fully valid.
		 */
		if (oldvalid == VM_PAGE_BITS_ALL)
			/*
			 * Perform the pmap_clear_modify() first.  Otherwise,
			 * a concurrent pmap operation, such as
			 * pmap_protect(), could clear a modification in the
			 * pmap and set the dirty field on the page before
			 * pmap_clear_modify() had begun and after the dirty
			 * field was cleared here.
			 */
			pmap_clear_modify(m);
		m->dirty = 0;
		m->oflags &= ~VPO_NOSYNC;
	} else if (oldvalid != VM_PAGE_BITS_ALL)
		m->dirty &= ~pagebits;
	else
		vm_page_clear_dirty_mask(m, pagebits);
}

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{

	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
}

/*
 * vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	vm_page_bits_t bits;
	vm_object_t object;

	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
	    size >= object->un_pager.vnp.vnp_size)
		bits = VM_PAGE_BITS_ALL;
	else
		bits = vm_page_bits(base, size);
	if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL &&
	    bits != 0)
		pmap_remove_all(m);
	KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
	    !pmap_page_is_mapped(m),
	    ("vm_page_set_invalid: page %p is mapped", m));
	m->valid &= ~bits;
	m->dirty &= ~bits;
}

/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
	 * valid bit may be set) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & ((vm_page_bits_t)1 << i))) {
			if (i > b) {
				pmap_zero_page_area(m,
				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zeroed areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  E.g., it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 * vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	vm_page_bits_t bits;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	bits = vm_page_bits(base, size);
	return (m->valid != 0 && (m->valid & bits) == bits);
}

/*
 * Returns true if all of the specified predicates are true for the entire
 * (super)page and false otherwise.
 */
bool
vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
{
	vm_object_t object;
	int i, npages;

	object = m->object;
	if (skip_m != NULL && skip_m->object != object)
		return (false);
	VM_OBJECT_ASSERT_LOCKED(object);
	npages = atop(pagesizes[m->psind]);

	/*
	 * The physically contiguous pages that make up a superpage, i.e., a
	 * page with a page size index ("psind") greater than zero, will
	 * occupy adjacent entries in vm_page_array[].
	 */
	for (i = 0; i < npages; i++) {
		/* Always test object consistency, including "skip_m". */
		if (m[i].object != object)
			return (false);
		if (&m[i] == skip_m)
			continue;
		if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
			return (false);
		if ((flags & PS_ALL_DIRTY) != 0) {
			/*
			 * Calling vm_page_test_dirty() or pmap_is_modified()
			 * might stop this case from spuriously returning
			 * "false".  However, that would require a write lock
			 * on the object containing "m[i]".
			 */
			if (m[i].dirty != VM_PAGE_BITS_ALL)
				return (false);
		}
		if ((flags & PS_ALL_VALID) != 0 &&
		    m[i].valid != VM_PAGE_BITS_ALL)
			return (false);
	}
	return (true);
}

/*
 * Set the page's dirty bits if the page is modified.
 */
void
vm_page_test_dirty(vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
		vm_page_dirty(m);
}

void
vm_page_lock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
}

void
vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
}

int
vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
{

	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
}

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void
vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
{

	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
}

void
vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
{

	mtx_assert_(vm_page_lockptr(m), a, file, line);
}
#endif

#ifdef INVARIANTS
void
vm_page_object_lock_assert(vm_page_t m)
{

	/*
	 * Certain of the page's fields may only be modified by the
	 * holder of the containing object's lock or the exclusive busy
	 * holder.  Unfortunately, the holder of the write busy is
	 * not recorded, and thus cannot be checked here.
	 */
	if (m->object != NULL && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_WLOCKED(m->object);
}

void
vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
{

	if ((bits & PGA_WRITEABLE) == 0)
		return;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is
	 * managed, is exclusively busied or the object is locked.
	 * Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("PGA_WRITEABLE on unmanaged page"));
	if (!vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{

	db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
	db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
	db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
	db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
	db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int dom;

	db_printf("pq_free %d\n", vm_free_count());
	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf(
    "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
		    dom,
		    vm_dom[dom].vmd_page_count,
		    vm_dom[dom].vmd_free_count,
		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
	}
}

DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
{
	vm_page_t m;
	boolean_t phys, virt;

	if (!have_addr) {
		db_printf("show pginfo addr\n");
		return;
	}

	phys = strchr(modif, 'p') != NULL;
	virt = strchr(modif, 'v') != NULL;
	if (virt)
		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
	else if (phys)
		m = PHYS_TO_VM_PAGE(addr);
	else
		m = (vm_page_t)addr;
	db_printf(
    "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
    "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
	    m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
}
#endif /* DDB */
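
/*
 * Example (illustrative): from the in-kernel debugger the commands defined
 * above might be invoked as follows, where <addr> is supplied by the user:
 *
 *	db> show page
 *	db> show pageq
 *	db> show pginfo <address of a struct vm_page>
 *	db> show pginfo/p <physical address>
 *	db> show pginfo/v <kernel virtual address>
 *
 * The 'p' and 'v' modifiers select interpretation of the address as a
 * physical or a kernel virtual address, respectively; with no modifier the
 * address is taken to be a vm_page pointer.
 */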