1 /*- 2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * The Mach Operating System project at Carnegie-Mellon University. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 36 */ 37 38 /*- 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 */ 64 65 /* 66 * GENERAL RULES ON VM_PAGE MANIPULATION 67 * 68 * - A page queue lock is required when adding or removing a page from a 69 * page queue regardless of other locks or the busy state of a page. 70 * 71 * * In general, no thread besides the page daemon can acquire or 72 * hold more than one page queue lock at a time. 73 * 74 * * The page daemon can acquire and hold any pair of page queue 75 * locks in any order. 
76 * 77 * - The object lock is required when inserting or removing 78 * pages from an object (vm_page_insert() or vm_page_remove()). 79 * 80 */ 81 82 /* 83 * Resident memory management module. 84 */ 85 86 #include <sys/cdefs.h> 87 __FBSDID("$FreeBSD$"); 88 89 #include "opt_vm.h" 90 91 #include <sys/param.h> 92 #include <sys/systm.h> 93 #include <sys/lock.h> 94 #include <sys/domainset.h> 95 #include <sys/kernel.h> 96 #include <sys/limits.h> 97 #include <sys/linker.h> 98 #include <sys/malloc.h> 99 #include <sys/mman.h> 100 #include <sys/msgbuf.h> 101 #include <sys/mutex.h> 102 #include <sys/proc.h> 103 #include <sys/rwlock.h> 104 #include <sys/sbuf.h> 105 #include <sys/sched.h> 106 #include <sys/smp.h> 107 #include <sys/sysctl.h> 108 #include <sys/vmmeter.h> 109 #include <sys/vnode.h> 110 111 #include <vm/vm.h> 112 #include <vm/pmap.h> 113 #include <vm/vm_param.h> 114 #include <vm/vm_domainset.h> 115 #include <vm/vm_kern.h> 116 #include <vm/vm_map.h> 117 #include <vm/vm_object.h> 118 #include <vm/vm_page.h> 119 #include <vm/vm_pageout.h> 120 #include <vm/vm_phys.h> 121 #include <vm/vm_pagequeue.h> 122 #include <vm/vm_pager.h> 123 #include <vm/vm_radix.h> 124 #include <vm/vm_reserv.h> 125 #include <vm/vm_extern.h> 126 #include <vm/uma.h> 127 #include <vm/uma_int.h> 128 129 #include <machine/md_var.h> 130 131 extern int uma_startup_count(int); 132 extern void uma_startup(void *, int); 133 extern int vmem_startup_count(void); 134 135 struct vm_domain vm_dom[MAXMEMDOM]; 136 137 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]); 138 139 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT]; 140 141 struct mtx_padalign __exclusive_cache_line vm_domainset_lock; 142 /* The following fields are protected by the domainset lock. */ 143 domainset_t __exclusive_cache_line vm_min_domains; 144 domainset_t __exclusive_cache_line vm_severe_domains; 145 static int vm_min_waiters; 146 static int vm_severe_waiters; 147 static int vm_pageproc_waiters; 148 149 /* 150 * bogus page -- for I/O to/from partially complete buffers, 151 * or for paging into sparsely invalid regions. 
152 */ 153 vm_page_t bogus_page; 154 155 vm_page_t vm_page_array; 156 long vm_page_array_size; 157 long first_page; 158 159 static int boot_pages; 160 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 161 &boot_pages, 0, 162 "number of pages allocated for bootstrapping the VM system"); 163 164 static int pa_tryrelock_restart; 165 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD, 166 &pa_tryrelock_restart, 0, "Number of tryrelock restarts"); 167 168 static TAILQ_HEAD(, vm_page) blacklist_head; 169 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS); 170 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD | 171 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages"); 172 173 static uma_zone_t fakepg_zone; 174 175 static void vm_page_alloc_check(vm_page_t m); 176 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits); 177 static void vm_page_dequeue_complete(vm_page_t m); 178 static void vm_page_enqueue(vm_page_t m, uint8_t queue); 179 static void vm_page_init(void *dummy); 180 static int vm_page_insert_after(vm_page_t m, vm_object_t object, 181 vm_pindex_t pindex, vm_page_t mpred); 182 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object, 183 vm_page_t mpred); 184 static int vm_page_reclaim_run(int req_class, int domain, u_long npages, 185 vm_page_t m_run, vm_paddr_t high); 186 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, 187 int req); 188 static int vm_page_import(void *arg, void **store, int cnt, int domain, 189 int flags); 190 static void vm_page_release(void *arg, void **store, int cnt); 191 192 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL); 193 194 static void 195 vm_page_init(void *dummy) 196 { 197 198 fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL, 199 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM); 200 bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | 201 VM_ALLOC_NORMAL | VM_ALLOC_WIRED); 202 } 203 204 /* 205 * The cache page zone is initialized later since we need to be able to allocate 206 * pages before UMA is fully initialized. 207 */ 208 static void 209 vm_page_init_cache_zones(void *dummy __unused) 210 { 211 struct vm_domain *vmd; 212 int i; 213 214 for (i = 0; i < vm_ndomains; i++) { 215 vmd = VM_DOMAIN(i); 216 /* 217 * Don't allow the page cache to take up more than .25% of 218 * memory. 219 */ 220 if (vmd->vmd_page_count / 400 < 256 * mp_ncpus) 221 continue; 222 vmd->vmd_pgcache = uma_zcache_create("vm pgcache", 223 sizeof(struct vm_page), NULL, NULL, NULL, NULL, 224 vm_page_import, vm_page_release, vmd, 225 UMA_ZONE_MAXBUCKET | UMA_ZONE_VM); 226 (void )uma_zone_set_maxcache(vmd->vmd_pgcache, 0); 227 } 228 } 229 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL); 230 231 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */ 232 #if PAGE_SIZE == 32768 233 #ifdef CTASSERT 234 CTASSERT(sizeof(u_long) >= 8); 235 #endif 236 #endif 237 238 /* 239 * Try to acquire a physical address lock while a pmap is locked. If we 240 * fail to trylock we unlock and lock the pmap directly and cache the 241 * locked pa in *locked. The caller should then restart their loop in case 242 * the virtual to physical mapping has changed. 
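 *
 * A minimal caller sketch, patterned after pmap_extract_and_hold(); the
 * names "pmap" and "va" and the surrounding lookup are illustrative
 * assumptions, not code from this file:
 *
 *	vm_paddr_t locked_pa = 0;
 *
 *	PMAP_LOCK(pmap);
 * retry:
 *	pa = ...physical address of the mapping of "va", read under the
 *	    pmap lock...;
 *	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *		goto retry;
 *	...operate on the page at "pa" while its pa lock is held...
 *	PA_UNLOCK_COND(locked_pa);
 *	PMAP_UNLOCK(pmap);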
243 */ 244 int 245 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) 246 { 247 vm_paddr_t lockpa; 248 249 lockpa = *locked; 250 *locked = pa; 251 if (lockpa) { 252 PA_LOCK_ASSERT(lockpa, MA_OWNED); 253 if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa)) 254 return (0); 255 PA_UNLOCK(lockpa); 256 } 257 if (PA_TRYLOCK(pa)) 258 return (0); 259 PMAP_UNLOCK(pmap); 260 atomic_add_int(&pa_tryrelock_restart, 1); 261 PA_LOCK(pa); 262 PMAP_LOCK(pmap); 263 return (EAGAIN); 264 } 265 266 /* 267 * vm_set_page_size: 268 * 269 * Sets the page size, perhaps based upon the memory 270 * size. Must be called before any use of page-size 271 * dependent functions. 272 */ 273 void 274 vm_set_page_size(void) 275 { 276 if (vm_cnt.v_page_size == 0) 277 vm_cnt.v_page_size = PAGE_SIZE; 278 if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0) 279 panic("vm_set_page_size: page size not a power of two"); 280 } 281 282 /* 283 * vm_page_blacklist_next: 284 * 285 * Find the next entry in the provided string of blacklist 286 * addresses. Entries are separated by space, comma, or newline. 287 * If an invalid integer is encountered then the rest of the 288 * string is skipped. Updates the list pointer to the next 289 * character, or NULL if the string is exhausted or invalid. 290 */ 291 static vm_paddr_t 292 vm_page_blacklist_next(char **list, char *end) 293 { 294 vm_paddr_t bad; 295 char *cp, *pos; 296 297 if (list == NULL || *list == NULL) 298 return (0); 299 if (**list =='\0') { 300 *list = NULL; 301 return (0); 302 } 303 304 /* 305 * If there's no end pointer then the buffer is coming from 306 * the kenv and we know it's null-terminated. 307 */ 308 if (end == NULL) 309 end = *list + strlen(*list); 310 311 /* Ensure that strtoq() won't walk off the end */ 312 if (*end != '\0') { 313 if (*end == '\n' || *end == ' ' || *end == ',') 314 *end = '\0'; 315 else { 316 printf("Blacklist not terminated, skipping\n"); 317 *list = NULL; 318 return (0); 319 } 320 } 321 322 for (pos = *list; *pos != '\0'; pos = cp) { 323 bad = strtoq(pos, &cp, 0); 324 if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') { 325 if (bad == 0) { 326 if (++cp < end) 327 continue; 328 else 329 break; 330 } 331 } else 332 break; 333 if (*cp == '\0' || ++cp >= end) 334 *list = NULL; 335 else 336 *list = cp; 337 return (trunc_page(bad)); 338 } 339 printf("Garbage in RAM blacklist, skipping\n"); 340 *list = NULL; 341 return (0); 342 } 343 344 bool 345 vm_page_blacklist_add(vm_paddr_t pa, bool verbose) 346 { 347 struct vm_domain *vmd; 348 vm_page_t m; 349 int ret; 350 351 m = vm_phys_paddr_to_vm_page(pa); 352 if (m == NULL) 353 return (true); /* page does not exist, no failure */ 354 355 vmd = vm_pagequeue_domain(m); 356 vm_domain_free_lock(vmd); 357 ret = vm_phys_unfree_page(m); 358 vm_domain_free_unlock(vmd); 359 if (ret != 0) { 360 vm_domain_freecnt_inc(vmd, -1); 361 TAILQ_INSERT_TAIL(&blacklist_head, m, listq); 362 if (verbose) 363 printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa); 364 } 365 return (ret); 366 } 367 368 /* 369 * vm_page_blacklist_check: 370 * 371 * Iterate through the provided string of blacklist addresses, pulling 372 * each entry out of the physical allocator free list and putting it 373 * onto a list for reporting via the vm.page_blacklist sysctl. 
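 *
 * As an illustration only (the addresses are made up), a loader.conf
 * setting such as
 *
 *	vm.blacklist="0x7f654000,0x7f655000"
 *
 * or a preloaded "ram_blacklist" file using the same space-, comma- or
 * newline-separated format keeps those two pages off the free lists.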
374 */ 375 static void 376 vm_page_blacklist_check(char *list, char *end) 377 { 378 vm_paddr_t pa; 379 char *next; 380 381 next = list; 382 while (next != NULL) { 383 if ((pa = vm_page_blacklist_next(&next, end)) == 0) 384 continue; 385 vm_page_blacklist_add(pa, bootverbose); 386 } 387 } 388 389 /* 390 * vm_page_blacklist_load: 391 * 392 * Search for a special module named "ram_blacklist". It'll be a 393 * plain text file provided by the user via the loader directive 394 * of the same name. 395 */ 396 static void 397 vm_page_blacklist_load(char **list, char **end) 398 { 399 void *mod; 400 u_char *ptr; 401 u_int len; 402 403 mod = NULL; 404 ptr = NULL; 405 406 mod = preload_search_by_type("ram_blacklist"); 407 if (mod != NULL) { 408 ptr = preload_fetch_addr(mod); 409 len = preload_fetch_size(mod); 410 } 411 *list = ptr; 412 if (ptr != NULL) 413 *end = ptr + len; 414 else 415 *end = NULL; 416 return; 417 } 418 419 static int 420 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS) 421 { 422 vm_page_t m; 423 struct sbuf sbuf; 424 int error, first; 425 426 first = 1; 427 error = sysctl_wire_old_buffer(req, 0); 428 if (error != 0) 429 return (error); 430 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 431 TAILQ_FOREACH(m, &blacklist_head, listq) { 432 sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",", 433 (uintmax_t)m->phys_addr); 434 first = 0; 435 } 436 error = sbuf_finish(&sbuf); 437 sbuf_delete(&sbuf); 438 return (error); 439 } 440 441 /* 442 * Initialize a dummy page for use in scans of the specified paging queue. 443 * In principle, this function only needs to set the flag PG_MARKER. 444 * Nonetheless, it write busies and initializes the hold count to one as 445 * safety precautions. 446 */ 447 static void 448 vm_page_init_marker(vm_page_t marker, int queue, uint8_t aflags) 449 { 450 451 bzero(marker, sizeof(*marker)); 452 marker->flags = PG_MARKER; 453 marker->aflags = aflags; 454 marker->busy_lock = VPB_SINGLE_EXCLUSIVER; 455 marker->queue = queue; 456 marker->hold_count = 1; 457 } 458 459 static void 460 vm_page_domain_init(int domain) 461 { 462 struct vm_domain *vmd; 463 struct vm_pagequeue *pq; 464 int i; 465 466 vmd = VM_DOMAIN(domain); 467 bzero(vmd, sizeof(*vmd)); 468 *__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) = 469 "vm inactive pagequeue"; 470 *__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) = 471 "vm active pagequeue"; 472 *__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) = 473 "vm laundry pagequeue"; 474 *__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) = 475 "vm unswappable pagequeue"; 476 vmd->vmd_domain = domain; 477 vmd->vmd_page_count = 0; 478 vmd->vmd_free_count = 0; 479 vmd->vmd_segs = 0; 480 vmd->vmd_oom = FALSE; 481 for (i = 0; i < PQ_COUNT; i++) { 482 pq = &vmd->vmd_pagequeues[i]; 483 TAILQ_INIT(&pq->pq_pl); 484 mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue", 485 MTX_DEF | MTX_DUPOK); 486 pq->pq_pdpages = 0; 487 vm_page_init_marker(&vmd->vmd_markers[i], i, 0); 488 } 489 mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF); 490 mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF); 491 snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain); 492 493 /* 494 * inacthead is used to provide FIFO ordering for LRU-bypassing 495 * insertions. 
496 */ 497 vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED); 498 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl, 499 &vmd->vmd_inacthead, plinks.q); 500 501 /* 502 * The clock pages are used to implement active queue scanning without 503 * requeues. Scans start at clock[0], which is advanced after the scan 504 * ends. When the two clock hands meet, they are reset and scanning 505 * resumes from the head of the queue. 506 */ 507 vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED); 508 vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED); 509 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl, 510 &vmd->vmd_clock[0], plinks.q); 511 TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl, 512 &vmd->vmd_clock[1], plinks.q); 513 } 514 515 /* 516 * Initialize a physical page in preparation for adding it to the free 517 * lists. 518 */ 519 static void 520 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind) 521 { 522 523 m->object = NULL; 524 m->wire_count = 0; 525 m->busy_lock = VPB_UNBUSIED; 526 m->hold_count = 0; 527 m->flags = m->aflags = 0; 528 m->phys_addr = pa; 529 m->queue = PQ_NONE; 530 m->psind = 0; 531 m->segind = segind; 532 m->order = VM_NFREEORDER; 533 m->pool = VM_FREEPOOL_DEFAULT; 534 m->valid = m->dirty = 0; 535 pmap_page_init(m); 536 } 537 538 /* 539 * vm_page_startup: 540 * 541 * Initializes the resident memory module. Allocates physical memory for 542 * bootstrapping UMA and some data structures that are used to manage 543 * physical pages. Initializes these structures, and populates the free 544 * page queues. 545 */ 546 vm_offset_t 547 vm_page_startup(vm_offset_t vaddr) 548 { 549 struct vm_phys_seg *seg; 550 vm_page_t m; 551 char *list, *listend; 552 vm_offset_t mapped; 553 vm_paddr_t end, high_avail, low_avail, new_end, page_range, size; 554 vm_paddr_t biggestsize, last_pa, pa; 555 u_long pagecount; 556 int biggestone, i, segind; 557 #ifdef WITNESS 558 int witness_size; 559 #endif 560 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE) 561 long ii; 562 #endif 563 564 biggestsize = 0; 565 biggestone = 0; 566 vaddr = round_page(vaddr); 567 568 for (i = 0; phys_avail[i + 1]; i += 2) { 569 phys_avail[i] = round_page(phys_avail[i]); 570 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 571 } 572 for (i = 0; phys_avail[i + 1]; i += 2) { 573 size = phys_avail[i + 1] - phys_avail[i]; 574 if (size > biggestsize) { 575 biggestone = i; 576 biggestsize = size; 577 } 578 } 579 580 end = phys_avail[biggestone+1]; 581 582 /* 583 * Initialize the page and queue locks. 584 */ 585 mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF); 586 for (i = 0; i < PA_LOCK_COUNT; i++) 587 mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF); 588 for (i = 0; i < vm_ndomains; i++) 589 vm_page_domain_init(i); 590 591 /* 592 * Allocate memory for use when boot strapping the kernel memory 593 * allocator. Tell UMA how many zones we are going to create 594 * before going fully functional. UMA will add its zones. 595 * 596 * VM startup zones: vmem, vmem_btag, VM OBJECT, RADIX NODE, MAP, 597 * KMAP ENTRY, MAP ENTRY, VMSPACE. 598 */ 599 boot_pages = uma_startup_count(8); 600 601 #ifndef UMA_MD_SMALL_ALLOC 602 /* vmem_startup() calls uma_prealloc(). */ 603 boot_pages += vmem_startup_count(); 604 /* vm_map_startup() calls uma_prealloc(). 
*/ 605 boot_pages += howmany(MAX_KMAP, 606 UMA_SLAB_SPACE / sizeof(struct vm_map)); 607 608 /* 609 * Before going fully functional kmem_init() does allocation 610 * from "KMAP ENTRY" and vmem_create() does allocation from "vmem". 611 */ 612 boot_pages += 2; 613 #endif 614 /* 615 * CTFLAG_RDTUN doesn't work during the early boot process, so we must 616 * manually fetch the value. 617 */ 618 TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages); 619 new_end = end - (boot_pages * UMA_SLAB_SIZE); 620 new_end = trunc_page(new_end); 621 mapped = pmap_map(&vaddr, new_end, end, 622 VM_PROT_READ | VM_PROT_WRITE); 623 bzero((void *)mapped, end - new_end); 624 uma_startup((void *)mapped, boot_pages); 625 626 #ifdef WITNESS 627 witness_size = round_page(witness_startup_count()); 628 new_end -= witness_size; 629 mapped = pmap_map(&vaddr, new_end, new_end + witness_size, 630 VM_PROT_READ | VM_PROT_WRITE); 631 bzero((void *)mapped, witness_size); 632 witness_startup((void *)mapped); 633 #endif 634 635 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ 636 defined(__i386__) || defined(__mips__) || defined(__riscv) 637 /* 638 * Allocate a bitmap to indicate that a random physical page 639 * needs to be included in a minidump. 640 * 641 * The amd64 port needs this to indicate which direct map pages 642 * need to be dumped, via calls to dump_add_page()/dump_drop_page(). 643 * 644 * However, i386 still needs this workspace internally within the 645 * minidump code. In theory, they are not needed on i386, but are 646 * included should the sf_buf code decide to use them. 647 */ 648 last_pa = 0; 649 for (i = 0; dump_avail[i + 1] != 0; i += 2) 650 if (dump_avail[i + 1] > last_pa) 651 last_pa = dump_avail[i + 1]; 652 page_range = last_pa / PAGE_SIZE; 653 vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY); 654 new_end -= vm_page_dump_size; 655 vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end, 656 new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE); 657 bzero((void *)vm_page_dump, vm_page_dump_size); 658 #else 659 (void)last_pa; 660 #endif 661 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \ 662 defined(__riscv) 663 /* 664 * Include the UMA bootstrap pages, witness pages and vm_page_dump 665 * in a crash dump. When pmap_map() uses the direct map, they are 666 * not automatically included. 667 */ 668 for (pa = new_end; pa < end; pa += PAGE_SIZE) 669 dump_add_page(pa); 670 #endif 671 phys_avail[biggestone + 1] = new_end; 672 #ifdef __amd64__ 673 /* 674 * Request that the physical pages underlying the message buffer be 675 * included in a crash dump. Since the message buffer is accessed 676 * through the direct map, they are not automatically included. 677 */ 678 pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr); 679 last_pa = pa + round_page(msgbufsize); 680 while (pa < last_pa) { 681 dump_add_page(pa); 682 pa += PAGE_SIZE; 683 } 684 #endif 685 /* 686 * Compute the number of pages of memory that will be available for 687 * use, taking into account the overhead of a page structure per page. 688 * In other words, solve 689 * "available physical memory" - round_page(page_range * 690 * sizeof(struct vm_page)) = page_range * PAGE_SIZE 691 * for page_range. 
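 *
 * Ignoring the rounding, the algebra works out to (a sketch of the
 * arithmetic, not additional code):
 *
 *	size - page_range * sizeof(struct vm_page) = page_range * PAGE_SIZE
 *	page_range = size / (PAGE_SIZE + sizeof(struct vm_page))
 *
 * which matches the expression "size / (PAGE_SIZE + sizeof(struct vm_page))"
 * used below.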
692 */ 693 low_avail = phys_avail[0]; 694 high_avail = phys_avail[1]; 695 for (i = 0; i < vm_phys_nsegs; i++) { 696 if (vm_phys_segs[i].start < low_avail) 697 low_avail = vm_phys_segs[i].start; 698 if (vm_phys_segs[i].end > high_avail) 699 high_avail = vm_phys_segs[i].end; 700 } 701 /* Skip the first chunk. It is already accounted for. */ 702 for (i = 2; phys_avail[i + 1] != 0; i += 2) { 703 if (phys_avail[i] < low_avail) 704 low_avail = phys_avail[i]; 705 if (phys_avail[i + 1] > high_avail) 706 high_avail = phys_avail[i + 1]; 707 } 708 first_page = low_avail / PAGE_SIZE; 709 #ifdef VM_PHYSSEG_SPARSE 710 size = 0; 711 for (i = 0; i < vm_phys_nsegs; i++) 712 size += vm_phys_segs[i].end - vm_phys_segs[i].start; 713 for (i = 0; phys_avail[i + 1] != 0; i += 2) 714 size += phys_avail[i + 1] - phys_avail[i]; 715 #elif defined(VM_PHYSSEG_DENSE) 716 size = high_avail - low_avail; 717 #else 718 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined." 719 #endif 720 721 #ifdef VM_PHYSSEG_DENSE 722 /* 723 * In the VM_PHYSSEG_DENSE case, the number of pages can account for 724 * the overhead of a page structure per page only if vm_page_array is 725 * allocated from the last physical memory chunk. Otherwise, we must 726 * allocate page structures representing the physical memory 727 * underlying vm_page_array, even though they will not be used. 728 */ 729 if (new_end != high_avail) 730 page_range = size / PAGE_SIZE; 731 else 732 #endif 733 { 734 page_range = size / (PAGE_SIZE + sizeof(struct vm_page)); 735 736 /* 737 * If the partial bytes remaining are large enough for 738 * a page (PAGE_SIZE) without a corresponding 739 * 'struct vm_page', then new_end will contain an 740 * extra page after subtracting the length of the VM 741 * page array. Compensate by subtracting an extra 742 * page from new_end. 743 */ 744 if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) { 745 if (new_end == high_avail) 746 high_avail -= PAGE_SIZE; 747 new_end -= PAGE_SIZE; 748 } 749 } 750 end = new_end; 751 752 /* 753 * Reserve an unmapped guard page to trap access to vm_page_array[-1]. 754 * However, because this page is allocated from KVM, out-of-bounds 755 * accesses using the direct map will not be trapped. 756 */ 757 vaddr += PAGE_SIZE; 758 759 /* 760 * Allocate physical memory for the page structures, and map it. 761 */ 762 new_end = trunc_page(end - page_range * sizeof(struct vm_page)); 763 mapped = pmap_map(&vaddr, new_end, end, 764 VM_PROT_READ | VM_PROT_WRITE); 765 vm_page_array = (vm_page_t)mapped; 766 vm_page_array_size = page_range; 767 768 #if VM_NRESERVLEVEL > 0 769 /* 770 * Allocate physical memory for the reservation management system's 771 * data structures, and map it. 772 */ 773 if (high_avail == end) 774 high_avail = new_end; 775 new_end = vm_reserv_startup(&vaddr, new_end, high_avail); 776 #endif 777 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \ 778 defined(__riscv) 779 /* 780 * Include vm_page_array and vm_reserv_array in a crash dump. 781 */ 782 for (pa = new_end; pa < end; pa += PAGE_SIZE) 783 dump_add_page(pa); 784 #endif 785 phys_avail[biggestone + 1] = new_end; 786 787 /* 788 * Add physical memory segments corresponding to the available 789 * physical pages. 790 */ 791 for (i = 0; phys_avail[i + 1] != 0; i += 2) 792 vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]); 793 794 /* 795 * Initialize the physical memory allocator. 
796 */ 797 vm_phys_init(); 798 799 /* 800 * Initialize the page structures and add every available page to the 801 * physical memory allocator's free lists. 802 */ 803 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE) 804 for (ii = 0; ii < vm_page_array_size; ii++) { 805 m = &vm_page_array[ii]; 806 vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0); 807 m->flags = PG_FICTITIOUS; 808 } 809 #endif 810 vm_cnt.v_page_count = 0; 811 for (segind = 0; segind < vm_phys_nsegs; segind++) { 812 seg = &vm_phys_segs[segind]; 813 for (m = seg->first_page, pa = seg->start; pa < seg->end; 814 m++, pa += PAGE_SIZE) 815 vm_page_init_page(m, pa, segind); 816 817 /* 818 * Add the segment to the free lists only if it is covered by 819 * one of the ranges in phys_avail. Because we've added the 820 * ranges to the vm_phys_segs array, we can assume that each 821 * segment is either entirely contained in one of the ranges, 822 * or doesn't overlap any of them. 823 */ 824 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 825 struct vm_domain *vmd; 826 827 if (seg->start < phys_avail[i] || 828 seg->end > phys_avail[i + 1]) 829 continue; 830 831 m = seg->first_page; 832 pagecount = (u_long)atop(seg->end - seg->start); 833 834 vmd = VM_DOMAIN(seg->domain); 835 vm_domain_free_lock(vmd); 836 vm_phys_enqueue_contig(m, pagecount); 837 vm_domain_free_unlock(vmd); 838 vm_domain_freecnt_inc(vmd, pagecount); 839 vm_cnt.v_page_count += (u_int)pagecount; 840 841 vmd = VM_DOMAIN(seg->domain); 842 vmd->vmd_page_count += (u_int)pagecount; 843 vmd->vmd_segs |= 1UL << m->segind; 844 break; 845 } 846 } 847 848 /* 849 * Remove blacklisted pages from the physical memory allocator. 850 */ 851 TAILQ_INIT(&blacklist_head); 852 vm_page_blacklist_load(&list, &listend); 853 vm_page_blacklist_check(list, listend); 854 855 list = kern_getenv("vm.blacklist"); 856 vm_page_blacklist_check(list, NULL); 857 858 freeenv(list); 859 #if VM_NRESERVLEVEL > 0 860 /* 861 * Initialize the reservation management system. 862 */ 863 vm_reserv_init(); 864 #endif 865 866 return (vaddr); 867 } 868 869 void 870 vm_page_reference(vm_page_t m) 871 { 872 873 vm_page_aflag_set(m, PGA_REFERENCED); 874 } 875 876 /* 877 * vm_page_busy_downgrade: 878 * 879 * Downgrade an exclusive busy page into a single shared busy page. 880 */ 881 void 882 vm_page_busy_downgrade(vm_page_t m) 883 { 884 u_int x; 885 bool locked; 886 887 vm_page_assert_xbusied(m); 888 locked = mtx_owned(vm_page_lockptr(m)); 889 890 for (;;) { 891 x = m->busy_lock; 892 x &= VPB_BIT_WAITERS; 893 if (x != 0 && !locked) 894 vm_page_lock(m); 895 if (atomic_cmpset_rel_int(&m->busy_lock, 896 VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1))) 897 break; 898 if (x != 0 && !locked) 899 vm_page_unlock(m); 900 } 901 if (x != 0) { 902 wakeup(m); 903 if (!locked) 904 vm_page_unlock(m); 905 } 906 } 907 908 /* 909 * vm_page_sbusied: 910 * 911 * Return a positive value if the page is shared busied, 0 otherwise. 912 */ 913 int 914 vm_page_sbusied(vm_page_t m) 915 { 916 u_int x; 917 918 x = m->busy_lock; 919 return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED); 920 } 921 922 /* 923 * vm_page_sunbusy: 924 * 925 * Shared unbusy a page. 
926 */ 927 void 928 vm_page_sunbusy(vm_page_t m) 929 { 930 u_int x; 931 932 vm_page_lock_assert(m, MA_NOTOWNED); 933 vm_page_assert_sbusied(m); 934 935 for (;;) { 936 x = m->busy_lock; 937 if (VPB_SHARERS(x) > 1) { 938 if (atomic_cmpset_int(&m->busy_lock, x, 939 x - VPB_ONE_SHARER)) 940 break; 941 continue; 942 } 943 if ((x & VPB_BIT_WAITERS) == 0) { 944 KASSERT(x == VPB_SHARERS_WORD(1), 945 ("vm_page_sunbusy: invalid lock state")); 946 if (atomic_cmpset_int(&m->busy_lock, 947 VPB_SHARERS_WORD(1), VPB_UNBUSIED)) 948 break; 949 continue; 950 } 951 KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS), 952 ("vm_page_sunbusy: invalid lock state for waiters")); 953 954 vm_page_lock(m); 955 if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) { 956 vm_page_unlock(m); 957 continue; 958 } 959 wakeup(m); 960 vm_page_unlock(m); 961 break; 962 } 963 } 964 965 /* 966 * vm_page_busy_sleep: 967 * 968 * Sleep and release the page lock, using the page pointer as wchan. 969 * This is used to implement the hard-path of busying mechanism. 970 * 971 * The given page must be locked. 972 * 973 * If nonshared is true, sleep only if the page is xbusy. 974 */ 975 void 976 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared) 977 { 978 u_int x; 979 980 vm_page_assert_locked(m); 981 982 x = m->busy_lock; 983 if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) || 984 ((x & VPB_BIT_WAITERS) == 0 && 985 !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) { 986 vm_page_unlock(m); 987 return; 988 } 989 msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0); 990 } 991 992 /* 993 * vm_page_trysbusy: 994 * 995 * Try to shared busy a page. 996 * If the operation succeeds 1 is returned otherwise 0. 997 * The operation never sleeps. 998 */ 999 int 1000 vm_page_trysbusy(vm_page_t m) 1001 { 1002 u_int x; 1003 1004 for (;;) { 1005 x = m->busy_lock; 1006 if ((x & VPB_BIT_SHARED) == 0) 1007 return (0); 1008 if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER)) 1009 return (1); 1010 } 1011 } 1012 1013 static void 1014 vm_page_xunbusy_locked(vm_page_t m) 1015 { 1016 1017 vm_page_assert_xbusied(m); 1018 vm_page_assert_locked(m); 1019 1020 atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED); 1021 /* There is a waiter, do wakeup() instead of vm_page_flash(). */ 1022 wakeup(m); 1023 } 1024 1025 void 1026 vm_page_xunbusy_maybelocked(vm_page_t m) 1027 { 1028 bool lockacq; 1029 1030 vm_page_assert_xbusied(m); 1031 1032 /* 1033 * Fast path for unbusy. If it succeeds, we know that there 1034 * are no waiters, so we do not need a wakeup. 1035 */ 1036 if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER, 1037 VPB_UNBUSIED)) 1038 return; 1039 1040 lockacq = !mtx_owned(vm_page_lockptr(m)); 1041 if (lockacq) 1042 vm_page_lock(m); 1043 vm_page_xunbusy_locked(m); 1044 if (lockacq) 1045 vm_page_unlock(m); 1046 } 1047 1048 /* 1049 * vm_page_xunbusy_hard: 1050 * 1051 * Called after the first try the exclusive unbusy of a page failed. 1052 * It is assumed that the waiters bit is on. 1053 */ 1054 void 1055 vm_page_xunbusy_hard(vm_page_t m) 1056 { 1057 1058 vm_page_assert_xbusied(m); 1059 1060 vm_page_lock(m); 1061 vm_page_xunbusy_locked(m); 1062 vm_page_unlock(m); 1063 } 1064 1065 /* 1066 * vm_page_flash: 1067 * 1068 * Wakeup anyone waiting for the page. 1069 * The ownership bits do not change. 1070 * 1071 * The given page must be locked. 
 */
void
vm_page_flash(vm_page_t m)
{
	u_int x;

	vm_page_lock_assert(m, MA_OWNED);

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_WAITERS) == 0)
			return;
		if (atomic_cmpset_int(&m->busy_lock, x,
		    x & (~VPB_BIT_WAITERS)))
			break;
	}
	wakeup(m);
}

/*
 * Avoid releasing and reacquiring the same page lock.
 */
void
vm_page_change_lock(vm_page_t m, struct mtx **mtx)
{
	struct mtx *mtx1;

	mtx1 = vm_page_lockptr(m);
	if (*mtx == mtx1)
		return;
	if (*mtx != NULL)
		mtx_unlock(*mtx);
	*mtx = mtx1;
	mtx_lock(mtx1);
}

/*
 * Keep a page from being freed by the page daemon.  This has much the same
 * effect as wiring, except at much lower overhead, and should be used only
 * for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
	--mem->hold_count;
	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
		vm_page_free_toq(mem);
}

/*
 * vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		vm_page_change_lock(*ma, &mtx);
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 * vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool".
*/ 1211 m->oflags = VPO_UNMANAGED; 1212 m->busy_lock = VPB_SINGLE_EXCLUSIVER; 1213 m->wire_count = 1; 1214 pmap_page_init(m); 1215 memattr: 1216 pmap_page_set_memattr(m, memattr); 1217 } 1218 1219 /* 1220 * vm_page_putfake: 1221 * 1222 * Release a fictitious page. 1223 */ 1224 void 1225 vm_page_putfake(vm_page_t m) 1226 { 1227 1228 KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m)); 1229 KASSERT((m->flags & PG_FICTITIOUS) != 0, 1230 ("vm_page_putfake: bad page %p", m)); 1231 uma_zfree(fakepg_zone, m); 1232 } 1233 1234 /* 1235 * vm_page_updatefake: 1236 * 1237 * Update the given fictitious page to the specified physical address and 1238 * memory attribute. 1239 */ 1240 void 1241 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr) 1242 { 1243 1244 KASSERT((m->flags & PG_FICTITIOUS) != 0, 1245 ("vm_page_updatefake: bad page %p", m)); 1246 m->phys_addr = paddr; 1247 pmap_page_set_memattr(m, memattr); 1248 } 1249 1250 /* 1251 * vm_page_free: 1252 * 1253 * Free a page. 1254 */ 1255 void 1256 vm_page_free(vm_page_t m) 1257 { 1258 1259 m->flags &= ~PG_ZERO; 1260 vm_page_free_toq(m); 1261 } 1262 1263 /* 1264 * vm_page_free_zero: 1265 * 1266 * Free a page to the zerod-pages queue 1267 */ 1268 void 1269 vm_page_free_zero(vm_page_t m) 1270 { 1271 1272 m->flags |= PG_ZERO; 1273 vm_page_free_toq(m); 1274 } 1275 1276 /* 1277 * Unbusy and handle the page queueing for a page from a getpages request that 1278 * was optionally read ahead or behind. 1279 */ 1280 void 1281 vm_page_readahead_finish(vm_page_t m) 1282 { 1283 1284 /* We shouldn't put invalid pages on queues. */ 1285 KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m)); 1286 1287 /* 1288 * Since the page is not the actually needed one, whether it should 1289 * be activated or deactivated is not obvious. Empirical results 1290 * have shown that deactivating the page is usually the best choice, 1291 * unless the page is wanted by another thread. 1292 */ 1293 vm_page_lock(m); 1294 if ((m->busy_lock & VPB_BIT_WAITERS) != 0) 1295 vm_page_activate(m); 1296 else 1297 vm_page_deactivate(m); 1298 vm_page_unlock(m); 1299 vm_page_xunbusy(m); 1300 } 1301 1302 /* 1303 * vm_page_sleep_if_busy: 1304 * 1305 * Sleep and release the page queues lock if the page is busied. 1306 * Returns TRUE if the thread slept. 1307 * 1308 * The given page must be unlocked and object containing it must 1309 * be locked. 1310 */ 1311 int 1312 vm_page_sleep_if_busy(vm_page_t m, const char *msg) 1313 { 1314 vm_object_t obj; 1315 1316 vm_page_lock_assert(m, MA_NOTOWNED); 1317 VM_OBJECT_ASSERT_WLOCKED(m->object); 1318 1319 if (vm_page_busied(m)) { 1320 /* 1321 * The page-specific object must be cached because page 1322 * identity can change during the sleep, causing the 1323 * re-lock of a different object. 1324 * It is assumed that a reference to the object is already 1325 * held by the callers. 1326 */ 1327 obj = m->object; 1328 vm_page_lock(m); 1329 VM_OBJECT_WUNLOCK(obj); 1330 vm_page_busy_sleep(m, msg, false); 1331 VM_OBJECT_WLOCK(obj); 1332 return (TRUE); 1333 } 1334 return (FALSE); 1335 } 1336 1337 /* 1338 * vm_page_dirty_KBI: [ internal use only ] 1339 * 1340 * Set all bits in the page's dirty field. 1341 * 1342 * The object containing the specified page must be locked if the 1343 * call is made from the machine-independent layer. 1344 * 1345 * See vm_page_clear_dirty_mask(). 1346 * 1347 * This function should only be called by vm_page_dirty(). 
1348 */ 1349 void 1350 vm_page_dirty_KBI(vm_page_t m) 1351 { 1352 1353 /* Refer to this operation by its public name. */ 1354 KASSERT(m->valid == VM_PAGE_BITS_ALL, 1355 ("vm_page_dirty: page is invalid!")); 1356 m->dirty = VM_PAGE_BITS_ALL; 1357 } 1358 1359 /* 1360 * vm_page_insert: [ internal use only ] 1361 * 1362 * Inserts the given mem entry into the object and object list. 1363 * 1364 * The object must be locked. 1365 */ 1366 int 1367 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) 1368 { 1369 vm_page_t mpred; 1370 1371 VM_OBJECT_ASSERT_WLOCKED(object); 1372 mpred = vm_radix_lookup_le(&object->rtree, pindex); 1373 return (vm_page_insert_after(m, object, pindex, mpred)); 1374 } 1375 1376 /* 1377 * vm_page_insert_after: 1378 * 1379 * Inserts the page "m" into the specified object at offset "pindex". 1380 * 1381 * The page "mpred" must immediately precede the offset "pindex" within 1382 * the specified object. 1383 * 1384 * The object must be locked. 1385 */ 1386 static int 1387 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex, 1388 vm_page_t mpred) 1389 { 1390 vm_page_t msucc; 1391 1392 VM_OBJECT_ASSERT_WLOCKED(object); 1393 KASSERT(m->object == NULL, 1394 ("vm_page_insert_after: page already inserted")); 1395 if (mpred != NULL) { 1396 KASSERT(mpred->object == object, 1397 ("vm_page_insert_after: object doesn't contain mpred")); 1398 KASSERT(mpred->pindex < pindex, 1399 ("vm_page_insert_after: mpred doesn't precede pindex")); 1400 msucc = TAILQ_NEXT(mpred, listq); 1401 } else 1402 msucc = TAILQ_FIRST(&object->memq); 1403 if (msucc != NULL) 1404 KASSERT(msucc->pindex > pindex, 1405 ("vm_page_insert_after: msucc doesn't succeed pindex")); 1406 1407 /* 1408 * Record the object/offset pair in this page 1409 */ 1410 m->object = object; 1411 m->pindex = pindex; 1412 1413 /* 1414 * Now link into the object's ordered list of backed pages. 1415 */ 1416 if (vm_radix_insert(&object->rtree, m)) { 1417 m->object = NULL; 1418 m->pindex = 0; 1419 return (1); 1420 } 1421 vm_page_insert_radixdone(m, object, mpred); 1422 return (0); 1423 } 1424 1425 /* 1426 * vm_page_insert_radixdone: 1427 * 1428 * Complete page "m" insertion into the specified object after the 1429 * radix trie hooking. 1430 * 1431 * The page "mpred" must precede the offset "m->pindex" within the 1432 * specified object. 1433 * 1434 * The object must be locked. 1435 */ 1436 static void 1437 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred) 1438 { 1439 1440 VM_OBJECT_ASSERT_WLOCKED(object); 1441 KASSERT(object != NULL && m->object == object, 1442 ("vm_page_insert_radixdone: page %p has inconsistent object", m)); 1443 if (mpred != NULL) { 1444 KASSERT(mpred->object == object, 1445 ("vm_page_insert_after: object doesn't contain mpred")); 1446 KASSERT(mpred->pindex < m->pindex, 1447 ("vm_page_insert_after: mpred doesn't precede pindex")); 1448 } 1449 1450 if (mpred != NULL) 1451 TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq); 1452 else 1453 TAILQ_INSERT_HEAD(&object->memq, m, listq); 1454 1455 /* 1456 * Show that the object has one more resident page. 1457 */ 1458 object->resident_page_count++; 1459 1460 /* 1461 * Hold the vnode until the last page is released. 1462 */ 1463 if (object->resident_page_count == 1 && object->type == OBJT_VNODE) 1464 vhold(object->handle); 1465 1466 /* 1467 * Since we are inserting a new and possibly dirty page, 1468 * update the object's OBJ_MIGHTBEDIRTY flag. 
1469 */ 1470 if (pmap_page_is_write_mapped(m)) 1471 vm_object_set_writeable_dirty(object); 1472 } 1473 1474 /* 1475 * vm_page_remove: 1476 * 1477 * Removes the specified page from its containing object, but does not 1478 * invalidate any backing storage. 1479 * 1480 * The object must be locked. The page must be locked if it is managed. 1481 */ 1482 void 1483 vm_page_remove(vm_page_t m) 1484 { 1485 vm_object_t object; 1486 vm_page_t mrem; 1487 1488 if ((m->oflags & VPO_UNMANAGED) == 0) 1489 vm_page_assert_locked(m); 1490 if ((object = m->object) == NULL) 1491 return; 1492 VM_OBJECT_ASSERT_WLOCKED(object); 1493 if (vm_page_xbusied(m)) 1494 vm_page_xunbusy_maybelocked(m); 1495 mrem = vm_radix_remove(&object->rtree, m->pindex); 1496 KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m)); 1497 1498 /* 1499 * Now remove from the object's list of backed pages. 1500 */ 1501 TAILQ_REMOVE(&object->memq, m, listq); 1502 1503 /* 1504 * And show that the object has one fewer resident page. 1505 */ 1506 object->resident_page_count--; 1507 1508 /* 1509 * The vnode may now be recycled. 1510 */ 1511 if (object->resident_page_count == 0 && object->type == OBJT_VNODE) 1512 vdrop(object->handle); 1513 1514 m->object = NULL; 1515 } 1516 1517 /* 1518 * vm_page_lookup: 1519 * 1520 * Returns the page associated with the object/offset 1521 * pair specified; if none is found, NULL is returned. 1522 * 1523 * The object must be locked. 1524 */ 1525 vm_page_t 1526 vm_page_lookup(vm_object_t object, vm_pindex_t pindex) 1527 { 1528 1529 VM_OBJECT_ASSERT_LOCKED(object); 1530 return (vm_radix_lookup(&object->rtree, pindex)); 1531 } 1532 1533 /* 1534 * vm_page_find_least: 1535 * 1536 * Returns the page associated with the object with least pindex 1537 * greater than or equal to the parameter pindex, or NULL. 1538 * 1539 * The object must be locked. 1540 */ 1541 vm_page_t 1542 vm_page_find_least(vm_object_t object, vm_pindex_t pindex) 1543 { 1544 vm_page_t m; 1545 1546 VM_OBJECT_ASSERT_LOCKED(object); 1547 if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex) 1548 m = vm_radix_lookup_ge(&object->rtree, pindex); 1549 return (m); 1550 } 1551 1552 /* 1553 * Returns the given page's successor (by pindex) within the object if it is 1554 * resident; if none is found, NULL is returned. 1555 * 1556 * The object must be locked. 1557 */ 1558 vm_page_t 1559 vm_page_next(vm_page_t m) 1560 { 1561 vm_page_t next; 1562 1563 VM_OBJECT_ASSERT_LOCKED(m->object); 1564 if ((next = TAILQ_NEXT(m, listq)) != NULL) { 1565 MPASS(next->object == m->object); 1566 if (next->pindex != m->pindex + 1) 1567 next = NULL; 1568 } 1569 return (next); 1570 } 1571 1572 /* 1573 * Returns the given page's predecessor (by pindex) within the object if it is 1574 * resident; if none is found, NULL is returned. 1575 * 1576 * The object must be locked. 1577 */ 1578 vm_page_t 1579 vm_page_prev(vm_page_t m) 1580 { 1581 vm_page_t prev; 1582 1583 VM_OBJECT_ASSERT_LOCKED(m->object); 1584 if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) { 1585 MPASS(prev->object == m->object); 1586 if (prev->pindex != m->pindex - 1) 1587 prev = NULL; 1588 } 1589 return (prev); 1590 } 1591 1592 /* 1593 * Uses the page mnew as a replacement for an existing page at index 1594 * pindex which must be already present in the object. 1595 * 1596 * The existing page must not be on a paging queue. 
 */
vm_page_t
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mold;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(mnew->object == NULL,
	    ("vm_page_replace: page %p already in object", mnew));
	KASSERT(mnew->queue == PQ_NONE,
	    ("vm_page_replace: new page %p is on a paging queue", mnew));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	mold = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mold->queue == PQ_NONE,
	    ("vm_page_replace: old page %p is on a paging queue", mold));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);

	mold->object = NULL;
	vm_page_xunbusy_maybelocked(mold);

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but OBJ_MIGHTBEDIRTY may need to be
	 * set for the new page.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	return (mold);
}

/*
 * vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons:  (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on mpred and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_lock(m);
	vm_page_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;
	vm_page_unlock(m);
	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	return (0);
}

/*
 * vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
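 *
 *	A minimal usage sketch (illustrative only; "obj" and "idx" are
 *	assumptions and real callers add retry and error handling), using
 *	one of the classes and flags listed below:
 *
 *		VM_OBJECT_WLOCK(obj);
 *		m = vm_page_alloc(obj, idx, VM_ALLOC_NORMAL);
 *		VM_OBJECT_WUNLOCK(obj);
 *		if (m != NULL) {
 *			...fill and use the exclusive busied page...
 *			vm_page_xunbusy(m);
 *		}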
1703 * 1704 * allocation classes: 1705 * VM_ALLOC_NORMAL normal process request 1706 * VM_ALLOC_SYSTEM system *really* needs a page 1707 * VM_ALLOC_INTERRUPT interrupt time request 1708 * 1709 * optional allocation flags: 1710 * VM_ALLOC_COUNT(number) the number of additional pages that the caller 1711 * intends to allocate 1712 * VM_ALLOC_NOBUSY do not exclusive busy the page 1713 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 1714 * VM_ALLOC_NOOBJ page is not associated with an object and 1715 * should not be exclusive busy 1716 * VM_ALLOC_SBUSY shared busy the allocated page 1717 * VM_ALLOC_WIRED wire the allocated page 1718 * VM_ALLOC_ZERO prefer a zeroed page 1719 */ 1720 vm_page_t 1721 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) 1722 { 1723 1724 return (vm_page_alloc_after(object, pindex, req, object != NULL ? 1725 vm_radix_lookup_le(&object->rtree, pindex) : NULL)); 1726 } 1727 1728 vm_page_t 1729 vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain, 1730 int req) 1731 { 1732 1733 return (vm_page_alloc_domain_after(object, pindex, domain, req, 1734 object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) : 1735 NULL)); 1736 } 1737 1738 /* 1739 * Allocate a page in the specified object with the given page index. To 1740 * optimize insertion of the page into the object, the caller must also specifiy 1741 * the resident page in the object with largest index smaller than the given 1742 * page index, or NULL if no such page exists. 1743 */ 1744 vm_page_t 1745 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, 1746 int req, vm_page_t mpred) 1747 { 1748 struct vm_domainset_iter di; 1749 vm_page_t m; 1750 int domain; 1751 1752 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 1753 do { 1754 m = vm_page_alloc_domain_after(object, pindex, domain, req, 1755 mpred); 1756 if (m != NULL) 1757 break; 1758 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 1759 1760 return (m); 1761 } 1762 1763 /* 1764 * Returns true if the number of free pages exceeds the minimum 1765 * for the request class and false otherwise. 1766 */ 1767 int 1768 vm_domain_allocate(struct vm_domain *vmd, int req, int npages) 1769 { 1770 u_int limit, old, new; 1771 1772 req = req & VM_ALLOC_CLASS_MASK; 1773 1774 /* 1775 * The page daemon is allowed to dig deeper into the free page list. 1776 */ 1777 if (curproc == pageproc && req != VM_ALLOC_INTERRUPT) 1778 req = VM_ALLOC_SYSTEM; 1779 if (req == VM_ALLOC_INTERRUPT) 1780 limit = 0; 1781 else if (req == VM_ALLOC_SYSTEM) 1782 limit = vmd->vmd_interrupt_free_min; 1783 else 1784 limit = vmd->vmd_free_reserved; 1785 1786 /* 1787 * Attempt to reserve the pages. Fail if we're below the limit. 1788 */ 1789 limit += npages; 1790 old = vmd->vmd_free_count; 1791 do { 1792 if (old < limit) 1793 return (0); 1794 new = old - npages; 1795 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0); 1796 1797 /* Wake the page daemon if we've crossed the threshold. */ 1798 if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old)) 1799 pagedaemon_wakeup(vmd->vmd_domain); 1800 1801 /* Only update bitsets on transitions. 
*/ 1802 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) || 1803 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe)) 1804 vm_domain_set(vmd); 1805 1806 return (1); 1807 } 1808 1809 vm_page_t 1810 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, 1811 int req, vm_page_t mpred) 1812 { 1813 struct vm_domain *vmd; 1814 vm_page_t m; 1815 int flags; 1816 1817 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && 1818 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && 1819 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 1820 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 1821 ("inconsistent object(%p)/req(%x)", object, req)); 1822 KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, 1823 ("Can't sleep and retry object insertion.")); 1824 KASSERT(mpred == NULL || mpred->pindex < pindex, 1825 ("mpred %p doesn't precede pindex 0x%jx", mpred, 1826 (uintmax_t)pindex)); 1827 if (object != NULL) 1828 VM_OBJECT_ASSERT_WLOCKED(object); 1829 1830 again: 1831 m = NULL; 1832 #if VM_NRESERVLEVEL > 0 1833 /* 1834 * Can we allocate the page from a reservation? 1835 */ 1836 if (vm_object_reserv(object) && 1837 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != 1838 NULL) { 1839 domain = vm_phys_domain(m); 1840 vmd = VM_DOMAIN(domain); 1841 goto found; 1842 } 1843 #endif 1844 vmd = VM_DOMAIN(domain); 1845 if (object != NULL && vmd->vmd_pgcache != NULL) { 1846 m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT); 1847 if (m != NULL) 1848 goto found; 1849 } 1850 if (vm_domain_allocate(vmd, req, 1)) { 1851 /* 1852 * If not, allocate it from the free page queues. 1853 */ 1854 vm_domain_free_lock(vmd); 1855 m = vm_phys_alloc_pages(domain, object != NULL ? 1856 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); 1857 vm_domain_free_unlock(vmd); 1858 if (m == NULL) { 1859 vm_domain_freecnt_inc(vmd, 1); 1860 #if VM_NRESERVLEVEL > 0 1861 if (vm_reserv_reclaim_inactive(domain)) 1862 goto again; 1863 #endif 1864 } 1865 } 1866 if (m == NULL) { 1867 /* 1868 * Not allocatable, give up. 1869 */ 1870 if (vm_domain_alloc_fail(vmd, object, req)) 1871 goto again; 1872 return (NULL); 1873 } 1874 1875 /* 1876 * At this point we had better have found a good page. 1877 */ 1878 KASSERT(m != NULL, ("missing page")); 1879 1880 found: 1881 vm_page_dequeue(m); 1882 vm_page_alloc_check(m); 1883 1884 /* 1885 * Initialize the page. Only the PG_ZERO flag is inherited. 1886 */ 1887 flags = 0; 1888 if ((req & VM_ALLOC_ZERO) != 0) 1889 flags = PG_ZERO; 1890 flags &= m->flags; 1891 if ((req & VM_ALLOC_NODUMP) != 0) 1892 flags |= PG_NODUMP; 1893 m->flags = flags; 1894 m->aflags = 0; 1895 m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? 1896 VPO_UNMANAGED : 0; 1897 m->busy_lock = VPB_UNBUSIED; 1898 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) 1899 m->busy_lock = VPB_SINGLE_EXCLUSIVER; 1900 if ((req & VM_ALLOC_SBUSY) != 0) 1901 m->busy_lock = VPB_SHARERS_WORD(1); 1902 if (req & VM_ALLOC_WIRED) { 1903 /* 1904 * The page lock is not required for wiring a page until that 1905 * page is inserted into the object. 1906 */ 1907 vm_wire_add(1); 1908 m->wire_count = 1; 1909 } 1910 m->act_count = 0; 1911 1912 if (object != NULL) { 1913 if (vm_page_insert_after(m, object, pindex, mpred)) { 1914 if (req & VM_ALLOC_WIRED) { 1915 vm_wire_sub(1); 1916 m->wire_count = 0; 1917 } 1918 KASSERT(m->object == NULL, ("page %p has object", m)); 1919 m->oflags = VPO_UNMANAGED; 1920 m->busy_lock = VPB_UNBUSIED; 1921 /* Don't change PG_ZERO. 
*/ 1922 vm_page_free_toq(m); 1923 if (req & VM_ALLOC_WAITFAIL) { 1924 VM_OBJECT_WUNLOCK(object); 1925 vm_radix_wait(); 1926 VM_OBJECT_WLOCK(object); 1927 } 1928 return (NULL); 1929 } 1930 1931 /* Ignore device objects; the pager sets "memattr" for them. */ 1932 if (object->memattr != VM_MEMATTR_DEFAULT && 1933 (object->flags & OBJ_FICTITIOUS) == 0) 1934 pmap_page_set_memattr(m, object->memattr); 1935 } else 1936 m->pindex = pindex; 1937 1938 return (m); 1939 } 1940 1941 /* 1942 * vm_page_alloc_contig: 1943 * 1944 * Allocate a contiguous set of physical pages of the given size "npages" 1945 * from the free lists. All of the physical pages must be at or above 1946 * the given physical address "low" and below the given physical address 1947 * "high". The given value "alignment" determines the alignment of the 1948 * first physical page in the set. If the given value "boundary" is 1949 * non-zero, then the set of physical pages cannot cross any physical 1950 * address boundary that is a multiple of that value. Both "alignment" 1951 * and "boundary" must be a power of two. 1952 * 1953 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT, 1954 * then the memory attribute setting for the physical pages is configured 1955 * to the object's memory attribute setting. Otherwise, the memory 1956 * attribute setting for the physical pages is configured to "memattr", 1957 * overriding the object's memory attribute setting. However, if the 1958 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the 1959 * memory attribute setting for the physical pages cannot be configured 1960 * to VM_MEMATTR_DEFAULT. 1961 * 1962 * The specified object may not contain fictitious pages. 1963 * 1964 * The caller must always specify an allocation class. 
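 *
 *	As an illustrative sketch only (the DMA-style constraints shown are
 *	assumptions, not requirements of this interface), a caller wanting
 *	"npages" wired, contiguous, object-less pages below 4GB might use
 *	the classes and flags listed below as:
 *
 *		m = vm_page_alloc_contig(NULL, 0,
 *		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
 *		    npages, 0, (vm_paddr_t)1 << 32, PAGE_SIZE, 0,
 *		    VM_MEMATTR_DEFAULT);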
1965 * 1966 * allocation classes: 1967 * VM_ALLOC_NORMAL normal process request 1968 * VM_ALLOC_SYSTEM system *really* needs a page 1969 * VM_ALLOC_INTERRUPT interrupt time request 1970 * 1971 * optional allocation flags: 1972 * VM_ALLOC_NOBUSY do not exclusive busy the page 1973 * VM_ALLOC_NODUMP do not include the page in a kernel core dump 1974 * VM_ALLOC_NOOBJ page is not associated with an object and 1975 * should not be exclusive busy 1976 * VM_ALLOC_SBUSY shared busy the allocated page 1977 * VM_ALLOC_WIRED wire the allocated page 1978 * VM_ALLOC_ZERO prefer a zeroed page 1979 */ 1980 vm_page_t 1981 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, 1982 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 1983 vm_paddr_t boundary, vm_memattr_t memattr) 1984 { 1985 struct vm_domainset_iter di; 1986 vm_page_t m; 1987 int domain; 1988 1989 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req); 1990 do { 1991 m = vm_page_alloc_contig_domain(object, pindex, domain, req, 1992 npages, low, high, alignment, boundary, memattr); 1993 if (m != NULL) 1994 break; 1995 } while (vm_domainset_iter_page(&di, object, &domain) == 0); 1996 1997 return (m); 1998 } 1999 2000 vm_page_t 2001 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, 2002 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, 2003 vm_paddr_t boundary, vm_memattr_t memattr) 2004 { 2005 struct vm_domain *vmd; 2006 vm_page_t m, m_ret, mpred; 2007 u_int busy_lock, flags, oflags; 2008 2009 mpred = NULL; /* XXX: pacify gcc */ 2010 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && 2011 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && 2012 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != 2013 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), 2014 ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object, 2015 req)); 2016 KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, 2017 ("Can't sleep and retry object insertion.")); 2018 if (object != NULL) { 2019 VM_OBJECT_ASSERT_WLOCKED(object); 2020 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, 2021 ("vm_page_alloc_contig: object %p has fictitious pages", 2022 object)); 2023 } 2024 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); 2025 2026 if (object != NULL) { 2027 mpred = vm_radix_lookup_le(&object->rtree, pindex); 2028 KASSERT(mpred == NULL || mpred->pindex != pindex, 2029 ("vm_page_alloc_contig: pindex already allocated")); 2030 } 2031 2032 /* 2033 * Can we allocate the pages without the number of free pages falling 2034 * below the lower bound for the allocation class? 2035 */ 2036 again: 2037 #if VM_NRESERVLEVEL > 0 2038 /* 2039 * Can we allocate the pages from a reservation? 2040 */ 2041 if (vm_object_reserv(object) && 2042 (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req, 2043 mpred, npages, low, high, alignment, boundary)) != NULL) { 2044 domain = vm_phys_domain(m_ret); 2045 vmd = VM_DOMAIN(domain); 2046 goto found; 2047 } 2048 #endif 2049 m_ret = NULL; 2050 vmd = VM_DOMAIN(domain); 2051 if (vm_domain_allocate(vmd, req, npages)) { 2052 /* 2053 * allocate them from the free page queues. 
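 * The domain's free page count was already debited by the successful
 * vm_domain_allocate() call above, so the per-domain free queue lock is
 * held only around the call into vm_phys_alloc_contig(); on failure the
 * count is credited back with vm_domain_freecnt_inc() below.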
2054 */ 2055 vm_domain_free_lock(vmd); 2056 m_ret = vm_phys_alloc_contig(domain, npages, low, high, 2057 alignment, boundary); 2058 vm_domain_free_unlock(vmd); 2059 if (m_ret == NULL) { 2060 vm_domain_freecnt_inc(vmd, npages); 2061 #if VM_NRESERVLEVEL > 0 2062 if (vm_reserv_reclaim_contig(domain, npages, low, 2063 high, alignment, boundary)) 2064 goto again; 2065 #endif 2066 } 2067 } 2068 if (m_ret == NULL) { 2069 if (vm_domain_alloc_fail(vmd, object, req)) 2070 goto again; 2071 return (NULL); 2072 } 2073 #if VM_NRESERVLEVEL > 0 2074 found: 2075 #endif 2076 for (m = m_ret; m < &m_ret[npages]; m++) { 2077 vm_page_dequeue(m); 2078 vm_page_alloc_check(m); 2079 } 2080 2081 /* 2082 * Initialize the pages. Only the PG_ZERO flag is inherited. 2083 */ 2084 flags = 0; 2085 if ((req & VM_ALLOC_ZERO) != 0) 2086 flags = PG_ZERO; 2087 if ((req & VM_ALLOC_NODUMP) != 0) 2088 flags |= PG_NODUMP; 2089 oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? 2090 VPO_UNMANAGED : 0; 2091 busy_lock = VPB_UNBUSIED; 2092 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) 2093 busy_lock = VPB_SINGLE_EXCLUSIVER; 2094 if ((req & VM_ALLOC_SBUSY) != 0) 2095 busy_lock = VPB_SHARERS_WORD(1); 2096 if ((req & VM_ALLOC_WIRED) != 0) 2097 vm_wire_add(npages); 2098 if (object != NULL) { 2099 if (object->memattr != VM_MEMATTR_DEFAULT && 2100 memattr == VM_MEMATTR_DEFAULT) 2101 memattr = object->memattr; 2102 } 2103 for (m = m_ret; m < &m_ret[npages]; m++) { 2104 m->aflags = 0; 2105 m->flags = (m->flags | PG_NODUMP) & flags; 2106 m->busy_lock = busy_lock; 2107 if ((req & VM_ALLOC_WIRED) != 0) 2108 m->wire_count = 1; 2109 m->act_count = 0; 2110 m->oflags = oflags; 2111 if (object != NULL) { 2112 if (vm_page_insert_after(m, object, pindex, mpred)) { 2113 if ((req & VM_ALLOC_WIRED) != 0) 2114 vm_wire_sub(npages); 2115 KASSERT(m->object == NULL, 2116 ("page %p has object", m)); 2117 mpred = m; 2118 for (m = m_ret; m < &m_ret[npages]; m++) { 2119 if (m <= mpred && 2120 (req & VM_ALLOC_WIRED) != 0) 2121 m->wire_count = 0; 2122 m->oflags = VPO_UNMANAGED; 2123 m->busy_lock = VPB_UNBUSIED; 2124 /* Don't change PG_ZERO. */ 2125 vm_page_free_toq(m); 2126 } 2127 if (req & VM_ALLOC_WAITFAIL) { 2128 VM_OBJECT_WUNLOCK(object); 2129 vm_radix_wait(); 2130 VM_OBJECT_WLOCK(object); 2131 } 2132 return (NULL); 2133 } 2134 mpred = m; 2135 } else 2136 m->pindex = pindex; 2137 if (memattr != VM_MEMATTR_DEFAULT) 2138 pmap_page_set_memattr(m, memattr); 2139 pindex++; 2140 } 2141 return (m_ret); 2142 } 2143 2144 /* 2145 * Check a page that has been freshly dequeued from a freelist. 2146 */ 2147 static void 2148 vm_page_alloc_check(vm_page_t m) 2149 { 2150 2151 KASSERT(m->object == NULL, ("page %p has object", m)); 2152 KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0, 2153 ("page %p has unexpected queue %d, flags %#x", 2154 m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK))); 2155 KASSERT(!vm_page_held(m), ("page %p is held", m)); 2156 KASSERT(!vm_page_busied(m), ("page %p is busy", m)); 2157 KASSERT(m->dirty == 0, ("page %p is dirty", m)); 2158 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 2159 ("page %p has unexpected memattr %d", 2160 m, pmap_page_get_memattr(m))); 2161 KASSERT(m->valid == 0, ("free page %p is valid", m)); 2162 } 2163 2164 /* 2165 * vm_page_alloc_freelist: 2166 * 2167 * Allocate a physical page from the specified free page list. 2168 * 2169 * The caller must always specify an allocation class. 
2170 * 2171 * allocation classes: 2172 * VM_ALLOC_NORMAL normal process request 2173 * VM_ALLOC_SYSTEM system *really* needs a page 2174 * VM_ALLOC_INTERRUPT interrupt time request 2175 * 2176 * optional allocation flags: 2177 * VM_ALLOC_COUNT(number) the number of additional pages that the caller 2178 * intends to allocate 2179 * VM_ALLOC_WIRED wire the allocated page 2180 * VM_ALLOC_ZERO prefer a zeroed page 2181 */ 2182 vm_page_t 2183 vm_page_alloc_freelist(int freelist, int req) 2184 { 2185 struct vm_domainset_iter di; 2186 vm_page_t m; 2187 int domain; 2188 2189 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2190 do { 2191 m = vm_page_alloc_freelist_domain(domain, freelist, req); 2192 if (m != NULL) 2193 break; 2194 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2195 2196 return (m); 2197 } 2198 2199 vm_page_t 2200 vm_page_alloc_freelist_domain(int domain, int freelist, int req) 2201 { 2202 struct vm_domain *vmd; 2203 vm_page_t m; 2204 u_int flags; 2205 2206 m = NULL; 2207 vmd = VM_DOMAIN(domain); 2208 again: 2209 if (vm_domain_allocate(vmd, req, 1)) { 2210 vm_domain_free_lock(vmd); 2211 m = vm_phys_alloc_freelist_pages(domain, freelist, 2212 VM_FREEPOOL_DIRECT, 0); 2213 vm_domain_free_unlock(vmd); 2214 if (m == NULL) 2215 vm_domain_freecnt_inc(vmd, 1); 2216 } 2217 if (m == NULL) { 2218 if (vm_domain_alloc_fail(vmd, NULL, req)) 2219 goto again; 2220 return (NULL); 2221 } 2222 vm_page_dequeue(m); 2223 vm_page_alloc_check(m); 2224 2225 /* 2226 * Initialize the page. Only the PG_ZERO flag is inherited. 2227 */ 2228 m->aflags = 0; 2229 flags = 0; 2230 if ((req & VM_ALLOC_ZERO) != 0) 2231 flags = PG_ZERO; 2232 m->flags &= flags; 2233 if ((req & VM_ALLOC_WIRED) != 0) { 2234 /* 2235 * The page lock is not required for wiring a page that does 2236 * not belong to an object. 2237 */ 2238 vm_wire_add(1); 2239 m->wire_count = 1; 2240 } 2241 /* Unmanaged pages don't use "act_count". */ 2242 m->oflags = VPO_UNMANAGED; 2243 return (m); 2244 } 2245 2246 static int 2247 vm_page_import(void *arg, void **store, int cnt, int domain, int flags) 2248 { 2249 struct vm_domain *vmd; 2250 int i; 2251 2252 vmd = arg; 2253 /* Only import if we can bring in a full bucket. */ 2254 if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt)) 2255 return (0); 2256 domain = vmd->vmd_domain; 2257 vm_domain_free_lock(vmd); 2258 i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt, 2259 (vm_page_t *)store); 2260 vm_domain_free_unlock(vmd); 2261 if (cnt != i) 2262 vm_domain_freecnt_inc(vmd, cnt - i); 2263 2264 return (i); 2265 } 2266 2267 static void 2268 vm_page_release(void *arg, void **store, int cnt) 2269 { 2270 struct vm_domain *vmd; 2271 vm_page_t m; 2272 int i; 2273 2274 vmd = arg; 2275 vm_domain_free_lock(vmd); 2276 for (i = 0; i < cnt; i++) { 2277 m = (vm_page_t)store[i]; 2278 vm_phys_free_pages(m, 0); 2279 } 2280 vm_domain_free_unlock(vmd); 2281 vm_domain_freecnt_inc(vmd, cnt); 2282 } 2283 2284 #define VPSC_ANY 0 /* No restrictions. */ 2285 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ 2286 #define VPSC_NOSUPER 2 /* Skip superpages. */ 2287 2288 /* 2289 * vm_page_scan_contig: 2290 * 2291 * Scan vm_page_array[] between the specified entries "m_start" and 2292 * "m_end" for a run of contiguous physical pages that satisfy the 2293 * specified conditions, and return the lowest page in the run. The 2294 * specified "alignment" determines the alignment of the lowest physical 2295 * page in the run. 
If the specified "boundary" is non-zero, then the 2296 * run of physical pages cannot span a physical address that is a 2297 * multiple of "boundary". 2298 * 2299 * "m_end" is never dereferenced, so it need not point to a vm_page 2300 * structure within vm_page_array[]. 2301 * 2302 * "npages" must be greater than zero. "m_start" and "m_end" must not 2303 * span a hole (or discontiguity) in the physical address space. Both 2304 * "alignment" and "boundary" must be a power of two. 2305 */ 2306 vm_page_t 2307 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, 2308 u_long alignment, vm_paddr_t boundary, int options) 2309 { 2310 struct mtx *m_mtx; 2311 vm_object_t object; 2312 vm_paddr_t pa; 2313 vm_page_t m, m_run; 2314 #if VM_NRESERVLEVEL > 0 2315 int level; 2316 #endif 2317 int m_inc, order, run_ext, run_len; 2318 2319 KASSERT(npages > 0, ("npages is 0")); 2320 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2321 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2322 m_run = NULL; 2323 run_len = 0; 2324 m_mtx = NULL; 2325 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { 2326 KASSERT((m->flags & PG_MARKER) == 0, 2327 ("page %p is PG_MARKER", m)); 2328 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->wire_count == 1, 2329 ("fictitious page %p has invalid wire count", m)); 2330 2331 /* 2332 * If the current page would be the start of a run, check its 2333 * physical address against the end, alignment, and boundary 2334 * conditions. If it doesn't satisfy these conditions, either 2335 * terminate the scan or advance to the next page that 2336 * satisfies the failed condition. 2337 */ 2338 if (run_len == 0) { 2339 KASSERT(m_run == NULL, ("m_run != NULL")); 2340 if (m + npages > m_end) 2341 break; 2342 pa = VM_PAGE_TO_PHYS(m); 2343 if ((pa & (alignment - 1)) != 0) { 2344 m_inc = atop(roundup2(pa, alignment) - pa); 2345 continue; 2346 } 2347 if (rounddown2(pa ^ (pa + ptoa(npages) - 1), 2348 boundary) != 0) { 2349 m_inc = atop(roundup2(pa, boundary) - pa); 2350 continue; 2351 } 2352 } else 2353 KASSERT(m_run != NULL, ("m_run == NULL")); 2354 2355 vm_page_change_lock(m, &m_mtx); 2356 m_inc = 1; 2357 retry: 2358 if (vm_page_held(m)) 2359 run_ext = 0; 2360 #if VM_NRESERVLEVEL > 0 2361 else if ((level = vm_reserv_level(m)) >= 0 && 2362 (options & VPSC_NORESERV) != 0) { 2363 run_ext = 0; 2364 /* Advance to the end of the reservation. */ 2365 pa = VM_PAGE_TO_PHYS(m); 2366 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2367 pa); 2368 } 2369 #endif 2370 else if ((object = m->object) != NULL) { 2371 /* 2372 * The page is considered eligible for relocation if 2373 * and only if it could be laundered or reclaimed by 2374 * the page daemon. 2375 */ 2376 if (!VM_OBJECT_TRYRLOCK(object)) { 2377 mtx_unlock(m_mtx); 2378 VM_OBJECT_RLOCK(object); 2379 mtx_lock(m_mtx); 2380 if (m->object != object) { 2381 /* 2382 * The page may have been freed. 2383 */ 2384 VM_OBJECT_RUNLOCK(object); 2385 goto retry; 2386 } else if (vm_page_held(m)) { 2387 run_ext = 0; 2388 goto unlock; 2389 } 2390 } 2391 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 2392 ("page %p is PG_UNHOLDFREE", m)); 2393 /* Don't care: PG_NODUMP, PG_ZERO. */ 2394 if (object->type != OBJT_DEFAULT && 2395 object->type != OBJT_SWAP && 2396 object->type != OBJT_VNODE) { 2397 run_ext = 0; 2398 #if VM_NRESERVLEVEL > 0 2399 } else if ((options & VPSC_NOSUPER) != 0 && 2400 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2401 run_ext = 0; 2402 /* Advance to the end of the superpage. 
*/ 2403 pa = VM_PAGE_TO_PHYS(m); 2404 m_inc = atop(roundup2(pa + 1, 2405 vm_reserv_size(level)) - pa); 2406 #endif 2407 } else if (object->memattr == VM_MEMATTR_DEFAULT && 2408 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { 2409 /* 2410 * The page is allocated but eligible for 2411 * relocation. Extend the current run by one 2412 * page. 2413 */ 2414 KASSERT(pmap_page_get_memattr(m) == 2415 VM_MEMATTR_DEFAULT, 2416 ("page %p has an unexpected memattr", m)); 2417 KASSERT((m->oflags & (VPO_SWAPINPROG | 2418 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2419 ("page %p has unexpected oflags", m)); 2420 /* Don't care: VPO_NOSYNC. */ 2421 run_ext = 1; 2422 } else 2423 run_ext = 0; 2424 unlock: 2425 VM_OBJECT_RUNLOCK(object); 2426 #if VM_NRESERVLEVEL > 0 2427 } else if (level >= 0) { 2428 /* 2429 * The page is reserved but not yet allocated. In 2430 * other words, it is still free. Extend the current 2431 * run by one page. 2432 */ 2433 run_ext = 1; 2434 #endif 2435 } else if ((order = m->order) < VM_NFREEORDER) { 2436 /* 2437 * The page is enqueued in the physical memory 2438 * allocator's free page queues. Moreover, it is the 2439 * first page in a power-of-two-sized run of 2440 * contiguous free pages. Add these pages to the end 2441 * of the current run, and jump ahead. 2442 */ 2443 run_ext = 1 << order; 2444 m_inc = 1 << order; 2445 } else { 2446 /* 2447 * Skip the page for one of the following reasons: (1) 2448 * It is enqueued in the physical memory allocator's 2449 * free page queues. However, it is not the first 2450 * page in a run of contiguous free pages. (This case 2451 * rarely occurs because the scan is performed in 2452 * ascending order.) (2) It is not reserved, and it is 2453 * transitioning from free to allocated. (Conversely, 2454 * the transition from allocated to free for managed 2455 * pages is blocked by the page lock.) (3) It is 2456 * allocated but not contained by an object and not 2457 * wired, e.g., allocated by Xen's balloon driver. 2458 */ 2459 run_ext = 0; 2460 } 2461 2462 /* 2463 * Extend or reset the current run of pages. 2464 */ 2465 if (run_ext > 0) { 2466 if (run_len == 0) 2467 m_run = m; 2468 run_len += run_ext; 2469 } else { 2470 if (run_len > 0) { 2471 m_run = NULL; 2472 run_len = 0; 2473 } 2474 } 2475 } 2476 if (m_mtx != NULL) 2477 mtx_unlock(m_mtx); 2478 if (run_len >= npages) 2479 return (m_run); 2480 return (NULL); 2481 } 2482 2483 /* 2484 * vm_page_reclaim_run: 2485 * 2486 * Try to relocate each of the allocated virtual pages within the 2487 * specified run of physical pages to a new physical address. Free the 2488 * physical pages underlying the relocated virtual pages. A virtual page 2489 * is relocatable if and only if it could be laundered or reclaimed by 2490 * the page daemon. Whenever possible, a virtual page is relocated to a 2491 * physical address above "high". 2492 * 2493 * Returns 0 if every physical page within the run was already free or 2494 * just freed by a successful relocation. Otherwise, returns a non-zero 2495 * value indicating why the last attempt to relocate a virtual page was 2496 * unsuccessful. 2497 * 2498 * "req_class" must be an allocation class. 
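 *
 * For illustration only (this example is not part of the original
 * comment), a caller typically derives "req_class" by masking its own
 * allocation request, exactly as vm_page_reclaim_contig_domain() does:
 *
 *	req_class = req & VM_ALLOC_CLASS_MASK;
 *	error = vm_page_reclaim_run(req_class, domain, npages, m_run, high);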
2499 */ 2500 static int 2501 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run, 2502 vm_paddr_t high) 2503 { 2504 struct vm_domain *vmd; 2505 struct mtx *m_mtx; 2506 struct spglist free; 2507 vm_object_t object; 2508 vm_paddr_t pa; 2509 vm_page_t m, m_end, m_new; 2510 int error, order, req; 2511 2512 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 2513 ("req_class is not an allocation class")); 2514 SLIST_INIT(&free); 2515 error = 0; 2516 m = m_run; 2517 m_end = m_run + npages; 2518 m_mtx = NULL; 2519 for (; error == 0 && m < m_end; m++) { 2520 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 2521 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 2522 2523 /* 2524 * Avoid releasing and reacquiring the same page lock. 2525 */ 2526 vm_page_change_lock(m, &m_mtx); 2527 retry: 2528 if (vm_page_held(m)) 2529 error = EBUSY; 2530 else if ((object = m->object) != NULL) { 2531 /* 2532 * The page is relocated if and only if it could be 2533 * laundered or reclaimed by the page daemon. 2534 */ 2535 if (!VM_OBJECT_TRYWLOCK(object)) { 2536 mtx_unlock(m_mtx); 2537 VM_OBJECT_WLOCK(object); 2538 mtx_lock(m_mtx); 2539 if (m->object != object) { 2540 /* 2541 * The page may have been freed. 2542 */ 2543 VM_OBJECT_WUNLOCK(object); 2544 goto retry; 2545 } else if (vm_page_held(m)) { 2546 error = EBUSY; 2547 goto unlock; 2548 } 2549 } 2550 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 2551 ("page %p is PG_UNHOLDFREE", m)); 2552 /* Don't care: PG_NODUMP, PG_ZERO. */ 2553 if (object->type != OBJT_DEFAULT && 2554 object->type != OBJT_SWAP && 2555 object->type != OBJT_VNODE) 2556 error = EINVAL; 2557 else if (object->memattr != VM_MEMATTR_DEFAULT) 2558 error = EINVAL; 2559 else if (vm_page_queue(m) != PQ_NONE && 2560 !vm_page_busied(m)) { 2561 KASSERT(pmap_page_get_memattr(m) == 2562 VM_MEMATTR_DEFAULT, 2563 ("page %p has an unexpected memattr", m)); 2564 KASSERT((m->oflags & (VPO_SWAPINPROG | 2565 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2566 ("page %p has unexpected oflags", m)); 2567 /* Don't care: VPO_NOSYNC. */ 2568 if (m->valid != 0) { 2569 /* 2570 * First, try to allocate a new page 2571 * that is above "high". Failing 2572 * that, try to allocate a new page 2573 * that is below "m_run". Allocate 2574 * the new page between the end of 2575 * "m_run" and "high" only as a last 2576 * resort. 2577 */ 2578 req = req_class | VM_ALLOC_NOOBJ; 2579 if ((m->flags & PG_NODUMP) != 0) 2580 req |= VM_ALLOC_NODUMP; 2581 if (trunc_page(high) != 2582 ~(vm_paddr_t)PAGE_MASK) { 2583 m_new = vm_page_alloc_contig( 2584 NULL, 0, req, 1, 2585 round_page(high), 2586 ~(vm_paddr_t)0, 2587 PAGE_SIZE, 0, 2588 VM_MEMATTR_DEFAULT); 2589 } else 2590 m_new = NULL; 2591 if (m_new == NULL) { 2592 pa = VM_PAGE_TO_PHYS(m_run); 2593 m_new = vm_page_alloc_contig( 2594 NULL, 0, req, 1, 2595 0, pa - 1, PAGE_SIZE, 0, 2596 VM_MEMATTR_DEFAULT); 2597 } 2598 if (m_new == NULL) { 2599 pa += ptoa(npages); 2600 m_new = vm_page_alloc_contig( 2601 NULL, 0, req, 1, 2602 pa, high, PAGE_SIZE, 0, 2603 VM_MEMATTR_DEFAULT); 2604 } 2605 if (m_new == NULL) { 2606 error = ENOMEM; 2607 goto unlock; 2608 } 2609 KASSERT(!vm_page_wired(m_new), 2610 ("page %p is wired", m_new)); 2611 2612 /* 2613 * Replace "m" with the new page. For 2614 * vm_page_replace(), "m" must be busy 2615 * and dequeued. Finally, change "m" 2616 * as if vm_page_free() was called. 
2617 */ 2618 if (object->ref_count != 0) 2619 pmap_remove_all(m); 2620 m_new->aflags = m->aflags & 2621 ~PGA_QUEUE_STATE_MASK; 2622 KASSERT(m_new->oflags == VPO_UNMANAGED, 2623 ("page %p is managed", m_new)); 2624 m_new->oflags = m->oflags & VPO_NOSYNC; 2625 pmap_copy_page(m, m_new); 2626 m_new->valid = m->valid; 2627 m_new->dirty = m->dirty; 2628 m->flags &= ~PG_ZERO; 2629 vm_page_xbusy(m); 2630 vm_page_dequeue(m); 2631 vm_page_replace_checked(m_new, object, 2632 m->pindex, m); 2633 if (vm_page_free_prep(m)) 2634 SLIST_INSERT_HEAD(&free, m, 2635 plinks.s.ss); 2636 2637 /* 2638 * The new page must be deactivated 2639 * before the object is unlocked. 2640 */ 2641 vm_page_change_lock(m_new, &m_mtx); 2642 vm_page_deactivate(m_new); 2643 } else { 2644 m->flags &= ~PG_ZERO; 2645 vm_page_dequeue(m); 2646 if (vm_page_free_prep(m)) 2647 SLIST_INSERT_HEAD(&free, m, 2648 plinks.s.ss); 2649 KASSERT(m->dirty == 0, 2650 ("page %p is dirty", m)); 2651 } 2652 } else 2653 error = EBUSY; 2654 unlock: 2655 VM_OBJECT_WUNLOCK(object); 2656 } else { 2657 MPASS(vm_phys_domain(m) == domain); 2658 vmd = VM_DOMAIN(domain); 2659 vm_domain_free_lock(vmd); 2660 order = m->order; 2661 if (order < VM_NFREEORDER) { 2662 /* 2663 * The page is enqueued in the physical memory 2664 * allocator's free page queues. Moreover, it 2665 * is the first page in a power-of-two-sized 2666 * run of contiguous free pages. Jump ahead 2667 * to the last page within that run, and 2668 * continue from there. 2669 */ 2670 m += (1 << order) - 1; 2671 } 2672 #if VM_NRESERVLEVEL > 0 2673 else if (vm_reserv_is_page_free(m)) 2674 order = 0; 2675 #endif 2676 vm_domain_free_unlock(vmd); 2677 if (order == VM_NFREEORDER) 2678 error = EINVAL; 2679 } 2680 } 2681 if (m_mtx != NULL) 2682 mtx_unlock(m_mtx); 2683 if ((m = SLIST_FIRST(&free)) != NULL) { 2684 int cnt; 2685 2686 vmd = VM_DOMAIN(domain); 2687 cnt = 0; 2688 vm_domain_free_lock(vmd); 2689 do { 2690 MPASS(vm_phys_domain(m) == domain); 2691 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2692 vm_phys_free_pages(m, 0); 2693 cnt++; 2694 } while ((m = SLIST_FIRST(&free)) != NULL); 2695 vm_domain_free_unlock(vmd); 2696 vm_domain_freecnt_inc(vmd, cnt); 2697 } 2698 return (error); 2699 } 2700 2701 #define NRUNS 16 2702 2703 CTASSERT(powerof2(NRUNS)); 2704 2705 #define RUN_INDEX(count) ((count) & (NRUNS - 1)) 2706 2707 #define MIN_RECLAIM 8 2708 2709 /* 2710 * vm_page_reclaim_contig: 2711 * 2712 * Reclaim allocated, contiguous physical memory satisfying the specified 2713 * conditions by relocating the virtual pages using that physical memory. 2714 * Returns true if reclamation is successful and false otherwise. Since 2715 * relocation requires the allocation of physical pages, reclamation may 2716 * fail due to a shortage of free pages. When reclamation fails, callers 2717 * are expected to perform vm_wait() before retrying a failed allocation 2718 * operation, e.g., vm_page_alloc_contig(). 2719 * 2720 * The caller must always specify an allocation class through "req". 2721 * 2722 * allocation classes: 2723 * VM_ALLOC_NORMAL normal process request 2724 * VM_ALLOC_SYSTEM system *really* needs a page 2725 * VM_ALLOC_INTERRUPT interrupt time request 2726 * 2727 * The optional allocation flags are ignored. 2728 * 2729 * "npages" must be greater than zero. Both "alignment" and "boundary" 2730 * must be a power of two. 
2731 */ 2732 bool 2733 vm_page_reclaim_contig_domain(int domain, int req, u_long npages, 2734 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) 2735 { 2736 struct vm_domain *vmd; 2737 vm_paddr_t curr_low; 2738 vm_page_t m_run, m_runs[NRUNS]; 2739 u_long count, reclaimed; 2740 int error, i, options, req_class; 2741 2742 KASSERT(npages > 0, ("npages is 0")); 2743 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2744 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2745 req_class = req & VM_ALLOC_CLASS_MASK; 2746 2747 /* 2748 * The page daemon is allowed to dig deeper into the free page list. 2749 */ 2750 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2751 req_class = VM_ALLOC_SYSTEM; 2752 2753 /* 2754 * Return if the number of free pages cannot satisfy the requested 2755 * allocation. 2756 */ 2757 vmd = VM_DOMAIN(domain); 2758 count = vmd->vmd_free_count; 2759 if (count < npages + vmd->vmd_free_reserved || (count < npages + 2760 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 2761 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 2762 return (false); 2763 2764 /* 2765 * Scan up to three times, relaxing the restrictions ("options") on 2766 * the reclamation of reservations and superpages each time. 2767 */ 2768 for (options = VPSC_NORESERV;;) { 2769 /* 2770 * Find the highest runs that satisfy the given constraints 2771 * and restrictions, and record them in "m_runs". 2772 */ 2773 curr_low = low; 2774 count = 0; 2775 for (;;) { 2776 m_run = vm_phys_scan_contig(domain, npages, curr_low, 2777 high, alignment, boundary, options); 2778 if (m_run == NULL) 2779 break; 2780 curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages); 2781 m_runs[RUN_INDEX(count)] = m_run; 2782 count++; 2783 } 2784 2785 /* 2786 * Reclaim the highest runs in LIFO (descending) order until 2787 * the number of reclaimed pages, "reclaimed", is at least 2788 * MIN_RECLAIM. Reset "reclaimed" each time because each 2789 * reclamation is idempotent, and runs will (likely) recur 2790 * from one scan to the next as restrictions are relaxed. 2791 */ 2792 reclaimed = 0; 2793 for (i = 0; count > 0 && i < NRUNS; i++) { 2794 count--; 2795 m_run = m_runs[RUN_INDEX(count)]; 2796 error = vm_page_reclaim_run(req_class, domain, npages, 2797 m_run, high); 2798 if (error == 0) { 2799 reclaimed += npages; 2800 if (reclaimed >= MIN_RECLAIM) 2801 return (true); 2802 } 2803 } 2804 2805 /* 2806 * Either relax the restrictions on the next scan or return if 2807 * the last scan had no restrictions. 2808 */ 2809 if (options == VPSC_NORESERV) 2810 options = VPSC_NOSUPER; 2811 else if (options == VPSC_NOSUPER) 2812 options = VPSC_ANY; 2813 else if (options == VPSC_ANY) 2814 return (reclaimed != 0); 2815 } 2816 } 2817 2818 bool 2819 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 2820 u_long alignment, vm_paddr_t boundary) 2821 { 2822 struct vm_domainset_iter di; 2823 int domain; 2824 bool ret; 2825 2826 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req); 2827 do { 2828 ret = vm_page_reclaim_contig_domain(domain, req, npages, low, 2829 high, alignment, boundary); 2830 if (ret) 2831 break; 2832 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0); 2833 2834 return (ret); 2835 } 2836 2837 /* 2838 * Set the domain in the appropriate page level domainset. 
2839 */ 2840 void 2841 vm_domain_set(struct vm_domain *vmd) 2842 { 2843 2844 mtx_lock(&vm_domainset_lock); 2845 if (!vmd->vmd_minset && vm_paging_min(vmd)) { 2846 vmd->vmd_minset = 1; 2847 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); 2848 } 2849 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { 2850 vmd->vmd_severeset = 1; 2851 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); 2852 } 2853 mtx_unlock(&vm_domainset_lock); 2854 } 2855 2856 /* 2857 * Clear the domain from the appropriate page level domainset. 2858 */ 2859 void 2860 vm_domain_clear(struct vm_domain *vmd) 2861 { 2862 2863 mtx_lock(&vm_domainset_lock); 2864 if (vmd->vmd_minset && !vm_paging_min(vmd)) { 2865 vmd->vmd_minset = 0; 2866 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); 2867 if (vm_min_waiters != 0) { 2868 vm_min_waiters = 0; 2869 wakeup(&vm_min_domains); 2870 } 2871 } 2872 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { 2873 vmd->vmd_severeset = 0; 2874 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); 2875 if (vm_severe_waiters != 0) { 2876 vm_severe_waiters = 0; 2877 wakeup(&vm_severe_domains); 2878 } 2879 } 2880 2881 /* 2882 * If pageout daemon needs pages, then tell it that there are 2883 * some free. 2884 */ 2885 if (vmd->vmd_pageout_pages_needed && 2886 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { 2887 wakeup(&vmd->vmd_pageout_pages_needed); 2888 vmd->vmd_pageout_pages_needed = 0; 2889 } 2890 2891 /* See comments in vm_wait_doms(). */ 2892 if (vm_pageproc_waiters) { 2893 vm_pageproc_waiters = 0; 2894 wakeup(&vm_pageproc_waiters); 2895 } 2896 mtx_unlock(&vm_domainset_lock); 2897 } 2898 2899 /* 2900 * Wait for free pages to exceed the min threshold globally. 2901 */ 2902 void 2903 vm_wait_min(void) 2904 { 2905 2906 mtx_lock(&vm_domainset_lock); 2907 while (vm_page_count_min()) { 2908 vm_min_waiters++; 2909 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); 2910 } 2911 mtx_unlock(&vm_domainset_lock); 2912 } 2913 2914 /* 2915 * Wait for free pages to exceed the severe threshold globally. 2916 */ 2917 void 2918 vm_wait_severe(void) 2919 { 2920 2921 mtx_lock(&vm_domainset_lock); 2922 while (vm_page_count_severe()) { 2923 vm_severe_waiters++; 2924 msleep(&vm_severe_domains, &vm_domainset_lock, PVM, 2925 "vmwait", 0); 2926 } 2927 mtx_unlock(&vm_domainset_lock); 2928 } 2929 2930 u_int 2931 vm_wait_count(void) 2932 { 2933 2934 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); 2935 } 2936 2937 void 2938 vm_wait_doms(const domainset_t *wdoms) 2939 { 2940 2941 /* 2942 * We use racey wakeup synchronization to avoid expensive global 2943 * locking for the pageproc when sleeping with a non-specific vm_wait. 2944 * To handle this, we only sleep for one tick in this instance. It 2945 * is expected that most allocations for the pageproc will come from 2946 * kmem or vm_page_grab* which will use the more specific and 2947 * race-free vm_wait_domain(). 2948 */ 2949 if (curproc == pageproc) { 2950 mtx_lock(&vm_domainset_lock); 2951 vm_pageproc_waiters++; 2952 msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP, 2953 "pageprocwait", 1); 2954 } else { 2955 /* 2956 * XXX Ideally we would wait only until the allocation could 2957 * be satisfied. This condition can cause new allocators to 2958 * consume all freed pages while old allocators wait. 
2959 */ 2960 mtx_lock(&vm_domainset_lock); 2961 if (vm_page_count_min_set(wdoms)) { 2962 vm_min_waiters++; 2963 msleep(&vm_min_domains, &vm_domainset_lock, 2964 PVM | PDROP, "vmwait", 0); 2965 } else 2966 mtx_unlock(&vm_domainset_lock); 2967 } 2968 } 2969 2970 /* 2971 * vm_wait_domain: 2972 * 2973 * Sleep until free pages are available for allocation. 2974 * - Called in various places after failed memory allocations. 2975 */ 2976 void 2977 vm_wait_domain(int domain) 2978 { 2979 struct vm_domain *vmd; 2980 domainset_t wdom; 2981 2982 vmd = VM_DOMAIN(domain); 2983 vm_domain_free_assert_unlocked(vmd); 2984 2985 if (curproc == pageproc) { 2986 mtx_lock(&vm_domainset_lock); 2987 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { 2988 vmd->vmd_pageout_pages_needed = 1; 2989 msleep(&vmd->vmd_pageout_pages_needed, 2990 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); 2991 } else 2992 mtx_unlock(&vm_domainset_lock); 2993 } else { 2994 if (pageproc == NULL) 2995 panic("vm_wait in early boot"); 2996 DOMAINSET_ZERO(&wdom); 2997 DOMAINSET_SET(vmd->vmd_domain, &wdom); 2998 vm_wait_doms(&wdom); 2999 } 3000 } 3001 3002 /* 3003 * vm_wait: 3004 * 3005 * Sleep until free pages are available for allocation in the 3006 * affinity domains of the obj. If obj is NULL, the domain set 3007 * for the calling thread is used. 3008 * Called in various places after failed memory allocations. 3009 */ 3010 void 3011 vm_wait(vm_object_t obj) 3012 { 3013 struct domainset *d; 3014 3015 d = NULL; 3016 3017 /* 3018 * Carefully fetch pointers only once: the struct domainset 3019 * itself is ummutable but the pointer might change. 3020 */ 3021 if (obj != NULL) 3022 d = obj->domain.dr_policy; 3023 if (d == NULL) 3024 d = curthread->td_domain.dr_policy; 3025 3026 vm_wait_doms(&d->ds_mask); 3027 } 3028 3029 /* 3030 * vm_domain_alloc_fail: 3031 * 3032 * Called when a page allocation function fails. Informs the 3033 * pagedaemon and performs the requested wait. Requires the 3034 * domain_free and object lock on entry. Returns with the 3035 * object lock held and free lock released. Returns an error when 3036 * retry is necessary. 3037 * 3038 */ 3039 static int 3040 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) 3041 { 3042 3043 vm_domain_free_assert_unlocked(vmd); 3044 3045 atomic_add_int(&vmd->vmd_pageout_deficit, 3046 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 3047 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { 3048 if (object != NULL) 3049 VM_OBJECT_WUNLOCK(object); 3050 vm_wait_domain(vmd->vmd_domain); 3051 if (object != NULL) 3052 VM_OBJECT_WLOCK(object); 3053 if (req & VM_ALLOC_WAITOK) 3054 return (EAGAIN); 3055 } 3056 3057 return (0); 3058 } 3059 3060 /* 3061 * vm_waitpfault: 3062 * 3063 * Sleep until free pages are available for allocation. 3064 * - Called only in vm_fault so that processes page faulting 3065 * can be easily tracked. 3066 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 3067 * processes will be able to grab memory first. Do not change 3068 * this balance without careful testing first. 3069 */ 3070 void 3071 vm_waitpfault(struct domainset *dset) 3072 { 3073 3074 /* 3075 * XXX Ideally we would wait only until the allocation could 3076 * be satisfied. This condition can cause new allocators to 3077 * consume all freed pages while old allocators wait. 
3078 */ 3079 mtx_lock(&vm_domainset_lock); 3080 if (vm_page_count_min_set(&dset->ds_mask)) { 3081 vm_min_waiters++; 3082 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP, 3083 "pfault", 0); 3084 } else 3085 mtx_unlock(&vm_domainset_lock); 3086 } 3087 3088 struct vm_pagequeue * 3089 vm_page_pagequeue(vm_page_t m) 3090 { 3091 3092 return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]); 3093 } 3094 3095 static struct mtx * 3096 vm_page_pagequeue_lockptr(vm_page_t m) 3097 { 3098 uint8_t queue; 3099 3100 if ((queue = atomic_load_8(&m->queue)) == PQ_NONE) 3101 return (NULL); 3102 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue].pq_mutex); 3103 } 3104 3105 static inline void 3106 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m) 3107 { 3108 struct vm_domain *vmd; 3109 uint8_t qflags; 3110 3111 CRITICAL_ASSERT(curthread); 3112 vm_pagequeue_assert_locked(pq); 3113 3114 /* 3115 * The page daemon is allowed to set m->queue = PQ_NONE without 3116 * the page queue lock held. In this case it is about to free the page, 3117 * which must not have any queue state. 3118 */ 3119 qflags = atomic_load_8(&m->aflags) & PGA_QUEUE_STATE_MASK; 3120 KASSERT(pq == vm_page_pagequeue(m) || qflags == 0, 3121 ("page %p doesn't belong to queue %p but has queue state %#x", 3122 m, pq, qflags)); 3123 3124 if ((qflags & PGA_DEQUEUE) != 0) { 3125 if (__predict_true((qflags & PGA_ENQUEUED) != 0)) { 3126 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3127 vm_pagequeue_cnt_dec(pq); 3128 } 3129 vm_page_dequeue_complete(m); 3130 } else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) { 3131 if ((qflags & PGA_ENQUEUED) != 0) 3132 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3133 else { 3134 vm_pagequeue_cnt_inc(pq); 3135 vm_page_aflag_set(m, PGA_ENQUEUED); 3136 } 3137 if ((qflags & PGA_REQUEUE_HEAD) != 0) { 3138 KASSERT(m->queue == PQ_INACTIVE, 3139 ("head enqueue not supported for page %p", m)); 3140 vmd = vm_pagequeue_domain(m); 3141 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 3142 } else 3143 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 3144 3145 /* 3146 * PGA_REQUEUE and PGA_REQUEUE_HEAD must be cleared after 3147 * setting PGA_ENQUEUED in order to synchronize with the 3148 * page daemon. 
3149 */ 3150 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD); 3151 } 3152 } 3153 3154 static void 3155 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq, 3156 uint8_t queue) 3157 { 3158 vm_page_t m; 3159 int i; 3160 3161 for (i = 0; i < bq->bq_cnt; i++) { 3162 m = bq->bq_pa[i]; 3163 if (__predict_false(m->queue != queue)) 3164 continue; 3165 vm_pqbatch_process_page(pq, m); 3166 } 3167 vm_batchqueue_init(bq); 3168 } 3169 3170 static void 3171 vm_pqbatch_submit_page(vm_page_t m, uint8_t queue) 3172 { 3173 struct vm_batchqueue *bq; 3174 struct vm_pagequeue *pq; 3175 int domain; 3176 3177 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3178 ("page %p is unmanaged", m)); 3179 KASSERT(mtx_owned(vm_page_lockptr(m)) || 3180 (m->object == NULL && (m->aflags & PGA_DEQUEUE) != 0), 3181 ("missing synchronization for page %p", m)); 3182 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); 3183 3184 domain = vm_phys_domain(m); 3185 pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue]; 3186 3187 critical_enter(); 3188 bq = DPCPU_PTR(pqbatch[domain][queue]); 3189 if (vm_batchqueue_insert(bq, m)) { 3190 critical_exit(); 3191 return; 3192 } 3193 if (!vm_pagequeue_trylock(pq)) { 3194 critical_exit(); 3195 vm_pagequeue_lock(pq); 3196 critical_enter(); 3197 bq = DPCPU_PTR(pqbatch[domain][queue]); 3198 } 3199 vm_pqbatch_process(pq, bq, queue); 3200 3201 /* 3202 * The page may have been logically dequeued before we acquired the 3203 * page queue lock. In this case, since we either hold the page lock 3204 * or the page is being freed, a different thread cannot be concurrently 3205 * enqueuing the page. 3206 */ 3207 if (__predict_true(m->queue == queue)) 3208 vm_pqbatch_process_page(pq, m); 3209 else { 3210 KASSERT(m->queue == PQ_NONE, 3211 ("invalid queue transition for page %p", m)); 3212 KASSERT((m->aflags & PGA_ENQUEUED) == 0, 3213 ("page %p is enqueued with invalid queue index", m)); 3214 vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK); 3215 } 3216 vm_pagequeue_unlock(pq); 3217 critical_exit(); 3218 } 3219 3220 /* 3221 * vm_page_drain_pqbatch: [ internal use only ] 3222 * 3223 * Force all per-CPU page queue batch queues to be drained. This is 3224 * intended for use in severe memory shortages, to ensure that pages 3225 * do not remain stuck in the batch queues. 3226 */ 3227 void 3228 vm_page_drain_pqbatch(void) 3229 { 3230 struct thread *td; 3231 struct vm_domain *vmd; 3232 struct vm_pagequeue *pq; 3233 int cpu, domain, queue; 3234 3235 td = curthread; 3236 CPU_FOREACH(cpu) { 3237 thread_lock(td); 3238 sched_bind(td, cpu); 3239 thread_unlock(td); 3240 3241 for (domain = 0; domain < vm_ndomains; domain++) { 3242 vmd = VM_DOMAIN(domain); 3243 for (queue = 0; queue < PQ_COUNT; queue++) { 3244 pq = &vmd->vmd_pagequeues[queue]; 3245 vm_pagequeue_lock(pq); 3246 critical_enter(); 3247 vm_pqbatch_process(pq, 3248 DPCPU_PTR(pqbatch[domain][queue]), queue); 3249 critical_exit(); 3250 vm_pagequeue_unlock(pq); 3251 } 3252 } 3253 } 3254 thread_lock(td); 3255 sched_unbind(td); 3256 thread_unlock(td); 3257 } 3258 3259 /* 3260 * Complete the logical removal of a page from a page queue. We must be 3261 * careful to synchronize with the page daemon, which may be concurrently 3262 * examining the page with only the page lock held. The page must not be 3263 * in a state where it appears to be logically enqueued. 
3264 */ 3265 static void 3266 vm_page_dequeue_complete(vm_page_t m) 3267 { 3268 3269 m->queue = PQ_NONE; 3270 atomic_thread_fence_rel(); 3271 vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK); 3272 } 3273 3274 /* 3275 * vm_page_dequeue_deferred: [ internal use only ] 3276 * 3277 * Request removal of the given page from its current page 3278 * queue. Physical removal from the queue may be deferred 3279 * indefinitely. 3280 * 3281 * The page must be locked. 3282 */ 3283 void 3284 vm_page_dequeue_deferred(vm_page_t m) 3285 { 3286 uint8_t queue; 3287 3288 vm_page_assert_locked(m); 3289 3290 if ((queue = vm_page_queue(m)) == PQ_NONE) 3291 return; 3292 vm_page_aflag_set(m, PGA_DEQUEUE); 3293 vm_pqbatch_submit_page(m, queue); 3294 } 3295 3296 /* 3297 * A variant of vm_page_dequeue_deferred() that does not assert the page 3298 * lock and is only to be called from vm_page_free_prep(). It is just an 3299 * open-coded implementation of vm_page_dequeue_deferred(). Because the 3300 * page is being freed, we can assume that nothing else is scheduling queue 3301 * operations on this page, so we get for free the mutual exclusion that 3302 * is otherwise provided by the page lock. 3303 */ 3304 static void 3305 vm_page_dequeue_deferred_free(vm_page_t m) 3306 { 3307 uint8_t queue; 3308 3309 KASSERT(m->object == NULL, ("page %p has an object reference", m)); 3310 3311 if ((m->aflags & PGA_DEQUEUE) != 0) 3312 return; 3313 atomic_thread_fence_acq(); 3314 if ((queue = m->queue) == PQ_NONE) 3315 return; 3316 vm_page_aflag_set(m, PGA_DEQUEUE); 3317 vm_pqbatch_submit_page(m, queue); 3318 } 3319 3320 /* 3321 * vm_page_dequeue: 3322 * 3323 * Remove the page from whichever page queue it's in, if any. 3324 * The page must either be locked or unallocated. This constraint 3325 * ensures that the queue state of the page will remain consistent 3326 * after this function returns. 3327 */ 3328 void 3329 vm_page_dequeue(vm_page_t m) 3330 { 3331 struct mtx *lock, *lock1; 3332 struct vm_pagequeue *pq; 3333 uint8_t aflags; 3334 3335 KASSERT(mtx_owned(vm_page_lockptr(m)) || m->order == VM_NFREEORDER, 3336 ("page %p is allocated and unlocked", m)); 3337 3338 for (;;) { 3339 lock = vm_page_pagequeue_lockptr(m); 3340 if (lock == NULL) { 3341 /* 3342 * A thread may be concurrently executing 3343 * vm_page_dequeue_complete(). Ensure that all queue 3344 * state is cleared before we return. 3345 */ 3346 aflags = atomic_load_8(&m->aflags); 3347 if ((aflags & PGA_QUEUE_STATE_MASK) == 0) 3348 return; 3349 KASSERT((aflags & PGA_DEQUEUE) != 0, 3350 ("page %p has unexpected queue state flags %#x", 3351 m, aflags)); 3352 3353 /* 3354 * Busy wait until the thread updating queue state is 3355 * finished. Such a thread must be executing in a 3356 * critical section. 3357 */ 3358 cpu_spinwait(); 3359 continue; 3360 } 3361 mtx_lock(lock); 3362 if ((lock1 = vm_page_pagequeue_lockptr(m)) == lock) 3363 break; 3364 mtx_unlock(lock); 3365 lock = lock1; 3366 } 3367 KASSERT(lock == vm_page_pagequeue_lockptr(m), 3368 ("%s: page %p migrated directly between queues", __func__, m)); 3369 KASSERT((m->aflags & PGA_DEQUEUE) != 0 || 3370 mtx_owned(vm_page_lockptr(m)), 3371 ("%s: queued unlocked page %p", __func__, m)); 3372 3373 if ((m->aflags & PGA_ENQUEUED) != 0) { 3374 pq = vm_page_pagequeue(m); 3375 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 3376 vm_pagequeue_cnt_dec(pq); 3377 } 3378 vm_page_dequeue_complete(m); 3379 mtx_unlock(lock); 3380 } 3381 3382 /* 3383 * Schedule the given page for insertion into the specified page queue. 
3384 * Physical insertion of the page may be deferred indefinitely. 3385 */ 3386 static void 3387 vm_page_enqueue(vm_page_t m, uint8_t queue) 3388 { 3389 3390 vm_page_assert_locked(m); 3391 KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0, 3392 ("%s: page %p is already enqueued", __func__, m)); 3393 3394 m->queue = queue; 3395 if ((m->aflags & PGA_REQUEUE) == 0) 3396 vm_page_aflag_set(m, PGA_REQUEUE); 3397 vm_pqbatch_submit_page(m, queue); 3398 } 3399 3400 /* 3401 * vm_page_requeue: [ internal use only ] 3402 * 3403 * Schedule a requeue of the given page. 3404 * 3405 * The page must be locked. 3406 */ 3407 void 3408 vm_page_requeue(vm_page_t m) 3409 { 3410 3411 vm_page_assert_locked(m); 3412 KASSERT(vm_page_queue(m) != PQ_NONE, 3413 ("%s: page %p is not logically enqueued", __func__, m)); 3414 3415 if ((m->aflags & PGA_REQUEUE) == 0) 3416 vm_page_aflag_set(m, PGA_REQUEUE); 3417 vm_pqbatch_submit_page(m, atomic_load_8(&m->queue)); 3418 } 3419 3420 /* 3421 * vm_page_activate: 3422 * 3423 * Put the specified page on the active list (if appropriate). 3424 * Ensure that act_count is at least ACT_INIT but do not otherwise 3425 * mess with it. 3426 * 3427 * The page must be locked. 3428 */ 3429 void 3430 vm_page_activate(vm_page_t m) 3431 { 3432 3433 vm_page_assert_locked(m); 3434 3435 if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0) 3436 return; 3437 if (vm_page_queue(m) == PQ_ACTIVE) { 3438 if (m->act_count < ACT_INIT) 3439 m->act_count = ACT_INIT; 3440 return; 3441 } 3442 3443 vm_page_dequeue(m); 3444 if (m->act_count < ACT_INIT) 3445 m->act_count = ACT_INIT; 3446 vm_page_enqueue(m, PQ_ACTIVE); 3447 } 3448 3449 /* 3450 * vm_page_free_prep: 3451 * 3452 * Prepares the given page to be put on the free list, 3453 * disassociating it from any VM object. The caller may return 3454 * the page to the free list only if this function returns true. 3455 * 3456 * The object must be locked. The page must be locked if it is 3457 * managed. 3458 */ 3459 bool 3460 vm_page_free_prep(vm_page_t m) 3461 { 3462 3463 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) 3464 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) { 3465 uint64_t *p; 3466 int i; 3467 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 3468 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) 3469 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", 3470 m, i, (uintmax_t)*p)); 3471 } 3472 #endif 3473 if ((m->oflags & VPO_UNMANAGED) == 0) { 3474 vm_page_lock_assert(m, MA_OWNED); 3475 KASSERT(!pmap_page_is_mapped(m), 3476 ("vm_page_free_prep: freeing mapped page %p", m)); 3477 } else 3478 KASSERT(m->queue == PQ_NONE, 3479 ("vm_page_free_prep: unmanaged page %p is queued", m)); 3480 VM_CNT_INC(v_tfree); 3481 3482 if (vm_page_sbusied(m)) 3483 panic("vm_page_free_prep: freeing busy page %p", m); 3484 3485 vm_page_remove(m); 3486 3487 /* 3488 * If fictitious remove object association and 3489 * return. 3490 */ 3491 if ((m->flags & PG_FICTITIOUS) != 0) { 3492 KASSERT(m->wire_count == 1, 3493 ("fictitious page %p is not wired", m)); 3494 KASSERT(m->queue == PQ_NONE, 3495 ("fictitious page %p is queued", m)); 3496 return (false); 3497 } 3498 3499 /* 3500 * Pages need not be dequeued before they are returned to the physical 3501 * memory allocator, but they must at least be marked for a deferred 3502 * dequeue. 
3503 */ 3504 if ((m->oflags & VPO_UNMANAGED) == 0) 3505 vm_page_dequeue_deferred_free(m); 3506 3507 m->valid = 0; 3508 vm_page_undirty(m); 3509 3510 if (vm_page_wired(m) != 0) 3511 panic("vm_page_free_prep: freeing wired page %p", m); 3512 if (m->hold_count != 0) { 3513 m->flags &= ~PG_ZERO; 3514 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 3515 ("vm_page_free_prep: freeing PG_UNHOLDFREE page %p", m)); 3516 m->flags |= PG_UNHOLDFREE; 3517 return (false); 3518 } 3519 3520 /* 3521 * Restore the default memory attribute to the page. 3522 */ 3523 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 3524 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 3525 3526 #if VM_NRESERVLEVEL > 0 3527 if (vm_reserv_free_page(m)) 3528 return (false); 3529 #endif 3530 3531 return (true); 3532 } 3533 3534 /* 3535 * vm_page_free_toq: 3536 * 3537 * Returns the given page to the free list, disassociating it 3538 * from any VM object. 3539 * 3540 * The object must be locked. The page must be locked if it is 3541 * managed. 3542 */ 3543 void 3544 vm_page_free_toq(vm_page_t m) 3545 { 3546 struct vm_domain *vmd; 3547 3548 if (!vm_page_free_prep(m)) 3549 return; 3550 3551 vmd = vm_pagequeue_domain(m); 3552 if (m->pool == VM_FREEPOOL_DEFAULT && vmd->vmd_pgcache != NULL) { 3553 uma_zfree(vmd->vmd_pgcache, m); 3554 return; 3555 } 3556 vm_domain_free_lock(vmd); 3557 vm_phys_free_pages(m, 0); 3558 vm_domain_free_unlock(vmd); 3559 vm_domain_freecnt_inc(vmd, 1); 3560 } 3561 3562 /* 3563 * vm_page_free_pages_toq: 3564 * 3565 * Returns a list of pages to the free list, disassociating it 3566 * from any VM object. In other words, this is equivalent to 3567 * calling vm_page_free_toq() for each page of a list of VM objects. 3568 * 3569 * The objects must be locked. The pages must be locked if it is 3570 * managed. 3571 */ 3572 void 3573 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count) 3574 { 3575 vm_page_t m; 3576 int count; 3577 3578 if (SLIST_EMPTY(free)) 3579 return; 3580 3581 count = 0; 3582 while ((m = SLIST_FIRST(free)) != NULL) { 3583 count++; 3584 SLIST_REMOVE_HEAD(free, plinks.s.ss); 3585 vm_page_free_toq(m); 3586 } 3587 3588 if (update_wire_count) 3589 vm_wire_sub(count); 3590 } 3591 3592 /* 3593 * vm_page_wire: 3594 * 3595 * Mark this page as wired down. If the page is fictitious, then 3596 * its wire count must remain one. 3597 * 3598 * The page must be locked. 3599 */ 3600 void 3601 vm_page_wire(vm_page_t m) 3602 { 3603 3604 vm_page_assert_locked(m); 3605 if ((m->flags & PG_FICTITIOUS) != 0) { 3606 KASSERT(m->wire_count == 1, 3607 ("vm_page_wire: fictitious page %p's wire count isn't one", 3608 m)); 3609 return; 3610 } 3611 if (!vm_page_wired(m)) { 3612 KASSERT((m->oflags & VPO_UNMANAGED) == 0 || 3613 m->queue == PQ_NONE, 3614 ("vm_page_wire: unmanaged page %p is queued", m)); 3615 vm_wire_add(1); 3616 } 3617 m->wire_count++; 3618 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); 3619 } 3620 3621 /* 3622 * vm_page_unwire: 3623 * 3624 * Release one wiring of the specified page, potentially allowing it to be 3625 * paged out. Returns TRUE if the number of wirings transitions to zero and 3626 * FALSE otherwise. 3627 * 3628 * Only managed pages belonging to an object can be paged out. If the number 3629 * of wirings transitions to zero and the page is eligible for page out, then 3630 * the page is added to the specified paging queue (unless PQ_NONE is 3631 * specified, in which case the page is dequeued if it belongs to a paging 3632 * queue). 
3633 * 3634 * If a page is fictitious, then its wire count must always be one. 3635 * 3636 * A managed page must be locked. 3637 */ 3638 bool 3639 vm_page_unwire(vm_page_t m, uint8_t queue) 3640 { 3641 bool unwired; 3642 3643 KASSERT(queue < PQ_COUNT || queue == PQ_NONE, 3644 ("vm_page_unwire: invalid queue %u request for page %p", 3645 queue, m)); 3646 if ((m->oflags & VPO_UNMANAGED) == 0) 3647 vm_page_assert_locked(m); 3648 3649 unwired = vm_page_unwire_noq(m); 3650 if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL) 3651 return (unwired); 3652 3653 if (vm_page_queue(m) == queue) { 3654 if (queue == PQ_ACTIVE) 3655 vm_page_reference(m); 3656 else if (queue != PQ_NONE) 3657 vm_page_requeue(m); 3658 } else { 3659 vm_page_dequeue(m); 3660 if (queue != PQ_NONE) { 3661 vm_page_enqueue(m, queue); 3662 if (queue == PQ_ACTIVE) 3663 /* Initialize act_count. */ 3664 vm_page_activate(m); 3665 } 3666 } 3667 return (unwired); 3668 } 3669 3670 /* 3671 * 3672 * vm_page_unwire_noq: 3673 * 3674 * Unwire a page without (re-)inserting it into a page queue. It is up 3675 * to the caller to enqueue, requeue, or free the page as appropriate. 3676 * In most cases, vm_page_unwire() should be used instead. 3677 */ 3678 bool 3679 vm_page_unwire_noq(vm_page_t m) 3680 { 3681 3682 if ((m->oflags & VPO_UNMANAGED) == 0) 3683 vm_page_assert_locked(m); 3684 if ((m->flags & PG_FICTITIOUS) != 0) { 3685 KASSERT(m->wire_count == 1, 3686 ("vm_page_unwire: fictitious page %p's wire count isn't one", m)); 3687 return (false); 3688 } 3689 if (!vm_page_wired(m)) 3690 panic("vm_page_unwire: page %p's wire count is zero", m); 3691 m->wire_count--; 3692 if (m->wire_count == 0) { 3693 vm_wire_sub(1); 3694 return (true); 3695 } else 3696 return (false); 3697 } 3698 3699 /* 3700 * Move the specified page to the tail of the inactive queue, or requeue 3701 * the page if it is already in the inactive queue. 3702 * 3703 * The page must be locked. 3704 */ 3705 void 3706 vm_page_deactivate(vm_page_t m) 3707 { 3708 3709 vm_page_assert_locked(m); 3710 3711 if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0) 3712 return; 3713 3714 if (!vm_page_inactive(m)) { 3715 vm_page_dequeue(m); 3716 vm_page_enqueue(m, PQ_INACTIVE); 3717 } else 3718 vm_page_requeue(m); 3719 } 3720 3721 /* 3722 * Move the specified page close to the head of the inactive queue, 3723 * bypassing LRU. A marker page is used to maintain FIFO ordering. 3724 * As with regular enqueues, we use a per-CPU batch queue to reduce 3725 * contention on the page queue lock. 3726 * 3727 * The page must be locked. 3728 */ 3729 void 3730 vm_page_deactivate_noreuse(vm_page_t m) 3731 { 3732 3733 vm_page_assert_locked(m); 3734 3735 if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0) 3736 return; 3737 3738 if (!vm_page_inactive(m)) { 3739 vm_page_dequeue(m); 3740 m->queue = PQ_INACTIVE; 3741 } 3742 if ((m->aflags & PGA_REQUEUE_HEAD) == 0) 3743 vm_page_aflag_set(m, PGA_REQUEUE_HEAD); 3744 vm_pqbatch_submit_page(m, PQ_INACTIVE); 3745 } 3746 3747 /* 3748 * vm_page_launder 3749 * 3750 * Put a page in the laundry, or requeue it if it is already there. 3751 */ 3752 void 3753 vm_page_launder(vm_page_t m) 3754 { 3755 3756 vm_page_assert_locked(m); 3757 if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0) 3758 return; 3759 3760 if (vm_page_in_laundry(m)) 3761 vm_page_requeue(m); 3762 else { 3763 vm_page_dequeue(m); 3764 vm_page_enqueue(m, PQ_LAUNDRY); 3765 } 3766 } 3767 3768 /* 3769 * vm_page_unswappable 3770 * 3771 * Put a page in the PQ_UNSWAPPABLE holding queue. 
3772 */ 3773 void 3774 vm_page_unswappable(vm_page_t m) 3775 { 3776 3777 vm_page_assert_locked(m); 3778 KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0, 3779 ("page %p already unswappable", m)); 3780 3781 vm_page_dequeue(m); 3782 vm_page_enqueue(m, PQ_UNSWAPPABLE); 3783 } 3784 3785 /* 3786 * Attempt to free the page. If it cannot be freed, do nothing. Returns true 3787 * if the page is freed and false otherwise. 3788 * 3789 * The page must be managed. The page and its containing object must be 3790 * locked. 3791 */ 3792 bool 3793 vm_page_try_to_free(vm_page_t m) 3794 { 3795 3796 vm_page_assert_locked(m); 3797 VM_OBJECT_ASSERT_WLOCKED(m->object); 3798 KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m)); 3799 if (m->dirty != 0 || vm_page_held(m) || vm_page_busied(m)) 3800 return (false); 3801 if (m->object->ref_count != 0) { 3802 pmap_remove_all(m); 3803 if (m->dirty != 0) 3804 return (false); 3805 } 3806 vm_page_free(m); 3807 return (true); 3808 } 3809 3810 /* 3811 * vm_page_advise 3812 * 3813 * Apply the specified advice to the given page. 3814 * 3815 * The object and page must be locked. 3816 */ 3817 void 3818 vm_page_advise(vm_page_t m, int advice) 3819 { 3820 3821 vm_page_assert_locked(m); 3822 VM_OBJECT_ASSERT_WLOCKED(m->object); 3823 if (advice == MADV_FREE) 3824 /* 3825 * Mark the page clean. This will allow the page to be freed 3826 * without first paging it out. MADV_FREE pages are often 3827 * quickly reused by malloc(3), so we do not do anything that 3828 * would result in a page fault on a later access. 3829 */ 3830 vm_page_undirty(m); 3831 else if (advice != MADV_DONTNEED) { 3832 if (advice == MADV_WILLNEED) 3833 vm_page_activate(m); 3834 return; 3835 } 3836 3837 /* 3838 * Clear any references to the page. Otherwise, the page daemon will 3839 * immediately reactivate the page. 3840 */ 3841 vm_page_aflag_clear(m, PGA_REFERENCED); 3842 3843 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) 3844 vm_page_dirty(m); 3845 3846 /* 3847 * Place clean pages near the head of the inactive queue rather than 3848 * the tail, thus defeating the queue's LRU operation and ensuring that 3849 * the page will be reused quickly. Dirty pages not already in the 3850 * laundry are moved there. 3851 */ 3852 if (m->dirty == 0) 3853 vm_page_deactivate_noreuse(m); 3854 else if (!vm_page_in_laundry(m)) 3855 vm_page_launder(m); 3856 } 3857 3858 /* 3859 * Grab a page, waiting until we are waken up due to the page 3860 * changing state. We keep on waiting, if the page continues 3861 * to be in the object. If the page doesn't exist, first allocate it 3862 * and then conditionally zero it. 3863 * 3864 * This routine may sleep. 3865 * 3866 * The object must be locked on entry. The lock will, however, be released 3867 * and reacquired if the routine sleeps. 3868 */ 3869 vm_page_t 3870 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 3871 { 3872 vm_page_t m; 3873 int sleep; 3874 int pflags; 3875 3876 VM_OBJECT_ASSERT_WLOCKED(object); 3877 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 3878 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 3879 ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch")); 3880 pflags = allocflags & 3881 ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); 3882 if ((allocflags & VM_ALLOC_NOWAIT) == 0) 3883 pflags |= VM_ALLOC_WAITFAIL; 3884 retrylookup: 3885 if ((m = vm_page_lookup(object, pindex)) != NULL) { 3886 sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ? 
3887 vm_page_xbusied(m) : vm_page_busied(m); 3888 if (sleep) { 3889 if ((allocflags & VM_ALLOC_NOWAIT) != 0) 3890 return (NULL); 3891 /* 3892 * Reference the page before unlocking and 3893 * sleeping so that the page daemon is less 3894 * likely to reclaim it. 3895 */ 3896 vm_page_aflag_set(m, PGA_REFERENCED); 3897 vm_page_lock(m); 3898 VM_OBJECT_WUNLOCK(object); 3899 vm_page_busy_sleep(m, "pgrbwt", (allocflags & 3900 VM_ALLOC_IGN_SBUSY) != 0); 3901 VM_OBJECT_WLOCK(object); 3902 goto retrylookup; 3903 } else { 3904 if ((allocflags & VM_ALLOC_WIRED) != 0) { 3905 vm_page_lock(m); 3906 vm_page_wire(m); 3907 vm_page_unlock(m); 3908 } 3909 if ((allocflags & 3910 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) 3911 vm_page_xbusy(m); 3912 if ((allocflags & VM_ALLOC_SBUSY) != 0) 3913 vm_page_sbusy(m); 3914 return (m); 3915 } 3916 } 3917 m = vm_page_alloc(object, pindex, pflags); 3918 if (m == NULL) { 3919 if ((allocflags & VM_ALLOC_NOWAIT) != 0) 3920 return (NULL); 3921 goto retrylookup; 3922 } 3923 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 3924 pmap_zero_page(m); 3925 return (m); 3926 } 3927 3928 /* 3929 * Return the specified range of pages from the given object. For each 3930 * page offset within the range, if a page already exists within the object 3931 * at that offset and it is busy, then wait for it to change state. If, 3932 * instead, the page doesn't exist, then allocate it. 3933 * 3934 * The caller must always specify an allocation class. 3935 * 3936 * allocation classes: 3937 * VM_ALLOC_NORMAL normal process request 3938 * VM_ALLOC_SYSTEM system *really* needs the pages 3939 * 3940 * The caller must always specify that the pages are to be busied and/or 3941 * wired. 3942 * 3943 * optional allocation flags: 3944 * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages 3945 * VM_ALLOC_NOBUSY do not exclusive busy the page 3946 * VM_ALLOC_NOWAIT do not sleep 3947 * VM_ALLOC_SBUSY set page to sbusy state 3948 * VM_ALLOC_WIRED wire the pages 3949 * VM_ALLOC_ZERO zero and validate any invalid pages 3950 * 3951 * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it 3952 * may return a partial prefix of the requested range. 3953 */ 3954 int 3955 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags, 3956 vm_page_t *ma, int count) 3957 { 3958 vm_page_t m, mpred; 3959 int pflags; 3960 int i; 3961 bool sleep; 3962 3963 VM_OBJECT_ASSERT_WLOCKED(object); 3964 KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0, 3965 ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed")); 3966 KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 || 3967 (allocflags & VM_ALLOC_WIRED) != 0, 3968 ("vm_page_grab_pages: the pages must be busied or wired")); 3969 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || 3970 (allocflags & VM_ALLOC_IGN_SBUSY) != 0, 3971 ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch")); 3972 if (count == 0) 3973 return (0); 3974 pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | 3975 VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY); 3976 if ((allocflags & VM_ALLOC_NOWAIT) == 0) 3977 pflags |= VM_ALLOC_WAITFAIL; 3978 i = 0; 3979 retrylookup: 3980 m = vm_radix_lookup_le(&object->rtree, pindex + i); 3981 if (m == NULL || m->pindex != pindex + i) { 3982 mpred = m; 3983 m = NULL; 3984 } else 3985 mpred = TAILQ_PREV(m, pglist, listq); 3986 for (; i < count; i++) { 3987 if (m != NULL) { 3988 sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ? 
vm_page_xbusied(m) : vm_page_busied(m); 3990 if (sleep) { 3991 if ((allocflags & VM_ALLOC_NOWAIT) != 0) 3992 break; 3993 /* 3994 * Reference the page before unlocking and 3995 * sleeping so that the page daemon is less 3996 * likely to reclaim it. 3997 */ 3998 vm_page_aflag_set(m, PGA_REFERENCED); 3999 vm_page_lock(m); 4000 VM_OBJECT_WUNLOCK(object); 4001 vm_page_busy_sleep(m, "grbmaw", (allocflags & 4002 VM_ALLOC_IGN_SBUSY) != 0); 4003 VM_OBJECT_WLOCK(object); 4004 goto retrylookup; 4005 } 4006 if ((allocflags & VM_ALLOC_WIRED) != 0) { 4007 vm_page_lock(m); 4008 vm_page_wire(m); 4009 vm_page_unlock(m); 4010 } 4011 if ((allocflags & (VM_ALLOC_NOBUSY | 4012 VM_ALLOC_SBUSY)) == 0) 4013 vm_page_xbusy(m); 4014 if ((allocflags & VM_ALLOC_SBUSY) != 0) 4015 vm_page_sbusy(m); 4016 } else { 4017 m = vm_page_alloc_after(object, pindex + i, 4018 pflags | VM_ALLOC_COUNT(count - i), mpred); 4019 if (m == NULL) { 4020 if ((allocflags & VM_ALLOC_NOWAIT) != 0) 4021 break; 4022 goto retrylookup; 4023 } 4024 } 4025 if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) { 4026 if ((m->flags & PG_ZERO) == 0) 4027 pmap_zero_page(m); 4028 m->valid = VM_PAGE_BITS_ALL; 4029 } 4030 ma[i] = mpred = m; 4031 m = vm_page_next(m); 4032 } 4033 return (i); 4034 } 4035 4036 /* 4037 * Mapping function for valid or dirty bits in a page. 4038 * 4039 * Inputs are required to range within a page. 4040 */ 4041 vm_page_bits_t 4042 vm_page_bits(int base, int size) 4043 { 4044 int first_bit; 4045 int last_bit; 4046 4047 KASSERT( 4048 base + size <= PAGE_SIZE, 4049 ("vm_page_bits: illegal base/size %d/%d", base, size) 4050 ); 4051 4052 if (size == 0) /* handle degenerate case */ 4053 return (0); 4054 4055 first_bit = base >> DEV_BSHIFT; 4056 last_bit = (base + size - 1) >> DEV_BSHIFT; 4057 4058 return (((vm_page_bits_t)2 << last_bit) - 4059 ((vm_page_bits_t)1 << first_bit)); 4060 } 4061 4062 /* 4063 * vm_page_set_valid_range: 4064 * 4065 * Sets portions of a page valid. The arguments are expected 4066 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 4067 * of any partial chunks touched by the range. The invalid portion of 4068 * such chunks will be zeroed. 4069 * 4070 * (base + size) must be less than or equal to PAGE_SIZE. 4071 */ 4072 void 4073 vm_page_set_valid_range(vm_page_t m, int base, int size) 4074 { 4075 int endoff, frag; 4076 4077 VM_OBJECT_ASSERT_WLOCKED(m->object); 4078 if (size == 0) /* handle degenerate case */ 4079 return; 4080 4081 /* 4082 * If the base is not DEV_BSIZE aligned and the valid 4083 * bit is clear, we have to zero out a portion of the 4084 * first block. 4085 */ 4086 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 4087 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) 4088 pmap_zero_page_area(m, frag, base - frag); 4089 4090 /* 4091 * If the ending offset is not DEV_BSIZE aligned and the 4092 * valid bit is clear, we have to zero out a portion of 4093 * the last block. 4094 */ 4095 endoff = base + size; 4096 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 4097 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) 4098 pmap_zero_page_area(m, endoff, 4099 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 4100 4101 /* 4102 * Assert that no previously invalid block that is now being validated 4103 * is already dirty. 4104 */ 4105 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 4106 ("vm_page_set_valid_range: page %p is dirty", m)); 4107 4108 /* 4109 * Set valid bits inclusive of any overlap.
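 *
 * As an illustrative sketch (assuming the usual DEV_BSIZE of 512, i.e.
 * DEV_BSHIFT of 9): vm_page_bits(700, 600) describes the byte range
 * [700, 1300), which touches blocks 1 and 2, so it yields the mask 0x6.
 * The portions of those two blocks lying outside the requested range are
 * exactly what the code above zeroes when the blocks were previously
 * invalid.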
*/ 4111 m->valid |= vm_page_bits(base, size); 4112 } 4113 4114 /* 4115 * Clear the given bits from the specified page's dirty field. 4116 */ 4117 static __inline void 4118 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) 4119 { 4120 uintptr_t addr; 4121 #if PAGE_SIZE < 16384 4122 int shift; 4123 #endif 4124 4125 /* 4126 * If the object is locked and the page is neither exclusive busy nor 4127 * write mapped, then the page's dirty field cannot possibly be 4128 * set by a concurrent pmap operation. 4129 */ 4130 VM_OBJECT_ASSERT_WLOCKED(m->object); 4131 if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) 4132 m->dirty &= ~pagebits; 4133 else { 4134 /* 4135 * The pmap layer can call vm_page_dirty() without 4136 * holding a distinguished lock. The combination of 4137 * the object's lock and an atomic operation suffices 4138 * to guarantee consistency of the page dirty field. 4139 * 4140 * For the PAGE_SIZE == 32768 case, the compiler already 4141 * properly aligns the dirty field, so no forcible 4142 * alignment is needed. Only require existence of 4143 * atomic_clear_64 when page size is 32768. 4144 */ 4145 addr = (uintptr_t)&m->dirty; 4146 #if PAGE_SIZE == 32768 4147 atomic_clear_64((uint64_t *)addr, pagebits); 4148 #elif PAGE_SIZE == 16384 4149 atomic_clear_32((uint32_t *)addr, pagebits); 4150 #else /* PAGE_SIZE <= 8192 */ 4151 /* 4152 * Use a trick to perform a 32-bit atomic on the 4153 * containing aligned word, to not depend on the existence 4154 * of atomic_clear_{8, 16}. 4155 */ 4156 shift = addr & (sizeof(uint32_t) - 1); 4157 #if BYTE_ORDER == BIG_ENDIAN 4158 shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY; 4159 #else 4160 shift *= NBBY; 4161 #endif 4162 addr &= ~(sizeof(uint32_t) - 1); 4163 atomic_clear_32((uint32_t *)addr, pagebits << shift); 4164 #endif /* PAGE_SIZE */ 4165 } 4166 } 4167 4168 /* 4169 * vm_page_set_validclean: 4170 * 4171 * Sets portions of a page valid and clean. The arguments are expected 4172 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 4173 * of any partial chunks touched by the range. The invalid portion of 4174 * such chunks will be zero'd. 4175 * 4176 * (base + size) must be less than or equal to PAGE_SIZE. 4177 */ 4178 void 4179 vm_page_set_validclean(vm_page_t m, int base, int size) 4180 { 4181 vm_page_bits_t oldvalid, pagebits; 4182 int endoff, frag; 4183 4184 VM_OBJECT_ASSERT_WLOCKED(m->object); 4185 if (size == 0) /* handle degenerate case */ 4186 return; 4187 4188 /* 4189 * If the base is not DEV_BSIZE aligned and the valid 4190 * bit is clear, we have to zero out a portion of the 4191 * first block. 4192 */ 4193 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 4194 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) 4195 pmap_zero_page_area(m, frag, base - frag); 4196 4197 /* 4198 * If the ending offset is not DEV_BSIZE aligned and the 4199 * valid bit is clear, we have to zero out a portion of 4200 * the last block. 4201 */ 4202 endoff = base + size; 4203 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 4204 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) 4205 pmap_zero_page_area(m, endoff, 4206 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 4207 4208 /* 4209 * Set valid, clear dirty bits. If validating the entire 4210 * page we can safely clear the pmap modify bit. We also 4211 * use this opportunity to clear the VPO_NOSYNC flag. If a process 4212 * takes a write fault on a MAP_NOSYNC memory area the flag will 4213 * be set again.
4214 * 4215 * We set valid bits inclusive of any overlap, but we can only 4216 * clear dirty bits for DEV_BSIZE chunks that are fully within 4217 * the range. 4218 */ 4219 oldvalid = m->valid; 4220 pagebits = vm_page_bits(base, size); 4221 m->valid |= pagebits; 4222 #if 0 /* NOT YET */ 4223 if ((frag = base & (DEV_BSIZE - 1)) != 0) { 4224 frag = DEV_BSIZE - frag; 4225 base += frag; 4226 size -= frag; 4227 if (size < 0) 4228 size = 0; 4229 } 4230 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 4231 #endif 4232 if (base == 0 && size == PAGE_SIZE) { 4233 /* 4234 * The page can only be modified within the pmap if it is 4235 * mapped, and it can only be mapped if it was previously 4236 * fully valid. 4237 */ 4238 if (oldvalid == VM_PAGE_BITS_ALL) 4239 /* 4240 * Perform the pmap_clear_modify() first. Otherwise, 4241 * a concurrent pmap operation, such as 4242 * pmap_protect(), could clear a modification in the 4243 * pmap and set the dirty field on the page before 4244 * pmap_clear_modify() had begun and after the dirty 4245 * field was cleared here. 4246 */ 4247 pmap_clear_modify(m); 4248 m->dirty = 0; 4249 m->oflags &= ~VPO_NOSYNC; 4250 } else if (oldvalid != VM_PAGE_BITS_ALL) 4251 m->dirty &= ~pagebits; 4252 else 4253 vm_page_clear_dirty_mask(m, pagebits); 4254 } 4255 4256 void 4257 vm_page_clear_dirty(vm_page_t m, int base, int size) 4258 { 4259 4260 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 4261 } 4262 4263 /* 4264 * vm_page_set_invalid: 4265 * 4266 * Invalidates DEV_BSIZE'd chunks within a page. Both the 4267 * valid and dirty bits for the affected areas are cleared. 4268 */ 4269 void 4270 vm_page_set_invalid(vm_page_t m, int base, int size) 4271 { 4272 vm_page_bits_t bits; 4273 vm_object_t object; 4274 4275 object = m->object; 4276 VM_OBJECT_ASSERT_WLOCKED(object); 4277 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + 4278 size >= object->un_pager.vnp.vnp_size) 4279 bits = VM_PAGE_BITS_ALL; 4280 else 4281 bits = vm_page_bits(base, size); 4282 if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL && 4283 bits != 0) 4284 pmap_remove_all(m); 4285 KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) || 4286 !pmap_page_is_mapped(m), 4287 ("vm_page_set_invalid: page %p is mapped", m)); 4288 m->valid &= ~bits; 4289 m->dirty &= ~bits; 4290 } 4291 4292 /* 4293 * vm_page_zero_invalid() 4294 * 4295 * The kernel assumes that the invalid portions of a page contain 4296 * garbage, but such pages can be mapped into memory by user code. 4297 * When this occurs, we must zero out the non-valid portions of the 4298 * page so user code sees what it expects. 4299 * 4300 * Pages are most often semi-valid when the end of a file is mapped 4301 * into memory and the file's size is not page aligned. 4302 */ 4303 void 4304 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 4305 { 4306 int b; 4307 int i; 4308 4309 VM_OBJECT_ASSERT_WLOCKED(m->object); 4310 /* 4311 * Scan the valid bits looking for invalid sections that 4312 * must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the 4313 * valid bit may be set) have already been zeroed by 4314 * vm_page_set_validclean(). 4315 */ 4316 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 4317 if (i == (PAGE_SIZE / DEV_BSIZE) || 4318 (m->valid & ((vm_page_bits_t)1 << i))) { 4319 if (i > b) { 4320 pmap_zero_page_area(m, 4321 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 4322 } 4323 b = i + 1; 4324 } 4325 } 4326 4327 /* 4328 * setvalid is TRUE when we can safely set the zero'd areas 4329 * as being valid.
We can do this if there are no cache consistency 4330 * issues. e.g., it is ok to do with UFS, but not ok to do with NFS. 4331 */ 4332 if (setvalid) 4333 m->valid = VM_PAGE_BITS_ALL; 4334 } 4335 4336 /* 4337 * vm_page_is_valid: 4338 * 4339 * Is (partial) page valid? Note that the case where size == 0 4340 * will return FALSE in the degenerate case where the page is 4341 * entirely invalid, and TRUE otherwise. 4342 */ 4343 int 4344 vm_page_is_valid(vm_page_t m, int base, int size) 4345 { 4346 vm_page_bits_t bits; 4347 4348 VM_OBJECT_ASSERT_LOCKED(m->object); 4349 bits = vm_page_bits(base, size); 4350 return (m->valid != 0 && (m->valid & bits) == bits); 4351 } 4352 4353 /* 4354 * Returns true if all of the specified predicates are true for the entire 4355 * (super)page and false otherwise. 4356 */ 4357 bool 4358 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m) 4359 { 4360 vm_object_t object; 4361 int i, npages; 4362 4363 object = m->object; 4364 if (skip_m != NULL && skip_m->object != object) 4365 return (false); 4366 VM_OBJECT_ASSERT_LOCKED(object); 4367 npages = atop(pagesizes[m->psind]); 4368 4369 /* 4370 * The physically contiguous pages that make up a superpage, i.e., a 4371 * page with a page size index ("psind") greater than zero, will 4372 * occupy adjacent entries in vm_page_array[]. 4373 */ 4374 for (i = 0; i < npages; i++) { 4375 /* Always test object consistency, including "skip_m". */ 4376 if (m[i].object != object) 4377 return (false); 4378 if (&m[i] == skip_m) 4379 continue; 4380 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i])) 4381 return (false); 4382 if ((flags & PS_ALL_DIRTY) != 0) { 4383 /* 4384 * Calling vm_page_test_dirty() or pmap_is_modified() 4385 * might stop this case from spuriously returning 4386 * "false". However, that would require a write lock 4387 * on the object containing "m[i]". 4388 */ 4389 if (m[i].dirty != VM_PAGE_BITS_ALL) 4390 return (false); 4391 } 4392 if ((flags & PS_ALL_VALID) != 0 && 4393 m[i].valid != VM_PAGE_BITS_ALL) 4394 return (false); 4395 } 4396 return (true); 4397 } 4398 4399 /* 4400 * Set the page's dirty bits if the page is modified. 4401 */ 4402 void 4403 vm_page_test_dirty(vm_page_t m) 4404 { 4405 4406 VM_OBJECT_ASSERT_WLOCKED(m->object); 4407 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 4408 vm_page_dirty(m); 4409 } 4410 4411 void 4412 vm_page_lock_KBI(vm_page_t m, const char *file, int line) 4413 { 4414 4415 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); 4416 } 4417 4418 void 4419 vm_page_unlock_KBI(vm_page_t m, const char *file, int line) 4420 { 4421 4422 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line); 4423 } 4424 4425 int 4426 vm_page_trylock_KBI(vm_page_t m, const char *file, int line) 4427 { 4428 4429 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line)); 4430 } 4431 4432 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 4433 void 4434 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line) 4435 { 4436 4437 vm_page_lock_assert_KBI(m, MA_OWNED, file, line); 4438 } 4439 4440 void 4441 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line) 4442 { 4443 4444 mtx_assert_(vm_page_lockptr(m), a, file, line); 4445 } 4446 #endif 4447 4448 #ifdef INVARIANTS 4449 void 4450 vm_page_object_lock_assert(vm_page_t m) 4451 { 4452 4453 /* 4454 * Certain of the page's fields may only be modified by the 4455 * holder of the containing object's lock or the exclusive busy 4456 * holder.
Unfortunately, the holder of the write busy is 4457 * not recorded, and thus cannot be checked here. 4458 */ 4459 if (m->object != NULL && !vm_page_xbusied(m)) 4460 VM_OBJECT_ASSERT_WLOCKED(m->object); 4461 } 4462 4463 void 4464 vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits) 4465 { 4466 4467 if ((bits & PGA_WRITEABLE) == 0) 4468 return; 4469 4470 /* 4471 * The PGA_WRITEABLE flag can only be set if the page is 4472 * managed, is exclusively busied or the object is locked. 4473 * Currently, this flag is only set by pmap_enter(). 4474 */ 4475 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4476 ("PGA_WRITEABLE on unmanaged page")); 4477 if (!vm_page_xbusied(m)) 4478 VM_OBJECT_ASSERT_LOCKED(m->object); 4479 } 4480 #endif 4481 4482 #include "opt_ddb.h" 4483 #ifdef DDB 4484 #include <sys/kernel.h> 4485 4486 #include <ddb/ddb.h> 4487 4488 DB_SHOW_COMMAND(page, vm_page_print_page_info) 4489 { 4490 4491 db_printf("vm_cnt.v_free_count: %d\n", vm_free_count()); 4492 db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count()); 4493 db_printf("vm_cnt.v_active_count: %d\n", vm_active_count()); 4494 db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count()); 4495 db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count()); 4496 db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved); 4497 db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min); 4498 db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target); 4499 db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target); 4500 } 4501 4502 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 4503 { 4504 int dom; 4505 4506 db_printf("pq_free %d\n", vm_free_count()); 4507 for (dom = 0; dom < vm_ndomains; dom++) { 4508 db_printf( 4509 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n", 4510 dom, 4511 vm_dom[dom].vmd_page_count, 4512 vm_dom[dom].vmd_free_count, 4513 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt, 4514 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt, 4515 vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt, 4516 vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt); 4517 } 4518 } 4519 4520 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) 4521 { 4522 vm_page_t m; 4523 boolean_t phys, virt; 4524 4525 if (!have_addr) { 4526 db_printf("show pginfo addr\n"); 4527 return; 4528 } 4529 4530 phys = strchr(modif, 'p') != NULL; 4531 virt = strchr(modif, 'v') != NULL; 4532 if (virt) 4533 m = PHYS_TO_VM_PAGE(pmap_kextract(addr)); 4534 else if (phys) 4535 m = PHYS_TO_VM_PAGE(addr); 4536 else 4537 m = (vm_page_t)addr; 4538 db_printf( 4539 "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n" 4540 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", 4541 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, 4542 m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags, 4543 m->flags, m->act_count, m->busy_lock, m->valid, m->dirty); 4544 } 4545 #endif /* DDB */ 4546
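/*
 * Illustrative usage sketch (not part of this file's interfaces; "object"
 * and "pindex" are assumed to be supplied by the caller): a typical
 * consumer grabs an exclusively busied page from a locked object roughly
 * as follows:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	... fill or consume the exclusively busied page "m" ...
 *	vm_page_xunbusy(m);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * With VM_ALLOC_NOWAIT the grab may instead return NULL rather than
 * sleep, and VM_ALLOC_NOBUSY or VM_ALLOC_SBUSY change the busy state of
 * the returned page as described in the comments above vm_page_grab()
 * and vm_page_grab_pages().
 */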