/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- A page queue lock is required when adding or removing a page from a
 *	  page queue regardless of other locks or the busy state of a page.
 *
 *		* In general, no thread besides the page daemon can acquire or
 *		  hold more than one page queue lock at a time.
 *
 *		* The page daemon can acquire and hold any pair of page queue
 *		  locks in any order.
 *
 *	- The object lock is required when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 *
 */

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct vm_domain vm_dom[MAXMEMDOM];
struct mtx_padalign vm_page_queue_free_mtx;

struct mtx_padalign pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;

static int boot_pages = UMA_BOOT_PAGES;
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &boot_pages, 0,
    "number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

/* Is the page daemon waiting for free pages? */
static int vm_pageout_pages_needed;

static uma_zone_t fakepg_zone;

static void vm_page_alloc_check(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(uint8_t queue, vm_page_t m);
static void vm_page_free_wakeup(void);
static void vm_page_init_fakepg(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
    vm_paddr_t high);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

static void
vm_page_init_fakepg(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
}

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

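/*
 * Illustrative sketch (not part of the original source): a pmap routine that
 * uses vm_page_pa_tryrelock(), defined below, typically retries its lookup
 * whenever the function returns EAGAIN, because the pmap lock was dropped
 * and the virtual-to-physical translation may have changed in the meantime.
 * The variable names here are hypothetical:
 *
 *	vm_paddr_t locked_pa = 0;
 *
 *	PMAP_LOCK(pmap);
 * retry:
 *	pa = <physical address found by walking the page tables>;
 *	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *		goto retry;
 *	m = PHYS_TO_VM_PAGE(pa);
 *	vm_page_hold(m);
 *	PA_UNLOCK_COND(locked_pa);
 *	PMAP_UNLOCK(pmap);
 */
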
/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

/*
 *	vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	vm_page_t m;
	char *next;
	int ret;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		m = vm_phys_paddr_to_vm_page(pa);
		if (m == NULL)
			continue;
		mtx_lock(&vm_page_queue_free_mtx);
		ret = vm_phys_unfree_page(m);
		mtx_unlock(&vm_page_queue_free_mtx);
		if (ret == TRUE) {
			TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
			if (bootverbose)
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
		}
	}
}

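/*
 * Illustrative example (not part of the original source): the blacklist is
 * a list of physical addresses separated by spaces, commas, or newlines,
 * each of which is truncated to a page boundary before the page is pulled
 * from the free lists.  It can be supplied either as the preloaded
 * "ram_blacklist" file loaded below, or as the vm.blacklist kenv variable
 * consumed by vm_page_startup(), e.g. in loader.conf:
 *
 *	vm.blacklist="0x1a23000,0x7ffe4000 0x12345000"
 */
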
/*
 *	vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
	return;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static void
vm_page_domain_init(struct vm_domain *vmd)
{
	struct vm_pagequeue *pq;
	int i;

	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
	    &vm_cnt.v_inactive_count;
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
	    &vm_cnt.v_active_count;
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_vcnt) =
	    &vm_cnt.v_laundry_count;
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	/* Unswappable dirty pages are counted as being in the laundry. */
	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_vcnt) =
	    &vm_cnt.v_laundry_count;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
	}
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	char *list, *listend;
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;
	int pages_per_zone;

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_water)
			low_water = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_water)
			high_water = vm_phys_segs[i].end;
	}
	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

	end = phys_avail[biggestone + 1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(&vm_dom[i]);

	/*
	 * Almost all of the pages needed for bootstrapping UMA are used
	 * for zone structures, so if the number of CPUs results in those
	 * structures taking more than one page each, we set aside more pages
	 * in proportion to the zone structure size.
	 */
	pages_per_zone = howmany(sizeof(struct uma_zone) +
	    sizeof(struct uma_cache) * (mp_maxid + 1), UMA_SLAB_SIZE);
	if (pages_per_zone > 1) {
		/* Reserve more pages so that we don't run out. */
		boot_pages = UMA_BOOT_PAGES_ZONES * pages_per_zone;
	}

	/*
	 * Allocate memory for use when bootstrapping the kernel memory
	 * allocator.
	 *
	 * CTLFLAG_RDTUN doesn't work during the early boot process, so we
	 * must manually fetch the value.
	 */
	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
    defined(__i386__) || defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; i < vm_phys_nsegs; i++) {
		page_range += atop(vm_phys_segs[i].end -
		    vm_phys_segs[i].start);
	}
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t)mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on arm64, amd64, and mips can come out of the direct-map,
	 * not kvm like i386, so the pages must be tracked for a crashdump to
	 * include this data.  This includes the vm_page_array and the early
	 * UMA bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	vm_cnt.v_page_count = 0;
	vm_cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}

	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;
	bool locked;

	vm_page_assert_xbusied(m);
	locked = mtx_owned(vm_page_lockptr(m));

	for (;;) {
		x = m->busy_lock;
		x &= VPB_BIT_WAITERS;
		if (x != 0 && !locked)
			vm_page_lock(m);
		if (atomic_cmpset_rel_int(&m->busy_lock,
		    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
			break;
		if (x != 0 && !locked)
			vm_page_unlock(m);
	}
	if (x != 0) {
		wakeup(m);
		if (!locked)
			vm_page_unlock(m);
	}
}

/*
 *	vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = m->busy_lock;
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}

/*
 *	vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	for (;;) {
		x = m->busy_lock;
		if (VPB_SHARERS(x) > 1) {
			if (atomic_cmpset_int(&m->busy_lock, x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		if ((x & VPB_BIT_WAITERS) == 0) {
			KASSERT(x == VPB_SHARERS_WORD(1),
			    ("vm_page_sunbusy: invalid lock state"));
			if (atomic_cmpset_int(&m->busy_lock,
			    VPB_SHARERS_WORD(1), VPB_UNBUSIED))
				break;
			continue;
		}
		KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
		    ("vm_page_sunbusy: invalid lock state for waiters"));

		vm_page_lock(m);
		if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
			vm_page_unlock(m);
			continue;
		}
		wakeup(m);
		vm_page_unlock(m);
		break;
	}
}

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep and release the page lock, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	The given page must be locked.
 *
 *	If nonshared is true, sleep only if the page is xbusy.
 */
void
vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
{
	u_int x;

	vm_page_assert_locked(m);

	x = m->busy_lock;
	if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
	    ((x & VPB_BIT_WAITERS) == 0 &&
	    !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
		vm_page_unlock(m);
		return;
	}
	msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
}

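/*
 * Illustrative sketch (not part of the original source): a caller that must
 * wait for a busied page typically loops, dropping the object lock across
 * the sleep and re-looking the page up afterwards, because the page's
 * identity may change while it sleeps.  Names here are hypothetical:
 *
 *	VM_OBJECT_WLOCK(object);
 *	while ((m = vm_page_lookup(object, pindex)) != NULL &&
 *	    vm_page_busied(m)) {
 *		vm_page_lock(m);
 *		VM_OBJECT_WUNLOCK(object);
 *		vm_page_busy_sleep(m, "pgbusy", false);
 *		VM_OBJECT_WLOCK(object);
 *	}
 */
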
/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds, 1 is returned, otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	u_int x;

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
			return (1);
	}
}

static void
vm_page_xunbusy_locked(vm_page_t m)
{

	vm_page_assert_xbusied(m);
	vm_page_assert_locked(m);

	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* There is a waiter, do wakeup() instead of vm_page_flash(). */
	wakeup(m);
}

void
vm_page_xunbusy_maybelocked(vm_page_t m)
{
	bool lockacq;

	vm_page_assert_xbusied(m);

	/*
	 * Fast path for unbusy.  If it succeeds, we know that there
	 * are no waiters, so we do not need a wakeup.
	 */
	if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
	    VPB_UNBUSIED))
		return;

	lockacq = !mtx_owned(vm_page_lockptr(m));
	if (lockacq)
		vm_page_lock(m);
	vm_page_xunbusy_locked(m);
	if (lockacq)
		vm_page_unlock(m);
}

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called when the first attempt at an exclusive unbusy of a page has
 *	failed.  It is assumed that the waiters bit is on.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{

	vm_page_assert_xbusied(m);

	vm_page_lock(m);
	vm_page_xunbusy_locked(m);
	vm_page_unlock(m);
}

/*
 *	vm_page_flash:
 *
 *	Wakeup anyone waiting for the page.
 *	The ownership bits do not change.
 *
 *	The given page must be locked.
 */
void
vm_page_flash(vm_page_t m)
{
	u_int x;

	vm_page_lock_assert(m, MA_OWNED);

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_WAITERS) == 0)
			return;
		if (atomic_cmpset_int(&m->busy_lock, x,
		    x & (~VPB_BIT_WAITERS)))
			break;
	}
	wakeup(m);
}

/*
 * Keep a page from being freed by the page daemon.  This has much the same
 * effect as wiring, except with much lower overhead, and it should be used
 * only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
	--mem->hold_count;
	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	m->wire_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from a getpages request that
 * was optionally read ahead or behind.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	/* We shouldn't put invalid pages on queues. */
	KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	vm_page_lock(m);
	if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);
	vm_page_unlock(m);
	vm_page_xunbusy(m);
}

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if the page is busied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and the object containing it must
 *	be locked.
 */
int
vm_page_sleep_if_busy(vm_page_t m, const char *msg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	if (vm_page_busied(m)) {
		/*
		 * The page-specific object must be cached because page
		 * identity can change during the sleep, causing the
		 * re-lock of a different object.
		 * It is assumed that a reference to the object is already
		 * held by the callers.
		 */
		obj = m->object;
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(obj);
		vm_page_busy_sleep(m, msg, false);
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* Refer to this operation by its public name. */
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));
}

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	vm_page_t msucc;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {
		m->object = NULL;
		m->pindex = 0;
		return (1);
	}
	vm_page_insert_radixdone(m, object, mpred);
	return (0);
}

/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *
 *	Removes the specified page from its containing object, but does not
 *	invalidate any backing storage.
 *
 *	The object must be locked.  The page must be locked if it is managed.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mrem;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_assert_locked(m);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (vm_page_xbusied(m))
		vm_page_xunbusy_maybelocked(m);
	mrem = vm_radix_remove(&object->rtree, m->pindex);
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * The existing page must not be on a paging queue.
 */
vm_page_t
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mold;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(mnew->object == NULL,
	    ("vm_page_replace: page already in object"));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */

	mnew->object = object;
	mnew->pindex = pindex;
	mold = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mold->queue == PQ_NONE,
	    ("vm_page_replace: mold is on a paging queue"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);

	mold->object = NULL;
	vm_page_xunbusy_maybelocked(mold);

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but OBJ_MIGHTBEDIRTY.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	return (mold);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.
 *	We have to do this for several reasons:  (1) we aren't freeing the
 *	page, (2) we are dirtying the page, (3) the VM system is probably
 *	moving the page from object A to B, and will then later move
 *	the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	fact that we moved it, and because we may be invalidating
 *	swap.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on m_prev and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_lock(m);
	vm_page_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;
	vm_page_unlock(m);
	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	return (0);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	vm_page_t m, mpred;
	int flags, req_class;

	mpred = NULL;	/* XXX: pacify gcc */
	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", object, req));
	if (object != NULL)
		VM_OBJECT_ASSERT_WLOCKED(object);

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	if (object != NULL) {
		mpred = vm_radix_lookup_le(&object->rtree, pindex);
		KASSERT(mpred == NULL || mpred->pindex != pindex,
		    ("vm_page_alloc: pindex already allocated"));
	}

	/*
	 * Allocate a page if the number of free pages exceeds the minimum
	 * for the request class.
	 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count > 0)) {
		/*
		 * Can we allocate the page from a reservation?
		 */
#if VM_NRESERVLEVEL > 0
		if (object == NULL || (object->flags & (OBJ_COLORED |
		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL)
#endif
		{
			/*
			 * If not, allocate it from the free page queues.
			 */
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 *  At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	vm_phys_freecnt_adj(m, -1);
	mtx_unlock(&vm_page_queue_free_mtx);
	vm_page_alloc_check(m);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	flags &= m->flags;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	m->flags = flags;
	m->aflags = 0;
	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
	    VPO_UNMANAGED : 0;
	m->busy_lock = VPB_UNBUSIED;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	if ((req & VM_ALLOC_SBUSY) != 0)
		m->busy_lock = VPB_SHARERS_WORD(1);
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&vm_cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		if (vm_page_insert_after(m, object, pindex, mpred)) {
			pagedaemon_wakeup();
			if (req & VM_ALLOC_WIRED) {
				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
				m->wire_count = 0;
			}
			KASSERT(m->object == NULL, ("page %p has object", m));
			m->oflags = VPO_UNMANAGED;
			m->busy_lock = VPB_UNBUSIED;
			/* Don't change PG_ZERO. */
			vm_page_free_toq(m);
			return (NULL);
		}

		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    (object->flags & OBJ_FICTITIOUS) == 0)
			pmap_page_set_memattr(m, object->memattr);
	} else
		m->pindex = pindex;

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}

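/*
 * Illustrative sketch (not part of the original source): because
 * vm_page_alloc() never sleeps, a caller that can tolerate waiting typically
 * drops the object lock, waits for free pages, and retries.  Names and the
 * retry label are hypothetical:
 *
 *	VM_OBJECT_WLOCK(object);
 * retry:
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_WLOCK(object);
 *		goto retry;
 *	}
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */
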
/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.  Both "alignment"
 *	and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The specified object may not contain fictitious pages.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)
{
	vm_page_t m, m_ret, mpred;
	u_int busy_lock, flags, oflags;
	int req_class;

	mpred = NULL;	/* XXX: pacify gcc */
	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
	    req));
	if (object != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(object);
		KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
		    ("vm_page_alloc_contig: object %p has fictitious pages",
		    object));
	}
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	if (object != NULL) {
		mpred = vm_radix_lookup_le(&object->rtree, pindex);
		KASSERT(mpred == NULL || mpred->pindex != pindex,
		    ("vm_page_alloc_contig: pindex already allocated"));
	}

	/*
	 * Can we allocate the pages without the number of free pages falling
	 * below the lower bound for the allocation class?
	 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count >= npages)) {
		/*
		 * Can we allocate the pages from a reservation?
		 */
#if VM_NRESERVLEVEL > 0
retry:
		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
		    low, high, alignment, boundary, mpred)) == NULL)
#endif
			/*
			 * If not, allocate them from the free page queues.
			 */
			m_ret = vm_phys_alloc_contig(npages, low, high,
			    alignment, boundary);
	} else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, npages);
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m_ret != NULL)
		vm_phys_freecnt_adj(m_ret, -npages);
	else {
#if VM_NRESERVLEVEL > 0
		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
		    boundary))
			goto retry;
#endif
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	if (m_ret == NULL)
		return (NULL);
	for (m = m_ret; m < &m_ret[npages]; m++)
		vm_page_alloc_check(m);

	/*
	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
	    VPO_UNMANAGED : 0;
	busy_lock = VPB_UNBUSIED;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
		busy_lock = VPB_SINGLE_EXCLUSIVER;
	if ((req & VM_ALLOC_SBUSY) != 0)
		busy_lock = VPB_SHARERS_WORD(1);
	if ((req & VM_ALLOC_WIRED) != 0)
		atomic_add_int(&vm_cnt.v_wire_count, npages);
	if (object != NULL) {
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    memattr == VM_MEMATTR_DEFAULT)
			memattr = object->memattr;
	}
	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->aflags = 0;
		m->flags = (m->flags | PG_NODUMP) & flags;
		m->busy_lock = busy_lock;
		if ((req & VM_ALLOC_WIRED) != 0)
			m->wire_count = 1;
		m->act_count = 0;
		m->oflags = oflags;
		if (object != NULL) {
			if (vm_page_insert_after(m, object, pindex, mpred)) {
				pagedaemon_wakeup();
				if ((req & VM_ALLOC_WIRED) != 0)
					atomic_subtract_int(
					    &vm_cnt.v_wire_count, npages);
				KASSERT(m->object == NULL,
				    ("page %p has object", m));
				mpred = m;
				for (m = m_ret; m < &m_ret[npages]; m++) {
					if (m <= mpred &&
					    (req & VM_ALLOC_WIRED) != 0)
						m->wire_count = 0;
					m->oflags = VPO_UNMANAGED;
					m->busy_lock = VPB_UNBUSIED;
					/* Don't change PG_ZERO. */
					vm_page_free_toq(m);
				}
				return (NULL);
			}
			mpred = m;
		} else
			m->pindex = pindex;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
		pindex++;
	}
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m_ret);
}

/*
 * Check a page that has been freshly dequeued from a freelist.
 */
static void
vm_page_alloc_check(vm_page_t m)
{

	KASSERT(m->object == NULL, ("page %p has object", m));
	KASSERT(m->queue == PQ_NONE,
	    ("page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("page %p is wired", m));
	KASSERT(m->hold_count == 0, ("page %p is held", m));
	KASSERT(!vm_page_busied(m), ("page %p is busy", m));
	KASSERT(m->dirty == 0, ("page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	KASSERT(m->valid == 0, ("free page %p is valid", m));
}

/*
 *	vm_page_alloc_freelist:
 *
 *	Allocate a physical page from the specified free page list.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_freelist(int flind, int req)
{
	vm_page_t m;
	u_int flags;
	int req_class;

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	/*
	 * Do not allocate reserved pages unless the req has asked for it.
	 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count > 0))
		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
	else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m == NULL) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return (NULL);
	}
	vm_phys_freecnt_adj(m, -1);
	mtx_unlock(&vm_page_queue_free_mtx);
	vm_page_alloc_check(m);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	m->aflags = 0;
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	m->flags &= flags;
	if ((req & VM_ALLOC_WIRED) != 0) {
		/*
		 * The page lock is not required for wiring a page that does
		 * not belong to an object.
		 */
		atomic_add_int(&vm_cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	/* Unmanaged pages don't use "act_count". */
	m->oflags = VPO_UNMANAGED;
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m);
}

#define	VPSC_ANY	0	/* No restrictions. */
#define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
#define	VPSC_NOSUPER	2	/* Skip superpages. */

/*
 *	vm_page_scan_contig:
 *
 *	Scan vm_page_array[] between the specified entries "m_start" and
 *	"m_end" for a run of contiguous physical pages that satisfy the
 *	specified conditions, and return the lowest page in the run.  The
 *	specified "alignment" determines the alignment of the lowest physical
 *	page in the run.  If the specified "boundary" is non-zero, then the
 *	run of physical pages cannot span a physical address that is a
 *	multiple of "boundary".
 *
 *	"m_end" is never dereferenced, so it need not point to a vm_page
 *	structure within vm_page_array[].
 *
 *	"npages" must be greater than zero.  "m_start" and "m_end" must not
 *	span a hole (or discontiguity) in the physical address space.  Both
 *	"alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
    u_long alignment, vm_paddr_t boundary, int options)
{
	struct mtx *m_mtx, *new_mtx;
	vm_object_t object;
	vm_paddr_t pa;
	vm_page_t m, m_run;
#if VM_NRESERVLEVEL > 0
	int level;
#endif
	int m_inc, order, run_ext, run_len;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	m_run = NULL;
	run_len = 0;
	m_mtx = NULL;
	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));

		/*
		 * If the current page would be the start of a run, check its
		 * physical address against the end, alignment, and boundary
		 * conditions.  If it doesn't satisfy these conditions, either
		 * terminate the scan or advance to the next page that
		 * satisfies the failed condition.
1971 */ 1972 if (run_len == 0) { 1973 KASSERT(m_run == NULL, ("m_run != NULL")); 1974 if (m + npages > m_end) 1975 break; 1976 pa = VM_PAGE_TO_PHYS(m); 1977 if ((pa & (alignment - 1)) != 0) { 1978 m_inc = atop(roundup2(pa, alignment) - pa); 1979 continue; 1980 } 1981 if (rounddown2(pa ^ (pa + ptoa(npages) - 1), 1982 boundary) != 0) { 1983 m_inc = atop(roundup2(pa, boundary) - pa); 1984 continue; 1985 } 1986 } else 1987 KASSERT(m_run != NULL, ("m_run == NULL")); 1988 1989 /* 1990 * Avoid releasing and reacquiring the same page lock. 1991 */ 1992 new_mtx = vm_page_lockptr(m); 1993 if (m_mtx != new_mtx) { 1994 if (m_mtx != NULL) 1995 mtx_unlock(m_mtx); 1996 m_mtx = new_mtx; 1997 mtx_lock(m_mtx); 1998 } 1999 m_inc = 1; 2000 retry: 2001 if (m->wire_count != 0 || m->hold_count != 0) 2002 run_ext = 0; 2003 #if VM_NRESERVLEVEL > 0 2004 else if ((level = vm_reserv_level(m)) >= 0 && 2005 (options & VPSC_NORESERV) != 0) { 2006 run_ext = 0; 2007 /* Advance to the end of the reservation. */ 2008 pa = VM_PAGE_TO_PHYS(m); 2009 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - 2010 pa); 2011 } 2012 #endif 2013 else if ((object = m->object) != NULL) { 2014 /* 2015 * The page is considered eligible for relocation if 2016 * and only if it could be laundered or reclaimed by 2017 * the page daemon. 2018 */ 2019 if (!VM_OBJECT_TRYRLOCK(object)) { 2020 mtx_unlock(m_mtx); 2021 VM_OBJECT_RLOCK(object); 2022 mtx_lock(m_mtx); 2023 if (m->object != object) { 2024 /* 2025 * The page may have been freed. 2026 */ 2027 VM_OBJECT_RUNLOCK(object); 2028 goto retry; 2029 } else if (m->wire_count != 0 || 2030 m->hold_count != 0) { 2031 run_ext = 0; 2032 goto unlock; 2033 } 2034 } 2035 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 2036 ("page %p is PG_UNHOLDFREE", m)); 2037 /* Don't care: PG_NODUMP, PG_ZERO. */ 2038 if (object->type != OBJT_DEFAULT && 2039 object->type != OBJT_SWAP && 2040 object->type != OBJT_VNODE) { 2041 run_ext = 0; 2042 #if VM_NRESERVLEVEL > 0 2043 } else if ((options & VPSC_NOSUPER) != 0 && 2044 (level = vm_reserv_level_iffullpop(m)) >= 0) { 2045 run_ext = 0; 2046 /* Advance to the end of the superpage. */ 2047 pa = VM_PAGE_TO_PHYS(m); 2048 m_inc = atop(roundup2(pa + 1, 2049 vm_reserv_size(level)) - pa); 2050 #endif 2051 } else if (object->memattr == VM_MEMATTR_DEFAULT && 2052 m->queue != PQ_NONE && !vm_page_busied(m)) { 2053 /* 2054 * The page is allocated but eligible for 2055 * relocation. Extend the current run by one 2056 * page. 2057 */ 2058 KASSERT(pmap_page_get_memattr(m) == 2059 VM_MEMATTR_DEFAULT, 2060 ("page %p has an unexpected memattr", m)); 2061 KASSERT((m->oflags & (VPO_SWAPINPROG | 2062 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2063 ("page %p has unexpected oflags", m)); 2064 /* Don't care: VPO_NOSYNC. */ 2065 run_ext = 1; 2066 } else 2067 run_ext = 0; 2068 unlock: 2069 VM_OBJECT_RUNLOCK(object); 2070 #if VM_NRESERVLEVEL > 0 2071 } else if (level >= 0) { 2072 /* 2073 * The page is reserved but not yet allocated. In 2074 * other words, it is still free. Extend the current 2075 * run by one page. 2076 */ 2077 run_ext = 1; 2078 #endif 2079 } else if ((order = m->order) < VM_NFREEORDER) { 2080 /* 2081 * The page is enqueued in the physical memory 2082 * allocator's free page queues. Moreover, it is the 2083 * first page in a power-of-two-sized run of 2084 * contiguous free pages. Add these pages to the end 2085 * of the current run, and jump ahead. 
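			 *
			 * For example (added, illustrative only), if the page
			 * heads an order-3 free run, then run_ext = 1 << 3 =
			 * 8 free pages are credited to the current run at
			 * once, and m_inc = 8 makes the next iteration resume
			 * at the first page beyond that free run.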
2086 */ 2087 run_ext = 1 << order; 2088 m_inc = 1 << order; 2089 } else { 2090 /* 2091 * Skip the page for one of the following reasons: (1) 2092 * It is enqueued in the physical memory allocator's 2093 * free page queues. However, it is not the first 2094 * page in a run of contiguous free pages. (This case 2095 * rarely occurs because the scan is performed in 2096 * ascending order.) (2) It is not reserved, and it is 2097 * transitioning from free to allocated. (Conversely, 2098 * the transition from allocated to free for managed 2099 * pages is blocked by the page lock.) (3) It is 2100 * allocated but not contained by an object and not 2101 * wired, e.g., allocated by Xen's balloon driver. 2102 */ 2103 run_ext = 0; 2104 } 2105 2106 /* 2107 * Extend or reset the current run of pages. 2108 */ 2109 if (run_ext > 0) { 2110 if (run_len == 0) 2111 m_run = m; 2112 run_len += run_ext; 2113 } else { 2114 if (run_len > 0) { 2115 m_run = NULL; 2116 run_len = 0; 2117 } 2118 } 2119 } 2120 if (m_mtx != NULL) 2121 mtx_unlock(m_mtx); 2122 if (run_len >= npages) 2123 return (m_run); 2124 return (NULL); 2125 } 2126 2127 /* 2128 * vm_page_reclaim_run: 2129 * 2130 * Try to relocate each of the allocated virtual pages within the 2131 * specified run of physical pages to a new physical address. Free the 2132 * physical pages underlying the relocated virtual pages. A virtual page 2133 * is relocatable if and only if it could be laundered or reclaimed by 2134 * the page daemon. Whenever possible, a virtual page is relocated to a 2135 * physical address above "high". 2136 * 2137 * Returns 0 if every physical page within the run was already free or 2138 * just freed by a successful relocation. Otherwise, returns a non-zero 2139 * value indicating why the last attempt to relocate a virtual page was 2140 * unsuccessful. 2141 * 2142 * "req_class" must be an allocation class. 2143 */ 2144 static int 2145 vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run, 2146 vm_paddr_t high) 2147 { 2148 struct mtx *m_mtx, *new_mtx; 2149 struct spglist free; 2150 vm_object_t object; 2151 vm_paddr_t pa; 2152 vm_page_t m, m_end, m_new; 2153 int error, order, req; 2154 2155 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, 2156 ("req_class is not an allocation class")); 2157 SLIST_INIT(&free); 2158 error = 0; 2159 m = m_run; 2160 m_end = m_run + npages; 2161 m_mtx = NULL; 2162 for (; error == 0 && m < m_end; m++) { 2163 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, 2164 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); 2165 2166 /* 2167 * Avoid releasing and reacquiring the same page lock. 2168 */ 2169 new_mtx = vm_page_lockptr(m); 2170 if (m_mtx != new_mtx) { 2171 if (m_mtx != NULL) 2172 mtx_unlock(m_mtx); 2173 m_mtx = new_mtx; 2174 mtx_lock(m_mtx); 2175 } 2176 retry: 2177 if (m->wire_count != 0 || m->hold_count != 0) 2178 error = EBUSY; 2179 else if ((object = m->object) != NULL) { 2180 /* 2181 * The page is relocated if and only if it could be 2182 * laundered or reclaimed by the page daemon. 2183 */ 2184 if (!VM_OBJECT_TRYWLOCK(object)) { 2185 mtx_unlock(m_mtx); 2186 VM_OBJECT_WLOCK(object); 2187 mtx_lock(m_mtx); 2188 if (m->object != object) { 2189 /* 2190 * The page may have been freed. 2191 */ 2192 VM_OBJECT_WUNLOCK(object); 2193 goto retry; 2194 } else if (m->wire_count != 0 || 2195 m->hold_count != 0) { 2196 error = EBUSY; 2197 goto unlock; 2198 } 2199 } 2200 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 2201 ("page %p is PG_UNHOLDFREE", m)); 2202 /* Don't care: PG_NODUMP, PG_ZERO. 
*/ 2203 if (object->type != OBJT_DEFAULT && 2204 object->type != OBJT_SWAP && 2205 object->type != OBJT_VNODE) 2206 error = EINVAL; 2207 else if (object->memattr != VM_MEMATTR_DEFAULT) 2208 error = EINVAL; 2209 else if (m->queue != PQ_NONE && !vm_page_busied(m)) { 2210 KASSERT(pmap_page_get_memattr(m) == 2211 VM_MEMATTR_DEFAULT, 2212 ("page %p has an unexpected memattr", m)); 2213 KASSERT((m->oflags & (VPO_SWAPINPROG | 2214 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, 2215 ("page %p has unexpected oflags", m)); 2216 /* Don't care: VPO_NOSYNC. */ 2217 if (m->valid != 0) { 2218 /* 2219 * First, try to allocate a new page 2220 * that is above "high". Failing 2221 * that, try to allocate a new page 2222 * that is below "m_run". Allocate 2223 * the new page between the end of 2224 * "m_run" and "high" only as a last 2225 * resort. 2226 */ 2227 req = req_class | VM_ALLOC_NOOBJ; 2228 if ((m->flags & PG_NODUMP) != 0) 2229 req |= VM_ALLOC_NODUMP; 2230 if (trunc_page(high) != 2231 ~(vm_paddr_t)PAGE_MASK) { 2232 m_new = vm_page_alloc_contig( 2233 NULL, 0, req, 1, 2234 round_page(high), 2235 ~(vm_paddr_t)0, 2236 PAGE_SIZE, 0, 2237 VM_MEMATTR_DEFAULT); 2238 } else 2239 m_new = NULL; 2240 if (m_new == NULL) { 2241 pa = VM_PAGE_TO_PHYS(m_run); 2242 m_new = vm_page_alloc_contig( 2243 NULL, 0, req, 1, 2244 0, pa - 1, PAGE_SIZE, 0, 2245 VM_MEMATTR_DEFAULT); 2246 } 2247 if (m_new == NULL) { 2248 pa += ptoa(npages); 2249 m_new = vm_page_alloc_contig( 2250 NULL, 0, req, 1, 2251 pa, high, PAGE_SIZE, 0, 2252 VM_MEMATTR_DEFAULT); 2253 } 2254 if (m_new == NULL) { 2255 error = ENOMEM; 2256 goto unlock; 2257 } 2258 KASSERT(m_new->wire_count == 0, 2259 ("page %p is wired", m)); 2260 2261 /* 2262 * Replace "m" with the new page. For 2263 * vm_page_replace(), "m" must be busy 2264 * and dequeued. Finally, change "m" 2265 * as if vm_page_free() was called. 2266 */ 2267 if (object->ref_count != 0) 2268 pmap_remove_all(m); 2269 m_new->aflags = m->aflags; 2270 KASSERT(m_new->oflags == VPO_UNMANAGED, 2271 ("page %p is managed", m)); 2272 m_new->oflags = m->oflags & VPO_NOSYNC; 2273 pmap_copy_page(m, m_new); 2274 m_new->valid = m->valid; 2275 m_new->dirty = m->dirty; 2276 m->flags &= ~PG_ZERO; 2277 vm_page_xbusy(m); 2278 vm_page_remque(m); 2279 vm_page_replace_checked(m_new, object, 2280 m->pindex, m); 2281 m->valid = 0; 2282 vm_page_undirty(m); 2283 2284 /* 2285 * The new page must be deactivated 2286 * before the object is unlocked. 2287 */ 2288 new_mtx = vm_page_lockptr(m_new); 2289 if (m_mtx != new_mtx) { 2290 mtx_unlock(m_mtx); 2291 m_mtx = new_mtx; 2292 mtx_lock(m_mtx); 2293 } 2294 vm_page_deactivate(m_new); 2295 } else { 2296 m->flags &= ~PG_ZERO; 2297 vm_page_remque(m); 2298 vm_page_remove(m); 2299 KASSERT(m->dirty == 0, 2300 ("page %p is dirty", m)); 2301 } 2302 SLIST_INSERT_HEAD(&free, m, plinks.s.ss); 2303 } else 2304 error = EBUSY; 2305 unlock: 2306 VM_OBJECT_WUNLOCK(object); 2307 } else { 2308 mtx_lock(&vm_page_queue_free_mtx); 2309 order = m->order; 2310 if (order < VM_NFREEORDER) { 2311 /* 2312 * The page is enqueued in the physical memory 2313 * allocator's free page queues. Moreover, it 2314 * is the first page in a power-of-two-sized 2315 * run of contiguous free pages. Jump ahead 2316 * to the last page within that run, and 2317 * continue from there. 
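			 *
			 * For example (added, illustrative only), for an
			 * order-2 free run the adjustment below advances "m"
			 * to the fourth and last page of that run; the
			 * loop's "m++" then resumes the scan at the first
			 * page past the run, so all 1 << order free pages
			 * are skipped in a single step.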
2318 */ 2319 m += (1 << order) - 1; 2320 } 2321 #if VM_NRESERVLEVEL > 0 2322 else if (vm_reserv_is_page_free(m)) 2323 order = 0; 2324 #endif 2325 mtx_unlock(&vm_page_queue_free_mtx); 2326 if (order == VM_NFREEORDER) 2327 error = EINVAL; 2328 } 2329 } 2330 if (m_mtx != NULL) 2331 mtx_unlock(m_mtx); 2332 if ((m = SLIST_FIRST(&free)) != NULL) { 2333 mtx_lock(&vm_page_queue_free_mtx); 2334 do { 2335 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2336 vm_phys_freecnt_adj(m, 1); 2337 #if VM_NRESERVLEVEL > 0 2338 if (!vm_reserv_free_page(m)) 2339 #else 2340 if (true) 2341 #endif 2342 vm_phys_free_pages(m, 0); 2343 } while ((m = SLIST_FIRST(&free)) != NULL); 2344 vm_page_free_wakeup(); 2345 mtx_unlock(&vm_page_queue_free_mtx); 2346 } 2347 return (error); 2348 } 2349 2350 #define NRUNS 16 2351 2352 CTASSERT(powerof2(NRUNS)); 2353 2354 #define RUN_INDEX(count) ((count) & (NRUNS - 1)) 2355 2356 #define MIN_RECLAIM 8 2357 2358 /* 2359 * vm_page_reclaim_contig: 2360 * 2361 * Reclaim allocated, contiguous physical memory satisfying the specified 2362 * conditions by relocating the virtual pages using that physical memory. 2363 * Returns true if reclamation is successful and false otherwise. Since 2364 * relocation requires the allocation of physical pages, reclamation may 2365 * fail due to a shortage of free pages. When reclamation fails, callers 2366 * are expected to perform VM_WAIT before retrying a failed allocation 2367 * operation, e.g., vm_page_alloc_contig(). 2368 * 2369 * The caller must always specify an allocation class through "req". 2370 * 2371 * allocation classes: 2372 * VM_ALLOC_NORMAL normal process request 2373 * VM_ALLOC_SYSTEM system *really* needs a page 2374 * VM_ALLOC_INTERRUPT interrupt time request 2375 * 2376 * The optional allocation flags are ignored. 2377 * 2378 * "npages" must be greater than zero. Both "alignment" and "boundary" 2379 * must be a power of two. 2380 */ 2381 bool 2382 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, 2383 u_long alignment, vm_paddr_t boundary) 2384 { 2385 vm_paddr_t curr_low; 2386 vm_page_t m_run, m_runs[NRUNS]; 2387 u_long count, reclaimed; 2388 int error, i, options, req_class; 2389 2390 KASSERT(npages > 0, ("npages is 0")); 2391 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); 2392 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); 2393 req_class = req & VM_ALLOC_CLASS_MASK; 2394 2395 /* 2396 * The page daemon is allowed to dig deeper into the free page list. 2397 */ 2398 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) 2399 req_class = VM_ALLOC_SYSTEM; 2400 2401 /* 2402 * Return if the number of free pages cannot satisfy the requested 2403 * allocation. 2404 */ 2405 count = vm_cnt.v_free_count; 2406 if (count < npages + vm_cnt.v_free_reserved || (count < npages + 2407 vm_cnt.v_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || 2408 (count < npages && req_class == VM_ALLOC_INTERRUPT)) 2409 return (false); 2410 2411 /* 2412 * Scan up to three times, relaxing the restrictions ("options") on 2413 * the reclamation of reservations and superpages each time. 2414 */ 2415 for (options = VPSC_NORESERV;;) { 2416 /* 2417 * Find the highest runs that satisfy the given constraints 2418 * and restrictions, and record them in "m_runs". 
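		 *
		 * (Added note, not part of the original comment.)  "m_runs"
		 * is used as a ring buffer of NRUNS entries indexed by
		 * RUN_INDEX(count) = count & (NRUNS - 1).  Because
		 * vm_phys_scan_contig() is called with a strictly increasing
		 * "curr_low", runs are discovered in ascending physical
		 * order, so once more than NRUNS runs have been seen each
		 * new run overwrites the oldest (lowest) one, leaving the
		 * NRUNS highest runs recorded when this inner loop ends.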
2419 */ 2420 curr_low = low; 2421 count = 0; 2422 for (;;) { 2423 m_run = vm_phys_scan_contig(npages, curr_low, high, 2424 alignment, boundary, options); 2425 if (m_run == NULL) 2426 break; 2427 curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages); 2428 m_runs[RUN_INDEX(count)] = m_run; 2429 count++; 2430 } 2431 2432 /* 2433 * Reclaim the highest runs in LIFO (descending) order until 2434 * the number of reclaimed pages, "reclaimed", is at least 2435 * MIN_RECLAIM. Reset "reclaimed" each time because each 2436 * reclamation is idempotent, and runs will (likely) recur 2437 * from one scan to the next as restrictions are relaxed. 2438 */ 2439 reclaimed = 0; 2440 for (i = 0; count > 0 && i < NRUNS; i++) { 2441 count--; 2442 m_run = m_runs[RUN_INDEX(count)]; 2443 error = vm_page_reclaim_run(req_class, npages, m_run, 2444 high); 2445 if (error == 0) { 2446 reclaimed += npages; 2447 if (reclaimed >= MIN_RECLAIM) 2448 return (true); 2449 } 2450 } 2451 2452 /* 2453 * Either relax the restrictions on the next scan or return if 2454 * the last scan had no restrictions. 2455 */ 2456 if (options == VPSC_NORESERV) 2457 options = VPSC_NOSUPER; 2458 else if (options == VPSC_NOSUPER) 2459 options = VPSC_ANY; 2460 else if (options == VPSC_ANY) 2461 return (reclaimed != 0); 2462 } 2463 } 2464 2465 /* 2466 * vm_wait: (also see VM_WAIT macro) 2467 * 2468 * Sleep until free pages are available for allocation. 2469 * - Called in various places before memory allocations. 2470 */ 2471 void 2472 vm_wait(void) 2473 { 2474 2475 mtx_lock(&vm_page_queue_free_mtx); 2476 if (curproc == pageproc) { 2477 vm_pageout_pages_needed = 1; 2478 msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx, 2479 PDROP | PSWP, "VMWait", 0); 2480 } else { 2481 if (__predict_false(pageproc == NULL)) 2482 panic("vm_wait in early boot"); 2483 if (!vm_pageout_wanted) { 2484 vm_pageout_wanted = true; 2485 wakeup(&vm_pageout_wanted); 2486 } 2487 vm_pages_needed = true; 2488 msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, 2489 "vmwait", 0); 2490 } 2491 } 2492 2493 /* 2494 * vm_waitpfault: (also see VM_WAITPFAULT macro) 2495 * 2496 * Sleep until free pages are available for allocation. 2497 * - Called only in vm_fault so that processes page faulting 2498 * can be easily tracked. 2499 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 2500 * processes will be able to grab memory first. Do not change 2501 * this balance without careful testing first. 2502 */ 2503 void 2504 vm_waitpfault(void) 2505 { 2506 2507 mtx_lock(&vm_page_queue_free_mtx); 2508 if (!vm_pageout_wanted) { 2509 vm_pageout_wanted = true; 2510 wakeup(&vm_pageout_wanted); 2511 } 2512 vm_pages_needed = true; 2513 msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, 2514 "pfault", 0); 2515 } 2516 2517 struct vm_pagequeue * 2518 vm_page_pagequeue(vm_page_t m) 2519 { 2520 2521 if (vm_page_in_laundry(m)) 2522 return (&vm_dom[0].vmd_pagequeues[m->queue]); 2523 else 2524 return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]); 2525 } 2526 2527 /* 2528 * vm_page_dequeue: 2529 * 2530 * Remove the given page from its current page queue. 2531 * 2532 * The page must be locked. 
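 *
 *	A minimal caller sketch (added, illustrative only; not taken from a
 *	real call site):
 *
 *		vm_page_lock(m);
 *		if (m->queue != PQ_NONE)
 *			vm_page_dequeue(m);
 *		vm_page_unlock(m);
 *
 *	Callers that may be handed an unqueued page either test m->queue
 *	first, as above, or use vm_page_remque(), since this routine asserts
 *	that the page is currently queued.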
2533 */ 2534 void 2535 vm_page_dequeue(vm_page_t m) 2536 { 2537 struct vm_pagequeue *pq; 2538 2539 vm_page_assert_locked(m); 2540 KASSERT(m->queue < PQ_COUNT, ("vm_page_dequeue: page %p is not queued", 2541 m)); 2542 pq = vm_page_pagequeue(m); 2543 vm_pagequeue_lock(pq); 2544 m->queue = PQ_NONE; 2545 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 2546 vm_pagequeue_cnt_dec(pq); 2547 vm_pagequeue_unlock(pq); 2548 } 2549 2550 /* 2551 * vm_page_dequeue_locked: 2552 * 2553 * Remove the given page from its current page queue. 2554 * 2555 * The page and page queue must be locked. 2556 */ 2557 void 2558 vm_page_dequeue_locked(vm_page_t m) 2559 { 2560 struct vm_pagequeue *pq; 2561 2562 vm_page_lock_assert(m, MA_OWNED); 2563 pq = vm_page_pagequeue(m); 2564 vm_pagequeue_assert_locked(pq); 2565 m->queue = PQ_NONE; 2566 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 2567 vm_pagequeue_cnt_dec(pq); 2568 } 2569 2570 /* 2571 * vm_page_enqueue: 2572 * 2573 * Add the given page to the specified page queue. 2574 * 2575 * The page must be locked. 2576 */ 2577 static void 2578 vm_page_enqueue(uint8_t queue, vm_page_t m) 2579 { 2580 struct vm_pagequeue *pq; 2581 2582 vm_page_lock_assert(m, MA_OWNED); 2583 KASSERT(queue < PQ_COUNT, 2584 ("vm_page_enqueue: invalid queue %u request for page %p", 2585 queue, m)); 2586 if (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE) 2587 pq = &vm_dom[0].vmd_pagequeues[queue]; 2588 else 2589 pq = &vm_phys_domain(m)->vmd_pagequeues[queue]; 2590 vm_pagequeue_lock(pq); 2591 m->queue = queue; 2592 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 2593 vm_pagequeue_cnt_inc(pq); 2594 vm_pagequeue_unlock(pq); 2595 } 2596 2597 /* 2598 * vm_page_requeue: 2599 * 2600 * Move the given page to the tail of its current page queue. 2601 * 2602 * The page must be locked. 2603 */ 2604 void 2605 vm_page_requeue(vm_page_t m) 2606 { 2607 struct vm_pagequeue *pq; 2608 2609 vm_page_lock_assert(m, MA_OWNED); 2610 KASSERT(m->queue != PQ_NONE, 2611 ("vm_page_requeue: page %p is not queued", m)); 2612 pq = vm_page_pagequeue(m); 2613 vm_pagequeue_lock(pq); 2614 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 2615 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 2616 vm_pagequeue_unlock(pq); 2617 } 2618 2619 /* 2620 * vm_page_requeue_locked: 2621 * 2622 * Move the given page to the tail of its current page queue. 2623 * 2624 * The page queue must be locked. 2625 */ 2626 void 2627 vm_page_requeue_locked(vm_page_t m) 2628 { 2629 struct vm_pagequeue *pq; 2630 2631 KASSERT(m->queue != PQ_NONE, 2632 ("vm_page_requeue_locked: page %p is not queued", m)); 2633 pq = vm_page_pagequeue(m); 2634 vm_pagequeue_assert_locked(pq); 2635 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 2636 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 2637 } 2638 2639 /* 2640 * vm_page_activate: 2641 * 2642 * Put the specified page on the active list (if appropriate). 2643 * Ensure that act_count is at least ACT_INIT but do not otherwise 2644 * mess with it. 2645 * 2646 * The page must be locked. 
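 *
 *	A minimal caller sketch (added, illustrative only):
 *
 *		vm_page_lock(m);
 *		vm_page_activate(m);
 *		vm_page_unlock(m);
 *
 *	Wired and unmanaged pages are not enqueued; for those this routine
 *	only asserts that they are not already on a page queue.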
2647 */ 2648 void 2649 vm_page_activate(vm_page_t m) 2650 { 2651 int queue; 2652 2653 vm_page_lock_assert(m, MA_OWNED); 2654 if ((queue = m->queue) != PQ_ACTIVE) { 2655 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { 2656 if (m->act_count < ACT_INIT) 2657 m->act_count = ACT_INIT; 2658 if (queue != PQ_NONE) 2659 vm_page_dequeue(m); 2660 vm_page_enqueue(PQ_ACTIVE, m); 2661 } else 2662 KASSERT(queue == PQ_NONE, 2663 ("vm_page_activate: wired page %p is queued", m)); 2664 } else { 2665 if (m->act_count < ACT_INIT) 2666 m->act_count = ACT_INIT; 2667 } 2668 } 2669 2670 /* 2671 * vm_page_free_wakeup: 2672 * 2673 * Helper routine for vm_page_free_toq(). This routine is called 2674 * when a page is added to the free queues. 2675 * 2676 * The page queues must be locked. 2677 */ 2678 static inline void 2679 vm_page_free_wakeup(void) 2680 { 2681 2682 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 2683 /* 2684 * if pageout daemon needs pages, then tell it that there are 2685 * some free. 2686 */ 2687 if (vm_pageout_pages_needed && 2688 vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) { 2689 wakeup(&vm_pageout_pages_needed); 2690 vm_pageout_pages_needed = 0; 2691 } 2692 /* 2693 * wakeup processes that are waiting on memory if we hit a 2694 * high water mark. And wakeup scheduler process if we have 2695 * lots of memory. this process will swapin processes. 2696 */ 2697 if (vm_pages_needed && !vm_page_count_min()) { 2698 vm_pages_needed = false; 2699 wakeup(&vm_cnt.v_free_count); 2700 } 2701 } 2702 2703 /* 2704 * vm_page_free_toq: 2705 * 2706 * Returns the given page to the free list, 2707 * disassociating it with any VM object. 2708 * 2709 * The object must be locked. The page must be locked if it is managed. 2710 */ 2711 void 2712 vm_page_free_toq(vm_page_t m) 2713 { 2714 2715 if ((m->oflags & VPO_UNMANAGED) == 0) { 2716 vm_page_lock_assert(m, MA_OWNED); 2717 KASSERT(!pmap_page_is_mapped(m), 2718 ("vm_page_free_toq: freeing mapped page %p", m)); 2719 } else 2720 KASSERT(m->queue == PQ_NONE, 2721 ("vm_page_free_toq: unmanaged page %p is queued", m)); 2722 PCPU_INC(cnt.v_tfree); 2723 2724 if (vm_page_sbusied(m)) 2725 panic("vm_page_free: freeing busy page %p", m); 2726 2727 /* 2728 * Unqueue, then remove page. Note that we cannot destroy 2729 * the page here because we do not want to call the pager's 2730 * callback routine until after we've put the page on the 2731 * appropriate free queue. 2732 */ 2733 vm_page_remque(m); 2734 vm_page_remove(m); 2735 2736 /* 2737 * If fictitious remove object association and 2738 * return, otherwise delay object association removal. 2739 */ 2740 if ((m->flags & PG_FICTITIOUS) != 0) { 2741 return; 2742 } 2743 2744 m->valid = 0; 2745 vm_page_undirty(m); 2746 2747 if (m->wire_count != 0) 2748 panic("vm_page_free: freeing wired page %p", m); 2749 if (m->hold_count != 0) { 2750 m->flags &= ~PG_ZERO; 2751 KASSERT((m->flags & PG_UNHOLDFREE) == 0, 2752 ("vm_page_free: freeing PG_UNHOLDFREE page %p", m)); 2753 m->flags |= PG_UNHOLDFREE; 2754 } else { 2755 /* 2756 * Restore the default memory attribute to the page. 2757 */ 2758 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 2759 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 2760 2761 /* 2762 * Insert the page into the physical memory allocator's free 2763 * page queues. 
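	 *
	 * (Added note, not part of the original comment: when the kernel is
	 * built with VM_NRESERVLEVEL > 0, vm_reserv_free_page() returns TRUE
	 * if the page belonged to a reservation and has been returned to it,
	 * in which case the call to vm_phys_free_pages() below is skipped.)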
2764 */ 2765 mtx_lock(&vm_page_queue_free_mtx); 2766 vm_phys_freecnt_adj(m, 1); 2767 #if VM_NRESERVLEVEL > 0 2768 if (!vm_reserv_free_page(m)) 2769 #else 2770 if (TRUE) 2771 #endif 2772 vm_phys_free_pages(m, 0); 2773 vm_page_free_wakeup(); 2774 mtx_unlock(&vm_page_queue_free_mtx); 2775 } 2776 } 2777 2778 /* 2779 * vm_page_wire: 2780 * 2781 * Mark this page as wired down by yet 2782 * another map, removing it from paging queues 2783 * as necessary. 2784 * 2785 * If the page is fictitious, then its wire count must remain one. 2786 * 2787 * The page must be locked. 2788 */ 2789 void 2790 vm_page_wire(vm_page_t m) 2791 { 2792 2793 /* 2794 * Only bump the wire statistics if the page is not already wired, 2795 * and only unqueue the page if it is on some queue (if it is unmanaged 2796 * it is already off the queues). 2797 */ 2798 vm_page_lock_assert(m, MA_OWNED); 2799 if ((m->flags & PG_FICTITIOUS) != 0) { 2800 KASSERT(m->wire_count == 1, 2801 ("vm_page_wire: fictitious page %p's wire count isn't one", 2802 m)); 2803 return; 2804 } 2805 if (m->wire_count == 0) { 2806 KASSERT((m->oflags & VPO_UNMANAGED) == 0 || 2807 m->queue == PQ_NONE, 2808 ("vm_page_wire: unmanaged page %p is queued", m)); 2809 vm_page_remque(m); 2810 atomic_add_int(&vm_cnt.v_wire_count, 1); 2811 } 2812 m->wire_count++; 2813 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); 2814 } 2815 2816 /* 2817 * vm_page_unwire: 2818 * 2819 * Release one wiring of the specified page, potentially allowing it to be 2820 * paged out. Returns TRUE if the number of wirings transitions to zero and 2821 * FALSE otherwise. 2822 * 2823 * Only managed pages belonging to an object can be paged out. If the number 2824 * of wirings transitions to zero and the page is eligible for page out, then 2825 * the page is added to the specified paging queue (unless PQ_NONE is 2826 * specified). 2827 * 2828 * If a page is fictitious, then its wire count must always be one. 2829 * 2830 * A managed page must be locked. 2831 */ 2832 boolean_t 2833 vm_page_unwire(vm_page_t m, uint8_t queue) 2834 { 2835 2836 KASSERT(queue < PQ_COUNT || queue == PQ_NONE, 2837 ("vm_page_unwire: invalid queue %u request for page %p", 2838 queue, m)); 2839 if ((m->oflags & VPO_UNMANAGED) == 0) 2840 vm_page_assert_locked(m); 2841 if ((m->flags & PG_FICTITIOUS) != 0) { 2842 KASSERT(m->wire_count == 1, 2843 ("vm_page_unwire: fictitious page %p's wire count isn't one", m)); 2844 return (FALSE); 2845 } 2846 if (m->wire_count > 0) { 2847 m->wire_count--; 2848 if (m->wire_count == 0) { 2849 atomic_subtract_int(&vm_cnt.v_wire_count, 1); 2850 if ((m->oflags & VPO_UNMANAGED) == 0 && 2851 m->object != NULL && queue != PQ_NONE) 2852 vm_page_enqueue(queue, m); 2853 return (TRUE); 2854 } else 2855 return (FALSE); 2856 } else 2857 panic("vm_page_unwire: page %p's wire count is zero", m); 2858 } 2859 2860 /* 2861 * Move the specified page to the inactive queue. 2862 * 2863 * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive 2864 * queue. However, setting "noreuse" to TRUE will accelerate the specified 2865 * page's reclamation, but it will not unmap the page from any address space. 2866 * This is implemented by inserting the page near the head of the inactive 2867 * queue, using a marker page to guide FIFO insertion ordering. 2868 * 2869 * The page must be locked. 
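 *
 * A sketch of the resulting queue layout (added, illustrative only): the
 * page daemon consumes the inactive queue from its head, and "vmd_inacthead"
 * is a marker kept near that head.  Inserting each "noreuse" page
 * immediately before the marker therefore places it well ahead of pages
 * queued at the tail, so it is examined much sooner, while successive
 * "noreuse" pages keep FIFO order among themselves.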
2870 */ 2871 static inline void 2872 _vm_page_deactivate(vm_page_t m, boolean_t noreuse) 2873 { 2874 struct vm_pagequeue *pq; 2875 int queue; 2876 2877 vm_page_assert_locked(m); 2878 2879 /* 2880 * Ignore if the page is already inactive, unless it is unlikely to be 2881 * reactivated. 2882 */ 2883 if ((queue = m->queue) == PQ_INACTIVE && !noreuse) 2884 return; 2885 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { 2886 pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE]; 2887 /* Avoid multiple acquisitions of the inactive queue lock. */ 2888 if (queue == PQ_INACTIVE) { 2889 vm_pagequeue_lock(pq); 2890 vm_page_dequeue_locked(m); 2891 } else { 2892 if (queue != PQ_NONE) 2893 vm_page_dequeue(m); 2894 vm_pagequeue_lock(pq); 2895 } 2896 m->queue = PQ_INACTIVE; 2897 if (noreuse) 2898 TAILQ_INSERT_BEFORE(&vm_phys_domain(m)->vmd_inacthead, 2899 m, plinks.q); 2900 else 2901 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 2902 vm_pagequeue_cnt_inc(pq); 2903 vm_pagequeue_unlock(pq); 2904 } 2905 } 2906 2907 /* 2908 * Move the specified page to the inactive queue. 2909 * 2910 * The page must be locked. 2911 */ 2912 void 2913 vm_page_deactivate(vm_page_t m) 2914 { 2915 2916 _vm_page_deactivate(m, FALSE); 2917 } 2918 2919 /* 2920 * Move the specified page to the inactive queue with the expectation 2921 * that it is unlikely to be reused. 2922 * 2923 * The page must be locked. 2924 */ 2925 void 2926 vm_page_deactivate_noreuse(vm_page_t m) 2927 { 2928 2929 _vm_page_deactivate(m, TRUE); 2930 } 2931 2932 /* 2933 * vm_page_launder 2934 * 2935 * Put a page in the laundry. 2936 */ 2937 void 2938 vm_page_launder(vm_page_t m) 2939 { 2940 int queue; 2941 2942 vm_page_assert_locked(m); 2943 if ((queue = m->queue) != PQ_LAUNDRY) { 2944 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { 2945 if (queue != PQ_NONE) 2946 vm_page_dequeue(m); 2947 vm_page_enqueue(PQ_LAUNDRY, m); 2948 } else 2949 KASSERT(queue == PQ_NONE, 2950 ("wired page %p is queued", m)); 2951 } 2952 } 2953 2954 /* 2955 * vm_page_unswappable 2956 * 2957 * Put a page in the PQ_UNSWAPPABLE holding queue. 2958 */ 2959 void 2960 vm_page_unswappable(vm_page_t m) 2961 { 2962 2963 vm_page_assert_locked(m); 2964 KASSERT(m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0, 2965 ("page %p already unswappable", m)); 2966 if (m->queue != PQ_NONE) 2967 vm_page_dequeue(m); 2968 vm_page_enqueue(PQ_UNSWAPPABLE, m); 2969 } 2970 2971 /* 2972 * vm_page_try_to_free() 2973 * 2974 * Attempt to free the page. If we cannot free it, we do nothing. 2975 * 1 is returned on success, 0 on failure. 2976 */ 2977 int 2978 vm_page_try_to_free(vm_page_t m) 2979 { 2980 2981 vm_page_lock_assert(m, MA_OWNED); 2982 if (m->object != NULL) 2983 VM_OBJECT_ASSERT_WLOCKED(m->object); 2984 if (m->dirty || m->hold_count || m->wire_count || 2985 (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m)) 2986 return (0); 2987 pmap_remove_all(m); 2988 if (m->dirty) 2989 return (0); 2990 vm_page_free(m); 2991 return (1); 2992 } 2993 2994 /* 2995 * vm_page_advise 2996 * 2997 * Deactivate or do nothing, as appropriate. 2998 * 2999 * The object and page must be locked. 3000 */ 3001 void 3002 vm_page_advise(vm_page_t m, int advice) 3003 { 3004 3005 vm_page_assert_locked(m); 3006 VM_OBJECT_ASSERT_WLOCKED(m->object); 3007 if (advice == MADV_FREE) 3008 /* 3009 * Mark the page clean. This will allow the page to be freed 3010 * without first paging it out. 
MADV_FREE pages are often
		 * quickly reused by malloc(3), so we do not do anything that
		 * would result in a page fault on a later access.
		 */
		vm_page_undirty(m);
	else if (advice != MADV_DONTNEED)
		return;

	/*
	 * Clear any references to the page.  Otherwise, the page daemon will
	 * immediately reactivate the page.
	 */
	vm_page_aflag_clear(m, PGA_REFERENCED);

	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
		vm_page_dirty(m);

	/*
	 * Place clean pages near the head of the inactive queue rather than
	 * the tail, thus defeating the queue's LRU operation and ensuring that
	 * the page will be reused quickly.  Dirty pages not already in the
	 * laundry are moved there.
	 */
	if (m->dirty == 0)
		vm_page_deactivate_noreuse(m);
	else
		vm_page_launder(m);
}

/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting if the page continues
 * to be in the object.  If the page doesn't exist, first allocate it
 * and then conditionally zero it.
 *
 * This routine may sleep.
 *
 * The object must be locked on entry.  The lock will, however, be released
 * and reacquired if the routine sleeps.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int sleep;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
		    vm_page_xbusied(m) : vm_page_busied(m);
		if (sleep) {
			if ((allocflags & VM_ALLOC_NOWAIT) != 0)
				return (NULL);
			/*
			 * Reference the page before unlocking and
			 * sleeping so that the page daemon is less
			 * likely to reclaim it.
			 */
			vm_page_aflag_set(m, PGA_REFERENCED);
			vm_page_lock(m);
			VM_OBJECT_WUNLOCK(object);
			vm_page_busy_sleep(m, "pgrbwt", (allocflags &
			    VM_ALLOC_IGN_SBUSY) != 0);
			VM_OBJECT_WLOCK(object);
			goto retrylookup;
		} else {
			if ((allocflags & VM_ALLOC_WIRED) != 0) {
				vm_page_lock(m);
				vm_page_wire(m);
				vm_page_unlock(m);
			}
			if ((allocflags &
			    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
				vm_page_xbusy(m);
			if ((allocflags & VM_ALLOC_SBUSY) != 0)
				vm_page_sbusy(m);
			return (m);
		}
	}
	m = vm_page_alloc(object, pindex, allocflags);
	if (m == NULL) {
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (NULL);
		VM_OBJECT_WUNLOCK(object);
		VM_WAIT;
		VM_OBJECT_WLOCK(object);
		goto retrylookup;
	}
	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	return (m);
}

/*
 * Mapping function for valid or dirty bits in a page.
 *
 * Inputs are required to range within a page.
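 *
 * A worked example (added, illustrative only), assuming DEV_BSIZE = 512 and
 * hence DEV_BSHIFT = 9: vm_page_bits(512, 1024) covers the second and third
 * 512-byte chunks of the page, so
 *
 *	first_bit = 512 >> 9 = 1
 *	last_bit  = (512 + 1024 - 1) >> 9 = 2
 *	result    = (2 << 2) - (1 << 1) = 8 - 2 = 0x6
 *
 * i.e., bits 1 and 2 of the returned mask are set.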
 */
vm_page_bits_t
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return (0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
}

/*
 * vm_page_set_valid_range:
 *
 *	Sets portions of a page valid.  The arguments are expected
 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_valid_range(vm_page_t m, int base, int size)
{
	int endoff, frag;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Assert that no previously invalid block that is now being validated
	 * is already dirty.
	 */
	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
	    ("vm_page_set_valid_range: page %p is dirty", m));

	/*
	 * Set valid bits inclusive of any overlap.
	 */
	m->valid |= vm_page_bits(base, size);
}

/*
 * Clear the given bits from the specified page's dirty field.
 */
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
{
	uintptr_t addr;
#if PAGE_SIZE < 16384
	int shift;
#endif

	/*
	 * If the object is locked and the page is neither exclusive busy nor
	 * write mapped, then the page's dirty field cannot possibly be
	 * set by a concurrent pmap operation.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
		m->dirty &= ~pagebits;
	else {
		/*
		 * The pmap layer can call vm_page_dirty() without
		 * holding a distinguished lock.  The combination of
		 * the object's lock and an atomic operation suffices
		 * to guarantee consistency of the page dirty field.
		 *
		 * For the PAGE_SIZE == 32768 case, the compiler already
		 * properly aligns the dirty field, so no forced
		 * alignment is needed.  Only require the existence of
		 * atomic_clear_64 when the page size is 32768.
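		 *
		 * A worked example of the sub-word case (added, illustrative
		 * only): with PAGE_SIZE = 4096 the dirty field is a single
		 * byte (8 DEV_BSIZE chunks).  If that byte sits at offset 3
		 * within its aligned 32-bit word, then on a little-endian
		 * machine shift = 3 * NBBY = 24 and on a big-endian machine
		 * shift = (4 - 1 - 3) * NBBY = 0, so atomic_clear_32()
		 * clears exactly the requested bits of that byte without
		 * disturbing its neighbors.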
		 */
		addr = (uintptr_t)&m->dirty;
#if PAGE_SIZE == 32768
		atomic_clear_64((uint64_t *)addr, pagebits);
#elif PAGE_SIZE == 16384
		atomic_clear_32((uint32_t *)addr, pagebits);
#else		/* PAGE_SIZE <= 8192 */
		/*
		 * Use a trick to perform a 32-bit atomic on the
		 * containing aligned word, to not depend on the existence
		 * of atomic_clear_{8, 16}.
		 */
		shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
#else
		shift *= NBBY;
#endif
		addr &= ~(sizeof(uint32_t) - 1);
		atomic_clear_32((uint32_t *)addr, pagebits << shift);
#endif		/* PAGE_SIZE */
	}
}

/*
 * vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	vm_page_bits_t oldvalid, pagebits;
	int endoff, frag;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area, the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	oldvalid = m->valid;
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	if (base == 0 && size == PAGE_SIZE) {
		/*
		 * The page can only be modified within the pmap if it is
		 * mapped, and it can only be mapped if it was previously
		 * fully valid.
		 */
		if (oldvalid == VM_PAGE_BITS_ALL)
			/*
			 * Perform the pmap_clear_modify() first.  Otherwise,
			 * a concurrent pmap operation, such as
			 * pmap_protect(), could clear a modification in the
			 * pmap and set the dirty field on the page before
			 * pmap_clear_modify() had begun and after the dirty
			 * field was cleared here.
3317 */ 3318 pmap_clear_modify(m); 3319 m->dirty = 0; 3320 m->oflags &= ~VPO_NOSYNC; 3321 } else if (oldvalid != VM_PAGE_BITS_ALL) 3322 m->dirty &= ~pagebits; 3323 else 3324 vm_page_clear_dirty_mask(m, pagebits); 3325 } 3326 3327 void 3328 vm_page_clear_dirty(vm_page_t m, int base, int size) 3329 { 3330 3331 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 3332 } 3333 3334 /* 3335 * vm_page_set_invalid: 3336 * 3337 * Invalidates DEV_BSIZE'd chunks within a page. Both the 3338 * valid and dirty bits for the effected areas are cleared. 3339 */ 3340 void 3341 vm_page_set_invalid(vm_page_t m, int base, int size) 3342 { 3343 vm_page_bits_t bits; 3344 vm_object_t object; 3345 3346 object = m->object; 3347 VM_OBJECT_ASSERT_WLOCKED(object); 3348 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + 3349 size >= object->un_pager.vnp.vnp_size) 3350 bits = VM_PAGE_BITS_ALL; 3351 else 3352 bits = vm_page_bits(base, size); 3353 if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL && 3354 bits != 0) 3355 pmap_remove_all(m); 3356 KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) || 3357 !pmap_page_is_mapped(m), 3358 ("vm_page_set_invalid: page %p is mapped", m)); 3359 m->valid &= ~bits; 3360 m->dirty &= ~bits; 3361 } 3362 3363 /* 3364 * vm_page_zero_invalid() 3365 * 3366 * The kernel assumes that the invalid portions of a page contain 3367 * garbage, but such pages can be mapped into memory by user code. 3368 * When this occurs, we must zero out the non-valid portions of the 3369 * page so user code sees what it expects. 3370 * 3371 * Pages are most often semi-valid when the end of a file is mapped 3372 * into memory and the file's size is not page aligned. 3373 */ 3374 void 3375 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 3376 { 3377 int b; 3378 int i; 3379 3380 VM_OBJECT_ASSERT_WLOCKED(m->object); 3381 /* 3382 * Scan the valid bits looking for invalid sections that 3383 * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the 3384 * valid bit may be set ) have already been zeroed by 3385 * vm_page_set_validclean(). 3386 */ 3387 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 3388 if (i == (PAGE_SIZE / DEV_BSIZE) || 3389 (m->valid & ((vm_page_bits_t)1 << i))) { 3390 if (i > b) { 3391 pmap_zero_page_area(m, 3392 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 3393 } 3394 b = i + 1; 3395 } 3396 } 3397 3398 /* 3399 * setvalid is TRUE when we can safely set the zero'd areas 3400 * as being valid. We can do this if there are no cache consistancy 3401 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. 3402 */ 3403 if (setvalid) 3404 m->valid = VM_PAGE_BITS_ALL; 3405 } 3406 3407 /* 3408 * vm_page_is_valid: 3409 * 3410 * Is (partial) page valid? Note that the case where size == 0 3411 * will return FALSE in the degenerate case where the page is 3412 * entirely invalid, and TRUE otherwise. 3413 */ 3414 int 3415 vm_page_is_valid(vm_page_t m, int base, int size) 3416 { 3417 vm_page_bits_t bits; 3418 3419 VM_OBJECT_ASSERT_LOCKED(m->object); 3420 bits = vm_page_bits(base, size); 3421 return (m->valid != 0 && (m->valid & bits) == bits); 3422 } 3423 3424 /* 3425 * vm_page_ps_is_valid: 3426 * 3427 * Returns TRUE if the entire (super)page is valid and FALSE otherwise. 
 */
boolean_t
vm_page_ps_is_valid(vm_page_t m)
{
	int i, npages;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	npages = atop(pagesizes[m->psind]);

	/*
	 * The physically contiguous pages that make up a superpage, i.e., a
	 * page with a page size index ("psind") greater than zero, will
	 * occupy adjacent entries in vm_page_array[].
	 */
	for (i = 0; i < npages; i++) {
		if (m[i].valid != VM_PAGE_BITS_ALL)
			return (FALSE);
	}
	return (TRUE);
}

/*
 * Set the page's dirty bits if the page is modified.
 */
void
vm_page_test_dirty(vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
		vm_page_dirty(m);
}

void
vm_page_lock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
}

void
vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
}

int
vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
{

	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
}

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void
vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
{

	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
}

void
vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
{

	mtx_assert_(vm_page_lockptr(m), a, file, line);
}
#endif

#ifdef INVARIANTS
void
vm_page_object_lock_assert(vm_page_t m)
{

	/*
	 * Certain of the page's fields may only be modified by the
	 * holder of the containing object's lock or the exclusive busy
	 * holder.  Unfortunately, the holder of the write busy is
	 * not recorded, and thus cannot be checked here.
	 */
	if (m->object != NULL && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_WLOCKED(m->object);
}

void
vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
{

	if ((bits & PGA_WRITEABLE) == 0)
		return;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is
	 * managed, is exclusively busied, or the object is locked.
	 * Currently, this flag is only set by pmap_enter().
3524 */ 3525 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3526 ("PGA_WRITEABLE on unmanaged page")); 3527 if (!vm_page_xbusied(m)) 3528 VM_OBJECT_ASSERT_LOCKED(m->object); 3529 } 3530 #endif 3531 3532 #include "opt_ddb.h" 3533 #ifdef DDB 3534 #include <sys/kernel.h> 3535 3536 #include <ddb/ddb.h> 3537 3538 DB_SHOW_COMMAND(page, vm_page_print_page_info) 3539 { 3540 3541 db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count); 3542 db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count); 3543 db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count); 3544 db_printf("vm_cnt.v_laundry_count: %d\n", vm_cnt.v_laundry_count); 3545 db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count); 3546 db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved); 3547 db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min); 3548 db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target); 3549 db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target); 3550 } 3551 3552 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 3553 { 3554 int dom; 3555 3556 db_printf("pq_free %d\n", vm_cnt.v_free_count); 3557 for (dom = 0; dom < vm_ndomains; dom++) { 3558 db_printf( 3559 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n", 3560 dom, 3561 vm_dom[dom].vmd_page_count, 3562 vm_dom[dom].vmd_free_count, 3563 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt, 3564 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt, 3565 vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt, 3566 vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt); 3567 } 3568 } 3569 3570 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) 3571 { 3572 vm_page_t m; 3573 boolean_t phys; 3574 3575 if (!have_addr) { 3576 db_printf("show pginfo addr\n"); 3577 return; 3578 } 3579 3580 phys = strchr(modif, 'p') != NULL; 3581 if (phys) 3582 m = PHYS_TO_VM_PAGE(addr); 3583 else 3584 m = (vm_page_t)addr; 3585 db_printf( 3586 "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n" 3587 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", 3588 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, 3589 m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags, 3590 m->flags, m->act_count, m->busy_lock, m->valid, m->dirty); 3591 } 3592 #endif /* DDB */ 3593