/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- a pageq mutex is required when adding or removing a page from a
 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
 *	  busy state of a page.
 *
 *	- The object mutex is held when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 *
 */

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with page of user-allocatable memory is a
 *	page structure.
 */

struct vpgqueues vm_page_queues[PQ_COUNT];
struct vpglocks vm_page_queue_lock;
struct vpglocks vm_page_queue_free_lock;

struct vpglocks	pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
	"number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static uma_zone_t fakepg_zone;

static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_queue_remove(int queue, vm_page_t m);
static void vm_page_enqueue(int queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

static void
vm_page_init_fakepg(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
}

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_lookup:
 *
 *	See if a physical address in this page has been listed
 *	in the blacklist tunable.  Entries in the tunable are
 *	separated by spaces or commas.  If an invalid integer is
 *	encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
	vm_paddr_t bad;
	char *cp, *pos;

	for (pos = list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp != '\0') {
			if (*cp == ' ' || *cp == ',') {
				cp++;
				if (cp == pos)
					continue;
			} else
				break;
		}
		if (pa == trunc_page(bad))
			return (1);
	}
	return (0);
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	char *list;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

#ifdef XEN
	low_water = 0;
#endif

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the locks.
	 */
	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
	    MTX_DEF);

	/* Setup page locks. */
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF);

	/*
	 * Initialize the queue headers for the hold queue, the active queue,
	 * and the inactive queue.
	 */
	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.
	 */
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
    defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
	 * like i386, so the pages must be tracked for a crashdump to include
	 * this data.  This includes the vm_page_array and the early UMA
	 * bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	list = getenv("vm.blacklist");
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			if (list != NULL &&
			    vm_page_blacklist_lookup(list, pa))
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
			else
				vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}
	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}


CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is managed and
	 * VPO_BUSY.  Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((bits & PGA_WRITEABLE) == 0 ||
	    (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
	    ("PGA_WRITEABLE and !VPO_BUSY"));

	/*
	 * We want to use atomic updates for m->aflags, which is a
	 * byte wide.  Not all architectures provide atomic operations
	 * on the single-byte destination.  Punt and access the whole
	 * 4-byte word with an atomic update.  Parallel non-atomic
	 * updates to the fields included in the update by proximity
	 * are handled properly by atomics.
	 */
	addr = (void *)&m->aflags;
	MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}

void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the object
	 * containing the page is locked.
	 */
	KASSERT((bits & PGA_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
	    ("PGA_REFERENCED and !VM_OBJECT_LOCKED"));

	/*
	 * See the comment in vm_page_aflag_set().
	 */
	addr = (void *)&m->aflags;
	MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

void
vm_page_busy(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("vm_page_busy: page already busy!!!"));
	m->oflags |= VPO_BUSY;
}

/*
 *	vm_page_flash:
 *
 *	wakeup anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->oflags & VPO_WANTED) {
		m->oflags &= ~VPO_WANTED;
		wakeup(m);
	}
}

/*
 *	vm_page_wakeup:
 *
 *	clear the VPO_BUSY flag and wakeup anyone waiting for the
 *	page.
 *
 */
void
vm_page_wakeup(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
	m->oflags &= ~VPO_BUSY;
	vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
	m->busy--;
	if (m->busy == 0)
		vm_page_flash(m);
}

/*
 * Keep page from being freed by the page daemon
 * much of the same effect as wiring, except much lower
 * overhead and should be used only for *very* temporary
 * holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_BUSY | VPO_UNMANAGED;
	m->wire_count = 1;
	pmap_page_set_memattr(m, memattr);
	return (m);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_sleep:
 *
 *	Sleep and release the page and page queues locks.
 *
 *	The object containing the given page must be locked.
 */
void
vm_page_sleep(vm_page_t m, const char *msg)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (mtx_owned(&vm_page_queue_mtx))
		vm_page_unlock_queues();
	if (mtx_owned(vm_page_lockptr(m)))
		vm_page_unlock(m);

	/*
	 * It's possible that while we sleep, the page will get
	 * unbusied and freed.  If we are holding the object
	 * lock, we will assume we hold a reference to the object
	 * such that even if m->object changes, we can re-lock
	 * it.
	 */
	m->oflags |= VPO_WANTED;
	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
void
vm_page_dirty(vm_page_t m)
{

	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(!VM_PAGE_IS_FREE(m),
	    ("vm_page_dirty: page is free!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_page containing the given pindex.  If, however, that
 *	pindex is not found in the vm_object, returns a vm_page that is
 *	adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
	struct vm_page dummy;
	vm_page_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (pindex < root->pindex) {
			if ((y = root->left) == NULL)
				break;
			if (pindex < y->pindex) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (pindex > root->pindex) {
			if ((y = root->right) == NULL)
				break;
			if (pindex > y->pindex) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/* Assemble the new root. */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked.
 *	This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t root;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->object != NULL)
		panic("vm_page_insert: page already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	root = object->root;
	if (root == NULL) {
		m->left = NULL;
		m->right = NULL;
		TAILQ_INSERT_TAIL(&object->memq, m, listq);
	} else {
		root = vm_page_splay(pindex, root);
		if (pindex < root->pindex) {
			m->left = root->left;
			m->right = root;
			root->left = NULL;
			TAILQ_INSERT_BEFORE(root, m, listq);
		} else if (pindex == root->pindex)
			panic("vm_page_insert: offset already allocated");
		else {
			m->right = root->right;
			m->left = root;
			root->right = NULL;
			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
		}
	}
	object->root = m;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;
	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold((struct vnode *)object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (m->aflags & PGA_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but do not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t next, prev, root;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->oflags & VPO_BUSY) {
		m->oflags &= ~VPO_BUSY;
		vm_page_flash(m);
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
		/*
		 * Since the page's successor in the list is also its parent
		 * in the tree, its right subtree must be empty.
		 */
		next->left = m->left;
		KASSERT(m->right == NULL,
		    ("vm_page_remove: page %p has right child", m));
	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->right == m) {
		/*
		 * Since the page's predecessor in the list is also its parent
		 * in the tree, its left subtree must be empty.
		 */
		KASSERT(m->left == NULL,
		    ("vm_page_remove: page %p has left child", m));
		prev->right = m->right;
	} else {
		if (m != object->root)
			vm_page_splay(m->pindex, object->root);
		if (m->left == NULL)
			root = m->right;
		else if (m->right == NULL)
			root = m->left;
		else {
			/*
			 * Move the page's successor to the root, because
			 * pages are usually removed in ascending order.
			 */
			if (m->right != next)
				vm_page_splay(m->pindex, m->right);
			next->left = m->left;
			root = next;
		}
		object->root = root;
	}
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop((struct vnode *)object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 *	This routine may not block.
 *	This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = object->root) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->root = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 *	The routine may not block.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
		if (m->pindex < pindex) {
			m = vm_page_splay(pindex, object->root);
			if ((object->root = m)->pindex < pindex)
				m = TAILQ_NEXT(m, listq);
		}
	}
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->pindex != m->pindex - 1)
		prev = NULL;
	return (prev);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.
 *	      Dirty pages are not allowed
 *	      on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{

	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	vm_page_dirty(m);
}

/*
 *	Convert all of the given object's cached pages that have a
 *	pindex within the given range into free pages.  If the value
 *	zero is given for "end", then the range's upper bound is
 *	infinity.  If the given object is backed by a vnode and it
 *	transitions from having one or more cached pages to none, the
 *	vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m, m_next;
	boolean_t empty;

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(object->cache == NULL)) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return;
	}
	m = object->cache = vm_page_splay(start, object->cache);
	if (m->pindex < start) {
		if (m->right == NULL)
			m = NULL;
		else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m;
			m->right = NULL;
			m = object->cache = m_next;
		}
	}

	/*
	 * At this point, "m" is either (1) a reference to the page
	 * with the least pindex that is greater than or equal to
	 * "start" or (2) NULL.
	 */
	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
		/*
		 * Find "m"'s successor and remove "m" from the
		 * object's cache.
		 */
		if (m->right == NULL) {
			object->cache = m->left;
			m_next = NULL;
		} else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m->left;
			object->cache = m_next;
		}
		/* Convert "m" to a free page. */
		m->object = NULL;
		m->valid = 0;
		/* Clear PG_CACHED and set PG_FREE. */
		m->flags ^= PG_CACHED | PG_FREE;
		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
		    ("vm_page_cache_free: page %p has inconsistent flags", m));
		cnt.v_cache_count--;
		cnt.v_free_count++;
	}
	empty = object->cache == NULL;
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);
}

/*
 *	Returns the cached page that is associated with the given
 *	object and offset.  If, however, none exists, returns NULL.
 *
 *	The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((m = object->cache) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->cache = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	Remove the given cached page from its containing object's
 *	collection of cached pages.
 *
 *	The free page queue must be locked.
 */
void
vm_page_cache_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	object = m->object;
	if (m != object->cache) {
		root = vm_page_splay(m->pindex, object->cache);
		KASSERT(root == m,
		    ("vm_page_cache_remove: page %p is not cached in object %p",
		    m, object));
	}
	if (m->left == NULL)
		root = m->right;
	else if (m->right == NULL)
		root = m->left;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->cache = root;
	m->object = NULL;
	cnt.v_cache_count--;
}

/*
 *	Transfer all of the cached pages with offset greater than or
 *	equal to 'offidxstart' from the original object's cache to the
 *	new object's cache.  However, any cached pages with offset
 *	greater than or equal to the new object's size are kept in the
 *	original object.  Initially, the new object's cache must be
 *	empty.  Offset 'offidxstart' in the original object must
 *	correspond to offset zero in the new object.
 *
 *	The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
	vm_page_t m, m_next;

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
	KASSERT(new_object->cache == NULL,
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	if ((m = orig_object->cache) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		m = vm_page_splay(offidxstart, m);
		if (m->pindex < offidxstart) {
			orig_object->cache = m;
			new_object->cache = m->right;
			m->right = NULL;
		} else {
			orig_object->cache = m->left;
			new_object->cache = m;
			m->left = NULL;
		}
		while ((m = new_object->cache) != NULL) {
			if ((m->pindex - offidxstart) >= new_object->size) {
				/*
				 * Return all of the cached pages with
				 * offset greater than or equal to the
				 * new object's size to the original
				 * object's cache.
				 */
				new_object->cache = m->left;
				m->left = orig_object->cache;
				orig_object->cache = m;
				break;
			}
			m_next = vm_page_splay(m->pindex, m->right);
			/* Update the page's object and offset. */
			m->object = new_object;
			m->pindex -= offidxstart;
			if (m_next == NULL)
				break;
			m->right = NULL;
			m_next->left = m;
			new_object->cache = m_next;
		}
		KASSERT(new_object->cache == NULL ||
		    new_object->type == OBJT_SWAP,
		    ("vm_page_cache_transfer: object %p's type is incompatible"
		    " with cached pages", new_object));
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page has the flag VPO_BUSY
 *	set.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_IFCACHED	return page only if it is cached
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not have the flag VPO_BUSY set
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	struct vnode *vp = NULL;
	vm_object_t m_object;
	vm_page_t m;
	int flags, req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
	    ("vm_page_alloc: inconsistent object/req"));
	if (object != NULL)
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
				return (NULL);
			}
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
#else
			else
#endif
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
			return (NULL);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || object->type == OBJT_DEVICE ||
		    object->type == OBJT_SG ||
		    (object->flags & OBJ_COLORED) == 0 ||
		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
#else
		} else {
#endif
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			cnt.v_reactivated++;
		else
			m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			vp = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc: free page %p is valid", m));
		cnt.v_free_count--;
	}

	/*
	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
	 * must be cleared before the free page queues lock is released.
	 */
	flags = 0;
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		if (req & VM_ALLOC_ZERO)
			flags = PG_ZERO;
	}
	m->flags = flags;
	mtx_unlock(&vm_page_queue_free_mtx);
	m->aflags = 0;
	if (object == NULL || object->type == OBJT_PHYS)
		m->oflags = VPO_UNMANAGED;
	else
		m->oflags = 0;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
		m->oflags |= VPO_BUSY;
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
			pmap_page_set_memattr(m, object->memattr);
		vm_page_insert(m, object, pindex);
	} else
		m->pindex = pindex;

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */
	if (vp != NULL)
		vdrop(vp);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}

/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.
 *	Both "alignment" and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not have the flag VPO_BUSY set
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)
{
	struct vnode *drop;
	vm_page_t deferred_vdrop_list, m, m_ret;
	u_int flags, oflags;
	int req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
	    ("vm_page_alloc_contig: inconsistent object/req"));
	if (object != NULL) {
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		KASSERT(object->type == OBJT_PHYS,
		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
		    object));
	}
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	deferred_vdrop_list = NULL;
	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
#if VM_NRESERVLEVEL > 0
retry:
#endif
		m_ret = vm_phys_alloc_contig(npages, low, high, alignment,
		    boundary);
	} else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, npages);
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m_ret != NULL)
		for (m = m_ret; m < &m_ret[npages]; m++) {
			drop = vm_page_alloc_init(m);
			if (drop != NULL) {
				/*
				 * Enqueue the vnode for deferred vdrop().
				 *
				 * Once the pages are removed from the free
				 * page list, "pageq" can be safely abused to
				 * construct a short-lived list of vnodes.
				 */
				m->pageq.tqe_prev = (void *)drop;
				m->pageq.tqe_next = deferred_vdrop_list;
				deferred_vdrop_list = m;
			}
		}
	else {
#if VM_NRESERVLEVEL > 0
		if (vm_reserv_reclaim_contig(npages << PAGE_SHIFT, low, high,
		    alignment, boundary))
			goto retry;
#endif
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	if (m_ret == NULL)
		return (NULL);

	/*
	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	if ((req & VM_ALLOC_WIRED) != 0)
		atomic_add_int(&cnt.v_wire_count, npages);
	oflags = VPO_UNMANAGED;
	if (object != NULL) {
		if ((req & VM_ALLOC_NOBUSY) == 0)
			oflags |= VPO_BUSY;
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    memattr == VM_MEMATTR_DEFAULT)
			memattr = object->memattr;
	}
	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->aflags = 0;
		m->flags &= flags;
		if ((req & VM_ALLOC_WIRED) != 0)
			m->wire_count = 1;
		/* Unmanaged pages don't use "act_count". */
		m->oflags = oflags;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
		if (object != NULL)
			vm_page_insert(m, object, pindex);
		else
			m->pindex = pindex;
		pindex++;
	}
	while (deferred_vdrop_list != NULL) {
		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
	}
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m_ret);
}

/*
 * Initialize a page that has been freshly dequeued from a freelist.
 * The caller has to drop the vnode returned, if it is not NULL.
 *
 * This function may only be used to initialize unmanaged pages.
 *
 * To be called with vm_page_queue_free_mtx held.
 */
static struct vnode *
vm_page_alloc_init(vm_page_t m)
{
	struct vnode *drop;
	vm_object_t m_object;

	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc_init: page %p has unexpected queue %d",
	    m, m->queue));
	KASSERT(m->wire_count == 0,
	    ("vm_page_alloc_init: page %p is wired", m));
	KASSERT(m->hold_count == 0,
	    ("vm_page_alloc_init: page %p is held", m));
	KASSERT(m->busy == 0,
	    ("vm_page_alloc_init: page %p is busy", m));
	KASSERT(m->dirty == 0,
	    ("vm_page_alloc_init: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	drop = NULL;
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
		m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			drop = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc_init: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc_init: free page %p is valid", m));
		cnt.v_free_count--;
		if ((m->flags & PG_ZERO) != 0)
			vm_page_zero_count--;
	}
	/* Don't clear the PG_ZERO flag; we'll need it later. */
	m->flags &= PG_ZERO;
	return (drop);
}

/*
 *	vm_page_alloc_freelist:
 *
 *	Allocate a physical page from the specified free page list.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_freelist(int flind, int req)
{
	struct vnode *drop;
	vm_page_t m;
	u_int flags;
	int req_class;

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	/*
	 * Do not allocate reserved pages unless the req has asked for it.
	 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0))
		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
	else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m == NULL) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return (NULL);
	}
	drop = vm_page_alloc_init(m);
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	m->aflags = 0;
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	m->flags &= flags;
	if ((req & VM_ALLOC_WIRED) != 0) {
		/*
		 * The page lock is not required for wiring a page that does
		 * not belong to an object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	/* Unmanaged pages don't use "act_count". */
	m->oflags = VPO_UNMANAGED;
	if (drop != NULL)
		vdrop(drop);
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m);
}

/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
		    PDROP | PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
		    "vmwait", 0);
	}
}

/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
	    "pfault", 0);
}

/*
 *	vm_page_requeue:
 *
 *	Move the given page to the tail of its present page queue.
 *
 *	The page queues must be locked.
 */
void
vm_page_requeue(vm_page_t m)
{
	struct vpgqueues *vpq;
	int queue;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	queue = m->queue;
	KASSERT(queue != PQ_NONE,
	    ("vm_page_requeue: page %p is not queued", m));
	vpq = &vm_page_queues[queue];
	TAILQ_REMOVE(&vpq->pl, m, pageq);
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
}

/*
 *	vm_page_queue_remove:
 *
 *	Remove the given page from the specified queue.
 *
 *	The page and page queues must be locked.
 */
static __inline void
vm_page_queue_remove(int queue, vm_page_t m)
{
	struct vpgqueues *pq;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_lock_assert(m, MA_OWNED);
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(&pq->pl, m, pageq);
	(*pq->cnt)--;
}

/*
 *	vm_pageq_remove:
 *
 *	Remove a page from its queue.
 *
 *	The given page must be locked.
 *	This routine may not block.
 */
void
vm_pageq_remove(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	if ((queue = m->queue) != PQ_NONE) {
		vm_page_lock_queues();
		m->queue = PQ_NONE;
		vm_page_queue_remove(queue, m);
		vm_page_unlock_queues();
	}
}

/*
 *	vm_page_enqueue:
 *
 *	Add the given page to the specified queue.
 *
 *	The page queues must be locked.
 */
static void
vm_page_enqueue(int queue, vm_page_t m)
{
	struct vpgqueues *vpq;

	vpq = &vm_page_queues[queue];
	m->queue = queue;
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	++*vpq->cnt;
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((queue = m->queue) != PQ_ACTIVE) {
		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vm_page_lock_queues();
			if (queue != PQ_NONE)
				vm_page_queue_remove(queue, m);
			vm_page_enqueue(PQ_ACTIVE, m);
			vm_page_unlock_queues();
		} else
			KASSERT(queue == PQ_NONE,
			    ("vm_page_activate: wired page %p is queued", m));
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	The page queues must be locked.
 *	This routine may not block.
1943 */ 1944 static inline void 1945 vm_page_free_wakeup(void) 1946 { 1947 1948 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1949 /* 1950 * if pageout daemon needs pages, then tell it that there are 1951 * some free. 1952 */ 1953 if (vm_pageout_pages_needed && 1954 cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) { 1955 wakeup(&vm_pageout_pages_needed); 1956 vm_pageout_pages_needed = 0; 1957 } 1958 /* 1959 * wakeup processes that are waiting on memory if we hit a 1960 * high water mark. And wakeup scheduler process if we have 1961 * lots of memory. this process will swapin processes. 1962 */ 1963 if (vm_pages_needed && !vm_page_count_min()) { 1964 vm_pages_needed = 0; 1965 wakeup(&cnt.v_free_count); 1966 } 1967 } 1968 1969 /* 1970 * vm_page_free_toq: 1971 * 1972 * Returns the given page to the free list, 1973 * disassociating it with any VM object. 1974 * 1975 * Object and page must be locked prior to entry. 1976 * This routine may not block. 1977 */ 1978 1979 void 1980 vm_page_free_toq(vm_page_t m) 1981 { 1982 1983 if ((m->oflags & VPO_UNMANAGED) == 0) { 1984 vm_page_lock_assert(m, MA_OWNED); 1985 KASSERT(!pmap_page_is_mapped(m), 1986 ("vm_page_free_toq: freeing mapped page %p", m)); 1987 } 1988 PCPU_INC(cnt.v_tfree); 1989 1990 if (VM_PAGE_IS_FREE(m)) 1991 panic("vm_page_free: freeing free page %p", m); 1992 else if (m->busy != 0) 1993 panic("vm_page_free: freeing busy page %p", m); 1994 1995 /* 1996 * unqueue, then remove page. Note that we cannot destroy 1997 * the page here because we do not want to call the pager's 1998 * callback routine until after we've put the page on the 1999 * appropriate free queue. 2000 */ 2001 if ((m->oflags & VPO_UNMANAGED) == 0) 2002 vm_pageq_remove(m); 2003 vm_page_remove(m); 2004 2005 /* 2006 * If fictitious remove object association and 2007 * return, otherwise delay object association removal. 2008 */ 2009 if ((m->flags & PG_FICTITIOUS) != 0) { 2010 return; 2011 } 2012 2013 m->valid = 0; 2014 vm_page_undirty(m); 2015 2016 if (m->wire_count != 0) 2017 panic("vm_page_free: freeing wired page %p", m); 2018 if (m->hold_count != 0) { 2019 m->flags &= ~PG_ZERO; 2020 vm_page_lock_queues(); 2021 vm_page_enqueue(PQ_HOLD, m); 2022 vm_page_unlock_queues(); 2023 } else { 2024 /* 2025 * Restore the default memory attribute to the page. 2026 */ 2027 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 2028 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 2029 2030 /* 2031 * Insert the page into the physical memory allocator's 2032 * cache/free page queues. 2033 */ 2034 mtx_lock(&vm_page_queue_free_mtx); 2035 m->flags |= PG_FREE; 2036 cnt.v_free_count++; 2037 #if VM_NRESERVLEVEL > 0 2038 if (!vm_reserv_free_page(m)) 2039 #else 2040 if (TRUE) 2041 #endif 2042 vm_phys_free_pages(m, 0); 2043 if ((m->flags & PG_ZERO) != 0) 2044 ++vm_page_zero_count; 2045 else 2046 vm_page_zero_idle_wakeup(); 2047 vm_page_free_wakeup(); 2048 mtx_unlock(&vm_page_queue_free_mtx); 2049 } 2050 } 2051 2052 /* 2053 * vm_page_wire: 2054 * 2055 * Mark this page as wired down by yet 2056 * another map, removing it from paging queues 2057 * as necessary. 2058 * 2059 * If the page is fictitious, then its wire count must remain one. 2060 * 2061 * The page must be locked. 2062 * This routine may not block. 2063 */ 2064 void 2065 vm_page_wire(vm_page_t m) 2066 { 2067 2068 /* 2069 * Only bump the wire statistics if the page is not already wired, 2070 * and only unqueue the page if it is on some queue (if it is unmanaged 2071 * it is already off the queues). 
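 *
 * A hypothetical caller sketch (not code from this file): wiring a
 * managed page and later releasing that wiring to the inactive queue:
 *
 *	vm_page_lock(m);
 *	vm_page_wire(m);
 *	vm_page_unlock(m);
 *	...
 *	vm_page_lock(m);
 *	vm_page_unwire(m, 0);
 *	vm_page_unlock(m);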
2072 */ 2073 vm_page_lock_assert(m, MA_OWNED); 2074 if ((m->flags & PG_FICTITIOUS) != 0) { 2075 KASSERT(m->wire_count == 1, 2076 ("vm_page_wire: fictitious page %p's wire count isn't one", 2077 m)); 2078 return; 2079 } 2080 if (m->wire_count == 0) { 2081 if ((m->oflags & VPO_UNMANAGED) == 0) 2082 vm_pageq_remove(m); 2083 atomic_add_int(&cnt.v_wire_count, 1); 2084 } 2085 m->wire_count++; 2086 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); 2087 } 2088 2089 /* 2090 * vm_page_unwire: 2091 * 2092 * Release one wiring of the specified page, potentially enabling it to be 2093 * paged again. If paging is enabled, then the value of the parameter 2094 * "activate" determines to which queue the page is added. If "activate" is 2095 * non-zero, then the page is added to the active queue. Otherwise, it is 2096 * added to the inactive queue. 2097 * 2098 * However, unless the page belongs to an object, it is not enqueued because 2099 * it cannot be paged out. 2100 * 2101 * If a page is fictitious, then its wire count must alway be one. 2102 * 2103 * A managed page must be locked. 2104 */ 2105 void 2106 vm_page_unwire(vm_page_t m, int activate) 2107 { 2108 2109 if ((m->oflags & VPO_UNMANAGED) == 0) 2110 vm_page_lock_assert(m, MA_OWNED); 2111 if ((m->flags & PG_FICTITIOUS) != 0) { 2112 KASSERT(m->wire_count == 1, 2113 ("vm_page_unwire: fictitious page %p's wire count isn't one", m)); 2114 return; 2115 } 2116 if (m->wire_count > 0) { 2117 m->wire_count--; 2118 if (m->wire_count == 0) { 2119 atomic_subtract_int(&cnt.v_wire_count, 1); 2120 if ((m->oflags & VPO_UNMANAGED) != 0 || 2121 m->object == NULL) 2122 return; 2123 vm_page_lock_queues(); 2124 if (activate) 2125 vm_page_enqueue(PQ_ACTIVE, m); 2126 else { 2127 m->flags &= ~PG_WINATCFLS; 2128 vm_page_enqueue(PQ_INACTIVE, m); 2129 } 2130 vm_page_unlock_queues(); 2131 } 2132 } else 2133 panic("vm_page_unwire: page %p's wire count is zero", m); 2134 } 2135 2136 /* 2137 * Move the specified page to the inactive queue. 2138 * 2139 * Many pages placed on the inactive queue should actually go 2140 * into the cache, but it is difficult to figure out which. What 2141 * we do instead, if the inactive target is well met, is to put 2142 * clean pages at the head of the inactive queue instead of the tail. 2143 * This will cause them to be moved to the cache more quickly and 2144 * if not actively re-referenced, reclaimed more quickly. If we just 2145 * stick these pages at the end of the inactive queue, heavy filesystem 2146 * meta-data accesses can cause an unnecessary paging load on memory bound 2147 * processes. This optimization causes one-time-use metadata to be 2148 * reused more quickly. 2149 * 2150 * Normally athead is 0 resulting in LRU operation. athead is set 2151 * to 1 if we want this page to be 'as if it were placed in the cache', 2152 * except without unmapping it from the process address space. 2153 * 2154 * This routine may not block. 2155 */ 2156 static inline void 2157 _vm_page_deactivate(vm_page_t m, int athead) 2158 { 2159 int queue; 2160 2161 vm_page_lock_assert(m, MA_OWNED); 2162 2163 /* 2164 * Ignore if already inactive. 
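 * Returning early here also avoids taking the page queues lock for a
 * page that is already on the inactive queue.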
2165 */ 2166 if ((queue = m->queue) == PQ_INACTIVE) 2167 return; 2168 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { 2169 vm_page_lock_queues(); 2170 m->flags &= ~PG_WINATCFLS; 2171 if (queue != PQ_NONE) 2172 vm_page_queue_remove(queue, m); 2173 if (athead) 2174 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, 2175 pageq); 2176 else 2177 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, 2178 pageq); 2179 m->queue = PQ_INACTIVE; 2180 cnt.v_inactive_count++; 2181 vm_page_unlock_queues(); 2182 } 2183 } 2184 2185 /* 2186 * Move the specified page to the inactive queue. 2187 * 2188 * The page must be locked. 2189 */ 2190 void 2191 vm_page_deactivate(vm_page_t m) 2192 { 2193 2194 _vm_page_deactivate(m, 0); 2195 } 2196 2197 /* 2198 * vm_page_try_to_cache: 2199 * 2200 * Returns 0 on failure, 1 on success 2201 */ 2202 int 2203 vm_page_try_to_cache(vm_page_t m) 2204 { 2205 2206 vm_page_lock_assert(m, MA_OWNED); 2207 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2208 if (m->dirty || m->hold_count || m->busy || m->wire_count || 2209 (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0) 2210 return (0); 2211 pmap_remove_all(m); 2212 if (m->dirty) 2213 return (0); 2214 vm_page_cache(m); 2215 return (1); 2216 } 2217 2218 /* 2219 * vm_page_try_to_free() 2220 * 2221 * Attempt to free the page. If we cannot free it, we do nothing. 2222 * 1 is returned on success, 0 on failure. 2223 */ 2224 int 2225 vm_page_try_to_free(vm_page_t m) 2226 { 2227 2228 vm_page_lock_assert(m, MA_OWNED); 2229 if (m->object != NULL) 2230 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2231 if (m->dirty || m->hold_count || m->busy || m->wire_count || 2232 (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0) 2233 return (0); 2234 pmap_remove_all(m); 2235 if (m->dirty) 2236 return (0); 2237 vm_page_free(m); 2238 return (1); 2239 } 2240 2241 /* 2242 * vm_page_cache 2243 * 2244 * Put the specified page onto the page cache queue (if appropriate). 2245 * 2246 * This routine may not block. 2247 */ 2248 void 2249 vm_page_cache(vm_page_t m) 2250 { 2251 vm_object_t object; 2252 vm_page_t next, prev, root; 2253 2254 vm_page_lock_assert(m, MA_OWNED); 2255 object = m->object; 2256 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2257 if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy || 2258 m->hold_count || m->wire_count) 2259 panic("vm_page_cache: attempting to cache busy page"); 2260 pmap_remove_all(m); 2261 if (m->dirty != 0) 2262 panic("vm_page_cache: page %p is dirty", m); 2263 if (m->valid == 0 || object->type == OBJT_DEFAULT || 2264 (object->type == OBJT_SWAP && 2265 !vm_pager_has_page(object, m->pindex, NULL, NULL))) { 2266 /* 2267 * Hypothesis: A cache-elgible page belonging to a 2268 * default object or swap object but without a backing 2269 * store must be zero filled. 2270 */ 2271 vm_page_free(m); 2272 return; 2273 } 2274 KASSERT((m->flags & PG_CACHED) == 0, 2275 ("vm_page_cache: page %p is already cached", m)); 2276 PCPU_INC(cnt.v_tcached); 2277 2278 /* 2279 * Remove the page from the paging queues. 2280 */ 2281 vm_pageq_remove(m); 2282 2283 /* 2284 * Remove the page from the object's collection of resident 2285 * pages. 2286 */ 2287 if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) { 2288 /* 2289 * Since the page's successor in the list is also its parent 2290 * in the tree, its right subtree must be empty. 
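 * (If the page had a non-empty right subtree, its in-order successor
 * would lie within that subtree rather than being its parent.)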
2291 */ 2292 next->left = m->left; 2293 KASSERT(m->right == NULL, 2294 ("vm_page_cache: page %p has right child", m)); 2295 } else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL && 2296 prev->right == m) { 2297 /* 2298 * Since the page's predecessor in the list is also its parent 2299 * in the tree, its left subtree must be empty. 2300 */ 2301 KASSERT(m->left == NULL, 2302 ("vm_page_cache: page %p has left child", m)); 2303 prev->right = m->right; 2304 } else { 2305 if (m != object->root) 2306 vm_page_splay(m->pindex, object->root); 2307 if (m->left == NULL) 2308 root = m->right; 2309 else if (m->right == NULL) 2310 root = m->left; 2311 else { 2312 /* 2313 * Move the page's successor to the root, because 2314 * pages are usually removed in ascending order. 2315 */ 2316 if (m->right != next) 2317 vm_page_splay(m->pindex, m->right); 2318 next->left = m->left; 2319 root = next; 2320 } 2321 object->root = root; 2322 } 2323 TAILQ_REMOVE(&object->memq, m, listq); 2324 object->resident_page_count--; 2325 2326 /* 2327 * Restore the default memory attribute to the page. 2328 */ 2329 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 2330 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 2331 2332 /* 2333 * Insert the page into the object's collection of cached pages 2334 * and the physical memory allocator's cache/free page queues. 2335 */ 2336 m->flags &= ~PG_ZERO; 2337 mtx_lock(&vm_page_queue_free_mtx); 2338 m->flags |= PG_CACHED; 2339 cnt.v_cache_count++; 2340 root = object->cache; 2341 if (root == NULL) { 2342 m->left = NULL; 2343 m->right = NULL; 2344 } else { 2345 root = vm_page_splay(m->pindex, root); 2346 if (m->pindex < root->pindex) { 2347 m->left = root->left; 2348 m->right = root; 2349 root->left = NULL; 2350 } else if (__predict_false(m->pindex == root->pindex)) 2351 panic("vm_page_cache: offset already cached"); 2352 else { 2353 m->right = root->right; 2354 m->left = root; 2355 root->right = NULL; 2356 } 2357 } 2358 object->cache = m; 2359 #if VM_NRESERVLEVEL > 0 2360 if (!vm_reserv_free_page(m)) { 2361 #else 2362 if (TRUE) { 2363 #endif 2364 vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0); 2365 vm_phys_free_pages(m, 0); 2366 } 2367 vm_page_free_wakeup(); 2368 mtx_unlock(&vm_page_queue_free_mtx); 2369 2370 /* 2371 * Increment the vnode's hold count if this is the object's only 2372 * cached page. Decrement the vnode's hold count if this was 2373 * the object's only resident page. 2374 */ 2375 if (object->type == OBJT_VNODE) { 2376 if (root == NULL && object->resident_page_count != 0) 2377 vhold(object->handle); 2378 else if (root != NULL && object->resident_page_count == 0) 2379 vdrop(object->handle); 2380 } 2381 } 2382 2383 /* 2384 * vm_page_dontneed 2385 * 2386 * Cache, deactivate, or do nothing as appropriate. This routine 2387 * is typically used by madvise() MADV_DONTNEED. 2388 * 2389 * Generally speaking we want to move the page into the cache so 2390 * it gets reused quickly. However, this can result in a silly syndrome 2391 * due to the page recycling too quickly. Small objects will not be 2392 * fully cached. On the otherhand, if we move the page to the inactive 2393 * queue we wind up with a problem whereby very large objects 2394 * unnecessarily blow away our inactive and cache queues. 2395 * 2396 * The solution is to move the pages based on a fixed weighting. We 2397 * either leave them alone, deactivate them, or move them to the cache, 2398 * where moving them to the cache has the highest weighting. 
2399 * By forcing some pages into other queues we eventually force the 2400 * system to balance the queues, potentially recovering other unrelated 2401 * space from active. The idea is to not force this to happen too 2402 * often. 2403 */ 2404 void 2405 vm_page_dontneed(vm_page_t m) 2406 { 2407 int dnw; 2408 int head; 2409 2410 vm_page_lock_assert(m, MA_OWNED); 2411 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2412 dnw = PCPU_GET(dnweight); 2413 PCPU_INC(dnweight); 2414 2415 /* 2416 * Occasionally leave the page alone. 2417 */ 2418 if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) { 2419 if (m->act_count >= ACT_INIT) 2420 --m->act_count; 2421 return; 2422 } 2423 2424 /* 2425 * Clear any references to the page. Otherwise, the page daemon will 2426 * immediately reactivate the page. 2427 * 2428 * Perform the pmap_clear_reference() first. Otherwise, a concurrent 2429 * pmap operation, such as pmap_remove(), could clear a reference in 2430 * the pmap and set PGA_REFERENCED on the page before the 2431 * pmap_clear_reference() had completed. Consequently, the page would 2432 * appear referenced based upon an old reference that occurred before 2433 * this function ran. 2434 */ 2435 pmap_clear_reference(m); 2436 vm_page_aflag_clear(m, PGA_REFERENCED); 2437 2438 if (m->dirty == 0 && pmap_is_modified(m)) 2439 vm_page_dirty(m); 2440 2441 if (m->dirty || (dnw & 0x0070) == 0) { 2442 /* 2443 * Deactivate the page 3 times out of 32. 2444 */ 2445 head = 0; 2446 } else { 2447 /* 2448 * Cache the page 28 times out of every 32. Note that 2449 * the page is deactivated instead of cached, but placed 2450 * at the head of the queue instead of the tail. 2451 */ 2452 head = 1; 2453 } 2454 _vm_page_deactivate(m, head); 2455 } 2456 2457 /* 2458 * Grab a page, waiting until we are waken up due to the page 2459 * changing state. We keep on waiting, if the page continues 2460 * to be in the object. If the page doesn't exist, first allocate it 2461 * and then conditionally zero it. 2462 * 2463 * The caller must always specify the VM_ALLOC_RETRY flag. This is intended 2464 * to facilitate its eventual removal. 2465 * 2466 * This routine may block. 2467 */ 2468 vm_page_t 2469 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 2470 { 2471 vm_page_t m; 2472 2473 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2474 KASSERT((allocflags & VM_ALLOC_RETRY) != 0, 2475 ("vm_page_grab: VM_ALLOC_RETRY is required")); 2476 retrylookup: 2477 if ((m = vm_page_lookup(object, pindex)) != NULL) { 2478 if ((m->oflags & VPO_BUSY) != 0 || 2479 ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) { 2480 /* 2481 * Reference the page before unlocking and 2482 * sleeping so that the page daemon is less 2483 * likely to reclaim it. 2484 */ 2485 vm_page_aflag_set(m, PGA_REFERENCED); 2486 vm_page_sleep(m, "pgrbwt"); 2487 goto retrylookup; 2488 } else { 2489 if ((allocflags & VM_ALLOC_WIRED) != 0) { 2490 vm_page_lock(m); 2491 vm_page_wire(m); 2492 vm_page_unlock(m); 2493 } 2494 if ((allocflags & VM_ALLOC_NOBUSY) == 0) 2495 vm_page_busy(m); 2496 return (m); 2497 } 2498 } 2499 m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY | 2500 VM_ALLOC_IGN_SBUSY)); 2501 if (m == NULL) { 2502 VM_OBJECT_UNLOCK(object); 2503 VM_WAIT; 2504 VM_OBJECT_LOCK(object); 2505 goto retrylookup; 2506 } else if (m->valid != 0) 2507 return (m); 2508 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 2509 pmap_zero_page(m); 2510 return (m); 2511 } 2512 2513 /* 2514 * Mapping function for valid bits or for dirty bits in 2515 * a page. 
May not block. 2516 * 2517 * Inputs are required to range within a page. 2518 */ 2519 vm_page_bits_t 2520 vm_page_bits(int base, int size) 2521 { 2522 int first_bit; 2523 int last_bit; 2524 2525 KASSERT( 2526 base + size <= PAGE_SIZE, 2527 ("vm_page_bits: illegal base/size %d/%d", base, size) 2528 ); 2529 2530 if (size == 0) /* handle degenerate case */ 2531 return (0); 2532 2533 first_bit = base >> DEV_BSHIFT; 2534 last_bit = (base + size - 1) >> DEV_BSHIFT; 2535 2536 return (((vm_page_bits_t)2 << last_bit) - 2537 ((vm_page_bits_t)1 << first_bit)); 2538 } 2539 2540 /* 2541 * vm_page_set_valid: 2542 * 2543 * Sets portions of a page valid. The arguments are expected 2544 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2545 * of any partial chunks touched by the range. The invalid portion of 2546 * such chunks will be zeroed. 2547 * 2548 * (base + size) must be less then or equal to PAGE_SIZE. 2549 */ 2550 void 2551 vm_page_set_valid(vm_page_t m, int base, int size) 2552 { 2553 int endoff, frag; 2554 2555 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2556 if (size == 0) /* handle degenerate case */ 2557 return; 2558 2559 /* 2560 * If the base is not DEV_BSIZE aligned and the valid 2561 * bit is clear, we have to zero out a portion of the 2562 * first block. 2563 */ 2564 if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2565 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 2566 pmap_zero_page_area(m, frag, base - frag); 2567 2568 /* 2569 * If the ending offset is not DEV_BSIZE aligned and the 2570 * valid bit is clear, we have to zero out a portion of 2571 * the last block. 2572 */ 2573 endoff = base + size; 2574 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2575 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 2576 pmap_zero_page_area(m, endoff, 2577 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2578 2579 /* 2580 * Assert that no previously invalid block that is now being validated 2581 * is already dirty. 2582 */ 2583 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 2584 ("vm_page_set_valid: page %p is dirty", m)); 2585 2586 /* 2587 * Set valid bits inclusive of any overlap. 2588 */ 2589 m->valid |= vm_page_bits(base, size); 2590 } 2591 2592 /* 2593 * Clear the given bits from the specified page's dirty field. 2594 */ 2595 static __inline void 2596 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) 2597 { 2598 uintptr_t addr; 2599 #if PAGE_SIZE < 16384 2600 int shift; 2601 #endif 2602 2603 /* 2604 * If the object is locked and the page is neither VPO_BUSY nor 2605 * PGA_WRITEABLE, then the page's dirty field cannot possibly be 2606 * set by a concurrent pmap operation. 2607 */ 2608 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2609 if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) 2610 m->dirty &= ~pagebits; 2611 else { 2612 /* 2613 * The pmap layer can call vm_page_dirty() without 2614 * holding a distinguished lock. The combination of 2615 * the object's lock and an atomic operation suffice 2616 * to guarantee consistency of the page dirty field. 2617 * 2618 * For PAGE_SIZE == 32768 case, compiler already 2619 * properly aligns the dirty field, so no forcible 2620 * alignment is needed. Only require existence of 2621 * atomic_clear_64 when page size is 32768. 
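 *
 * As a worked example (assuming DEV_BSIZE is 512): with 4K pages the
 * dirty field is only 8 bits wide, so the fallback below finds the
 * aligned 32-bit word containing the field and atomically clears
 * "pagebits << shift", where shift is the field's byte offset within
 * that word times NBBY (mirrored on big-endian machines).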
2622 */ 2623 addr = (uintptr_t)&m->dirty; 2624 #if PAGE_SIZE == 32768 2625 atomic_clear_64((uint64_t *)addr, pagebits); 2626 #elif PAGE_SIZE == 16384 2627 atomic_clear_32((uint32_t *)addr, pagebits); 2628 #else /* PAGE_SIZE <= 8192 */ 2629 /* 2630 * Use a trick to perform a 32-bit atomic on the 2631 * containing aligned word, to not depend on the existence 2632 * of atomic_clear_{8, 16}. 2633 */ 2634 shift = addr & (sizeof(uint32_t) - 1); 2635 #if BYTE_ORDER == BIG_ENDIAN 2636 shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY; 2637 #else 2638 shift *= NBBY; 2639 #endif 2640 addr &= ~(sizeof(uint32_t) - 1); 2641 atomic_clear_32((uint32_t *)addr, pagebits << shift); 2642 #endif /* PAGE_SIZE */ 2643 } 2644 } 2645 2646 /* 2647 * vm_page_set_validclean: 2648 * 2649 * Sets portions of a page valid and clean. The arguments are expected 2650 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2651 * of any partial chunks touched by the range. The invalid portion of 2652 * such chunks will be zero'd. 2653 * 2654 * This routine may not block. 2655 * 2656 * (base + size) must be less then or equal to PAGE_SIZE. 2657 */ 2658 void 2659 vm_page_set_validclean(vm_page_t m, int base, int size) 2660 { 2661 vm_page_bits_t oldvalid, pagebits; 2662 int endoff, frag; 2663 2664 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2665 if (size == 0) /* handle degenerate case */ 2666 return; 2667 2668 /* 2669 * If the base is not DEV_BSIZE aligned and the valid 2670 * bit is clear, we have to zero out a portion of the 2671 * first block. 2672 */ 2673 if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2674 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) 2675 pmap_zero_page_area(m, frag, base - frag); 2676 2677 /* 2678 * If the ending offset is not DEV_BSIZE aligned and the 2679 * valid bit is clear, we have to zero out a portion of 2680 * the last block. 2681 */ 2682 endoff = base + size; 2683 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2684 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) 2685 pmap_zero_page_area(m, endoff, 2686 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2687 2688 /* 2689 * Set valid, clear dirty bits. If validating the entire 2690 * page we can safely clear the pmap modify bit. We also 2691 * use this opportunity to clear the VPO_NOSYNC flag. If a process 2692 * takes a write fault on a MAP_NOSYNC memory area the flag will 2693 * be set again. 2694 * 2695 * We set valid bits inclusive of any overlap, but we can only 2696 * clear dirty bits for DEV_BSIZE chunks that are fully within 2697 * the range. 2698 */ 2699 oldvalid = m->valid; 2700 pagebits = vm_page_bits(base, size); 2701 m->valid |= pagebits; 2702 #if 0 /* NOT YET */ 2703 if ((frag = base & (DEV_BSIZE - 1)) != 0) { 2704 frag = DEV_BSIZE - frag; 2705 base += frag; 2706 size -= frag; 2707 if (size < 0) 2708 size = 0; 2709 } 2710 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 2711 #endif 2712 if (base == 0 && size == PAGE_SIZE) { 2713 /* 2714 * The page can only be modified within the pmap if it is 2715 * mapped, and it can only be mapped if it was previously 2716 * fully valid. 2717 */ 2718 if (oldvalid == VM_PAGE_BITS_ALL) 2719 /* 2720 * Perform the pmap_clear_modify() first. Otherwise, 2721 * a concurrent pmap operation, such as 2722 * pmap_protect(), could clear a modification in the 2723 * pmap and set the dirty field on the page before 2724 * pmap_clear_modify() had begun and after the dirty 2725 * field was cleared here. 
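 * (With the opposite ordering, such a stale modify bit could be
 * transferred into the dirty field immediately after the field had
 * been cleared, leaving the page marked dirty again.)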
2726 */ 2727 pmap_clear_modify(m); 2728 m->dirty = 0; 2729 m->oflags &= ~VPO_NOSYNC; 2730 } else if (oldvalid != VM_PAGE_BITS_ALL) 2731 m->dirty &= ~pagebits; 2732 else 2733 vm_page_clear_dirty_mask(m, pagebits); 2734 } 2735 2736 void 2737 vm_page_clear_dirty(vm_page_t m, int base, int size) 2738 { 2739 2740 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 2741 } 2742 2743 /* 2744 * vm_page_set_invalid: 2745 * 2746 * Invalidates DEV_BSIZE'd chunks within a page. Both the 2747 * valid and dirty bits for the effected areas are cleared. 2748 * 2749 * May not block. 2750 */ 2751 void 2752 vm_page_set_invalid(vm_page_t m, int base, int size) 2753 { 2754 vm_page_bits_t bits; 2755 2756 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2757 KASSERT((m->oflags & VPO_BUSY) == 0, 2758 ("vm_page_set_invalid: page %p is busy", m)); 2759 bits = vm_page_bits(base, size); 2760 if (m->valid == VM_PAGE_BITS_ALL && bits != 0) 2761 pmap_remove_all(m); 2762 KASSERT(!pmap_page_is_mapped(m), 2763 ("vm_page_set_invalid: page %p is mapped", m)); 2764 m->valid &= ~bits; 2765 m->dirty &= ~bits; 2766 } 2767 2768 /* 2769 * vm_page_zero_invalid() 2770 * 2771 * The kernel assumes that the invalid portions of a page contain 2772 * garbage, but such pages can be mapped into memory by user code. 2773 * When this occurs, we must zero out the non-valid portions of the 2774 * page so user code sees what it expects. 2775 * 2776 * Pages are most often semi-valid when the end of a file is mapped 2777 * into memory and the file's size is not page aligned. 2778 */ 2779 void 2780 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 2781 { 2782 int b; 2783 int i; 2784 2785 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2786 /* 2787 * Scan the valid bits looking for invalid sections that 2788 * must be zerod. Invalid sub-DEV_BSIZE'd areas ( where the 2789 * valid bit may be set ) have already been zerod by 2790 * vm_page_set_validclean(). 2791 */ 2792 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 2793 if (i == (PAGE_SIZE / DEV_BSIZE) || 2794 (m->valid & ((vm_page_bits_t)1 << i))) { 2795 if (i > b) { 2796 pmap_zero_page_area(m, 2797 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 2798 } 2799 b = i + 1; 2800 } 2801 } 2802 2803 /* 2804 * setvalid is TRUE when we can safely set the zero'd areas 2805 * as being valid. We can do this if there are no cache consistancy 2806 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. 2807 */ 2808 if (setvalid) 2809 m->valid = VM_PAGE_BITS_ALL; 2810 } 2811 2812 /* 2813 * vm_page_is_valid: 2814 * 2815 * Is (partial) page valid? Note that the case where size == 0 2816 * will return FALSE in the degenerate case where the page is 2817 * entirely invalid, and TRUE otherwise. 2818 * 2819 * May not block. 2820 */ 2821 int 2822 vm_page_is_valid(vm_page_t m, int base, int size) 2823 { 2824 vm_page_bits_t bits; 2825 2826 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2827 bits = vm_page_bits(base, size); 2828 if (m->valid && ((m->valid & bits) == bits)) 2829 return 1; 2830 else 2831 return 0; 2832 } 2833 2834 /* 2835 * update dirty bits from pmap/mmu. May not block. 2836 */ 2837 void 2838 vm_page_test_dirty(vm_page_t m) 2839 { 2840 2841 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2842 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 2843 vm_page_dirty(m); 2844 } 2845 2846 int so_zerocp_fullpage = 0; 2847 2848 /* 2849 * Replace the given page with a copy. 
The copied page assumes 2850 * the portion of the given page's "wire_count" that is not the 2851 * responsibility of this copy-on-write mechanism. 2852 * 2853 * The object containing the given page must have a non-zero 2854 * paging-in-progress count and be locked. 2855 */ 2856 void 2857 vm_page_cowfault(vm_page_t m) 2858 { 2859 vm_page_t mnew; 2860 vm_object_t object; 2861 vm_pindex_t pindex; 2862 2863 mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED); 2864 vm_page_lock_assert(m, MA_OWNED); 2865 object = m->object; 2866 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2867 KASSERT(object->paging_in_progress != 0, 2868 ("vm_page_cowfault: object %p's paging-in-progress count is zero.", 2869 object)); 2870 pindex = m->pindex; 2871 2872 retry_alloc: 2873 pmap_remove_all(m); 2874 vm_page_remove(m); 2875 mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY); 2876 if (mnew == NULL) { 2877 vm_page_insert(m, object, pindex); 2878 vm_page_unlock(m); 2879 VM_OBJECT_UNLOCK(object); 2880 VM_WAIT; 2881 VM_OBJECT_LOCK(object); 2882 if (m == vm_page_lookup(object, pindex)) { 2883 vm_page_lock(m); 2884 goto retry_alloc; 2885 } else { 2886 /* 2887 * Page disappeared during the wait. 2888 */ 2889 return; 2890 } 2891 } 2892 2893 if (m->cow == 0) { 2894 /* 2895 * check to see if we raced with an xmit complete when 2896 * waiting to allocate a page. If so, put things back 2897 * the way they were 2898 */ 2899 vm_page_unlock(m); 2900 vm_page_lock(mnew); 2901 vm_page_free(mnew); 2902 vm_page_unlock(mnew); 2903 vm_page_insert(m, object, pindex); 2904 } else { /* clear COW & copy page */ 2905 if (!so_zerocp_fullpage) 2906 pmap_copy_page(m, mnew); 2907 mnew->valid = VM_PAGE_BITS_ALL; 2908 vm_page_dirty(mnew); 2909 mnew->wire_count = m->wire_count - m->cow; 2910 m->wire_count = m->cow; 2911 vm_page_unlock(m); 2912 } 2913 } 2914 2915 void 2916 vm_page_cowclear(vm_page_t m) 2917 { 2918 2919 vm_page_lock_assert(m, MA_OWNED); 2920 if (m->cow) { 2921 m->cow--; 2922 /* 2923 * let vm_fault add back write permission lazily 2924 */ 2925 } 2926 /* 2927 * sf_buf_free() will free the page, so we needn't do it here 2928 */ 2929 } 2930 2931 int 2932 vm_page_cowsetup(vm_page_t m) 2933 { 2934 2935 vm_page_lock_assert(m, MA_OWNED); 2936 if ((m->flags & PG_FICTITIOUS) != 0 || 2937 (m->oflags & VPO_UNMANAGED) != 0 || 2938 m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object)) 2939 return (EBUSY); 2940 m->cow++; 2941 pmap_remove_write(m); 2942 VM_OBJECT_UNLOCK(m->object); 2943 return (0); 2944 } 2945 2946 #ifdef INVARIANTS 2947 void 2948 vm_page_object_lock_assert(vm_page_t m) 2949 { 2950 2951 /* 2952 * Certain of the page's fields may only be modified by the 2953 * holder of the containing object's lock or the setter of the 2954 * page's VPO_BUSY flag. Unfortunately, the setter of the 2955 * VPO_BUSY flag is not recorded, and thus cannot be checked 2956 * here. 
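 * Consequently, the assertion below is skipped whenever VPO_BUSY is
 * set, so this check is necessarily weaker than the rule it enforces.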
2957 */ 2958 if (m->object != NULL && (m->oflags & VPO_BUSY) == 0) 2959 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2960 } 2961 #endif 2962 2963 #include "opt_ddb.h" 2964 #ifdef DDB 2965 #include <sys/kernel.h> 2966 2967 #include <ddb/ddb.h> 2968 2969 DB_SHOW_COMMAND(page, vm_page_print_page_info) 2970 { 2971 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count); 2972 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count); 2973 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count); 2974 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count); 2975 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count); 2976 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved); 2977 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min); 2978 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target); 2979 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min); 2980 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target); 2981 } 2982 2983 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 2984 { 2985 2986 db_printf("PQ_FREE:"); 2987 db_printf(" %d", cnt.v_free_count); 2988 db_printf("\n"); 2989 2990 db_printf("PQ_CACHE:"); 2991 db_printf(" %d", cnt.v_cache_count); 2992 db_printf("\n"); 2993 2994 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n", 2995 *vm_page_queues[PQ_ACTIVE].cnt, 2996 *vm_page_queues[PQ_INACTIVE].cnt); 2997 } 2998 #endif /* DDB */ 2999
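/*
 * Example usage of the DDB commands defined above, from the in-kernel
 * debugger prompt (a usage note, not code):
 *
 *	db> show page
 *	db> show pageq
 */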