1 /*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * The Mach Operating System project at Carnegie-Mellon University. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 34 */ 35 36 /*- 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38 * All rights reserved. 39 * 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63 /* 64 * GENERAL RULES ON VM_PAGE MANIPULATION 65 * 66 * - a pageq mutex is required when adding or removing a page from a 67 * page queue (vm_page_queue[]), regardless of other mutexes or the 68 * busy state of a page. 69 * 70 * - a hash chain mutex is required when associating or disassociating 71 * a page from the VM PAGE CACHE hash table (vm_page_buckets), 72 * regardless of other mutexes or the busy state of a page. 73 * 74 * - either a hash chain mutex OR a busied page is required in order 75 * to modify the page flags. A hash chain mutex must be obtained in 76 * order to busy a page. A page's flags cannot be modified by a 77 * hash chain mutex if the page is marked busy. 78 * 79 * - The object memq mutex is held when inserting or removing 80 * pages from an object (vm_page_insert() or vm_page_remove()). This 81 * is different from the object's main mutex. 82 * 83 * Generally speaking, you have to be aware of side effects when running 84 * vm_page ops. A vm_page_lookup() will return with the hash chain 85 * locked, whether it was able to lookup the page or not. vm_page_free(), 86 * vm_page_cache(), vm_page_activate(), and a number of other routines 87 * will release the hash chain mutex for you. Intermediate manipulation 88 * routines such as vm_page_flag_set() expect the hash chain to be held 89 * on entry and the hash chain will remain held on return. 90 * 91 * pageq scanning can only occur with the pageq in question locked. 92 * We have a known bottleneck with the active queue, but the cache 93 * and free queues are actually arrays already. 94 */ 95 96 /* 97 * Resident memory management module. 98 */ 99 100 #include <sys/cdefs.h> 101 __FBSDID("$FreeBSD$"); 102 103 #include "opt_vm.h" 104 105 #include <sys/param.h> 106 #include <sys/systm.h> 107 #include <sys/lock.h> 108 #include <sys/kernel.h> 109 #include <sys/limits.h> 110 #include <sys/malloc.h> 111 #include <sys/msgbuf.h> 112 #include <sys/mutex.h> 113 #include <sys/proc.h> 114 #include <sys/sysctl.h> 115 #include <sys/vmmeter.h> 116 #include <sys/vnode.h> 117 118 #include <vm/vm.h> 119 #include <vm/pmap.h> 120 #include <vm/vm_param.h> 121 #include <vm/vm_kern.h> 122 #include <vm/vm_object.h> 123 #include <vm/vm_page.h> 124 #include <vm/vm_pageout.h> 125 #include <vm/vm_pager.h> 126 #include <vm/vm_phys.h> 127 #include <vm/vm_reserv.h> 128 #include <vm/vm_extern.h> 129 #include <vm/uma.h> 130 #include <vm/uma_int.h> 131 132 #include <machine/md_var.h> 133 134 /* 135 * Associated with page of user-allocatable memory is a 136 * page structure. 137 */ 138 139 struct vpgqueues vm_page_queues[PQ_COUNT]; 140 struct vpglocks vm_page_queue_lock; 141 struct vpglocks vm_page_queue_free_lock; 142 143 struct vpglocks pa_lock[PA_LOCK_COUNT]; 144 145 vm_page_t vm_page_array = 0; 146 int vm_page_array_size = 0; 147 long first_page = 0; 148 int vm_page_zero_count = 0; 149 150 static int boot_pages = UMA_BOOT_PAGES; 151 TUNABLE_INT("vm.boot_pages", &boot_pages); 152 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0, 153 "number of pages allocated for bootstrapping the VM system"); 154 155 static int pa_tryrelock_restart; 156 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD, 157 &pa_tryrelock_restart, 0, "Number of tryrelock restarts"); 158 159 static uma_zone_t fakepg_zone; 160 161 static void vm_page_clear_dirty_mask(vm_page_t m, int pagebits); 162 static void vm_page_queue_remove(int queue, vm_page_t m); 163 static void vm_page_enqueue(int queue, vm_page_t m); 164 static void vm_page_init_fakepg(void *dummy); 165 166 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL); 167 168 static void 169 vm_page_init_fakepg(void *dummy) 170 { 171 172 fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL, 173 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM); 174 } 175 176 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */ 177 #if PAGE_SIZE == 32768 178 #ifdef CTASSERT 179 CTASSERT(sizeof(u_long) >= 8); 180 #endif 181 #endif 182 183 /* 184 * Try to acquire a physical address lock while a pmap is locked. If we 185 * fail to trylock we unlock and lock the pmap directly and cache the 186 * locked pa in *locked. The caller should then restart their loop in case 187 * the virtual to physical mapping has changed. 188 */ 189 int 190 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) 191 { 192 vm_paddr_t lockpa; 193 194 lockpa = *locked; 195 *locked = pa; 196 if (lockpa) { 197 PA_LOCK_ASSERT(lockpa, MA_OWNED); 198 if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa)) 199 return (0); 200 PA_UNLOCK(lockpa); 201 } 202 if (PA_TRYLOCK(pa)) 203 return (0); 204 PMAP_UNLOCK(pmap); 205 atomic_add_int(&pa_tryrelock_restart, 1); 206 PA_LOCK(pa); 207 PMAP_LOCK(pmap); 208 return (EAGAIN); 209 } 210 211 /* 212 * vm_set_page_size: 213 * 214 * Sets the page size, perhaps based upon the memory 215 * size. Must be called before any use of page-size 216 * dependent functions. 217 */ 218 void 219 vm_set_page_size(void) 220 { 221 if (cnt.v_page_size == 0) 222 cnt.v_page_size = PAGE_SIZE; 223 if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0) 224 panic("vm_set_page_size: page size not a power of two"); 225 } 226 227 /* 228 * vm_page_blacklist_lookup: 229 * 230 * See if a physical address in this page has been listed 231 * in the blacklist tunable. Entries in the tunable are 232 * separated by spaces or commas. If an invalid integer is 233 * encountered then the rest of the string is skipped. 234 */ 235 static int 236 vm_page_blacklist_lookup(char *list, vm_paddr_t pa) 237 { 238 vm_paddr_t bad; 239 char *cp, *pos; 240 241 for (pos = list; *pos != '\0'; pos = cp) { 242 bad = strtoq(pos, &cp, 0); 243 if (*cp != '\0') { 244 if (*cp == ' ' || *cp == ',') { 245 cp++; 246 if (cp == pos) 247 continue; 248 } else 249 break; 250 } 251 if (pa == trunc_page(bad)) 252 return (1); 253 } 254 return (0); 255 } 256 257 /* 258 * vm_page_startup: 259 * 260 * Initializes the resident memory module. 261 * 262 * Allocates memory for the page cells, and 263 * for the object/offset-to-page hash table headers. 264 * Each page cell is initialized and placed on the free list. 265 */ 266 vm_offset_t 267 vm_page_startup(vm_offset_t vaddr) 268 { 269 vm_offset_t mapped; 270 vm_paddr_t page_range; 271 vm_paddr_t new_end; 272 int i; 273 vm_paddr_t pa; 274 vm_paddr_t last_pa; 275 char *list; 276 277 /* the biggest memory array is the second group of pages */ 278 vm_paddr_t end; 279 vm_paddr_t biggestsize; 280 vm_paddr_t low_water, high_water; 281 int biggestone; 282 283 biggestsize = 0; 284 biggestone = 0; 285 vaddr = round_page(vaddr); 286 287 for (i = 0; phys_avail[i + 1]; i += 2) { 288 phys_avail[i] = round_page(phys_avail[i]); 289 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 290 } 291 292 low_water = phys_avail[0]; 293 high_water = phys_avail[1]; 294 295 for (i = 0; phys_avail[i + 1]; i += 2) { 296 vm_paddr_t size = phys_avail[i + 1] - phys_avail[i]; 297 298 if (size > biggestsize) { 299 biggestone = i; 300 biggestsize = size; 301 } 302 if (phys_avail[i] < low_water) 303 low_water = phys_avail[i]; 304 if (phys_avail[i + 1] > high_water) 305 high_water = phys_avail[i + 1]; 306 } 307 308 #ifdef XEN 309 low_water = 0; 310 #endif 311 312 end = phys_avail[biggestone+1]; 313 314 /* 315 * Initialize the locks. 316 */ 317 mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF | 318 MTX_RECURSE); 319 mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL, 320 MTX_DEF); 321 322 /* Setup page locks. */ 323 for (i = 0; i < PA_LOCK_COUNT; i++) 324 mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF); 325 326 /* 327 * Initialize the queue headers for the hold queue, the active queue, 328 * and the inactive queue. 329 */ 330 for (i = 0; i < PQ_COUNT; i++) 331 TAILQ_INIT(&vm_page_queues[i].pl); 332 vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count; 333 vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count; 334 vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count; 335 336 /* 337 * Allocate memory for use when boot strapping the kernel memory 338 * allocator. 339 */ 340 new_end = end - (boot_pages * UMA_SLAB_SIZE); 341 new_end = trunc_page(new_end); 342 mapped = pmap_map(&vaddr, new_end, end, 343 VM_PROT_READ | VM_PROT_WRITE); 344 bzero((void *)mapped, end - new_end); 345 uma_startup((void *)mapped, boot_pages); 346 347 #if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \ 348 defined(__mips__) 349 /* 350 * Allocate a bitmap to indicate that a random physical page 351 * needs to be included in a minidump. 352 * 353 * The amd64 port needs this to indicate which direct map pages 354 * need to be dumped, via calls to dump_add_page()/dump_drop_page(). 355 * 356 * However, i386 still needs this workspace internally within the 357 * minidump code. In theory, they are not needed on i386, but are 358 * included should the sf_buf code decide to use them. 359 */ 360 last_pa = 0; 361 for (i = 0; dump_avail[i + 1] != 0; i += 2) 362 if (dump_avail[i + 1] > last_pa) 363 last_pa = dump_avail[i + 1]; 364 page_range = last_pa / PAGE_SIZE; 365 vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY); 366 new_end -= vm_page_dump_size; 367 vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end, 368 new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE); 369 bzero((void *)vm_page_dump, vm_page_dump_size); 370 #endif 371 #ifdef __amd64__ 372 /* 373 * Request that the physical pages underlying the message buffer be 374 * included in a crash dump. Since the message buffer is accessed 375 * through the direct map, they are not automatically included. 376 */ 377 pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr); 378 last_pa = pa + round_page(msgbufsize); 379 while (pa < last_pa) { 380 dump_add_page(pa); 381 pa += PAGE_SIZE; 382 } 383 #endif 384 /* 385 * Compute the number of pages of memory that will be available for 386 * use (taking into account the overhead of a page structure per 387 * page). 388 */ 389 first_page = low_water / PAGE_SIZE; 390 #ifdef VM_PHYSSEG_SPARSE 391 page_range = 0; 392 for (i = 0; phys_avail[i + 1] != 0; i += 2) 393 page_range += atop(phys_avail[i + 1] - phys_avail[i]); 394 #elif defined(VM_PHYSSEG_DENSE) 395 page_range = high_water / PAGE_SIZE - first_page; 396 #else 397 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined." 398 #endif 399 end = new_end; 400 401 /* 402 * Reserve an unmapped guard page to trap access to vm_page_array[-1]. 403 */ 404 vaddr += PAGE_SIZE; 405 406 /* 407 * Initialize the mem entry structures now, and put them in the free 408 * queue. 409 */ 410 new_end = trunc_page(end - page_range * sizeof(struct vm_page)); 411 mapped = pmap_map(&vaddr, new_end, end, 412 VM_PROT_READ | VM_PROT_WRITE); 413 vm_page_array = (vm_page_t) mapped; 414 #if VM_NRESERVLEVEL > 0 415 /* 416 * Allocate memory for the reservation management system's data 417 * structures. 418 */ 419 new_end = vm_reserv_startup(&vaddr, new_end, high_water); 420 #endif 421 #if defined(__amd64__) || defined(__mips__) 422 /* 423 * pmap_map on amd64 and mips can come out of the direct-map, not kvm 424 * like i386, so the pages must be tracked for a crashdump to include 425 * this data. This includes the vm_page_array and the early UMA 426 * bootstrap pages. 427 */ 428 for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE) 429 dump_add_page(pa); 430 #endif 431 phys_avail[biggestone + 1] = new_end; 432 433 /* 434 * Clear all of the page structures 435 */ 436 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page)); 437 for (i = 0; i < page_range; i++) 438 vm_page_array[i].order = VM_NFREEORDER; 439 vm_page_array_size = page_range; 440 441 /* 442 * Initialize the physical memory allocator. 443 */ 444 vm_phys_init(); 445 446 /* 447 * Add every available physical page that is not blacklisted to 448 * the free lists. 449 */ 450 cnt.v_page_count = 0; 451 cnt.v_free_count = 0; 452 list = getenv("vm.blacklist"); 453 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 454 pa = phys_avail[i]; 455 last_pa = phys_avail[i + 1]; 456 while (pa < last_pa) { 457 if (list != NULL && 458 vm_page_blacklist_lookup(list, pa)) 459 printf("Skipping page with pa 0x%jx\n", 460 (uintmax_t)pa); 461 else 462 vm_phys_add_page(pa); 463 pa += PAGE_SIZE; 464 } 465 } 466 freeenv(list); 467 #if VM_NRESERVLEVEL > 0 468 /* 469 * Initialize the reservation management system. 470 */ 471 vm_reserv_init(); 472 #endif 473 return (vaddr); 474 } 475 476 void 477 vm_page_flag_set(vm_page_t m, unsigned short bits) 478 { 479 480 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 481 /* 482 * The PG_WRITEABLE flag can only be set if the page is managed and 483 * VPO_BUSY. Currently, this flag is only set by pmap_enter(). 484 */ 485 KASSERT((bits & PG_WRITEABLE) == 0 || 486 ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0 && 487 (m->oflags & VPO_BUSY) != 0), ("PG_WRITEABLE and !VPO_BUSY")); 488 m->flags |= bits; 489 } 490 491 void 492 vm_page_flag_clear(vm_page_t m, unsigned short bits) 493 { 494 495 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 496 /* 497 * The PG_REFERENCED flag can only be cleared if the object 498 * containing the page is locked. 499 */ 500 KASSERT((bits & PG_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object), 501 ("PG_REFERENCED and !VM_OBJECT_LOCKED")); 502 m->flags &= ~bits; 503 } 504 505 void 506 vm_page_busy(vm_page_t m) 507 { 508 509 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 510 KASSERT((m->oflags & VPO_BUSY) == 0, 511 ("vm_page_busy: page already busy!!!")); 512 m->oflags |= VPO_BUSY; 513 } 514 515 /* 516 * vm_page_flash: 517 * 518 * wakeup anyone waiting for the page. 519 */ 520 void 521 vm_page_flash(vm_page_t m) 522 { 523 524 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 525 if (m->oflags & VPO_WANTED) { 526 m->oflags &= ~VPO_WANTED; 527 wakeup(m); 528 } 529 } 530 531 /* 532 * vm_page_wakeup: 533 * 534 * clear the VPO_BUSY flag and wakeup anyone waiting for the 535 * page. 536 * 537 */ 538 void 539 vm_page_wakeup(vm_page_t m) 540 { 541 542 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 543 KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!")); 544 m->oflags &= ~VPO_BUSY; 545 vm_page_flash(m); 546 } 547 548 void 549 vm_page_io_start(vm_page_t m) 550 { 551 552 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 553 m->busy++; 554 } 555 556 void 557 vm_page_io_finish(vm_page_t m) 558 { 559 560 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 561 KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m)); 562 m->busy--; 563 if (m->busy == 0) 564 vm_page_flash(m); 565 } 566 567 /* 568 * Keep page from being freed by the page daemon 569 * much of the same effect as wiring, except much lower 570 * overhead and should be used only for *very* temporary 571 * holding ("wiring"). 572 */ 573 void 574 vm_page_hold(vm_page_t mem) 575 { 576 577 vm_page_lock_assert(mem, MA_OWNED); 578 mem->hold_count++; 579 } 580 581 void 582 vm_page_unhold(vm_page_t mem) 583 { 584 585 vm_page_lock_assert(mem, MA_OWNED); 586 --mem->hold_count; 587 KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!")); 588 if (mem->hold_count == 0 && mem->queue == PQ_HOLD) 589 vm_page_free_toq(mem); 590 } 591 592 /* 593 * vm_page_unhold_pages: 594 * 595 * Unhold each of the pages that is referenced by the given array. 596 */ 597 void 598 vm_page_unhold_pages(vm_page_t *ma, int count) 599 { 600 struct mtx *mtx, *new_mtx; 601 602 mtx = NULL; 603 for (; count != 0; count--) { 604 /* 605 * Avoid releasing and reacquiring the same page lock. 606 */ 607 new_mtx = vm_page_lockptr(*ma); 608 if (mtx != new_mtx) { 609 if (mtx != NULL) 610 mtx_unlock(mtx); 611 mtx = new_mtx; 612 mtx_lock(mtx); 613 } 614 vm_page_unhold(*ma); 615 ma++; 616 } 617 if (mtx != NULL) 618 mtx_unlock(mtx); 619 } 620 621 /* 622 * vm_page_getfake: 623 * 624 * Create a fictitious page with the specified physical address and 625 * memory attribute. The memory attribute is the only the machine- 626 * dependent aspect of a fictitious page that must be initialized. 627 */ 628 vm_page_t 629 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr) 630 { 631 vm_page_t m; 632 633 m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO); 634 m->phys_addr = paddr; 635 m->queue = PQ_NONE; 636 /* Fictitious pages don't use "segind". */ 637 m->flags = PG_FICTITIOUS; 638 /* Fictitious pages don't use "order" or "pool". */ 639 m->oflags = VPO_BUSY; 640 m->wire_count = 1; 641 pmap_page_set_memattr(m, memattr); 642 return (m); 643 } 644 645 /* 646 * vm_page_putfake: 647 * 648 * Release a fictitious page. 649 */ 650 void 651 vm_page_putfake(vm_page_t m) 652 { 653 654 KASSERT((m->flags & PG_FICTITIOUS) != 0, 655 ("vm_page_putfake: bad page %p", m)); 656 uma_zfree(fakepg_zone, m); 657 } 658 659 /* 660 * vm_page_updatefake: 661 * 662 * Update the given fictitious page to the specified physical address and 663 * memory attribute. 664 */ 665 void 666 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr) 667 { 668 669 KASSERT((m->flags & PG_FICTITIOUS) != 0, 670 ("vm_page_updatefake: bad page %p", m)); 671 m->phys_addr = paddr; 672 pmap_page_set_memattr(m, memattr); 673 } 674 675 /* 676 * vm_page_free: 677 * 678 * Free a page. 679 */ 680 void 681 vm_page_free(vm_page_t m) 682 { 683 684 m->flags &= ~PG_ZERO; 685 vm_page_free_toq(m); 686 } 687 688 /* 689 * vm_page_free_zero: 690 * 691 * Free a page to the zerod-pages queue 692 */ 693 void 694 vm_page_free_zero(vm_page_t m) 695 { 696 697 m->flags |= PG_ZERO; 698 vm_page_free_toq(m); 699 } 700 701 /* 702 * vm_page_sleep: 703 * 704 * Sleep and release the page and page queues locks. 705 * 706 * The object containing the given page must be locked. 707 */ 708 void 709 vm_page_sleep(vm_page_t m, const char *msg) 710 { 711 712 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 713 if (mtx_owned(&vm_page_queue_mtx)) 714 vm_page_unlock_queues(); 715 if (mtx_owned(vm_page_lockptr(m))) 716 vm_page_unlock(m); 717 718 /* 719 * It's possible that while we sleep, the page will get 720 * unbusied and freed. If we are holding the object 721 * lock, we will assume we hold a reference to the object 722 * such that even if m->object changes, we can re-lock 723 * it. 724 */ 725 m->oflags |= VPO_WANTED; 726 msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0); 727 } 728 729 /* 730 * vm_page_dirty: 731 * 732 * make page all dirty 733 */ 734 void 735 vm_page_dirty(vm_page_t m) 736 { 737 738 KASSERT((m->flags & PG_CACHED) == 0, 739 ("vm_page_dirty: page in cache!")); 740 KASSERT(!VM_PAGE_IS_FREE(m), 741 ("vm_page_dirty: page is free!")); 742 KASSERT(m->valid == VM_PAGE_BITS_ALL, 743 ("vm_page_dirty: page is invalid!")); 744 m->dirty = VM_PAGE_BITS_ALL; 745 } 746 747 /* 748 * vm_page_splay: 749 * 750 * Implements Sleator and Tarjan's top-down splay algorithm. Returns 751 * the vm_page containing the given pindex. If, however, that 752 * pindex is not found in the vm_object, returns a vm_page that is 753 * adjacent to the pindex, coming before or after it. 754 */ 755 vm_page_t 756 vm_page_splay(vm_pindex_t pindex, vm_page_t root) 757 { 758 struct vm_page dummy; 759 vm_page_t lefttreemax, righttreemin, y; 760 761 if (root == NULL) 762 return (root); 763 lefttreemax = righttreemin = &dummy; 764 for (;; root = y) { 765 if (pindex < root->pindex) { 766 if ((y = root->left) == NULL) 767 break; 768 if (pindex < y->pindex) { 769 /* Rotate right. */ 770 root->left = y->right; 771 y->right = root; 772 root = y; 773 if ((y = root->left) == NULL) 774 break; 775 } 776 /* Link into the new root's right tree. */ 777 righttreemin->left = root; 778 righttreemin = root; 779 } else if (pindex > root->pindex) { 780 if ((y = root->right) == NULL) 781 break; 782 if (pindex > y->pindex) { 783 /* Rotate left. */ 784 root->right = y->left; 785 y->left = root; 786 root = y; 787 if ((y = root->right) == NULL) 788 break; 789 } 790 /* Link into the new root's left tree. */ 791 lefttreemax->right = root; 792 lefttreemax = root; 793 } else 794 break; 795 } 796 /* Assemble the new root. */ 797 lefttreemax->right = root->left; 798 righttreemin->left = root->right; 799 root->left = dummy.right; 800 root->right = dummy.left; 801 return (root); 802 } 803 804 /* 805 * vm_page_insert: [ internal use only ] 806 * 807 * Inserts the given mem entry into the object and object list. 808 * 809 * The pagetables are not updated but will presumably fault the page 810 * in if necessary, or if a kernel page the caller will at some point 811 * enter the page into the kernel's pmap. We are not allowed to block 812 * here so we *can't* do this anyway. 813 * 814 * The object and page must be locked. 815 * This routine may not block. 816 */ 817 void 818 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) 819 { 820 vm_page_t root; 821 822 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 823 if (m->object != NULL) 824 panic("vm_page_insert: page already inserted"); 825 826 /* 827 * Record the object/offset pair in this page 828 */ 829 m->object = object; 830 m->pindex = pindex; 831 832 /* 833 * Now link into the object's ordered list of backed pages. 834 */ 835 root = object->root; 836 if (root == NULL) { 837 m->left = NULL; 838 m->right = NULL; 839 TAILQ_INSERT_TAIL(&object->memq, m, listq); 840 } else { 841 root = vm_page_splay(pindex, root); 842 if (pindex < root->pindex) { 843 m->left = root->left; 844 m->right = root; 845 root->left = NULL; 846 TAILQ_INSERT_BEFORE(root, m, listq); 847 } else if (pindex == root->pindex) 848 panic("vm_page_insert: offset already allocated"); 849 else { 850 m->right = root->right; 851 m->left = root; 852 root->right = NULL; 853 TAILQ_INSERT_AFTER(&object->memq, root, m, listq); 854 } 855 } 856 object->root = m; 857 858 /* 859 * show that the object has one more resident page. 860 */ 861 object->resident_page_count++; 862 /* 863 * Hold the vnode until the last page is released. 864 */ 865 if (object->resident_page_count == 1 && object->type == OBJT_VNODE) 866 vhold((struct vnode *)object->handle); 867 868 /* 869 * Since we are inserting a new and possibly dirty page, 870 * update the object's OBJ_MIGHTBEDIRTY flag. 871 */ 872 if (m->flags & PG_WRITEABLE) 873 vm_object_set_writeable_dirty(object); 874 } 875 876 /* 877 * vm_page_remove: 878 * NOTE: used by device pager as well -wfj 879 * 880 * Removes the given mem entry from the object/offset-page 881 * table and the object page list, but do not invalidate/terminate 882 * the backing store. 883 * 884 * The object and page must be locked. 885 * The underlying pmap entry (if any) is NOT removed here. 886 * This routine may not block. 887 */ 888 void 889 vm_page_remove(vm_page_t m) 890 { 891 vm_object_t object; 892 vm_page_t root; 893 894 if ((m->flags & PG_UNMANAGED) == 0) 895 vm_page_lock_assert(m, MA_OWNED); 896 if ((object = m->object) == NULL) 897 return; 898 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 899 if (m->oflags & VPO_BUSY) { 900 m->oflags &= ~VPO_BUSY; 901 vm_page_flash(m); 902 } 903 904 /* 905 * Now remove from the object's list of backed pages. 906 */ 907 if (m != object->root) 908 vm_page_splay(m->pindex, object->root); 909 if (m->left == NULL) 910 root = m->right; 911 else { 912 root = vm_page_splay(m->pindex, m->left); 913 root->right = m->right; 914 } 915 object->root = root; 916 TAILQ_REMOVE(&object->memq, m, listq); 917 918 /* 919 * And show that the object has one fewer resident page. 920 */ 921 object->resident_page_count--; 922 /* 923 * The vnode may now be recycled. 924 */ 925 if (object->resident_page_count == 0 && object->type == OBJT_VNODE) 926 vdrop((struct vnode *)object->handle); 927 928 m->object = NULL; 929 } 930 931 /* 932 * vm_page_lookup: 933 * 934 * Returns the page associated with the object/offset 935 * pair specified; if none is found, NULL is returned. 936 * 937 * The object must be locked. 938 * This routine may not block. 939 * This is a critical path routine 940 */ 941 vm_page_t 942 vm_page_lookup(vm_object_t object, vm_pindex_t pindex) 943 { 944 vm_page_t m; 945 946 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 947 if ((m = object->root) != NULL && m->pindex != pindex) { 948 m = vm_page_splay(pindex, m); 949 if ((object->root = m)->pindex != pindex) 950 m = NULL; 951 } 952 return (m); 953 } 954 955 /* 956 * vm_page_find_least: 957 * 958 * Returns the page associated with the object with least pindex 959 * greater than or equal to the parameter pindex, or NULL. 960 * 961 * The object must be locked. 962 * The routine may not block. 963 */ 964 vm_page_t 965 vm_page_find_least(vm_object_t object, vm_pindex_t pindex) 966 { 967 vm_page_t m; 968 969 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 970 if ((m = TAILQ_FIRST(&object->memq)) != NULL) { 971 if (m->pindex < pindex) { 972 m = vm_page_splay(pindex, object->root); 973 if ((object->root = m)->pindex < pindex) 974 m = TAILQ_NEXT(m, listq); 975 } 976 } 977 return (m); 978 } 979 980 /* 981 * Returns the given page's successor (by pindex) within the object if it is 982 * resident; if none is found, NULL is returned. 983 * 984 * The object must be locked. 985 */ 986 vm_page_t 987 vm_page_next(vm_page_t m) 988 { 989 vm_page_t next; 990 991 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 992 if ((next = TAILQ_NEXT(m, listq)) != NULL && 993 next->pindex != m->pindex + 1) 994 next = NULL; 995 return (next); 996 } 997 998 /* 999 * Returns the given page's predecessor (by pindex) within the object if it is 1000 * resident; if none is found, NULL is returned. 1001 * 1002 * The object must be locked. 1003 */ 1004 vm_page_t 1005 vm_page_prev(vm_page_t m) 1006 { 1007 vm_page_t prev; 1008 1009 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1010 if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL && 1011 prev->pindex != m->pindex - 1) 1012 prev = NULL; 1013 return (prev); 1014 } 1015 1016 /* 1017 * vm_page_rename: 1018 * 1019 * Move the given memory entry from its 1020 * current object to the specified target object/offset. 1021 * 1022 * The object must be locked. 1023 * This routine may not block. 1024 * 1025 * Note: swap associated with the page must be invalidated by the move. We 1026 * have to do this for several reasons: (1) we aren't freeing the 1027 * page, (2) we are dirtying the page, (3) the VM system is probably 1028 * moving the page from object A to B, and will then later move 1029 * the backing store from A to B and we can't have a conflict. 1030 * 1031 * Note: we *always* dirty the page. It is necessary both for the 1032 * fact that we moved it, and because we may be invalidating 1033 * swap. If the page is on the cache, we have to deactivate it 1034 * or vm_page_dirty() will panic. Dirty pages are not allowed 1035 * on the cache. 1036 */ 1037 void 1038 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex) 1039 { 1040 1041 vm_page_remove(m); 1042 vm_page_insert(m, new_object, new_pindex); 1043 vm_page_dirty(m); 1044 } 1045 1046 /* 1047 * Convert all of the given object's cached pages that have a 1048 * pindex within the given range into free pages. If the value 1049 * zero is given for "end", then the range's upper bound is 1050 * infinity. If the given object is backed by a vnode and it 1051 * transitions from having one or more cached pages to none, the 1052 * vnode's hold count is reduced. 1053 */ 1054 void 1055 vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1056 { 1057 vm_page_t m, m_next; 1058 boolean_t empty; 1059 1060 mtx_lock(&vm_page_queue_free_mtx); 1061 if (__predict_false(object->cache == NULL)) { 1062 mtx_unlock(&vm_page_queue_free_mtx); 1063 return; 1064 } 1065 m = object->cache = vm_page_splay(start, object->cache); 1066 if (m->pindex < start) { 1067 if (m->right == NULL) 1068 m = NULL; 1069 else { 1070 m_next = vm_page_splay(start, m->right); 1071 m_next->left = m; 1072 m->right = NULL; 1073 m = object->cache = m_next; 1074 } 1075 } 1076 1077 /* 1078 * At this point, "m" is either (1) a reference to the page 1079 * with the least pindex that is greater than or equal to 1080 * "start" or (2) NULL. 1081 */ 1082 for (; m != NULL && (m->pindex < end || end == 0); m = m_next) { 1083 /* 1084 * Find "m"'s successor and remove "m" from the 1085 * object's cache. 1086 */ 1087 if (m->right == NULL) { 1088 object->cache = m->left; 1089 m_next = NULL; 1090 } else { 1091 m_next = vm_page_splay(start, m->right); 1092 m_next->left = m->left; 1093 object->cache = m_next; 1094 } 1095 /* Convert "m" to a free page. */ 1096 m->object = NULL; 1097 m->valid = 0; 1098 /* Clear PG_CACHED and set PG_FREE. */ 1099 m->flags ^= PG_CACHED | PG_FREE; 1100 KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE, 1101 ("vm_page_cache_free: page %p has inconsistent flags", m)); 1102 cnt.v_cache_count--; 1103 cnt.v_free_count++; 1104 } 1105 empty = object->cache == NULL; 1106 mtx_unlock(&vm_page_queue_free_mtx); 1107 if (object->type == OBJT_VNODE && empty) 1108 vdrop(object->handle); 1109 } 1110 1111 /* 1112 * Returns the cached page that is associated with the given 1113 * object and offset. If, however, none exists, returns NULL. 1114 * 1115 * The free page queue must be locked. 1116 */ 1117 static inline vm_page_t 1118 vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex) 1119 { 1120 vm_page_t m; 1121 1122 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1123 if ((m = object->cache) != NULL && m->pindex != pindex) { 1124 m = vm_page_splay(pindex, m); 1125 if ((object->cache = m)->pindex != pindex) 1126 m = NULL; 1127 } 1128 return (m); 1129 } 1130 1131 /* 1132 * Remove the given cached page from its containing object's 1133 * collection of cached pages. 1134 * 1135 * The free page queue must be locked. 1136 */ 1137 void 1138 vm_page_cache_remove(vm_page_t m) 1139 { 1140 vm_object_t object; 1141 vm_page_t root; 1142 1143 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1144 KASSERT((m->flags & PG_CACHED) != 0, 1145 ("vm_page_cache_remove: page %p is not cached", m)); 1146 object = m->object; 1147 if (m != object->cache) { 1148 root = vm_page_splay(m->pindex, object->cache); 1149 KASSERT(root == m, 1150 ("vm_page_cache_remove: page %p is not cached in object %p", 1151 m, object)); 1152 } 1153 if (m->left == NULL) 1154 root = m->right; 1155 else if (m->right == NULL) 1156 root = m->left; 1157 else { 1158 root = vm_page_splay(m->pindex, m->left); 1159 root->right = m->right; 1160 } 1161 object->cache = root; 1162 m->object = NULL; 1163 cnt.v_cache_count--; 1164 } 1165 1166 /* 1167 * Transfer all of the cached pages with offset greater than or 1168 * equal to 'offidxstart' from the original object's cache to the 1169 * new object's cache. However, any cached pages with offset 1170 * greater than or equal to the new object's size are kept in the 1171 * original object. Initially, the new object's cache must be 1172 * empty. Offset 'offidxstart' in the original object must 1173 * correspond to offset zero in the new object. 1174 * 1175 * The new object must be locked. 1176 */ 1177 void 1178 vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart, 1179 vm_object_t new_object) 1180 { 1181 vm_page_t m, m_next; 1182 1183 /* 1184 * Insertion into an object's collection of cached pages 1185 * requires the object to be locked. In contrast, removal does 1186 * not. 1187 */ 1188 VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED); 1189 KASSERT(new_object->cache == NULL, 1190 ("vm_page_cache_transfer: object %p has cached pages", 1191 new_object)); 1192 mtx_lock(&vm_page_queue_free_mtx); 1193 if ((m = orig_object->cache) != NULL) { 1194 /* 1195 * Transfer all of the pages with offset greater than or 1196 * equal to 'offidxstart' from the original object's 1197 * cache to the new object's cache. 1198 */ 1199 m = vm_page_splay(offidxstart, m); 1200 if (m->pindex < offidxstart) { 1201 orig_object->cache = m; 1202 new_object->cache = m->right; 1203 m->right = NULL; 1204 } else { 1205 orig_object->cache = m->left; 1206 new_object->cache = m; 1207 m->left = NULL; 1208 } 1209 while ((m = new_object->cache) != NULL) { 1210 if ((m->pindex - offidxstart) >= new_object->size) { 1211 /* 1212 * Return all of the cached pages with 1213 * offset greater than or equal to the 1214 * new object's size to the original 1215 * object's cache. 1216 */ 1217 new_object->cache = m->left; 1218 m->left = orig_object->cache; 1219 orig_object->cache = m; 1220 break; 1221 } 1222 m_next = vm_page_splay(m->pindex, m->right); 1223 /* Update the page's object and offset. */ 1224 m->object = new_object; 1225 m->pindex -= offidxstart; 1226 if (m_next == NULL) 1227 break; 1228 m->right = NULL; 1229 m_next->left = m; 1230 new_object->cache = m_next; 1231 } 1232 KASSERT(new_object->cache == NULL || 1233 new_object->type == OBJT_SWAP, 1234 ("vm_page_cache_transfer: object %p's type is incompatible" 1235 " with cached pages", new_object)); 1236 } 1237 mtx_unlock(&vm_page_queue_free_mtx); 1238 } 1239 1240 /* 1241 * vm_page_alloc: 1242 * 1243 * Allocate and return a memory cell associated 1244 * with this VM object/offset pair. 1245 * 1246 * The caller must always specify an allocation class. 1247 * 1248 * allocation classes: 1249 * VM_ALLOC_NORMAL normal process request 1250 * VM_ALLOC_SYSTEM system *really* needs a page 1251 * VM_ALLOC_INTERRUPT interrupt time request 1252 * 1253 * optional allocation flags: 1254 * VM_ALLOC_ZERO prefer a zeroed page 1255 * VM_ALLOC_WIRED wire the allocated page 1256 * VM_ALLOC_NOOBJ page is not associated with a vm object 1257 * VM_ALLOC_NOBUSY do not set the page busy 1258 * VM_ALLOC_IFCACHED return page only if it is cached 1259 * VM_ALLOC_IFNOTCACHED return NULL, do not reactivate if the page 1260 * is cached 1261 * 1262 * This routine may not sleep. 1263 */ 1264 vm_page_t 1265 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) 1266 { 1267 struct vnode *vp = NULL; 1268 vm_object_t m_object; 1269 vm_page_t m; 1270 int flags, page_req; 1271 1272 if ((req & VM_ALLOC_NOOBJ) == 0) { 1273 KASSERT(object != NULL, 1274 ("vm_page_alloc: NULL object.")); 1275 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1276 } 1277 1278 page_req = req & VM_ALLOC_CLASS_MASK; 1279 1280 /* 1281 * The pager is allowed to eat deeper into the free page list. 1282 */ 1283 if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) 1284 page_req = VM_ALLOC_SYSTEM; 1285 1286 mtx_lock(&vm_page_queue_free_mtx); 1287 if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || 1288 (page_req == VM_ALLOC_SYSTEM && 1289 cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || 1290 (page_req == VM_ALLOC_INTERRUPT && 1291 cnt.v_free_count + cnt.v_cache_count > 0)) { 1292 /* 1293 * Allocate from the free queue if the number of free pages 1294 * exceeds the minimum for the request class. 1295 */ 1296 if (object != NULL && 1297 (m = vm_page_cache_lookup(object, pindex)) != NULL) { 1298 if ((req & VM_ALLOC_IFNOTCACHED) != 0) { 1299 mtx_unlock(&vm_page_queue_free_mtx); 1300 return (NULL); 1301 } 1302 if (vm_phys_unfree_page(m)) 1303 vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0); 1304 #if VM_NRESERVLEVEL > 0 1305 else if (!vm_reserv_reactivate_page(m)) 1306 #else 1307 else 1308 #endif 1309 panic("vm_page_alloc: cache page %p is missing" 1310 " from the free queue", m); 1311 } else if ((req & VM_ALLOC_IFCACHED) != 0) { 1312 mtx_unlock(&vm_page_queue_free_mtx); 1313 return (NULL); 1314 #if VM_NRESERVLEVEL > 0 1315 } else if (object == NULL || object->type == OBJT_DEVICE || 1316 object->type == OBJT_SG || 1317 (object->flags & OBJ_COLORED) == 0 || 1318 (m = vm_reserv_alloc_page(object, pindex)) == NULL) { 1319 #else 1320 } else { 1321 #endif 1322 m = vm_phys_alloc_pages(object != NULL ? 1323 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); 1324 #if VM_NRESERVLEVEL > 0 1325 if (m == NULL && vm_reserv_reclaim_inactive()) { 1326 m = vm_phys_alloc_pages(object != NULL ? 1327 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 1328 0); 1329 } 1330 #endif 1331 } 1332 } else { 1333 /* 1334 * Not allocatable, give up. 1335 */ 1336 mtx_unlock(&vm_page_queue_free_mtx); 1337 atomic_add_int(&vm_pageout_deficit, 1338 MAX((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 1339 pagedaemon_wakeup(); 1340 return (NULL); 1341 } 1342 1343 /* 1344 * At this point we had better have found a good page. 1345 */ 1346 1347 KASSERT(m != NULL, ("vm_page_alloc: missing page")); 1348 KASSERT(m->queue == PQ_NONE, 1349 ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue)); 1350 KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m)); 1351 KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m)); 1352 KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m)); 1353 KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m)); 1354 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 1355 ("vm_page_alloc: page %p has unexpected memattr %d", m, 1356 pmap_page_get_memattr(m))); 1357 if ((m->flags & PG_CACHED) != 0) { 1358 KASSERT(m->valid != 0, 1359 ("vm_page_alloc: cached page %p is invalid", m)); 1360 if (m->object == object && m->pindex == pindex) 1361 cnt.v_reactivated++; 1362 else 1363 m->valid = 0; 1364 m_object = m->object; 1365 vm_page_cache_remove(m); 1366 if (m_object->type == OBJT_VNODE && m_object->cache == NULL) 1367 vp = m_object->handle; 1368 } else { 1369 KASSERT(VM_PAGE_IS_FREE(m), 1370 ("vm_page_alloc: page %p is not free", m)); 1371 KASSERT(m->valid == 0, 1372 ("vm_page_alloc: free page %p is valid", m)); 1373 cnt.v_free_count--; 1374 } 1375 1376 /* 1377 * Only the PG_ZERO flag is inherited. The PG_CACHED or PG_FREE flag 1378 * must be cleared before the free page queues lock is released. 1379 */ 1380 flags = 0; 1381 if (m->flags & PG_ZERO) { 1382 vm_page_zero_count--; 1383 if (req & VM_ALLOC_ZERO) 1384 flags = PG_ZERO; 1385 } 1386 if (object == NULL || object->type == OBJT_PHYS) 1387 flags |= PG_UNMANAGED; 1388 m->flags = flags; 1389 mtx_unlock(&vm_page_queue_free_mtx); 1390 if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) 1391 m->oflags = 0; 1392 else 1393 m->oflags = VPO_BUSY; 1394 if (req & VM_ALLOC_WIRED) { 1395 /* 1396 * The page lock is not required for wiring a page until that 1397 * page is inserted into the object. 1398 */ 1399 atomic_add_int(&cnt.v_wire_count, 1); 1400 m->wire_count = 1; 1401 } 1402 m->act_count = 0; 1403 1404 if (object != NULL) { 1405 /* Ignore device objects; the pager sets "memattr" for them. */ 1406 if (object->memattr != VM_MEMATTR_DEFAULT && 1407 object->type != OBJT_DEVICE && object->type != OBJT_SG) 1408 pmap_page_set_memattr(m, object->memattr); 1409 vm_page_insert(m, object, pindex); 1410 } else 1411 m->pindex = pindex; 1412 1413 /* 1414 * The following call to vdrop() must come after the above call 1415 * to vm_page_insert() in case both affect the same object and 1416 * vnode. Otherwise, the affected vnode's hold count could 1417 * temporarily become zero. 1418 */ 1419 if (vp != NULL) 1420 vdrop(vp); 1421 1422 /* 1423 * Don't wakeup too often - wakeup the pageout daemon when 1424 * we would be nearly out of memory. 1425 */ 1426 if (vm_paging_needed()) 1427 pagedaemon_wakeup(); 1428 1429 return (m); 1430 } 1431 1432 /* 1433 * Initialize a page that has been freshly dequeued from a freelist. 1434 * The caller has to drop the vnode returned, if it is not NULL. 1435 * 1436 * To be called with vm_page_queue_free_mtx held. 1437 */ 1438 struct vnode * 1439 vm_page_alloc_init(vm_page_t m) 1440 { 1441 struct vnode *drop; 1442 vm_object_t m_object; 1443 1444 KASSERT(m->queue == PQ_NONE, 1445 ("vm_page_alloc_init: page %p has unexpected queue %d", 1446 m, m->queue)); 1447 KASSERT(m->wire_count == 0, 1448 ("vm_page_alloc_init: page %p is wired", m)); 1449 KASSERT(m->hold_count == 0, 1450 ("vm_page_alloc_init: page %p is held", m)); 1451 KASSERT(m->busy == 0, 1452 ("vm_page_alloc_init: page %p is busy", m)); 1453 KASSERT(m->dirty == 0, 1454 ("vm_page_alloc_init: page %p is dirty", m)); 1455 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 1456 ("vm_page_alloc_init: page %p has unexpected memattr %d", 1457 m, pmap_page_get_memattr(m))); 1458 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1459 drop = NULL; 1460 if ((m->flags & PG_CACHED) != 0) { 1461 m->valid = 0; 1462 m_object = m->object; 1463 vm_page_cache_remove(m); 1464 if (m_object->type == OBJT_VNODE && 1465 m_object->cache == NULL) 1466 drop = m_object->handle; 1467 } else { 1468 KASSERT(VM_PAGE_IS_FREE(m), 1469 ("vm_page_alloc_init: page %p is not free", m)); 1470 KASSERT(m->valid == 0, 1471 ("vm_page_alloc_init: free page %p is valid", m)); 1472 cnt.v_free_count--; 1473 } 1474 if (m->flags & PG_ZERO) 1475 vm_page_zero_count--; 1476 /* Don't clear the PG_ZERO flag; we'll need it later. */ 1477 m->flags = PG_UNMANAGED | (m->flags & PG_ZERO); 1478 m->oflags = 0; 1479 /* Unmanaged pages don't use "act_count". */ 1480 return (drop); 1481 } 1482 1483 /* 1484 * vm_page_alloc_freelist: 1485 * 1486 * Allocate a page from the specified freelist. 1487 * Only the ALLOC_CLASS values in req are honored, other request flags 1488 * are ignored. 1489 */ 1490 vm_page_t 1491 vm_page_alloc_freelist(int flind, int req) 1492 { 1493 struct vnode *drop; 1494 vm_page_t m; 1495 int page_req; 1496 1497 m = NULL; 1498 page_req = req & VM_ALLOC_CLASS_MASK; 1499 mtx_lock(&vm_page_queue_free_mtx); 1500 /* 1501 * Do not allocate reserved pages unless the req has asked for it. 1502 */ 1503 if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || 1504 (page_req == VM_ALLOC_SYSTEM && 1505 cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || 1506 (page_req == VM_ALLOC_INTERRUPT && 1507 cnt.v_free_count + cnt.v_cache_count > 0)) { 1508 m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0); 1509 } 1510 if (m == NULL) { 1511 mtx_unlock(&vm_page_queue_free_mtx); 1512 return (NULL); 1513 } 1514 drop = vm_page_alloc_init(m); 1515 mtx_unlock(&vm_page_queue_free_mtx); 1516 if (drop) 1517 vdrop(drop); 1518 return (m); 1519 } 1520 1521 /* 1522 * vm_wait: (also see VM_WAIT macro) 1523 * 1524 * Block until free pages are available for allocation 1525 * - Called in various places before memory allocations. 1526 */ 1527 void 1528 vm_wait(void) 1529 { 1530 1531 mtx_lock(&vm_page_queue_free_mtx); 1532 if (curproc == pageproc) { 1533 vm_pageout_pages_needed = 1; 1534 msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx, 1535 PDROP | PSWP, "VMWait", 0); 1536 } else { 1537 if (!vm_pages_needed) { 1538 vm_pages_needed = 1; 1539 wakeup(&vm_pages_needed); 1540 } 1541 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, 1542 "vmwait", 0); 1543 } 1544 } 1545 1546 /* 1547 * vm_waitpfault: (also see VM_WAITPFAULT macro) 1548 * 1549 * Block until free pages are available for allocation 1550 * - Called only in vm_fault so that processes page faulting 1551 * can be easily tracked. 1552 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 1553 * processes will be able to grab memory first. Do not change 1554 * this balance without careful testing first. 1555 */ 1556 void 1557 vm_waitpfault(void) 1558 { 1559 1560 mtx_lock(&vm_page_queue_free_mtx); 1561 if (!vm_pages_needed) { 1562 vm_pages_needed = 1; 1563 wakeup(&vm_pages_needed); 1564 } 1565 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, 1566 "pfault", 0); 1567 } 1568 1569 /* 1570 * vm_page_requeue: 1571 * 1572 * Move the given page to the tail of its present page queue. 1573 * 1574 * The page queues must be locked. 1575 */ 1576 void 1577 vm_page_requeue(vm_page_t m) 1578 { 1579 struct vpgqueues *vpq; 1580 int queue; 1581 1582 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1583 queue = m->queue; 1584 KASSERT(queue != PQ_NONE, 1585 ("vm_page_requeue: page %p is not queued", m)); 1586 vpq = &vm_page_queues[queue]; 1587 TAILQ_REMOVE(&vpq->pl, m, pageq); 1588 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); 1589 } 1590 1591 /* 1592 * vm_page_queue_remove: 1593 * 1594 * Remove the given page from the specified queue. 1595 * 1596 * The page and page queues must be locked. 1597 */ 1598 static __inline void 1599 vm_page_queue_remove(int queue, vm_page_t m) 1600 { 1601 struct vpgqueues *pq; 1602 1603 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1604 vm_page_lock_assert(m, MA_OWNED); 1605 pq = &vm_page_queues[queue]; 1606 TAILQ_REMOVE(&pq->pl, m, pageq); 1607 (*pq->cnt)--; 1608 } 1609 1610 /* 1611 * vm_pageq_remove: 1612 * 1613 * Remove a page from its queue. 1614 * 1615 * The given page must be locked. 1616 * This routine may not block. 1617 */ 1618 void 1619 vm_pageq_remove(vm_page_t m) 1620 { 1621 int queue; 1622 1623 vm_page_lock_assert(m, MA_OWNED); 1624 if ((queue = m->queue) != PQ_NONE) { 1625 vm_page_lock_queues(); 1626 m->queue = PQ_NONE; 1627 vm_page_queue_remove(queue, m); 1628 vm_page_unlock_queues(); 1629 } 1630 } 1631 1632 /* 1633 * vm_page_enqueue: 1634 * 1635 * Add the given page to the specified queue. 1636 * 1637 * The page queues must be locked. 1638 */ 1639 static void 1640 vm_page_enqueue(int queue, vm_page_t m) 1641 { 1642 struct vpgqueues *vpq; 1643 1644 vpq = &vm_page_queues[queue]; 1645 m->queue = queue; 1646 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); 1647 ++*vpq->cnt; 1648 } 1649 1650 /* 1651 * vm_page_activate: 1652 * 1653 * Put the specified page on the active list (if appropriate). 1654 * Ensure that act_count is at least ACT_INIT but do not otherwise 1655 * mess with it. 1656 * 1657 * The page must be locked. 1658 * This routine may not block. 1659 */ 1660 void 1661 vm_page_activate(vm_page_t m) 1662 { 1663 int queue; 1664 1665 vm_page_lock_assert(m, MA_OWNED); 1666 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1667 if ((queue = m->queue) != PQ_ACTIVE) { 1668 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { 1669 if (m->act_count < ACT_INIT) 1670 m->act_count = ACT_INIT; 1671 vm_page_lock_queues(); 1672 if (queue != PQ_NONE) 1673 vm_page_queue_remove(queue, m); 1674 vm_page_enqueue(PQ_ACTIVE, m); 1675 vm_page_unlock_queues(); 1676 } else 1677 KASSERT(queue == PQ_NONE, 1678 ("vm_page_activate: wired page %p is queued", m)); 1679 } else { 1680 if (m->act_count < ACT_INIT) 1681 m->act_count = ACT_INIT; 1682 } 1683 } 1684 1685 /* 1686 * vm_page_free_wakeup: 1687 * 1688 * Helper routine for vm_page_free_toq() and vm_page_cache(). This 1689 * routine is called when a page has been added to the cache or free 1690 * queues. 1691 * 1692 * The page queues must be locked. 1693 * This routine may not block. 1694 */ 1695 static inline void 1696 vm_page_free_wakeup(void) 1697 { 1698 1699 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1700 /* 1701 * if pageout daemon needs pages, then tell it that there are 1702 * some free. 1703 */ 1704 if (vm_pageout_pages_needed && 1705 cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) { 1706 wakeup(&vm_pageout_pages_needed); 1707 vm_pageout_pages_needed = 0; 1708 } 1709 /* 1710 * wakeup processes that are waiting on memory if we hit a 1711 * high water mark. And wakeup scheduler process if we have 1712 * lots of memory. this process will swapin processes. 1713 */ 1714 if (vm_pages_needed && !vm_page_count_min()) { 1715 vm_pages_needed = 0; 1716 wakeup(&cnt.v_free_count); 1717 } 1718 } 1719 1720 /* 1721 * vm_page_free_toq: 1722 * 1723 * Returns the given page to the free list, 1724 * disassociating it with any VM object. 1725 * 1726 * Object and page must be locked prior to entry. 1727 * This routine may not block. 1728 */ 1729 1730 void 1731 vm_page_free_toq(vm_page_t m) 1732 { 1733 1734 if ((m->flags & PG_UNMANAGED) == 0) { 1735 vm_page_lock_assert(m, MA_OWNED); 1736 KASSERT(!pmap_page_is_mapped(m), 1737 ("vm_page_free_toq: freeing mapped page %p", m)); 1738 } 1739 PCPU_INC(cnt.v_tfree); 1740 1741 if (VM_PAGE_IS_FREE(m)) 1742 panic("vm_page_free: freeing free page %p", m); 1743 else if (m->busy != 0) 1744 panic("vm_page_free: freeing busy page %p", m); 1745 1746 /* 1747 * unqueue, then remove page. Note that we cannot destroy 1748 * the page here because we do not want to call the pager's 1749 * callback routine until after we've put the page on the 1750 * appropriate free queue. 1751 */ 1752 if ((m->flags & PG_UNMANAGED) == 0) 1753 vm_pageq_remove(m); 1754 vm_page_remove(m); 1755 1756 /* 1757 * If fictitious remove object association and 1758 * return, otherwise delay object association removal. 1759 */ 1760 if ((m->flags & PG_FICTITIOUS) != 0) { 1761 return; 1762 } 1763 1764 m->valid = 0; 1765 vm_page_undirty(m); 1766 1767 if (m->wire_count != 0) 1768 panic("vm_page_free: freeing wired page %p", m); 1769 if (m->hold_count != 0) { 1770 m->flags &= ~PG_ZERO; 1771 vm_page_lock_queues(); 1772 vm_page_enqueue(PQ_HOLD, m); 1773 vm_page_unlock_queues(); 1774 } else { 1775 /* 1776 * Restore the default memory attribute to the page. 1777 */ 1778 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 1779 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 1780 1781 /* 1782 * Insert the page into the physical memory allocator's 1783 * cache/free page queues. 1784 */ 1785 mtx_lock(&vm_page_queue_free_mtx); 1786 m->flags |= PG_FREE; 1787 cnt.v_free_count++; 1788 #if VM_NRESERVLEVEL > 0 1789 if (!vm_reserv_free_page(m)) 1790 #else 1791 if (TRUE) 1792 #endif 1793 vm_phys_free_pages(m, 0); 1794 if ((m->flags & PG_ZERO) != 0) 1795 ++vm_page_zero_count; 1796 else 1797 vm_page_zero_idle_wakeup(); 1798 vm_page_free_wakeup(); 1799 mtx_unlock(&vm_page_queue_free_mtx); 1800 } 1801 } 1802 1803 /* 1804 * vm_page_wire: 1805 * 1806 * Mark this page as wired down by yet 1807 * another map, removing it from paging queues 1808 * as necessary. 1809 * 1810 * If the page is fictitious, then its wire count must remain one. 1811 * 1812 * The page must be locked. 1813 * This routine may not block. 1814 */ 1815 void 1816 vm_page_wire(vm_page_t m) 1817 { 1818 1819 /* 1820 * Only bump the wire statistics if the page is not already wired, 1821 * and only unqueue the page if it is on some queue (if it is unmanaged 1822 * it is already off the queues). 1823 */ 1824 vm_page_lock_assert(m, MA_OWNED); 1825 if ((m->flags & PG_FICTITIOUS) != 0) { 1826 KASSERT(m->wire_count == 1, 1827 ("vm_page_wire: fictitious page %p's wire count isn't one", 1828 m)); 1829 return; 1830 } 1831 if (m->wire_count == 0) { 1832 if ((m->flags & PG_UNMANAGED) == 0) 1833 vm_pageq_remove(m); 1834 atomic_add_int(&cnt.v_wire_count, 1); 1835 } 1836 m->wire_count++; 1837 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); 1838 } 1839 1840 /* 1841 * vm_page_unwire: 1842 * 1843 * Release one wiring of the specified page, potentially enabling it to be 1844 * paged again. If paging is enabled, then the value of the parameter 1845 * "activate" determines to which queue the page is added. If "activate" is 1846 * non-zero, then the page is added to the active queue. Otherwise, it is 1847 * added to the inactive queue. 1848 * 1849 * However, unless the page belongs to an object, it is not enqueued because 1850 * it cannot be paged out. 1851 * 1852 * If a page is fictitious, then its wire count must alway be one. 1853 * 1854 * A managed page must be locked. 1855 */ 1856 void 1857 vm_page_unwire(vm_page_t m, int activate) 1858 { 1859 1860 if ((m->flags & PG_UNMANAGED) == 0) 1861 vm_page_lock_assert(m, MA_OWNED); 1862 if ((m->flags & PG_FICTITIOUS) != 0) { 1863 KASSERT(m->wire_count == 1, 1864 ("vm_page_unwire: fictitious page %p's wire count isn't one", m)); 1865 return; 1866 } 1867 if (m->wire_count > 0) { 1868 m->wire_count--; 1869 if (m->wire_count == 0) { 1870 atomic_subtract_int(&cnt.v_wire_count, 1); 1871 if ((m->flags & PG_UNMANAGED) != 0 || 1872 m->object == NULL) 1873 return; 1874 vm_page_lock_queues(); 1875 if (activate) 1876 vm_page_enqueue(PQ_ACTIVE, m); 1877 else { 1878 vm_page_flag_clear(m, PG_WINATCFLS); 1879 vm_page_enqueue(PQ_INACTIVE, m); 1880 } 1881 vm_page_unlock_queues(); 1882 } 1883 } else 1884 panic("vm_page_unwire: page %p's wire count is zero", m); 1885 } 1886 1887 /* 1888 * Move the specified page to the inactive queue. 1889 * 1890 * Many pages placed on the inactive queue should actually go 1891 * into the cache, but it is difficult to figure out which. What 1892 * we do instead, if the inactive target is well met, is to put 1893 * clean pages at the head of the inactive queue instead of the tail. 1894 * This will cause them to be moved to the cache more quickly and 1895 * if not actively re-referenced, reclaimed more quickly. If we just 1896 * stick these pages at the end of the inactive queue, heavy filesystem 1897 * meta-data accesses can cause an unnecessary paging load on memory bound 1898 * processes. This optimization causes one-time-use metadata to be 1899 * reused more quickly. 1900 * 1901 * Normally athead is 0 resulting in LRU operation. athead is set 1902 * to 1 if we want this page to be 'as if it were placed in the cache', 1903 * except without unmapping it from the process address space. 1904 * 1905 * This routine may not block. 1906 */ 1907 static inline void 1908 _vm_page_deactivate(vm_page_t m, int athead) 1909 { 1910 int queue; 1911 1912 vm_page_lock_assert(m, MA_OWNED); 1913 1914 /* 1915 * Ignore if already inactive. 1916 */ 1917 if ((queue = m->queue) == PQ_INACTIVE) 1918 return; 1919 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { 1920 vm_page_lock_queues(); 1921 vm_page_flag_clear(m, PG_WINATCFLS); 1922 if (queue != PQ_NONE) 1923 vm_page_queue_remove(queue, m); 1924 if (athead) 1925 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, 1926 pageq); 1927 else 1928 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, 1929 pageq); 1930 m->queue = PQ_INACTIVE; 1931 cnt.v_inactive_count++; 1932 vm_page_unlock_queues(); 1933 } 1934 } 1935 1936 /* 1937 * Move the specified page to the inactive queue. 1938 * 1939 * The page must be locked. 1940 */ 1941 void 1942 vm_page_deactivate(vm_page_t m) 1943 { 1944 1945 _vm_page_deactivate(m, 0); 1946 } 1947 1948 /* 1949 * vm_page_try_to_cache: 1950 * 1951 * Returns 0 on failure, 1 on success 1952 */ 1953 int 1954 vm_page_try_to_cache(vm_page_t m) 1955 { 1956 1957 vm_page_lock_assert(m, MA_OWNED); 1958 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1959 if (m->dirty || m->hold_count || m->busy || m->wire_count || 1960 (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) 1961 return (0); 1962 pmap_remove_all(m); 1963 if (m->dirty) 1964 return (0); 1965 vm_page_cache(m); 1966 return (1); 1967 } 1968 1969 /* 1970 * vm_page_try_to_free() 1971 * 1972 * Attempt to free the page. If we cannot free it, we do nothing. 1973 * 1 is returned on success, 0 on failure. 1974 */ 1975 int 1976 vm_page_try_to_free(vm_page_t m) 1977 { 1978 1979 vm_page_lock_assert(m, MA_OWNED); 1980 if (m->object != NULL) 1981 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1982 if (m->dirty || m->hold_count || m->busy || m->wire_count || 1983 (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) 1984 return (0); 1985 pmap_remove_all(m); 1986 if (m->dirty) 1987 return (0); 1988 vm_page_free(m); 1989 return (1); 1990 } 1991 1992 /* 1993 * vm_page_cache 1994 * 1995 * Put the specified page onto the page cache queue (if appropriate). 1996 * 1997 * This routine may not block. 1998 */ 1999 void 2000 vm_page_cache(vm_page_t m) 2001 { 2002 vm_object_t object; 2003 vm_page_t root; 2004 2005 vm_page_lock_assert(m, MA_OWNED); 2006 object = m->object; 2007 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2008 if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy || 2009 m->hold_count || m->wire_count) 2010 panic("vm_page_cache: attempting to cache busy page"); 2011 pmap_remove_all(m); 2012 if (m->dirty != 0) 2013 panic("vm_page_cache: page %p is dirty", m); 2014 if (m->valid == 0 || object->type == OBJT_DEFAULT || 2015 (object->type == OBJT_SWAP && 2016 !vm_pager_has_page(object, m->pindex, NULL, NULL))) { 2017 /* 2018 * Hypothesis: A cache-elgible page belonging to a 2019 * default object or swap object but without a backing 2020 * store must be zero filled. 2021 */ 2022 vm_page_free(m); 2023 return; 2024 } 2025 KASSERT((m->flags & PG_CACHED) == 0, 2026 ("vm_page_cache: page %p is already cached", m)); 2027 PCPU_INC(cnt.v_tcached); 2028 2029 /* 2030 * Remove the page from the paging queues. 2031 */ 2032 vm_pageq_remove(m); 2033 2034 /* 2035 * Remove the page from the object's collection of resident 2036 * pages. 2037 */ 2038 if (m != object->root) 2039 vm_page_splay(m->pindex, object->root); 2040 if (m->left == NULL) 2041 root = m->right; 2042 else { 2043 root = vm_page_splay(m->pindex, m->left); 2044 root->right = m->right; 2045 } 2046 object->root = root; 2047 TAILQ_REMOVE(&object->memq, m, listq); 2048 object->resident_page_count--; 2049 2050 /* 2051 * Restore the default memory attribute to the page. 2052 */ 2053 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 2054 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 2055 2056 /* 2057 * Insert the page into the object's collection of cached pages 2058 * and the physical memory allocator's cache/free page queues. 2059 */ 2060 m->flags &= ~PG_ZERO; 2061 mtx_lock(&vm_page_queue_free_mtx); 2062 m->flags |= PG_CACHED; 2063 cnt.v_cache_count++; 2064 root = object->cache; 2065 if (root == NULL) { 2066 m->left = NULL; 2067 m->right = NULL; 2068 } else { 2069 root = vm_page_splay(m->pindex, root); 2070 if (m->pindex < root->pindex) { 2071 m->left = root->left; 2072 m->right = root; 2073 root->left = NULL; 2074 } else if (__predict_false(m->pindex == root->pindex)) 2075 panic("vm_page_cache: offset already cached"); 2076 else { 2077 m->right = root->right; 2078 m->left = root; 2079 root->right = NULL; 2080 } 2081 } 2082 object->cache = m; 2083 #if VM_NRESERVLEVEL > 0 2084 if (!vm_reserv_free_page(m)) { 2085 #else 2086 if (TRUE) { 2087 #endif 2088 vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0); 2089 vm_phys_free_pages(m, 0); 2090 } 2091 vm_page_free_wakeup(); 2092 mtx_unlock(&vm_page_queue_free_mtx); 2093 2094 /* 2095 * Increment the vnode's hold count if this is the object's only 2096 * cached page. Decrement the vnode's hold count if this was 2097 * the object's only resident page. 2098 */ 2099 if (object->type == OBJT_VNODE) { 2100 if (root == NULL && object->resident_page_count != 0) 2101 vhold(object->handle); 2102 else if (root != NULL && object->resident_page_count == 0) 2103 vdrop(object->handle); 2104 } 2105 } 2106 2107 /* 2108 * vm_page_dontneed 2109 * 2110 * Cache, deactivate, or do nothing as appropriate. This routine 2111 * is typically used by madvise() MADV_DONTNEED. 2112 * 2113 * Generally speaking we want to move the page into the cache so 2114 * it gets reused quickly. However, this can result in a silly syndrome 2115 * due to the page recycling too quickly. Small objects will not be 2116 * fully cached. On the otherhand, if we move the page to the inactive 2117 * queue we wind up with a problem whereby very large objects 2118 * unnecessarily blow away our inactive and cache queues. 2119 * 2120 * The solution is to move the pages based on a fixed weighting. We 2121 * either leave them alone, deactivate them, or move them to the cache, 2122 * where moving them to the cache has the highest weighting. 2123 * By forcing some pages into other queues we eventually force the 2124 * system to balance the queues, potentially recovering other unrelated 2125 * space from active. The idea is to not force this to happen too 2126 * often. 2127 */ 2128 void 2129 vm_page_dontneed(vm_page_t m) 2130 { 2131 int dnw; 2132 int head; 2133 2134 vm_page_lock_assert(m, MA_OWNED); 2135 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2136 dnw = PCPU_GET(dnweight); 2137 PCPU_INC(dnweight); 2138 2139 /* 2140 * Occasionally leave the page alone. 2141 */ 2142 if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) { 2143 if (m->act_count >= ACT_INIT) 2144 --m->act_count; 2145 return; 2146 } 2147 2148 /* 2149 * Clear any references to the page. Otherwise, the page daemon will 2150 * immediately reactivate the page. 2151 * 2152 * Perform the pmap_clear_reference() first. Otherwise, a concurrent 2153 * pmap operation, such as pmap_remove(), could clear a reference in 2154 * the pmap and set PG_REFERENCED on the page before the 2155 * pmap_clear_reference() had completed. Consequently, the page would 2156 * appear referenced based upon an old reference that occurred before 2157 * this function ran. 2158 */ 2159 pmap_clear_reference(m); 2160 vm_page_lock_queues(); 2161 vm_page_flag_clear(m, PG_REFERENCED); 2162 vm_page_unlock_queues(); 2163 2164 if (m->dirty == 0 && pmap_is_modified(m)) 2165 vm_page_dirty(m); 2166 2167 if (m->dirty || (dnw & 0x0070) == 0) { 2168 /* 2169 * Deactivate the page 3 times out of 32. 2170 */ 2171 head = 0; 2172 } else { 2173 /* 2174 * Cache the page 28 times out of every 32. Note that 2175 * the page is deactivated instead of cached, but placed 2176 * at the head of the queue instead of the tail. 2177 */ 2178 head = 1; 2179 } 2180 _vm_page_deactivate(m, head); 2181 } 2182 2183 /* 2184 * Grab a page, waiting until we are waken up due to the page 2185 * changing state. We keep on waiting, if the page continues 2186 * to be in the object. If the page doesn't exist, first allocate it 2187 * and then conditionally zero it. 2188 * 2189 * The caller must always specify the VM_ALLOC_RETRY flag. This is intended 2190 * to facilitate its eventual removal. 2191 * 2192 * This routine may block. 2193 */ 2194 vm_page_t 2195 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 2196 { 2197 vm_page_t m; 2198 2199 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2200 KASSERT((allocflags & VM_ALLOC_RETRY) != 0, 2201 ("vm_page_grab: VM_ALLOC_RETRY is required")); 2202 retrylookup: 2203 if ((m = vm_page_lookup(object, pindex)) != NULL) { 2204 if ((m->oflags & VPO_BUSY) != 0 || 2205 ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) { 2206 /* 2207 * Reference the page before unlocking and 2208 * sleeping so that the page daemon is less 2209 * likely to reclaim it. 2210 */ 2211 vm_page_lock_queues(); 2212 vm_page_flag_set(m, PG_REFERENCED); 2213 vm_page_sleep(m, "pgrbwt"); 2214 goto retrylookup; 2215 } else { 2216 if ((allocflags & VM_ALLOC_WIRED) != 0) { 2217 vm_page_lock(m); 2218 vm_page_wire(m); 2219 vm_page_unlock(m); 2220 } 2221 if ((allocflags & VM_ALLOC_NOBUSY) == 0) 2222 vm_page_busy(m); 2223 return (m); 2224 } 2225 } 2226 m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY | 2227 VM_ALLOC_IGN_SBUSY)); 2228 if (m == NULL) { 2229 VM_OBJECT_UNLOCK(object); 2230 VM_WAIT; 2231 VM_OBJECT_LOCK(object); 2232 goto retrylookup; 2233 } else if (m->valid != 0) 2234 return (m); 2235 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 2236 pmap_zero_page(m); 2237 return (m); 2238 } 2239 2240 /* 2241 * Mapping function for valid bits or for dirty bits in 2242 * a page. May not block. 2243 * 2244 * Inputs are required to range within a page. 2245 */ 2246 int 2247 vm_page_bits(int base, int size) 2248 { 2249 int first_bit; 2250 int last_bit; 2251 2252 KASSERT( 2253 base + size <= PAGE_SIZE, 2254 ("vm_page_bits: illegal base/size %d/%d", base, size) 2255 ); 2256 2257 if (size == 0) /* handle degenerate case */ 2258 return (0); 2259 2260 first_bit = base >> DEV_BSHIFT; 2261 last_bit = (base + size - 1) >> DEV_BSHIFT; 2262 2263 return ((2 << last_bit) - (1 << first_bit)); 2264 } 2265 2266 /* 2267 * vm_page_set_valid: 2268 * 2269 * Sets portions of a page valid. The arguments are expected 2270 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2271 * of any partial chunks touched by the range. The invalid portion of 2272 * such chunks will be zeroed. 2273 * 2274 * (base + size) must be less then or equal to PAGE_SIZE. 2275 */ 2276 void 2277 vm_page_set_valid(vm_page_t m, int base, int size) 2278 { 2279 int endoff, frag; 2280 2281 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2282 if (size == 0) /* handle degenerate case */ 2283 return; 2284 2285 /* 2286 * If the base is not DEV_BSIZE aligned and the valid 2287 * bit is clear, we have to zero out a portion of the 2288 * first block. 2289 */ 2290 if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2291 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 2292 pmap_zero_page_area(m, frag, base - frag); 2293 2294 /* 2295 * If the ending offset is not DEV_BSIZE aligned and the 2296 * valid bit is clear, we have to zero out a portion of 2297 * the last block. 2298 */ 2299 endoff = base + size; 2300 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2301 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 2302 pmap_zero_page_area(m, endoff, 2303 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2304 2305 /* 2306 * Assert that no previously invalid block that is now being validated 2307 * is already dirty. 2308 */ 2309 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 2310 ("vm_page_set_valid: page %p is dirty", m)); 2311 2312 /* 2313 * Set valid bits inclusive of any overlap. 2314 */ 2315 m->valid |= vm_page_bits(base, size); 2316 } 2317 2318 /* 2319 * Clear the given bits from the specified page's dirty field. 2320 */ 2321 static __inline void 2322 vm_page_clear_dirty_mask(vm_page_t m, int pagebits) 2323 { 2324 2325 /* 2326 * If the object is locked and the page is neither VPO_BUSY nor 2327 * PG_WRITEABLE, then the page's dirty field cannot possibly be 2328 * modified by a concurrent pmap operation. 2329 */ 2330 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2331 if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0) 2332 m->dirty &= ~pagebits; 2333 else { 2334 vm_page_lock_queues(); 2335 m->dirty &= ~pagebits; 2336 vm_page_unlock_queues(); 2337 } 2338 } 2339 2340 /* 2341 * vm_page_set_validclean: 2342 * 2343 * Sets portions of a page valid and clean. The arguments are expected 2344 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2345 * of any partial chunks touched by the range. The invalid portion of 2346 * such chunks will be zero'd. 2347 * 2348 * This routine may not block. 2349 * 2350 * (base + size) must be less then or equal to PAGE_SIZE. 2351 */ 2352 void 2353 vm_page_set_validclean(vm_page_t m, int base, int size) 2354 { 2355 u_long oldvalid; 2356 int endoff, frag, pagebits; 2357 2358 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2359 if (size == 0) /* handle degenerate case */ 2360 return; 2361 2362 /* 2363 * If the base is not DEV_BSIZE aligned and the valid 2364 * bit is clear, we have to zero out a portion of the 2365 * first block. 2366 */ 2367 if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2368 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 2369 pmap_zero_page_area(m, frag, base - frag); 2370 2371 /* 2372 * If the ending offset is not DEV_BSIZE aligned and the 2373 * valid bit is clear, we have to zero out a portion of 2374 * the last block. 2375 */ 2376 endoff = base + size; 2377 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2378 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 2379 pmap_zero_page_area(m, endoff, 2380 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2381 2382 /* 2383 * Set valid, clear dirty bits. If validating the entire 2384 * page we can safely clear the pmap modify bit. We also 2385 * use this opportunity to clear the VPO_NOSYNC flag. If a process 2386 * takes a write fault on a MAP_NOSYNC memory area the flag will 2387 * be set again. 2388 * 2389 * We set valid bits inclusive of any overlap, but we can only 2390 * clear dirty bits for DEV_BSIZE chunks that are fully within 2391 * the range. 2392 */ 2393 oldvalid = m->valid; 2394 pagebits = vm_page_bits(base, size); 2395 m->valid |= pagebits; 2396 #if 0 /* NOT YET */ 2397 if ((frag = base & (DEV_BSIZE - 1)) != 0) { 2398 frag = DEV_BSIZE - frag; 2399 base += frag; 2400 size -= frag; 2401 if (size < 0) 2402 size = 0; 2403 } 2404 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 2405 #endif 2406 if (base == 0 && size == PAGE_SIZE) { 2407 /* 2408 * The page can only be modified within the pmap if it is 2409 * mapped, and it can only be mapped if it was previously 2410 * fully valid. 2411 */ 2412 if (oldvalid == VM_PAGE_BITS_ALL) 2413 /* 2414 * Perform the pmap_clear_modify() first. Otherwise, 2415 * a concurrent pmap operation, such as 2416 * pmap_protect(), could clear a modification in the 2417 * pmap and set the dirty field on the page before 2418 * pmap_clear_modify() had begun and after the dirty 2419 * field was cleared here. 2420 */ 2421 pmap_clear_modify(m); 2422 m->dirty = 0; 2423 m->oflags &= ~VPO_NOSYNC; 2424 } else if (oldvalid != VM_PAGE_BITS_ALL) 2425 m->dirty &= ~pagebits; 2426 else 2427 vm_page_clear_dirty_mask(m, pagebits); 2428 } 2429 2430 void 2431 vm_page_clear_dirty(vm_page_t m, int base, int size) 2432 { 2433 2434 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 2435 } 2436 2437 /* 2438 * vm_page_set_invalid: 2439 * 2440 * Invalidates DEV_BSIZE'd chunks within a page. Both the 2441 * valid and dirty bits for the effected areas are cleared. 2442 * 2443 * May not block. 2444 */ 2445 void 2446 vm_page_set_invalid(vm_page_t m, int base, int size) 2447 { 2448 int bits; 2449 2450 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2451 KASSERT((m->oflags & VPO_BUSY) == 0, 2452 ("vm_page_set_invalid: page %p is busy", m)); 2453 bits = vm_page_bits(base, size); 2454 if (m->valid == VM_PAGE_BITS_ALL && bits != 0) 2455 pmap_remove_all(m); 2456 KASSERT(!pmap_page_is_mapped(m), 2457 ("vm_page_set_invalid: page %p is mapped", m)); 2458 m->valid &= ~bits; 2459 m->dirty &= ~bits; 2460 } 2461 2462 /* 2463 * vm_page_zero_invalid() 2464 * 2465 * The kernel assumes that the invalid portions of a page contain 2466 * garbage, but such pages can be mapped into memory by user code. 2467 * When this occurs, we must zero out the non-valid portions of the 2468 * page so user code sees what it expects. 2469 * 2470 * Pages are most often semi-valid when the end of a file is mapped 2471 * into memory and the file's size is not page aligned. 2472 */ 2473 void 2474 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 2475 { 2476 int b; 2477 int i; 2478 2479 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2480 /* 2481 * Scan the valid bits looking for invalid sections that 2482 * must be zerod. Invalid sub-DEV_BSIZE'd areas ( where the 2483 * valid bit may be set ) have already been zerod by 2484 * vm_page_set_validclean(). 2485 */ 2486 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 2487 if (i == (PAGE_SIZE / DEV_BSIZE) || 2488 (m->valid & (1 << i)) 2489 ) { 2490 if (i > b) { 2491 pmap_zero_page_area(m, 2492 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 2493 } 2494 b = i + 1; 2495 } 2496 } 2497 2498 /* 2499 * setvalid is TRUE when we can safely set the zero'd areas 2500 * as being valid. We can do this if there are no cache consistancy 2501 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. 2502 */ 2503 if (setvalid) 2504 m->valid = VM_PAGE_BITS_ALL; 2505 } 2506 2507 /* 2508 * vm_page_is_valid: 2509 * 2510 * Is (partial) page valid? Note that the case where size == 0 2511 * will return FALSE in the degenerate case where the page is 2512 * entirely invalid, and TRUE otherwise. 2513 * 2514 * May not block. 2515 */ 2516 int 2517 vm_page_is_valid(vm_page_t m, int base, int size) 2518 { 2519 int bits = vm_page_bits(base, size); 2520 2521 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2522 if (m->valid && ((m->valid & bits) == bits)) 2523 return 1; 2524 else 2525 return 0; 2526 } 2527 2528 /* 2529 * update dirty bits from pmap/mmu. May not block. 2530 */ 2531 void 2532 vm_page_test_dirty(vm_page_t m) 2533 { 2534 2535 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2536 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 2537 vm_page_dirty(m); 2538 } 2539 2540 int so_zerocp_fullpage = 0; 2541 2542 /* 2543 * Replace the given page with a copy. The copied page assumes 2544 * the portion of the given page's "wire_count" that is not the 2545 * responsibility of this copy-on-write mechanism. 2546 * 2547 * The object containing the given page must have a non-zero 2548 * paging-in-progress count and be locked. 2549 */ 2550 void 2551 vm_page_cowfault(vm_page_t m) 2552 { 2553 vm_page_t mnew; 2554 vm_object_t object; 2555 vm_pindex_t pindex; 2556 2557 mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED); 2558 vm_page_lock_assert(m, MA_OWNED); 2559 object = m->object; 2560 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2561 KASSERT(object->paging_in_progress != 0, 2562 ("vm_page_cowfault: object %p's paging-in-progress count is zero.", 2563 object)); 2564 pindex = m->pindex; 2565 2566 retry_alloc: 2567 pmap_remove_all(m); 2568 vm_page_remove(m); 2569 mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY); 2570 if (mnew == NULL) { 2571 vm_page_insert(m, object, pindex); 2572 vm_page_unlock(m); 2573 VM_OBJECT_UNLOCK(object); 2574 VM_WAIT; 2575 VM_OBJECT_LOCK(object); 2576 if (m == vm_page_lookup(object, pindex)) { 2577 vm_page_lock(m); 2578 goto retry_alloc; 2579 } else { 2580 /* 2581 * Page disappeared during the wait. 2582 */ 2583 return; 2584 } 2585 } 2586 2587 if (m->cow == 0) { 2588 /* 2589 * check to see if we raced with an xmit complete when 2590 * waiting to allocate a page. If so, put things back 2591 * the way they were 2592 */ 2593 vm_page_unlock(m); 2594 vm_page_lock(mnew); 2595 vm_page_free(mnew); 2596 vm_page_unlock(mnew); 2597 vm_page_insert(m, object, pindex); 2598 } else { /* clear COW & copy page */ 2599 if (!so_zerocp_fullpage) 2600 pmap_copy_page(m, mnew); 2601 mnew->valid = VM_PAGE_BITS_ALL; 2602 vm_page_dirty(mnew); 2603 mnew->wire_count = m->wire_count - m->cow; 2604 m->wire_count = m->cow; 2605 vm_page_unlock(m); 2606 } 2607 } 2608 2609 void 2610 vm_page_cowclear(vm_page_t m) 2611 { 2612 2613 vm_page_lock_assert(m, MA_OWNED); 2614 if (m->cow) { 2615 m->cow--; 2616 /* 2617 * let vm_fault add back write permission lazily 2618 */ 2619 } 2620 /* 2621 * sf_buf_free() will free the page, so we needn't do it here 2622 */ 2623 } 2624 2625 int 2626 vm_page_cowsetup(vm_page_t m) 2627 { 2628 2629 vm_page_lock_assert(m, MA_OWNED); 2630 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 2631 m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object)) 2632 return (EBUSY); 2633 m->cow++; 2634 pmap_remove_write(m); 2635 VM_OBJECT_UNLOCK(m->object); 2636 return (0); 2637 } 2638 2639 #include "opt_ddb.h" 2640 #ifdef DDB 2641 #include <sys/kernel.h> 2642 2643 #include <ddb/ddb.h> 2644 2645 DB_SHOW_COMMAND(page, vm_page_print_page_info) 2646 { 2647 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count); 2648 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count); 2649 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count); 2650 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count); 2651 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count); 2652 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved); 2653 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min); 2654 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target); 2655 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min); 2656 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target); 2657 } 2658 2659 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 2660 { 2661 2662 db_printf("PQ_FREE:"); 2663 db_printf(" %d", cnt.v_free_count); 2664 db_printf("\n"); 2665 2666 db_printf("PQ_CACHE:"); 2667 db_printf(" %d", cnt.v_cache_count); 2668 db_printf("\n"); 2669 2670 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n", 2671 *vm_page_queues[PQ_ACTIVE].cnt, 2672 *vm_page_queues[PQ_INACTIVE].cnt); 2673 } 2674 #endif /* DDB */ 2675