/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *    GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *    - a pageq mutex is required when adding or removing a page from a
 *      page queue (vm_page_queue[]), regardless of other mutexes or the
 *      busy state of a page.
 *
 *    - a hash chain mutex is required when associating or disassociating
 *      a page from the VM PAGE CACHE hash table (vm_page_buckets),
 *      regardless of other mutexes or the busy state of a page.
 *
 *    - either a hash chain mutex OR a busied page is required in order
 *      to modify the page flags.  A hash chain mutex must be obtained in
 *      order to busy a page.
 *      A page's flags cannot be modified under the hash chain mutex alone
 *      if the page is marked busy.
 *
 *    - The object memq mutex is held when inserting or removing
 *      pages from an object (vm_page_insert() or vm_page_remove()).  This
 *      is different from the object's main mutex.
 *
 *    Generally speaking, you have to be aware of side effects when running
 *    vm_page ops.  A vm_page_lookup() will return with the hash chain
 *    locked, whether it was able to look up the page or not.  vm_page_free(),
 *    vm_page_cache(), vm_page_activate(), and a number of other routines
 *    will release the hash chain mutex for you.  Intermediate manipulation
 *    routines such as vm_page_flag_set() expect the hash chain to be held
 *    on entry and the hash chain will remain held on return.
 *
 *    pageq scanning can only occur with the pageq in question locked.
 *    We have a known bottleneck with the active queue, but the cache
 *    and free queues are actually arrays already.
 */

/*
 *    Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *    Associated with each page of user-allocatable memory is a
 *    page structure.
 */

struct mtx vm_page_queue_mtx;
struct mtx vm_page_queue_free_mtx;

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
    "number of pages allocated for bootstrapping the VM system");

/*
 *    vm_set_page_size:
 *
 *    Sets the page size, perhaps based upon the memory
 *    size.  Must be called before any use of page-size
 *    dependent functions.
 */
void
vm_set_page_size(void)
{
    if (cnt.v_page_size == 0)
        cnt.v_page_size = PAGE_SIZE;
    if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
        panic("vm_set_page_size: page size not a power of two");
}

/*
 *    vm_page_blacklist_lookup:
 *
 *    See if a physical address in this page has been listed
 *    in the blacklist tunable.  Entries in the tunable are
 *    separated by spaces or commas.  If an invalid integer is
 *    encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
    vm_paddr_t bad;
    char *cp, *pos;

    for (pos = list; *pos != '\0'; pos = cp) {
        bad = strtoq(pos, &cp, 0);
        if (*cp != '\0') {
            if (*cp == ' ' || *cp == ',') {
                cp++;
                if (cp == pos)
                    continue;
            } else
                break;
        }
        if (pa == trunc_page(bad))
            return (1);
    }
    return (0);
}
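/*
 * Illustrative only: the vm.blacklist tunable parsed above is read from
 * the kernel environment, so it is typically set from the loader.  A
 * made-up loader.conf entry such as
 *
 *    vm.blacklist="0x12345000,0x23456000"
 *
 * would keep those two physical pages off the free queues at boot.
 */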
/*
 *    vm_page_startup:
 *
 *    Initializes the resident memory module.
 *
 *    Allocates memory for the page cells, and
 *    for the object/offset-to-page hash table headers.
 *    Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
    vm_offset_t mapped;
    vm_size_t npages;
    vm_paddr_t page_range;
    vm_paddr_t new_end;
    int i;
    vm_paddr_t pa;
    int nblocks;
    vm_paddr_t last_pa;
    char *list;

    /* the biggest memory array is the second group of pages */
    vm_paddr_t end;
    vm_paddr_t biggestsize;
    int biggestone;

    vm_paddr_t total;

    total = 0;
    biggestsize = 0;
    biggestone = 0;
    nblocks = 0;
    vaddr = round_page(vaddr);

    for (i = 0; phys_avail[i + 1]; i += 2) {
        phys_avail[i] = round_page(phys_avail[i]);
        phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
    }

    for (i = 0; phys_avail[i + 1]; i += 2) {
        vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

        if (size > biggestsize) {
            biggestone = i;
            biggestsize = size;
        }
        ++nblocks;
        total += size;
    }

    end = phys_avail[biggestone+1];

    /*
     * Initialize the locks.
     */
    mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
        MTX_RECURSE);
    mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
        MTX_SPIN);

    /*
     * Initialize the queue headers for the free queue, the active queue
     * and the inactive queue.
     */
    vm_pageq_init();

    /*
     * Allocate memory for use when boot strapping the kernel memory
     * allocator.
     */
    new_end = end - (boot_pages * UMA_SLAB_SIZE);
    new_end = trunc_page(new_end);
    mapped = pmap_map(&vaddr, new_end, end,
        VM_PROT_READ | VM_PROT_WRITE);
    bzero((void *)mapped, end - new_end);
    uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__)
    /*
     * Allocate a bitmap to indicate that a random physical page
     * needs to be included in a minidump.
     *
     * The amd64 port needs this to indicate which direct map pages
     * need to be dumped, via calls to dump_add_page()/dump_drop_page().
     *
     * However, i386 still needs this workspace internally within the
     * minidump code.  In theory, they are not needed on i386, but are
     * included should the sf_buf code decide to use them.
     */
    page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
    vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
    new_end -= vm_page_dump_size;
    vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
        new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
    bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
    /*
     * Compute the number of pages of memory that will be available for
     * use (taking into account the overhead of a page structure per
     * page).
     */
    first_page = phys_avail[0] / PAGE_SIZE;
    page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
    npages = (total - (page_range * sizeof(struct vm_page)) -
        (end - new_end)) / PAGE_SIZE;
    end = new_end;

    /*
     * Reserve an unmapped guard page to trap access to vm_page_array[-1].
     */
    vaddr += PAGE_SIZE;

    /*
     * Initialize the mem entry structures now, and put them in the free
     * queue.
     */
    new_end = trunc_page(end - page_range * sizeof(struct vm_page));
    mapped = pmap_map(&vaddr, new_end, end,
        VM_PROT_READ | VM_PROT_WRITE);
    vm_page_array = (vm_page_t) mapped;
#ifdef __amd64__
    /*
     * pmap_map on amd64 comes out of the direct-map, not kvm like i386,
     * so the pages must be tracked for a crashdump to include this data.
     * This includes the vm_page_array and the early UMA bootstrap pages.
     */
    for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
        dump_add_page(pa);
#endif
    phys_avail[biggestone + 1] = new_end;

    /*
     * Clear all of the page structures.
     */
    bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
    vm_page_array_size = page_range;

    /*
     * Construct the free queue(s) in descending order (by physical
     * address) so that the first 16MB of physical memory is allocated
     * last rather than first.  On large-memory machines, this avoids
     * the exhaustion of low physical memory before isa_dma_init has run.
     */
    cnt.v_page_count = 0;
    cnt.v_free_count = 0;
    list = getenv("vm.blacklist");
    for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
        pa = phys_avail[i];
        last_pa = phys_avail[i + 1];
        while (pa < last_pa && npages-- > 0) {
            if (list != NULL &&
                vm_page_blacklist_lookup(list, pa))
                printf("Skipping page with pa 0x%jx\n",
                    (uintmax_t)pa);
            else
                vm_pageq_add_new_page(pa);
            pa += PAGE_SIZE;
        }
    }
    freeenv(list);
    return (vaddr);
}

void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    m->flags |= bits;
}

void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    m->flags &= ~bits;
}

void
vm_page_busy(vm_page_t m)
{

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    KASSERT((m->flags & PG_BUSY) == 0,
        ("vm_page_busy: page already busy!!!"));
    vm_page_flag_set(m, PG_BUSY);
}

/*
 *    vm_page_flash:
 *
 *    Wake up anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    if (m->flags & PG_WANTED) {
        vm_page_flag_clear(m, PG_WANTED);
        wakeup(m);
    }
}

/*
 *    vm_page_wakeup:
 *
 *    Clear the PG_BUSY flag and wake up anyone waiting for the
 *    page.
 */
void
vm_page_wakeup(vm_page_t m)
{

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
    vm_page_flag_clear(m, PG_BUSY);
    vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    m->busy--;
    if (m->busy == 0)
        vm_page_flash(m);
}
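/*
 * Illustrative sketch (not drawn from any one caller): pagers bracket
 * I/O on a page with the busy counter, roughly
 *
 *    vm_page_io_start(m);    (object locked; takes a busy reference)
 *    ...I/O in flight...
 *    vm_page_io_finish(m);   (drops it; wakes sleepers when it hits 0)
 *
 * Threads waiting for the page sleep in vm_page_sleep_if_busy() and are
 * woken through vm_page_flash().
 */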
/*
 *    Keep the page from being freed by the page daemon.  Much the same
 *    effect as wiring, except with much lower overhead; should be used
 *    only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    --mem->hold_count;
    KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
    if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
        vm_page_free_toq(mem);
}

/*
 *    vm_page_free:
 *
 *    Free a page.
 *
 *    The clearing of PG_ZERO is a temporary safety until the code can be
 *    reviewed to determine that PG_ZERO is being properly cleared on
 *    write faults or maps.  PG_ZERO was previously cleared in
 *    vm_page_alloc().
 */
void
vm_page_free(vm_page_t m)
{
    vm_page_flag_clear(m, PG_ZERO);
    vm_page_free_toq(m);
    vm_page_zero_idle_wakeup();
}

/*
 *    vm_page_free_zero:
 *
 *    Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{
    vm_page_flag_set(m, PG_ZERO);
    vm_page_free_toq(m);
}

/*
 *    vm_page_sleep_if_busy:
 *
 *    Sleep and release the page queues lock if PG_BUSY is set or,
 *    if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *    thread slept and the page queues lock was released.
 *    Otherwise, retains the page queues lock and returns FALSE.
 */
int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
        vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
        vm_page_unlock_queues();

        /*
         * It's possible that while we sleep, the page will get
         * unbusied and freed.  If we are holding the object
         * lock, we will assume we hold a reference to the object
         * such that even if m->object changes, we can re-lock
         * it.
         */
        msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
        return (TRUE);
    }
    return (FALSE);
}

/*
 *    vm_page_dirty:
 *
 *    Make the page all dirty.
 */
void
vm_page_dirty(vm_page_t m)
{
    KASSERT(VM_PAGE_GETKNOWNQUEUE1(m) != PQ_CACHE,
        ("vm_page_dirty: page in cache!"));
    KASSERT(VM_PAGE_GETKNOWNQUEUE1(m) != PQ_FREE,
        ("vm_page_dirty: page is free!"));
    m->dirty = VM_PAGE_BITS_ALL;
}
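/*
 * Example of the top-down splay performed by vm_page_splay() below
 * (pindexes shown; looking up pindex 5):
 *
 *        8                 5
 *       /                 / \
 *      3        ==>      3   8
 *       \
 *        5
 *
 * The looked-up page is rotated to the root, so repeated lookups of
 * nearby pindexes stay cheap.
 */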
/*
 *    vm_page_splay:
 *
 *    Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *    the vm_page containing the given pindex.  If, however, that
 *    pindex is not found in the vm_object, returns a vm_page that is
 *    adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
    struct vm_page dummy;
    vm_page_t lefttreemax, righttreemin, y;

    if (root == NULL)
        return (root);
    lefttreemax = righttreemin = &dummy;
    for (;; root = y) {
        if (pindex < root->pindex) {
            if ((y = root->left) == NULL)
                break;
            if (pindex < y->pindex) {
                /* Rotate right. */
                root->left = y->right;
                y->right = root;
                root = y;
                if ((y = root->left) == NULL)
                    break;
            }
            /* Link into the new root's right tree. */
            righttreemin->left = root;
            righttreemin = root;
        } else if (pindex > root->pindex) {
            if ((y = root->right) == NULL)
                break;
            if (pindex > y->pindex) {
                /* Rotate left. */
                root->right = y->left;
                y->left = root;
                root = y;
                if ((y = root->right) == NULL)
                    break;
            }
            /* Link into the new root's left tree. */
            lefttreemax->right = root;
            lefttreemax = root;
        } else
            break;
    }
    /* Assemble the new root. */
    lefttreemax->right = root->left;
    righttreemin->left = root->right;
    root->left = dummy.right;
    root->right = dummy.left;
    return (root);
}

/*
 *    vm_page_insert:        [ internal use only ]
 *
 *    Inserts the given mem entry into the object and object list.
 *
 *    The pagetables are not updated but will presumably fault the page
 *    in if necessary, or if a kernel page the caller will at some point
 *    enter the page into the kernel's pmap.  We are not allowed to block
 *    here so we *can't* do this anyway.
 *
 *    The object and page must be locked.
 *    This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
    vm_page_t root;

    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    if (m->object != NULL)
        panic("vm_page_insert: page already inserted");

    /*
     * Record the object/offset pair in this page.
     */
    m->object = object;
    m->pindex = pindex;

    /*
     * Now link into the object's ordered list of backed pages.
     */
    root = object->root;
    if (root == NULL) {
        m->left = NULL;
        m->right = NULL;
        TAILQ_INSERT_TAIL(&object->memq, m, listq);
    } else {
        root = vm_page_splay(pindex, root);
        if (pindex < root->pindex) {
            m->left = root->left;
            m->right = root;
            root->left = NULL;
            TAILQ_INSERT_BEFORE(root, m, listq);
        } else if (pindex == root->pindex)
            panic("vm_page_insert: offset already allocated");
        else {
            m->right = root->right;
            m->left = root;
            root->right = NULL;
            TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
        }
    }
    object->root = m;
    object->generation++;

    /*
     * Show that the object has one more resident page.
     */
    object->resident_page_count++;
    /*
     * Hold the vnode until the last page is released.
     */
    if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
        vhold((struct vnode *)object->handle);

    /*
     * Since we are inserting a new and possibly dirty page,
     * update the object's OBJ_MIGHTBEDIRTY flag.
     */
    if (m->flags & PG_WRITEABLE)
        vm_object_set_writeable_dirty(object);
}

/*
 *    vm_page_remove:
 *    NOTE: used by device pager as well -wfj
 *
 *    Removes the given mem entry from the object/offset-page
 *    table and the object page list, but does not invalidate/terminate
 *    the backing store.
 *
 *    The object and page must be locked.
 *    The underlying pmap entry (if any) is NOT removed here.
 *    This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
    vm_object_t object;
    vm_page_t root;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if ((object = m->object) == NULL)
        return;
    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    if (m->flags & PG_BUSY) {
        vm_page_flag_clear(m, PG_BUSY);
        vm_page_flash(m);
    }

    /*
     * Now remove from the object's list of backed pages.
     */
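    /*
     * The splay rotates m to the root position (object->root is not
     * reassigned here, but m's left and right fields now hold the two
     * subtrees).  The code below joins them by splaying m->pindex in
     * the left subtree, which brings m's in-order predecessor - a page
     * with no right child - to that subtree's root.
     */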
    if (m != object->root)
        vm_page_splay(m->pindex, object->root);
    if (m->left == NULL)
        root = m->right;
    else {
        root = vm_page_splay(m->pindex, m->left);
        root->right = m->right;
    }
    object->root = root;
    TAILQ_REMOVE(&object->memq, m, listq);

    /*
     * And show that the object has one fewer resident page.
     */
    object->resident_page_count--;
    object->generation++;
    /*
     * The vnode may now be recycled.
     */
    if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
        vdrop((struct vnode *)object->handle);

    m->object = NULL;
}

/*
 *    vm_page_lookup:
 *
 *    Returns the page associated with the object/offset
 *    pair specified; if none is found, NULL is returned.
 *
 *    The object must be locked.
 *    This routine may not block.
 *    This is a critical path routine.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
    vm_page_t m;

    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    if ((m = object->root) != NULL && m->pindex != pindex) {
        m = vm_page_splay(pindex, m);
        if ((object->root = m)->pindex != pindex)
            m = NULL;
    }
    return (m);
}

/*
 *    vm_page_rename:
 *
 *    Move the given memory entry from its
 *    current object to the specified target object/offset.
 *
 *    The object must be locked.
 *    This routine may not block.
 *
 *    Note: swap associated with the page must be invalidated by the move.  We
 *          have to do this for several reasons: (1) we aren't freeing the
 *          page, (2) we are dirtying the page, (3) the VM system is probably
 *          moving the page from object A to B, and will then later move
 *          the backing store from A to B, and we can't have a conflict.
 *
 *    Note: we *always* dirty the page.  It is necessary both for the
 *          fact that we moved it, and because we may be invalidating
 *          swap.  If the page is in the cache, we have to deactivate it
 *          or vm_page_dirty() will panic.  Dirty pages are not allowed
 *          in the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{

    vm_page_remove(m);
    vm_page_insert(m, new_object, new_pindex);
    if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
        vm_page_deactivate(m);
    vm_page_dirty(m);
}
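/*
 * Illustrative note: object collapse (vm_object_collapse() and its
 * backing-object scan) is a typical vm_page_rename() caller, migrating
 * pages from a backing object into the shadowing object.
 */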
/*
 *    vm_page_select_cache:
 *
 *    Move a page of the given color from the cache queue to the free
 *    queue.  Pages that are found but are not applicable are
 *    deactivated.
 *
 *    This routine may not block.
 */
vm_page_t
vm_page_select_cache(int color)
{
    vm_object_t object;
    vm_page_t m;
    boolean_t was_trylocked;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    while ((m = vm_pageq_find(PQ_CACHE, color, FALSE)) != NULL) {
        KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
        KASSERT(!pmap_page_is_mapped(m),
            ("Found mapped cache page %p", m));
        KASSERT((m->flags & PG_UNMANAGED) == 0,
            ("Found unmanaged cache page %p", m));
        KASSERT(m->wire_count == 0, ("Found wired cache page %p", m));
        if (m->hold_count == 0 && (object = m->object,
            (was_trylocked = VM_OBJECT_TRYLOCK(object)) ||
            VM_OBJECT_LOCKED(object))) {
            KASSERT((m->flags & PG_BUSY) == 0 && m->busy == 0,
                ("Found busy cache page %p", m));
            vm_page_free(m);
            if (was_trylocked)
                VM_OBJECT_UNLOCK(object);
            break;
        }
        vm_page_deactivate(m);
    }
    return (m);
}

/*
 *    vm_page_alloc:
 *
 *    Allocate and return a memory cell associated
 *    with this VM object/offset pair.
 *
 *    page_req classes:
 *    VM_ALLOC_NORMAL        normal process request
 *    VM_ALLOC_SYSTEM        system *really* needs a page
 *    VM_ALLOC_INTERRUPT     interrupt time request
 *    VM_ALLOC_ZERO          zero page
 *
 *    This routine may not block.
 *
 *    Additional special handling is required when called from an
 *    interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *    the page cache in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
    vm_page_t m = NULL;
    int color, flags, page_req;

    page_req = req & VM_ALLOC_CLASS_MASK;
    KASSERT(curthread->td_intr_nesting_level == 0 ||
        page_req == VM_ALLOC_INTERRUPT,
        ("vm_page_alloc(NORMAL|SYSTEM) in interrupt context"));

    if ((req & VM_ALLOC_NOOBJ) == 0) {
        KASSERT(object != NULL,
            ("vm_page_alloc: NULL object."));
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        color = (pindex + object->pg_color) & PQ_COLORMASK;
    } else
        color = pindex & PQ_COLORMASK;

    /*
     * The pager is allowed to eat deeper into the free page list.
     */
    if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT))
        page_req = VM_ALLOC_SYSTEM;

loop:
    mtx_lock_spin(&vm_page_queue_free_mtx);
    if (cnt.v_free_count > cnt.v_free_reserved ||
        (page_req == VM_ALLOC_SYSTEM &&
        cnt.v_cache_count == 0 &&
        cnt.v_free_count > cnt.v_interrupt_free_min) ||
        (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
        /*
         * Allocate from the free queue if the number of free pages
         * exceeds the minimum for the request class.
         */
        m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
    } else if (page_req != VM_ALLOC_INTERRUPT) {
        mtx_unlock_spin(&vm_page_queue_free_mtx);
        /*
         * Allocatable from cache (non-interrupt only).  On success,
         * we must free the page and try again, thus ensuring that
         * cnt.v_*_free_min counters are replenished.
         */
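        /*
         * If a cache page is found it is freed (moved to PQ_FREE) by
         * vm_page_select_cache() and we retry from "loop"; otherwise
         * the page daemon is kicked and, for VM_ALLOC_SYSTEM requests
         * only, the interrupt-level reserve is dipped into below.
         */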
        vm_page_lock_queues();
        if ((m = vm_page_select_cache(color)) == NULL) {
            KASSERT(cnt.v_cache_count == 0,
                ("vm_page_alloc: cache queue is missing %d pages",
                cnt.v_cache_count));
            vm_page_unlock_queues();
            atomic_add_int(&vm_pageout_deficit, 1);
            pagedaemon_wakeup();

            if (page_req != VM_ALLOC_SYSTEM)
                return (NULL);

            mtx_lock_spin(&vm_page_queue_free_mtx);
            if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
                mtx_unlock_spin(&vm_page_queue_free_mtx);
                return (NULL);
            }
            m = vm_pageq_find(PQ_FREE, color,
                (req & VM_ALLOC_ZERO) != 0);
        } else {
            vm_page_unlock_queues();
            goto loop;
        }
    } else {
        /*
         * Not allocatable from cache from interrupt, give up.
         */
        mtx_unlock_spin(&vm_page_queue_free_mtx);
        atomic_add_int(&vm_pageout_deficit, 1);
        pagedaemon_wakeup();
        return (NULL);
    }

    /*
     * At this point we had better have found a good page.
     */
    KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue"));

    /*
     * Remove from free queue.
     */
    vm_pageq_remove_nowakeup(m);

    /*
     * Initialize structure.  Only the PG_ZERO flag is inherited.
     */
    flags = PG_BUSY;
    if (m->flags & PG_ZERO) {
        vm_page_zero_count--;
        if (req & VM_ALLOC_ZERO)
            flags = PG_ZERO | PG_BUSY;
    }
    if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
        flags &= ~PG_BUSY;
    m->flags = flags;
    if (req & VM_ALLOC_WIRED) {
        atomic_add_int(&cnt.v_wire_count, 1);
        m->wire_count = 1;
    } else
        m->wire_count = 0;
    m->hold_count = 0;
    m->act_count = 0;
    m->busy = 0;
    m->valid = 0;
    KASSERT(m->dirty == 0,
        ("vm_page_alloc: free/cache page %p was dirty", m));
    mtx_unlock_spin(&vm_page_queue_free_mtx);

    if ((req & VM_ALLOC_NOOBJ) == 0)
        vm_page_insert(m, object, pindex);
    else
        m->pindex = pindex;

    /*
     * Don't wake up too often - wake up the pageout daemon when
     * we would be nearly out of memory.
     */
    if (vm_paging_needed())
        pagedaemon_wakeup();

    return (m);
}

/*
 *    vm_wait:    (also see VM_WAIT macro)
 *
 *    Block until free pages are available for allocation
 *    - Called in various places before memory allocations.
 */
void
vm_wait(void)
{

    vm_page_lock_queues();
    if (curproc == pageproc) {
        vm_pageout_pages_needed = 1;
        msleep(&vm_pageout_pages_needed, &vm_page_queue_mtx,
            PDROP | PSWP, "VMWait", 0);
    } else {
        if (!vm_pages_needed) {
            vm_pages_needed = 1;
            wakeup(&vm_pages_needed);
        }
        msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PVM,
            "vmwait", 0);
    }
}

/*
 *    vm_waitpfault:    (also see VM_WAITPFAULT macro)
 *
 *    Block until free pages are available for allocation
 *    - Called only in vm_fault so that processes page faulting
 *      can be easily tracked.
 *    - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *      processes will be able to grab memory first.  Do not change
 *      this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

    vm_page_lock_queues();
    if (!vm_pages_needed) {
        vm_pages_needed = 1;
        wakeup(&vm_pages_needed);
    }
    msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PUSER,
        "pfault", 0);
}
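/*
 * Illustrative sketch: a typical allocation loop built on vm_wait()
 * (via the VM_WAIT macro) looks roughly like vm_page_grab() below:
 *
 *    while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
 *        VM_OBJECT_UNLOCK(object);
 *        VM_WAIT;
 *        VM_OBJECT_LOCK(object);
 *    }
 */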
/*
 *    vm_page_activate:
 *
 *    Put the specified page on the active list (if appropriate).
 *    Ensure that act_count is at least ACT_INIT but do not otherwise
 *    mess with it.
 *
 *    The page queues must be locked.
 *    This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
        if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
            cnt.v_reactivated++;
        vm_pageq_remove(m);
        if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
            if (m->act_count < ACT_INIT)
                m->act_count = ACT_INIT;
            vm_pageq_enqueue(PQ_ACTIVE, m);
        }
    } else {
        if (m->act_count < ACT_INIT)
            m->act_count = ACT_INIT;
    }
}

/*
 *    vm_page_free_wakeup:
 *
 *    Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *    routine is called when a page has been added to the cache or free
 *    queues.
 *
 *    The page queues must be locked.
 *    This routine may not block.
 */
static inline void
vm_page_free_wakeup(void)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    /*
     * If the pageout daemon needs pages, then tell it that there are
     * some free.
     */
    if (vm_pageout_pages_needed &&
        cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
        wakeup(&vm_pageout_pages_needed);
        vm_pageout_pages_needed = 0;
    }
    /*
     * Wake up processes that are waiting on memory if we hit a
     * high water mark, and wake up the scheduler process if we have
     * lots of memory; it will swap in processes.
     */
    if (vm_pages_needed && !vm_page_count_min()) {
        vm_pages_needed = 0;
        wakeup(&cnt.v_free_count);
    }
}

/*
 *    vm_page_free_toq:
 *
 *    Returns the given page to the PQ_FREE list,
 *    disassociating it from any VM object.
 *
 *    Object and page must be locked prior to entry.
 *    This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
    struct vpgqueues *pq;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    KASSERT(!pmap_page_is_mapped(m),
        ("vm_page_free_toq: freeing mapped page %p", m));
    cnt.v_tfree++;

    if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
        printf(
        "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
            (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
            m->hold_count);
        if (VM_PAGE_INQUEUE1(m, PQ_FREE))
            panic("vm_page_free: freeing free page");
        else
            panic("vm_page_free: freeing busy page");
    }

    /*
     * Unqueue, then remove page.  Note that we cannot destroy
     * the page here because we do not want to call the pager's
     * callback routine until after we've put the page on the
     * appropriate free queue.
     */
    vm_pageq_remove_nowakeup(m);
    vm_page_remove(m);

    /*
     * If fictitious, remove the object association and return;
     * otherwise delay object association removal.
     */
    if ((m->flags & PG_FICTITIOUS) != 0) {
        return;
    }

    m->valid = 0;
    vm_page_undirty(m);

    if (m->wire_count != 0) {
        if (m->wire_count > 1) {
            panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
                m->wire_count, (long)m->pindex);
        }
        panic("vm_page_free: freeing wired page");
    }

    /*
     * Clear the UNMANAGED flag when freeing an unmanaged page.
     */
    if (m->flags & PG_UNMANAGED) {
        m->flags &= ~PG_UNMANAGED;
    }

    if (m->hold_count != 0) {
        m->flags &= ~PG_ZERO;
        VM_PAGE_SETQUEUE2(m, PQ_HOLD);
    } else
        VM_PAGE_SETQUEUE1(m, PQ_FREE);
    pq = &vm_page_queues[VM_PAGE_GETQUEUE(m)];
    mtx_lock_spin(&vm_page_queue_free_mtx);
    pq->lcnt++;
    ++(*pq->cnt);

    /*
     * Put zero'd pages at the end (where we look for zero'd pages
     * first) and non-zero'd pages at the head.
     */
    if (m->flags & PG_ZERO) {
        TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
        ++vm_page_zero_count;
    } else {
        TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
    }
    mtx_unlock_spin(&vm_page_queue_free_mtx);
    vm_page_free_wakeup();
}

/*
 *    vm_page_unmanage:
 *
 *    Prevent PV management from being done on the page.  The page is
 *    removed from the paging queues as if it were wired, and as a
 *    consequence of no longer being managed the pageout daemon will not
 *    touch it (since there is no way to locate the pte mappings for the
 *    page).  madvise() calls that mess with the pmap will also no longer
 *    operate on the page.
 *
 *    Beyond that the page is still reasonably 'normal'.  Freeing the page
 *    will clear the flag.
 *
 *    This routine is used by OBJT_PHYS objects - objects using unswappable
 *    physical memory as backing store rather than swap-backed memory and
 *    will eventually be extended to support 4MB unmanaged physical
 *    mappings.
 */
void
vm_page_unmanage(vm_page_t m)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if ((m->flags & PG_UNMANAGED) == 0) {
        if (m->wire_count == 0)
            vm_pageq_remove(m);
    }
    vm_page_flag_set(m, PG_UNMANAGED);
}

/*
 *    vm_page_wire:
 *
 *    Mark this page as wired down by yet
 *    another map, removing it from paging queues
 *    as necessary.
 *
 *    The page queues must be locked.
 *    This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{

    /*
     * Only bump the wire statistics if the page is not already wired,
     * and only unqueue the page if it is on some queue (if it is unmanaged
     * it is already off the queues).
     */
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (m->flags & PG_FICTITIOUS)
        return;
    if (m->wire_count == 0) {
        if ((m->flags & PG_UNMANAGED) == 0)
            vm_pageq_remove(m);
        atomic_add_int(&cnt.v_wire_count, 1);
    }
    m->wire_count++;
    KASSERT(m->wire_count != 0,
        ("vm_page_wire: wire_count overflow m=%p", m));
}
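/*
 * Illustrative only: wiring is reference counted, so every vm_page_wire()
 * must eventually be balanced by a vm_page_unwire(), e.g.
 *
 *    vm_page_lock_queues();
 *    vm_page_wire(m);        (take the page off the paging queues)
 *    vm_page_unlock_queues();
 *    ...access the page...
 *    vm_page_lock_queues();
 *    vm_page_unwire(m, 0);   (requeue inactive once the count hits 0)
 *    vm_page_unlock_queues();
 */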
/*
 *    vm_page_unwire:
 *
 *    Release one wiring of this page, potentially
 *    enabling it to be paged again.
 *
 *    Many pages placed on the inactive queue should actually go
 *    into the cache, but it is difficult to figure out which.  What
 *    we do instead, if the inactive target is well met, is to put
 *    clean pages at the head of the inactive queue instead of the tail.
 *    This will cause them to be moved to the cache more quickly and
 *    if not actively re-referenced, freed more quickly.  If we just
 *    stick these pages at the end of the inactive queue, heavy filesystem
 *    meta-data accesses can cause an unnecessary paging load on memory bound
 *    processes.  This optimization causes one-time-use metadata to be
 *    reused more quickly.
 *
 *    BUT, if we are in a low-memory situation we have no choice but to
 *    put clean pages on the cache queue.
 *
 *    A number of routines use vm_page_unwire() to guarantee that the page
 *    will go into either the inactive or active queues, and will NEVER
 *    be placed in the cache - for example, just after dirtying a page.
 *    Dirty pages in the cache are not allowed.
 *
 *    The page queues must be locked.
 *    This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (m->flags & PG_FICTITIOUS)
        return;
    if (m->wire_count > 0) {
        m->wire_count--;
        if (m->wire_count == 0) {
            atomic_subtract_int(&cnt.v_wire_count, 1);
            if (m->flags & PG_UNMANAGED) {
                ;
            } else if (activate)
                vm_pageq_enqueue(PQ_ACTIVE, m);
            else {
                vm_page_flag_clear(m, PG_WINATCFLS);
                vm_pageq_enqueue(PQ_INACTIVE, m);
            }
        }
    } else {
        panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
    }
}

/*
 *    Move the specified page to the inactive queue.  If the page has
 *    any associated swap, the swap is deallocated.
 *
 *    Normally athead is 0, resulting in LRU operation.  athead is set
 *    to 1 if we want this page to be 'as if it were placed in the cache',
 *    except without unmapping it from the process address space.
 *
 *    This routine may not block.
 */
static inline void
_vm_page_deactivate(vm_page_t m, int athead)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);

    /*
     * Ignore if already inactive.
     */
    if (VM_PAGE_INQUEUE2(m, PQ_INACTIVE))
        return;
    if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
        if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
            cnt.v_reactivated++;
        vm_page_flag_clear(m, PG_WINATCFLS);
        vm_pageq_remove(m);
        if (athead)
            TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl,
                m, pageq);
        else
            TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl,
                m, pageq);
        VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
        vm_page_queues[PQ_INACTIVE].lcnt++;
        cnt.v_inactive_count++;
    }
}

void
vm_page_deactivate(vm_page_t m)
{
    _vm_page_deactivate(m, 0);
}

/*
 *    vm_page_try_to_cache:
 *
 *    Returns 0 on failure, 1 on success.
 */
int
vm_page_try_to_cache(vm_page_t m)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    if (m->dirty || m->hold_count || m->busy || m->wire_count ||
        (m->flags & (PG_BUSY|PG_UNMANAGED))) {
        return (0);
    }
    pmap_remove_all(m);
    if (m->dirty)
        return (0);
    vm_page_cache(m);
    return (1);
}

/*
 *    vm_page_try_to_free()
 *
 *    Attempt to free the page.  If we cannot free it, we do nothing.
 *    1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (m->object != NULL)
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    if (m->dirty || m->hold_count || m->busy || m->wire_count ||
        (m->flags & (PG_BUSY|PG_UNMANAGED))) {
        return (0);
    }
    pmap_remove_all(m);
    if (m->dirty)
        return (0);
    vm_page_free(m);
    return (1);
}
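/*
 * Note on the two dirty tests in vm_page_try_to_cache() and
 * vm_page_try_to_free() above: pmap_remove_all() can transfer the
 * modified bit from the pmap into m->dirty, so a page that tested
 * clean may become dirty as a side effect of being unmapped; hence
 * dirty is rechecked after the pmap removal.
 */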
/*
 *    vm_page_cache:
 *
 *    Put the specified page onto the page cache queue (if appropriate).
 *
 *    This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
        m->hold_count || m->wire_count) {
        printf("vm_page_cache: attempting to cache busy page\n");
        return;
    }
    if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
        return;

    /*
     * Remove all pmaps and indicate that the page is not
     * writeable or mapped.
     */
    pmap_remove_all(m);
    if (m->dirty != 0) {
        panic("vm_page_cache: caching a dirty page, pindex: %ld",
            (long)m->pindex);
    }
    vm_pageq_remove_nowakeup(m);
    vm_pageq_enqueue(PQ_CACHE + m->pc, m);
    vm_page_free_wakeup();
}

/*
 *    vm_page_dontneed:
 *
 *    Cache, deactivate, or do nothing as appropriate.  This routine
 *    is typically used by madvise() MADV_DONTNEED.
 *
 *    Generally speaking we want to move the page into the cache so
 *    it gets reused quickly.  However, this can result in a silly syndrome
 *    due to the page recycling too quickly.  Small objects will not be
 *    fully cached.  On the other hand, if we move the page to the inactive
 *    queue we wind up with a problem whereby very large objects
 *    unnecessarily blow away our inactive and cache queues.
 *
 *    The solution is to move the pages based on a fixed weighting.  We
 *    either leave them alone, deactivate them, or move them to the cache,
 *    where moving them to the cache has the highest weighting.
 *    By forcing some pages into other queues we eventually force the
 *    system to balance the queues, potentially recovering other unrelated
 *    space from active.  The idea is to not force this to happen too
 *    often.
 */
void
vm_page_dontneed(vm_page_t m)
{
    static int dnweight;
    int dnw;
    int head;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    dnw = ++dnweight;

    /*
     * Occasionally leave the page alone.
     */
    if ((dnw & 0x01F0) == 0 ||
        VM_PAGE_INQUEUE2(m, PQ_INACTIVE) ||
        VM_PAGE_INQUEUE1(m, PQ_CACHE)) {
        if (m->act_count >= ACT_INIT)
            --m->act_count;
        return;
    }

    if (m->dirty == 0 && pmap_is_modified(m))
        vm_page_dirty(m);

    if (m->dirty || (dnw & 0x0070) == 0) {
        /*
         * Deactivate the page 3 times out of 32.
         */
        head = 0;
    } else {
        /*
         * Cache the page 28 times out of every 32.  Note that
         * the page is deactivated instead of cached, but placed
         * at the head of the queue instead of the tail.
         */
        head = 1;
    }
    _vm_page_deactivate(m, head);
}
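/*
 * The weights above work out as follows for clean pages:
 * (dnw & 0x01F0) == 0 for 16 of every 512 calls, i.e. 1 in 32 (leave
 * alone); (dnw & 0x0070) == 0 for 4 in 32, one of which is already
 * covered by the first test, leaving the 3 in 32 that deactivate; the
 * remaining 28 in 32 are cached.
 */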
/*
 *    Grab a page, waiting until we are woken up due to the page
 *    changing state.  We keep on waiting if the page continues
 *    to be in the object.  If the page doesn't exist, first allocate it
 *    and then conditionally zero it.
 *
 *    This routine may block.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
    vm_page_t m;

    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
retrylookup:
    if ((m = vm_page_lookup(object, pindex)) != NULL) {
        vm_page_lock_queues();
        if (m->busy || (m->flags & PG_BUSY)) {
            vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
            vm_page_unlock_queues();
            msleep(m, VM_OBJECT_MTX(m->object), PVM, "pgrbwt", 0);
            if ((allocflags & VM_ALLOC_RETRY) == 0)
                return (NULL);
            goto retrylookup;
        } else {
            if (allocflags & VM_ALLOC_WIRED)
                vm_page_wire(m);
            if ((allocflags & VM_ALLOC_NOBUSY) == 0)
                vm_page_busy(m);
            vm_page_unlock_queues();
            return (m);
        }
    }
    m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
    if (m == NULL) {
        VM_OBJECT_UNLOCK(object);
        VM_WAIT;
        VM_OBJECT_LOCK(object);
        if ((allocflags & VM_ALLOC_RETRY) == 0)
            return (NULL);
        goto retrylookup;
    }
    if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
        pmap_zero_page(m);
    return (m);
}

/*
 *    Mapping function for valid bits or for dirty bits in
 *    a page.  May not block.
 *
 *    Inputs are required to range within a page.
 */
inline int
vm_page_bits(int base, int size)
{
    int first_bit;
    int last_bit;

    KASSERT(base + size <= PAGE_SIZE,
        ("vm_page_bits: illegal base/size %d/%d", base, size));

    if (size == 0)        /* handle degenerate case */
        return (0);

    first_bit = base >> DEV_BSHIFT;
    last_bit = (base + size - 1) >> DEV_BSHIFT;

    return ((2 << last_bit) - (1 << first_bit));
}
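/*
 * Worked example (assuming DEV_BSIZE == 512 and PAGE_SIZE == 4096):
 * vm_page_bits(512, 1024) covers disk blocks 1 and 2 of the page, so
 * first_bit == 1, last_bit == 2, and the result is
 * (2 << 2) - (1 << 1) == 0x06.
 */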
/*
 *    vm_page_set_validclean:
 *
 *    Sets portions of a page valid and clean.  The arguments are expected
 *    to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *    of any partial chunks touched by the range.  The invalid portion of
 *    such chunks will be zero'd.
 *
 *    This routine may not block.
 *
 *    (base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
    int pagebits;
    int frag;
    int endoff;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    if (size == 0)        /* handle degenerate case */
        return;

    /*
     * If the base is not DEV_BSIZE aligned and the valid
     * bit is clear, we have to zero out a portion of the
     * first block.
     */
    if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
        (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
        pmap_zero_page_area(m, frag, base - frag);

    /*
     * If the ending offset is not DEV_BSIZE aligned and the
     * valid bit is clear, we have to zero out a portion of
     * the last block.
     */
    endoff = base + size;
    if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
        (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
        pmap_zero_page_area(m, endoff,
            DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

    /*
     * Set valid, clear dirty bits.  If validating the entire
     * page we can safely clear the pmap modify bit.  We also
     * use this opportunity to clear the PG_NOSYNC flag.  If a process
     * takes a write fault on a MAP_NOSYNC memory area the flag will
     * be set again.
     *
     * We set valid bits inclusive of any overlap, but we can only
     * clear dirty bits for DEV_BSIZE chunks that are fully within
     * the range.
     */
    pagebits = vm_page_bits(base, size);
    m->valid |= pagebits;
#if 0    /* NOT YET */
    if ((frag = base & (DEV_BSIZE - 1)) != 0) {
        frag = DEV_BSIZE - frag;
        base += frag;
        size -= frag;
        if (size < 0)
            size = 0;
    }
    pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
    m->dirty &= ~pagebits;
    if (base == 0 && size == PAGE_SIZE) {
        pmap_clear_modify(m);
        vm_page_flag_clear(m, PG_NOSYNC);
    }
}

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    m->dirty &= ~vm_page_bits(base, size);
}

/*
 *    vm_page_set_invalid:
 *
 *    Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *    valid and dirty bits for the affected areas are cleared.
 *
 *    May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
    int bits;

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    bits = vm_page_bits(base, size);
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
        pmap_remove_all(m);
    m->valid &= ~bits;
    m->dirty &= ~bits;
    m->object->generation++;
}

/*
 *    vm_page_zero_invalid()
 *
 *    The kernel assumes that the invalid portions of a page contain
 *    garbage, but such pages can be mapped into memory by user code.
 *    When this occurs, we must zero out the non-valid portions of the
 *    page so user code sees what it expects.
 *
 *    Pages are most often semi-valid when the end of a file is mapped
 *    into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
    int b;
    int i;

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    /*
     * Scan the valid bits looking for invalid sections that
     * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
     * valid bit may be set ) have already been zeroed by
     * vm_page_set_validclean().
     */
    for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
        if (i == (PAGE_SIZE / DEV_BSIZE) ||
            (m->valid & (1 << i))) {
            if (i > b) {
                pmap_zero_page_area(m,
                    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
            }
            b = i + 1;
        }
    }

    /*
     * setvalid is TRUE when we can safely set the zero'd areas
     * as being valid.  We can do this if there are no cache consistency
     * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
     */
    if (setvalid)
        m->valid = VM_PAGE_BITS_ALL;
}

/*
 *    vm_page_is_valid:
 *
 *    Is (partial) page valid?  Note that the case where size == 0
 *    will return FALSE in the degenerate case where the page is
 *    entirely invalid, and TRUE otherwise.
 *
 *    May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
    int bits = vm_page_bits(base, size);

    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
    if (m->valid && ((m->valid & bits) == bits))
        return (1);
    else
        return (0);
}
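/*
 * Illustrative only: a filesystem deciding whether read I/O is needed
 * for a sub-range of a page might use something like
 *
 *    if (vm_page_is_valid(m, base, size))
 *        ...skip the read, the range is already resident...
 */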
/*
 * Update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
    if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
        vm_page_dirty(m);
    }
}

int so_zerocp_fullpage = 0;

void
vm_page_cowfault(vm_page_t m)
{
    vm_page_t mnew;
    vm_object_t object;
    vm_pindex_t pindex;

    object = m->object;
    pindex = m->pindex;

retry_alloc:
    pmap_remove_all(m);
    vm_page_remove(m);
    mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
    if (mnew == NULL) {
        vm_page_insert(m, object, pindex);
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(object);
        VM_WAIT;
        VM_OBJECT_LOCK(object);
        vm_page_lock_queues();
        goto retry_alloc;
    }

    if (m->cow == 0) {
        /*
         * Check to see if we raced with an xmit complete when
         * waiting to allocate a page.  If so, put things back
         * the way they were.
         */
        vm_page_free(mnew);
        vm_page_insert(m, object, pindex);
    } else { /* clear COW & copy page */
        if (!so_zerocp_fullpage)
            pmap_copy_page(m, mnew);
        mnew->valid = VM_PAGE_BITS_ALL;
        vm_page_dirty(mnew);
        vm_page_flag_clear(mnew, PG_BUSY);
        mnew->wire_count = m->wire_count - m->cow;
        m->wire_count = m->cow;
    }
}

void
vm_page_cowclear(vm_page_t m)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (m->cow) {
        m->cow--;
        /*
         * Let vm_fault add back write permission lazily.
         */
    }
    /*
     * sf_buf_free() will free the page, so we needn't do it here.
     */
}

void
vm_page_cowsetup(vm_page_t m)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    m->cow++;
    pmap_remove_write(m);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
    db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
    db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
    db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
    db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
    db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
    db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
    db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
    db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
    db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
    db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
    int i;

    db_printf("PQ_FREE:");
    for (i = 0; i < PQ_NUMCOLORS; i++) {
        db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
    }
    db_printf("\n");

    db_printf("PQ_CACHE:");
    for (i = 0; i < PQ_NUMCOLORS; i++) {
        db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
    }
    db_printf("\n");

    db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
        vm_page_queues[PQ_ACTIVE].lcnt,
        vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */