1 /*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * The Mach Operating System project at Carnegie-Mellon University. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 34 */ 35 36 /*- 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38 * All rights reserved. 39 * 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63 /* 64 * GENERAL RULES ON VM_PAGE MANIPULATION 65 * 66 * - a pageq mutex is required when adding or removing a page from a 67 * page queue (vm_page_queue[]), regardless of other mutexes or the 68 * busy state of a page. 69 * 70 * - The object mutex is held when inserting or removing 71 * pages from an object (vm_page_insert() or vm_page_remove()). 72 * 73 */ 74 75 /* 76 * Resident memory management module. 
77 */ 78 79 #include <sys/cdefs.h> 80 __FBSDID("$FreeBSD$"); 81 82 #include "opt_vm.h" 83 84 #include <sys/param.h> 85 #include <sys/systm.h> 86 #include <sys/lock.h> 87 #include <sys/kernel.h> 88 #include <sys/limits.h> 89 #include <sys/malloc.h> 90 #include <sys/msgbuf.h> 91 #include <sys/mutex.h> 92 #include <sys/proc.h> 93 #include <sys/sysctl.h> 94 #include <sys/vmmeter.h> 95 #include <sys/vnode.h> 96 97 #include <vm/vm.h> 98 #include <vm/pmap.h> 99 #include <vm/vm_param.h> 100 #include <vm/vm_kern.h> 101 #include <vm/vm_object.h> 102 #include <vm/vm_page.h> 103 #include <vm/vm_pageout.h> 104 #include <vm/vm_pager.h> 105 #include <vm/vm_phys.h> 106 #include <vm/vm_reserv.h> 107 #include <vm/vm_extern.h> 108 #include <vm/uma.h> 109 #include <vm/uma_int.h> 110 111 #include <machine/md_var.h> 112 113 /* 114 * Associated with page of user-allocatable memory is a 115 * page structure. 116 */ 117 118 struct vpgqueues vm_page_queues[PQ_COUNT]; 119 struct vpglocks vm_page_queue_lock; 120 struct vpglocks vm_page_queue_free_lock; 121 122 struct vpglocks pa_lock[PA_LOCK_COUNT]; 123 124 vm_page_t vm_page_array = 0; 125 int vm_page_array_size = 0; 126 long first_page = 0; 127 int vm_page_zero_count = 0; 128 129 static int boot_pages = UMA_BOOT_PAGES; 130 TUNABLE_INT("vm.boot_pages", &boot_pages); 131 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0, 132 "number of pages allocated for bootstrapping the VM system"); 133 134 static int pa_tryrelock_restart; 135 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD, 136 &pa_tryrelock_restart, 0, "Number of tryrelock restarts"); 137 138 static uma_zone_t fakepg_zone; 139 140 static void vm_page_clear_dirty_mask(vm_page_t m, int pagebits); 141 static void vm_page_queue_remove(int queue, vm_page_t m); 142 static void vm_page_enqueue(int queue, vm_page_t m); 143 static void vm_page_init_fakepg(void *dummy); 144 145 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL); 146 147 static void 148 vm_page_init_fakepg(void *dummy) 149 { 150 151 fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL, 152 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM); 153 } 154 155 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */ 156 #if PAGE_SIZE == 32768 157 #ifdef CTASSERT 158 CTASSERT(sizeof(u_long) >= 8); 159 #endif 160 #endif 161 162 /* 163 * Try to acquire a physical address lock while a pmap is locked. If we 164 * fail to trylock we unlock and lock the pmap directly and cache the 165 * locked pa in *locked. The caller should then restart their loop in case 166 * the virtual to physical mapping has changed. 167 */ 168 int 169 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) 170 { 171 vm_paddr_t lockpa; 172 173 lockpa = *locked; 174 *locked = pa; 175 if (lockpa) { 176 PA_LOCK_ASSERT(lockpa, MA_OWNED); 177 if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa)) 178 return (0); 179 PA_UNLOCK(lockpa); 180 } 181 if (PA_TRYLOCK(pa)) 182 return (0); 183 PMAP_UNLOCK(pmap); 184 atomic_add_int(&pa_tryrelock_restart, 1); 185 PA_LOCK(pa); 186 PMAP_LOCK(pmap); 187 return (EAGAIN); 188 } 189 190 /* 191 * vm_set_page_size: 192 * 193 * Sets the page size, perhaps based upon the memory 194 * size. Must be called before any use of page-size 195 * dependent functions. 
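 *
 *	Worked example (added for illustration, not part of the original
 *	comment): with 4KB pages, cnt.v_page_size is 0x1000 and
 *	(0x1000 - 1) & 0x1000 == 0, so the power-of-two check below passes;
 *	a bogus value such as 0x1800 leaves bit 0x1000 set in the AND and
 *	triggers the panic.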
196 */ 197 void 198 vm_set_page_size(void) 199 { 200 if (cnt.v_page_size == 0) 201 cnt.v_page_size = PAGE_SIZE; 202 if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0) 203 panic("vm_set_page_size: page size not a power of two"); 204 } 205 206 /* 207 * vm_page_blacklist_lookup: 208 * 209 * See if a physical address in this page has been listed 210 * in the blacklist tunable. Entries in the tunable are 211 * separated by spaces or commas. If an invalid integer is 212 * encountered then the rest of the string is skipped. 213 */ 214 static int 215 vm_page_blacklist_lookup(char *list, vm_paddr_t pa) 216 { 217 vm_paddr_t bad; 218 char *cp, *pos; 219 220 for (pos = list; *pos != '\0'; pos = cp) { 221 bad = strtoq(pos, &cp, 0); 222 if (*cp != '\0') { 223 if (*cp == ' ' || *cp == ',') { 224 cp++; 225 if (cp == pos) 226 continue; 227 } else 228 break; 229 } 230 if (pa == trunc_page(bad)) 231 return (1); 232 } 233 return (0); 234 } 235 236 /* 237 * vm_page_startup: 238 * 239 * Initializes the resident memory module. 240 * 241 * Allocates memory for the page cells, and 242 * for the object/offset-to-page hash table headers. 243 * Each page cell is initialized and placed on the free list. 244 */ 245 vm_offset_t 246 vm_page_startup(vm_offset_t vaddr) 247 { 248 vm_offset_t mapped; 249 vm_paddr_t page_range; 250 vm_paddr_t new_end; 251 int i; 252 vm_paddr_t pa; 253 vm_paddr_t last_pa; 254 char *list; 255 256 /* the biggest memory array is the second group of pages */ 257 vm_paddr_t end; 258 vm_paddr_t biggestsize; 259 vm_paddr_t low_water, high_water; 260 int biggestone; 261 262 biggestsize = 0; 263 biggestone = 0; 264 vaddr = round_page(vaddr); 265 266 for (i = 0; phys_avail[i + 1]; i += 2) { 267 phys_avail[i] = round_page(phys_avail[i]); 268 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 269 } 270 271 low_water = phys_avail[0]; 272 high_water = phys_avail[1]; 273 274 for (i = 0; phys_avail[i + 1]; i += 2) { 275 vm_paddr_t size = phys_avail[i + 1] - phys_avail[i]; 276 277 if (size > biggestsize) { 278 biggestone = i; 279 biggestsize = size; 280 } 281 if (phys_avail[i] < low_water) 282 low_water = phys_avail[i]; 283 if (phys_avail[i + 1] > high_water) 284 high_water = phys_avail[i + 1]; 285 } 286 287 #ifdef XEN 288 low_water = 0; 289 #endif 290 291 end = phys_avail[biggestone+1]; 292 293 /* 294 * Initialize the locks. 295 */ 296 mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF | 297 MTX_RECURSE); 298 mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL, 299 MTX_DEF); 300 301 /* Setup page locks. */ 302 for (i = 0; i < PA_LOCK_COUNT; i++) 303 mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF); 304 305 /* 306 * Initialize the queue headers for the hold queue, the active queue, 307 * and the inactive queue. 308 */ 309 for (i = 0; i < PQ_COUNT; i++) 310 TAILQ_INIT(&vm_page_queues[i].pl); 311 vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count; 312 vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count; 313 vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count; 314 315 /* 316 * Allocate memory for use when boot strapping the kernel memory 317 * allocator. 
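	 *
	 * (Clarifying note, added: the code below carves boot_pages *
	 * UMA_SLAB_SIZE bytes off the top of the largest phys_avail[]
	 * segment, maps and zeroes it, and hands it to uma_startup() so
	 * that UMA can serve allocations before vm_page_alloc() works.)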
318 */ 319 new_end = end - (boot_pages * UMA_SLAB_SIZE); 320 new_end = trunc_page(new_end); 321 mapped = pmap_map(&vaddr, new_end, end, 322 VM_PROT_READ | VM_PROT_WRITE); 323 bzero((void *)mapped, end - new_end); 324 uma_startup((void *)mapped, boot_pages); 325 326 #if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \ 327 defined(__mips__) 328 /* 329 * Allocate a bitmap to indicate that a random physical page 330 * needs to be included in a minidump. 331 * 332 * The amd64 port needs this to indicate which direct map pages 333 * need to be dumped, via calls to dump_add_page()/dump_drop_page(). 334 * 335 * However, i386 still needs this workspace internally within the 336 * minidump code. In theory, they are not needed on i386, but are 337 * included should the sf_buf code decide to use them. 338 */ 339 last_pa = 0; 340 for (i = 0; dump_avail[i + 1] != 0; i += 2) 341 if (dump_avail[i + 1] > last_pa) 342 last_pa = dump_avail[i + 1]; 343 page_range = last_pa / PAGE_SIZE; 344 vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY); 345 new_end -= vm_page_dump_size; 346 vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end, 347 new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE); 348 bzero((void *)vm_page_dump, vm_page_dump_size); 349 #endif 350 #ifdef __amd64__ 351 /* 352 * Request that the physical pages underlying the message buffer be 353 * included in a crash dump. Since the message buffer is accessed 354 * through the direct map, they are not automatically included. 355 */ 356 pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr); 357 last_pa = pa + round_page(msgbufsize); 358 while (pa < last_pa) { 359 dump_add_page(pa); 360 pa += PAGE_SIZE; 361 } 362 #endif 363 /* 364 * Compute the number of pages of memory that will be available for 365 * use (taking into account the overhead of a page structure per 366 * page). 367 */ 368 first_page = low_water / PAGE_SIZE; 369 #ifdef VM_PHYSSEG_SPARSE 370 page_range = 0; 371 for (i = 0; phys_avail[i + 1] != 0; i += 2) 372 page_range += atop(phys_avail[i + 1] - phys_avail[i]); 373 #elif defined(VM_PHYSSEG_DENSE) 374 page_range = high_water / PAGE_SIZE - first_page; 375 #else 376 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined." 377 #endif 378 end = new_end; 379 380 /* 381 * Reserve an unmapped guard page to trap access to vm_page_array[-1]. 382 */ 383 vaddr += PAGE_SIZE; 384 385 /* 386 * Initialize the mem entry structures now, and put them in the free 387 * queue. 388 */ 389 new_end = trunc_page(end - page_range * sizeof(struct vm_page)); 390 mapped = pmap_map(&vaddr, new_end, end, 391 VM_PROT_READ | VM_PROT_WRITE); 392 vm_page_array = (vm_page_t) mapped; 393 #if VM_NRESERVLEVEL > 0 394 /* 395 * Allocate memory for the reservation management system's data 396 * structures. 397 */ 398 new_end = vm_reserv_startup(&vaddr, new_end, high_water); 399 #endif 400 #if defined(__amd64__) || defined(__mips__) 401 /* 402 * pmap_map on amd64 and mips can come out of the direct-map, not kvm 403 * like i386, so the pages must be tracked for a crashdump to include 404 * this data. This includes the vm_page_array and the early UMA 405 * bootstrap pages. 
406 */ 407 for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE) 408 dump_add_page(pa); 409 #endif 410 phys_avail[biggestone + 1] = new_end; 411 412 /* 413 * Clear all of the page structures 414 */ 415 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page)); 416 for (i = 0; i < page_range; i++) 417 vm_page_array[i].order = VM_NFREEORDER; 418 vm_page_array_size = page_range; 419 420 /* 421 * Initialize the physical memory allocator. 422 */ 423 vm_phys_init(); 424 425 /* 426 * Add every available physical page that is not blacklisted to 427 * the free lists. 428 */ 429 cnt.v_page_count = 0; 430 cnt.v_free_count = 0; 431 list = getenv("vm.blacklist"); 432 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 433 pa = phys_avail[i]; 434 last_pa = phys_avail[i + 1]; 435 while (pa < last_pa) { 436 if (list != NULL && 437 vm_page_blacklist_lookup(list, pa)) 438 printf("Skipping page with pa 0x%jx\n", 439 (uintmax_t)pa); 440 else 441 vm_phys_add_page(pa); 442 pa += PAGE_SIZE; 443 } 444 } 445 freeenv(list); 446 #if VM_NRESERVLEVEL > 0 447 /* 448 * Initialize the reservation management system. 449 */ 450 vm_reserv_init(); 451 #endif 452 return (vaddr); 453 } 454 455 456 CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0); 457 458 void 459 vm_page_aflag_set(vm_page_t m, uint8_t bits) 460 { 461 uint32_t *addr, val; 462 463 /* 464 * The PGA_WRITEABLE flag can only be set if the page is managed and 465 * VPO_BUSY. Currently, this flag is only set by pmap_enter(). 466 */ 467 KASSERT((bits & PGA_WRITEABLE) == 0 || 468 (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY, 469 ("PGA_WRITEABLE and !VPO_BUSY")); 470 471 /* 472 * We want to use atomic updates for m->aflags, which is a 473 * byte wide. Not all architectures provide atomic operations 474 * on the single-byte destination. Punt and access the whole 475 * 4-byte word with an atomic update. Parallel non-atomic 476 * updates to the fields included in the update by proximity 477 * are handled properly by atomics. 478 */ 479 addr = (void *)&m->aflags; 480 MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0); 481 val = bits; 482 #if BYTE_ORDER == BIG_ENDIAN 483 val <<= 24; 484 #endif 485 atomic_set_32(addr, val); 486 } 487 488 void 489 vm_page_aflag_clear(vm_page_t m, uint8_t bits) 490 { 491 uint32_t *addr, val; 492 493 /* 494 * The PGA_REFERENCED flag can only be cleared if the object 495 * containing the page is locked. 496 */ 497 KASSERT((bits & PGA_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object), 498 ("PGA_REFERENCED and !VM_OBJECT_LOCKED")); 499 500 /* 501 * See the comment in vm_page_aflag_set(). 502 */ 503 addr = (void *)&m->aflags; 504 MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0); 505 val = bits; 506 #if BYTE_ORDER == BIG_ENDIAN 507 val <<= 24; 508 #endif 509 atomic_clear_32(addr, val); 510 } 511 512 void 513 vm_page_reference(vm_page_t m) 514 { 515 516 vm_page_aflag_set(m, PGA_REFERENCED); 517 } 518 519 void 520 vm_page_busy(vm_page_t m) 521 { 522 523 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 524 KASSERT((m->oflags & VPO_BUSY) == 0, 525 ("vm_page_busy: page already busy!!!")); 526 m->oflags |= VPO_BUSY; 527 } 528 529 /* 530 * vm_page_flash: 531 * 532 * wakeup anyone waiting for the page. 533 */ 534 void 535 vm_page_flash(vm_page_t m) 536 { 537 538 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 539 if (m->oflags & VPO_WANTED) { 540 m->oflags &= ~VPO_WANTED; 541 wakeup(m); 542 } 543 } 544 545 /* 546 * vm_page_wakeup: 547 * 548 * clear the VPO_BUSY flag and wakeup anyone waiting for the 549 * page. 
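 *
 *	Illustrative pairing (a sketch, not taken from the original comment):
 *	a caller that busies a page around an operation typically does
 *
 *		vm_page_busy(m);
 *		... work on the page, possibly dropping the object lock ...
 *		vm_page_wakeup(m);
 *
 *	with the object lock held at both calls; other threads that find the
 *	page busy wait in vm_page_sleep() until VPO_BUSY is cleared.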
 *
 */
void
vm_page_wakeup(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
	m->oflags &= ~VPO_BUSY;
	vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
	m->busy--;
	if (m->busy == 0)
		vm_page_flash(m);
}

/*
 * Keep a page from being freed by the page daemon.  This has much the same
 * effect as wiring, except that it carries much lower overhead and should
 * be used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 * vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

/*
 * vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_BUSY | VPO_UNMANAGED;
	m->wire_count = 1;
	pmap_page_set_memattr(m, memattr);
	return (m);
}

/*
 * vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

/*
 * vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 * vm_page_free:
 *
 *	Free a page.
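 *
 *	Typical call pattern (an illustrative sketch only): a managed page is
 *	freed with its page lock, and the lock of its object if it has one,
 *	held:
 *
 *		vm_page_lock(m);
 *		vm_page_free(m);
 *		vm_page_unlock(m);
 *
 *	vm_page_free_toq() below asserts the page lock for managed pages and
 *	panics if the page is already free, busy, or wired.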
693 */ 694 void 695 vm_page_free(vm_page_t m) 696 { 697 698 m->flags &= ~PG_ZERO; 699 vm_page_free_toq(m); 700 } 701 702 /* 703 * vm_page_free_zero: 704 * 705 * Free a page to the zerod-pages queue 706 */ 707 void 708 vm_page_free_zero(vm_page_t m) 709 { 710 711 m->flags |= PG_ZERO; 712 vm_page_free_toq(m); 713 } 714 715 /* 716 * vm_page_sleep: 717 * 718 * Sleep and release the page and page queues locks. 719 * 720 * The object containing the given page must be locked. 721 */ 722 void 723 vm_page_sleep(vm_page_t m, const char *msg) 724 { 725 726 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 727 if (mtx_owned(&vm_page_queue_mtx)) 728 vm_page_unlock_queues(); 729 if (mtx_owned(vm_page_lockptr(m))) 730 vm_page_unlock(m); 731 732 /* 733 * It's possible that while we sleep, the page will get 734 * unbusied and freed. If we are holding the object 735 * lock, we will assume we hold a reference to the object 736 * such that even if m->object changes, we can re-lock 737 * it. 738 */ 739 m->oflags |= VPO_WANTED; 740 msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0); 741 } 742 743 /* 744 * vm_page_dirty: 745 * 746 * Set all bits in the page's dirty field. 747 * 748 * The object containing the specified page must be locked if the 749 * call is made from the machine-independent layer. 750 * 751 * See vm_page_clear_dirty_mask(). 752 */ 753 void 754 vm_page_dirty(vm_page_t m) 755 { 756 757 KASSERT((m->flags & PG_CACHED) == 0, 758 ("vm_page_dirty: page in cache!")); 759 KASSERT(!VM_PAGE_IS_FREE(m), 760 ("vm_page_dirty: page is free!")); 761 KASSERT(m->valid == VM_PAGE_BITS_ALL, 762 ("vm_page_dirty: page is invalid!")); 763 m->dirty = VM_PAGE_BITS_ALL; 764 } 765 766 /* 767 * vm_page_splay: 768 * 769 * Implements Sleator and Tarjan's top-down splay algorithm. Returns 770 * the vm_page containing the given pindex. If, however, that 771 * pindex is not found in the vm_object, returns a vm_page that is 772 * adjacent to the pindex, coming before or after it. 773 */ 774 vm_page_t 775 vm_page_splay(vm_pindex_t pindex, vm_page_t root) 776 { 777 struct vm_page dummy; 778 vm_page_t lefttreemax, righttreemin, y; 779 780 if (root == NULL) 781 return (root); 782 lefttreemax = righttreemin = &dummy; 783 for (;; root = y) { 784 if (pindex < root->pindex) { 785 if ((y = root->left) == NULL) 786 break; 787 if (pindex < y->pindex) { 788 /* Rotate right. */ 789 root->left = y->right; 790 y->right = root; 791 root = y; 792 if ((y = root->left) == NULL) 793 break; 794 } 795 /* Link into the new root's right tree. */ 796 righttreemin->left = root; 797 righttreemin = root; 798 } else if (pindex > root->pindex) { 799 if ((y = root->right) == NULL) 800 break; 801 if (pindex > y->pindex) { 802 /* Rotate left. */ 803 root->right = y->left; 804 y->left = root; 805 root = y; 806 if ((y = root->right) == NULL) 807 break; 808 } 809 /* Link into the new root's left tree. */ 810 lefttreemax->right = root; 811 lefttreemax = root; 812 } else 813 break; 814 } 815 /* Assemble the new root. */ 816 lefttreemax->right = root->left; 817 righttreemin->left = root->right; 818 root->left = dummy.right; 819 root->right = dummy.left; 820 return (root); 821 } 822 823 /* 824 * vm_page_insert: [ internal use only ] 825 * 826 * Inserts the given mem entry into the object and object list. 827 * 828 * The pagetables are not updated but will presumably fault the page 829 * in if necessary, or if a kernel page the caller will at some point 830 * enter the page into the kernel's pmap. We are not allowed to block 831 * here so we *can't* do this anyway. 
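 *
 *	(Added note: the object's resident pages are kept in a splay tree
 *	rooted at object->root; the body below splays the tree around
 *	"pindex" using vm_page_splay() above, makes the new page the root,
 *	and links it into the object's memq list in pindex order.)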
832 * 833 * The object and page must be locked. 834 * This routine may not block. 835 */ 836 void 837 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) 838 { 839 vm_page_t root; 840 841 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 842 if (m->object != NULL) 843 panic("vm_page_insert: page already inserted"); 844 845 /* 846 * Record the object/offset pair in this page 847 */ 848 m->object = object; 849 m->pindex = pindex; 850 851 /* 852 * Now link into the object's ordered list of backed pages. 853 */ 854 root = object->root; 855 if (root == NULL) { 856 m->left = NULL; 857 m->right = NULL; 858 TAILQ_INSERT_TAIL(&object->memq, m, listq); 859 } else { 860 root = vm_page_splay(pindex, root); 861 if (pindex < root->pindex) { 862 m->left = root->left; 863 m->right = root; 864 root->left = NULL; 865 TAILQ_INSERT_BEFORE(root, m, listq); 866 } else if (pindex == root->pindex) 867 panic("vm_page_insert: offset already allocated"); 868 else { 869 m->right = root->right; 870 m->left = root; 871 root->right = NULL; 872 TAILQ_INSERT_AFTER(&object->memq, root, m, listq); 873 } 874 } 875 object->root = m; 876 877 /* 878 * show that the object has one more resident page. 879 */ 880 object->resident_page_count++; 881 /* 882 * Hold the vnode until the last page is released. 883 */ 884 if (object->resident_page_count == 1 && object->type == OBJT_VNODE) 885 vhold((struct vnode *)object->handle); 886 887 /* 888 * Since we are inserting a new and possibly dirty page, 889 * update the object's OBJ_MIGHTBEDIRTY flag. 890 */ 891 if (m->aflags & PGA_WRITEABLE) 892 vm_object_set_writeable_dirty(object); 893 } 894 895 /* 896 * vm_page_remove: 897 * NOTE: used by device pager as well -wfj 898 * 899 * Removes the given mem entry from the object/offset-page 900 * table and the object page list, but do not invalidate/terminate 901 * the backing store. 902 * 903 * The object and page must be locked. 904 * The underlying pmap entry (if any) is NOT removed here. 905 * This routine may not block. 906 */ 907 void 908 vm_page_remove(vm_page_t m) 909 { 910 vm_object_t object; 911 vm_page_t root; 912 913 if ((m->oflags & VPO_UNMANAGED) == 0) 914 vm_page_lock_assert(m, MA_OWNED); 915 if ((object = m->object) == NULL) 916 return; 917 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 918 if (m->oflags & VPO_BUSY) { 919 m->oflags &= ~VPO_BUSY; 920 vm_page_flash(m); 921 } 922 923 /* 924 * Now remove from the object's list of backed pages. 925 */ 926 if (m != object->root) 927 vm_page_splay(m->pindex, object->root); 928 if (m->left == NULL) 929 root = m->right; 930 else { 931 root = vm_page_splay(m->pindex, m->left); 932 root->right = m->right; 933 } 934 object->root = root; 935 TAILQ_REMOVE(&object->memq, m, listq); 936 937 /* 938 * And show that the object has one fewer resident page. 939 */ 940 object->resident_page_count--; 941 /* 942 * The vnode may now be recycled. 943 */ 944 if (object->resident_page_count == 0 && object->type == OBJT_VNODE) 945 vdrop((struct vnode *)object->handle); 946 947 m->object = NULL; 948 } 949 950 /* 951 * vm_page_lookup: 952 * 953 * Returns the page associated with the object/offset 954 * pair specified; if none is found, NULL is returned. 955 * 956 * The object must be locked. 957 * This routine may not block. 
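 *
 *	Typical use (an illustrative sketch, not from the original comment):
 *
 *		VM_OBJECT_LOCK(object);
 *		m = vm_page_lookup(object, pindex);
 *		if (m != NULL)
 *			... operate on the resident page ...
 *		VM_OBJECT_UNLOCK(object);
 *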
958 * This is a critical path routine 959 */ 960 vm_page_t 961 vm_page_lookup(vm_object_t object, vm_pindex_t pindex) 962 { 963 vm_page_t m; 964 965 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 966 if ((m = object->root) != NULL && m->pindex != pindex) { 967 m = vm_page_splay(pindex, m); 968 if ((object->root = m)->pindex != pindex) 969 m = NULL; 970 } 971 return (m); 972 } 973 974 /* 975 * vm_page_find_least: 976 * 977 * Returns the page associated with the object with least pindex 978 * greater than or equal to the parameter pindex, or NULL. 979 * 980 * The object must be locked. 981 * The routine may not block. 982 */ 983 vm_page_t 984 vm_page_find_least(vm_object_t object, vm_pindex_t pindex) 985 { 986 vm_page_t m; 987 988 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 989 if ((m = TAILQ_FIRST(&object->memq)) != NULL) { 990 if (m->pindex < pindex) { 991 m = vm_page_splay(pindex, object->root); 992 if ((object->root = m)->pindex < pindex) 993 m = TAILQ_NEXT(m, listq); 994 } 995 } 996 return (m); 997 } 998 999 /* 1000 * Returns the given page's successor (by pindex) within the object if it is 1001 * resident; if none is found, NULL is returned. 1002 * 1003 * The object must be locked. 1004 */ 1005 vm_page_t 1006 vm_page_next(vm_page_t m) 1007 { 1008 vm_page_t next; 1009 1010 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1011 if ((next = TAILQ_NEXT(m, listq)) != NULL && 1012 next->pindex != m->pindex + 1) 1013 next = NULL; 1014 return (next); 1015 } 1016 1017 /* 1018 * Returns the given page's predecessor (by pindex) within the object if it is 1019 * resident; if none is found, NULL is returned. 1020 * 1021 * The object must be locked. 1022 */ 1023 vm_page_t 1024 vm_page_prev(vm_page_t m) 1025 { 1026 vm_page_t prev; 1027 1028 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1029 if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL && 1030 prev->pindex != m->pindex - 1) 1031 prev = NULL; 1032 return (prev); 1033 } 1034 1035 /* 1036 * vm_page_rename: 1037 * 1038 * Move the given memory entry from its 1039 * current object to the specified target object/offset. 1040 * 1041 * The object must be locked. 1042 * This routine may not block. 1043 * 1044 * Note: swap associated with the page must be invalidated by the move. We 1045 * have to do this for several reasons: (1) we aren't freeing the 1046 * page, (2) we are dirtying the page, (3) the VM system is probably 1047 * moving the page from object A to B, and will then later move 1048 * the backing store from A to B and we can't have a conflict. 1049 * 1050 * Note: we *always* dirty the page. It is necessary both for the 1051 * fact that we moved it, and because we may be invalidating 1052 * swap. If the page is on the cache, we have to deactivate it 1053 * or vm_page_dirty() will panic. Dirty pages are not allowed 1054 * on the cache. 1055 */ 1056 void 1057 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex) 1058 { 1059 1060 vm_page_remove(m); 1061 vm_page_insert(m, new_object, new_pindex); 1062 vm_page_dirty(m); 1063 } 1064 1065 /* 1066 * Convert all of the given object's cached pages that have a 1067 * pindex within the given range into free pages. If the value 1068 * zero is given for "end", then the range's upper bound is 1069 * infinity. If the given object is backed by a vnode and it 1070 * transitions from having one or more cached pages to none, the 1071 * vnode's hold count is reduced. 
1072 */ 1073 void 1074 vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1075 { 1076 vm_page_t m, m_next; 1077 boolean_t empty; 1078 1079 mtx_lock(&vm_page_queue_free_mtx); 1080 if (__predict_false(object->cache == NULL)) { 1081 mtx_unlock(&vm_page_queue_free_mtx); 1082 return; 1083 } 1084 m = object->cache = vm_page_splay(start, object->cache); 1085 if (m->pindex < start) { 1086 if (m->right == NULL) 1087 m = NULL; 1088 else { 1089 m_next = vm_page_splay(start, m->right); 1090 m_next->left = m; 1091 m->right = NULL; 1092 m = object->cache = m_next; 1093 } 1094 } 1095 1096 /* 1097 * At this point, "m" is either (1) a reference to the page 1098 * with the least pindex that is greater than or equal to 1099 * "start" or (2) NULL. 1100 */ 1101 for (; m != NULL && (m->pindex < end || end == 0); m = m_next) { 1102 /* 1103 * Find "m"'s successor and remove "m" from the 1104 * object's cache. 1105 */ 1106 if (m->right == NULL) { 1107 object->cache = m->left; 1108 m_next = NULL; 1109 } else { 1110 m_next = vm_page_splay(start, m->right); 1111 m_next->left = m->left; 1112 object->cache = m_next; 1113 } 1114 /* Convert "m" to a free page. */ 1115 m->object = NULL; 1116 m->valid = 0; 1117 /* Clear PG_CACHED and set PG_FREE. */ 1118 m->flags ^= PG_CACHED | PG_FREE; 1119 KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE, 1120 ("vm_page_cache_free: page %p has inconsistent flags", m)); 1121 cnt.v_cache_count--; 1122 cnt.v_free_count++; 1123 } 1124 empty = object->cache == NULL; 1125 mtx_unlock(&vm_page_queue_free_mtx); 1126 if (object->type == OBJT_VNODE && empty) 1127 vdrop(object->handle); 1128 } 1129 1130 /* 1131 * Returns the cached page that is associated with the given 1132 * object and offset. If, however, none exists, returns NULL. 1133 * 1134 * The free page queue must be locked. 1135 */ 1136 static inline vm_page_t 1137 vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex) 1138 { 1139 vm_page_t m; 1140 1141 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1142 if ((m = object->cache) != NULL && m->pindex != pindex) { 1143 m = vm_page_splay(pindex, m); 1144 if ((object->cache = m)->pindex != pindex) 1145 m = NULL; 1146 } 1147 return (m); 1148 } 1149 1150 /* 1151 * Remove the given cached page from its containing object's 1152 * collection of cached pages. 1153 * 1154 * The free page queue must be locked. 1155 */ 1156 void 1157 vm_page_cache_remove(vm_page_t m) 1158 { 1159 vm_object_t object; 1160 vm_page_t root; 1161 1162 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1163 KASSERT((m->flags & PG_CACHED) != 0, 1164 ("vm_page_cache_remove: page %p is not cached", m)); 1165 object = m->object; 1166 if (m != object->cache) { 1167 root = vm_page_splay(m->pindex, object->cache); 1168 KASSERT(root == m, 1169 ("vm_page_cache_remove: page %p is not cached in object %p", 1170 m, object)); 1171 } 1172 if (m->left == NULL) 1173 root = m->right; 1174 else if (m->right == NULL) 1175 root = m->left; 1176 else { 1177 root = vm_page_splay(m->pindex, m->left); 1178 root->right = m->right; 1179 } 1180 object->cache = root; 1181 m->object = NULL; 1182 cnt.v_cache_count--; 1183 } 1184 1185 /* 1186 * Transfer all of the cached pages with offset greater than or 1187 * equal to 'offidxstart' from the original object's cache to the 1188 * new object's cache. However, any cached pages with offset 1189 * greater than or equal to the new object's size are kept in the 1190 * original object. Initially, the new object's cache must be 1191 * empty. 
Offset 'offidxstart' in the original object must 1192 * correspond to offset zero in the new object. 1193 * 1194 * The new object must be locked. 1195 */ 1196 void 1197 vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart, 1198 vm_object_t new_object) 1199 { 1200 vm_page_t m, m_next; 1201 1202 /* 1203 * Insertion into an object's collection of cached pages 1204 * requires the object to be locked. In contrast, removal does 1205 * not. 1206 */ 1207 VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED); 1208 KASSERT(new_object->cache == NULL, 1209 ("vm_page_cache_transfer: object %p has cached pages", 1210 new_object)); 1211 mtx_lock(&vm_page_queue_free_mtx); 1212 if ((m = orig_object->cache) != NULL) { 1213 /* 1214 * Transfer all of the pages with offset greater than or 1215 * equal to 'offidxstart' from the original object's 1216 * cache to the new object's cache. 1217 */ 1218 m = vm_page_splay(offidxstart, m); 1219 if (m->pindex < offidxstart) { 1220 orig_object->cache = m; 1221 new_object->cache = m->right; 1222 m->right = NULL; 1223 } else { 1224 orig_object->cache = m->left; 1225 new_object->cache = m; 1226 m->left = NULL; 1227 } 1228 while ((m = new_object->cache) != NULL) { 1229 if ((m->pindex - offidxstart) >= new_object->size) { 1230 /* 1231 * Return all of the cached pages with 1232 * offset greater than or equal to the 1233 * new object's size to the original 1234 * object's cache. 1235 */ 1236 new_object->cache = m->left; 1237 m->left = orig_object->cache; 1238 orig_object->cache = m; 1239 break; 1240 } 1241 m_next = vm_page_splay(m->pindex, m->right); 1242 /* Update the page's object and offset. */ 1243 m->object = new_object; 1244 m->pindex -= offidxstart; 1245 if (m_next == NULL) 1246 break; 1247 m->right = NULL; 1248 m_next->left = m; 1249 new_object->cache = m_next; 1250 } 1251 KASSERT(new_object->cache == NULL || 1252 new_object->type == OBJT_SWAP, 1253 ("vm_page_cache_transfer: object %p's type is incompatible" 1254 " with cached pages", new_object)); 1255 } 1256 mtx_unlock(&vm_page_queue_free_mtx); 1257 } 1258 1259 /* 1260 * vm_page_alloc: 1261 * 1262 * Allocate and return a memory cell associated 1263 * with this VM object/offset pair. 1264 * 1265 * The caller must always specify an allocation class. 1266 * 1267 * allocation classes: 1268 * VM_ALLOC_NORMAL normal process request 1269 * VM_ALLOC_SYSTEM system *really* needs a page 1270 * VM_ALLOC_INTERRUPT interrupt time request 1271 * 1272 * optional allocation flags: 1273 * VM_ALLOC_ZERO prefer a zeroed page 1274 * VM_ALLOC_WIRED wire the allocated page 1275 * VM_ALLOC_NOOBJ page is not associated with a vm object 1276 * VM_ALLOC_NOBUSY do not set the page busy 1277 * VM_ALLOC_IFCACHED return page only if it is cached 1278 * VM_ALLOC_IFNOTCACHED return NULL, do not reactivate if the page 1279 * is cached 1280 * 1281 * This routine may not sleep. 1282 */ 1283 vm_page_t 1284 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) 1285 { 1286 struct vnode *vp = NULL; 1287 vm_object_t m_object; 1288 vm_page_t m; 1289 int flags, page_req; 1290 1291 if ((req & VM_ALLOC_NOOBJ) == 0) { 1292 KASSERT(object != NULL, 1293 ("vm_page_alloc: NULL object.")); 1294 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1295 } 1296 1297 page_req = req & VM_ALLOC_CLASS_MASK; 1298 1299 /* 1300 * The pager is allowed to eat deeper into the free page list. 
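	 *
	 * (Clarifying note, added: concretely, requests from the page daemon
	 * are promoted from VM_ALLOC_NORMAL to VM_ALLOC_SYSTEM below, which
	 * allows them to succeed while the free plus cache page count is
	 * above v_interrupt_free_min rather than v_free_reserved.)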
1301 */ 1302 if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) 1303 page_req = VM_ALLOC_SYSTEM; 1304 1305 mtx_lock(&vm_page_queue_free_mtx); 1306 if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || 1307 (page_req == VM_ALLOC_SYSTEM && 1308 cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || 1309 (page_req == VM_ALLOC_INTERRUPT && 1310 cnt.v_free_count + cnt.v_cache_count > 0)) { 1311 /* 1312 * Allocate from the free queue if the number of free pages 1313 * exceeds the minimum for the request class. 1314 */ 1315 if (object != NULL && 1316 (m = vm_page_cache_lookup(object, pindex)) != NULL) { 1317 if ((req & VM_ALLOC_IFNOTCACHED) != 0) { 1318 mtx_unlock(&vm_page_queue_free_mtx); 1319 return (NULL); 1320 } 1321 if (vm_phys_unfree_page(m)) 1322 vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0); 1323 #if VM_NRESERVLEVEL > 0 1324 else if (!vm_reserv_reactivate_page(m)) 1325 #else 1326 else 1327 #endif 1328 panic("vm_page_alloc: cache page %p is missing" 1329 " from the free queue", m); 1330 } else if ((req & VM_ALLOC_IFCACHED) != 0) { 1331 mtx_unlock(&vm_page_queue_free_mtx); 1332 return (NULL); 1333 #if VM_NRESERVLEVEL > 0 1334 } else if (object == NULL || object->type == OBJT_DEVICE || 1335 object->type == OBJT_SG || 1336 (object->flags & OBJ_COLORED) == 0 || 1337 (m = vm_reserv_alloc_page(object, pindex)) == NULL) { 1338 #else 1339 } else { 1340 #endif 1341 m = vm_phys_alloc_pages(object != NULL ? 1342 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); 1343 #if VM_NRESERVLEVEL > 0 1344 if (m == NULL && vm_reserv_reclaim_inactive()) { 1345 m = vm_phys_alloc_pages(object != NULL ? 1346 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 1347 0); 1348 } 1349 #endif 1350 } 1351 } else { 1352 /* 1353 * Not allocatable, give up. 1354 */ 1355 mtx_unlock(&vm_page_queue_free_mtx); 1356 atomic_add_int(&vm_pageout_deficit, 1357 MAX((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); 1358 pagedaemon_wakeup(); 1359 return (NULL); 1360 } 1361 1362 /* 1363 * At this point we had better have found a good page. 1364 */ 1365 1366 KASSERT(m != NULL, ("vm_page_alloc: missing page")); 1367 KASSERT(m->queue == PQ_NONE, 1368 ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue)); 1369 KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m)); 1370 KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m)); 1371 KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m)); 1372 KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m)); 1373 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 1374 ("vm_page_alloc: page %p has unexpected memattr %d", m, 1375 pmap_page_get_memattr(m))); 1376 if ((m->flags & PG_CACHED) != 0) { 1377 KASSERT(m->valid != 0, 1378 ("vm_page_alloc: cached page %p is invalid", m)); 1379 if (m->object == object && m->pindex == pindex) 1380 cnt.v_reactivated++; 1381 else 1382 m->valid = 0; 1383 m_object = m->object; 1384 vm_page_cache_remove(m); 1385 if (m_object->type == OBJT_VNODE && m_object->cache == NULL) 1386 vp = m_object->handle; 1387 } else { 1388 KASSERT(VM_PAGE_IS_FREE(m), 1389 ("vm_page_alloc: page %p is not free", m)); 1390 KASSERT(m->valid == 0, 1391 ("vm_page_alloc: free page %p is valid", m)); 1392 cnt.v_free_count--; 1393 } 1394 1395 /* 1396 * Only the PG_ZERO flag is inherited. The PG_CACHED or PG_FREE flag 1397 * must be cleared before the free page queues lock is released. 
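	 *
	 * (Clarifying note, added: the page has left the free queues at this
	 * point, so vm_page_zero_count is decremented below whenever the page
	 * carried PG_ZERO, while PG_ZERO itself is preserved in "flags" only
	 * when the caller asked for VM_ALLOC_ZERO.)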
1398 */ 1399 flags = 0; 1400 if (m->flags & PG_ZERO) { 1401 vm_page_zero_count--; 1402 if (req & VM_ALLOC_ZERO) 1403 flags = PG_ZERO; 1404 } 1405 m->flags = flags; 1406 mtx_unlock(&vm_page_queue_free_mtx); 1407 m->aflags = 0; 1408 if (object == NULL || object->type == OBJT_PHYS) 1409 m->oflags = VPO_UNMANAGED; 1410 else 1411 m->oflags = 0; 1412 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0) 1413 m->oflags |= VPO_BUSY; 1414 if (req & VM_ALLOC_WIRED) { 1415 /* 1416 * The page lock is not required for wiring a page until that 1417 * page is inserted into the object. 1418 */ 1419 atomic_add_int(&cnt.v_wire_count, 1); 1420 m->wire_count = 1; 1421 } 1422 m->act_count = 0; 1423 1424 if (object != NULL) { 1425 /* Ignore device objects; the pager sets "memattr" for them. */ 1426 if (object->memattr != VM_MEMATTR_DEFAULT && 1427 object->type != OBJT_DEVICE && object->type != OBJT_SG) 1428 pmap_page_set_memattr(m, object->memattr); 1429 vm_page_insert(m, object, pindex); 1430 } else 1431 m->pindex = pindex; 1432 1433 /* 1434 * The following call to vdrop() must come after the above call 1435 * to vm_page_insert() in case both affect the same object and 1436 * vnode. Otherwise, the affected vnode's hold count could 1437 * temporarily become zero. 1438 */ 1439 if (vp != NULL) 1440 vdrop(vp); 1441 1442 /* 1443 * Don't wakeup too often - wakeup the pageout daemon when 1444 * we would be nearly out of memory. 1445 */ 1446 if (vm_paging_needed()) 1447 pagedaemon_wakeup(); 1448 1449 return (m); 1450 } 1451 1452 /* 1453 * Initialize a page that has been freshly dequeued from a freelist. 1454 * The caller has to drop the vnode returned, if it is not NULL. 1455 * 1456 * To be called with vm_page_queue_free_mtx held. 1457 */ 1458 struct vnode * 1459 vm_page_alloc_init(vm_page_t m) 1460 { 1461 struct vnode *drop; 1462 vm_object_t m_object; 1463 1464 KASSERT(m->queue == PQ_NONE, 1465 ("vm_page_alloc_init: page %p has unexpected queue %d", 1466 m, m->queue)); 1467 KASSERT(m->wire_count == 0, 1468 ("vm_page_alloc_init: page %p is wired", m)); 1469 KASSERT(m->hold_count == 0, 1470 ("vm_page_alloc_init: page %p is held", m)); 1471 KASSERT(m->busy == 0, 1472 ("vm_page_alloc_init: page %p is busy", m)); 1473 KASSERT(m->dirty == 0, 1474 ("vm_page_alloc_init: page %p is dirty", m)); 1475 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 1476 ("vm_page_alloc_init: page %p has unexpected memattr %d", 1477 m, pmap_page_get_memattr(m))); 1478 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1479 drop = NULL; 1480 if ((m->flags & PG_CACHED) != 0) { 1481 m->valid = 0; 1482 m_object = m->object; 1483 vm_page_cache_remove(m); 1484 if (m_object->type == OBJT_VNODE && 1485 m_object->cache == NULL) 1486 drop = m_object->handle; 1487 } else { 1488 KASSERT(VM_PAGE_IS_FREE(m), 1489 ("vm_page_alloc_init: page %p is not free", m)); 1490 KASSERT(m->valid == 0, 1491 ("vm_page_alloc_init: free page %p is valid", m)); 1492 cnt.v_free_count--; 1493 } 1494 if (m->flags & PG_ZERO) 1495 vm_page_zero_count--; 1496 /* Don't clear the PG_ZERO flag; we'll need it later. */ 1497 m->flags &= PG_ZERO; 1498 m->aflags = 0; 1499 m->oflags = VPO_UNMANAGED; 1500 /* Unmanaged pages don't use "act_count". */ 1501 return (drop); 1502 } 1503 1504 /* 1505 * vm_page_alloc_freelist: 1506 * 1507 * Allocate a page from the specified freelist. 1508 * Only the ALLOC_CLASS values in req are honored, other request flags 1509 * are ignored. 
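 *
 *	Illustrative use (a sketch; "flind" is the freelist index parameter
 *	described above and "pa" is a hypothetical caller variable):
 *
 *		m = vm_page_alloc_freelist(flind, VM_ALLOC_NORMAL);
 *		if (m != NULL)
 *			pa = VM_PAGE_TO_PHYS(m);
 *
 *	The returned page is unmanaged (VPO_UNMANAGED) and is not inserted
 *	into any object; see vm_page_alloc_init() above.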
1510 */ 1511 vm_page_t 1512 vm_page_alloc_freelist(int flind, int req) 1513 { 1514 struct vnode *drop; 1515 vm_page_t m; 1516 int page_req; 1517 1518 m = NULL; 1519 page_req = req & VM_ALLOC_CLASS_MASK; 1520 mtx_lock(&vm_page_queue_free_mtx); 1521 /* 1522 * Do not allocate reserved pages unless the req has asked for it. 1523 */ 1524 if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || 1525 (page_req == VM_ALLOC_SYSTEM && 1526 cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || 1527 (page_req == VM_ALLOC_INTERRUPT && 1528 cnt.v_free_count + cnt.v_cache_count > 0)) { 1529 m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0); 1530 } 1531 if (m == NULL) { 1532 mtx_unlock(&vm_page_queue_free_mtx); 1533 return (NULL); 1534 } 1535 drop = vm_page_alloc_init(m); 1536 mtx_unlock(&vm_page_queue_free_mtx); 1537 if (drop) 1538 vdrop(drop); 1539 return (m); 1540 } 1541 1542 /* 1543 * vm_wait: (also see VM_WAIT macro) 1544 * 1545 * Block until free pages are available for allocation 1546 * - Called in various places before memory allocations. 1547 */ 1548 void 1549 vm_wait(void) 1550 { 1551 1552 mtx_lock(&vm_page_queue_free_mtx); 1553 if (curproc == pageproc) { 1554 vm_pageout_pages_needed = 1; 1555 msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx, 1556 PDROP | PSWP, "VMWait", 0); 1557 } else { 1558 if (!vm_pages_needed) { 1559 vm_pages_needed = 1; 1560 wakeup(&vm_pages_needed); 1561 } 1562 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, 1563 "vmwait", 0); 1564 } 1565 } 1566 1567 /* 1568 * vm_waitpfault: (also see VM_WAITPFAULT macro) 1569 * 1570 * Block until free pages are available for allocation 1571 * - Called only in vm_fault so that processes page faulting 1572 * can be easily tracked. 1573 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 1574 * processes will be able to grab memory first. Do not change 1575 * this balance without careful testing first. 1576 */ 1577 void 1578 vm_waitpfault(void) 1579 { 1580 1581 mtx_lock(&vm_page_queue_free_mtx); 1582 if (!vm_pages_needed) { 1583 vm_pages_needed = 1; 1584 wakeup(&vm_pages_needed); 1585 } 1586 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, 1587 "pfault", 0); 1588 } 1589 1590 /* 1591 * vm_page_requeue: 1592 * 1593 * Move the given page to the tail of its present page queue. 1594 * 1595 * The page queues must be locked. 1596 */ 1597 void 1598 vm_page_requeue(vm_page_t m) 1599 { 1600 struct vpgqueues *vpq; 1601 int queue; 1602 1603 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1604 queue = m->queue; 1605 KASSERT(queue != PQ_NONE, 1606 ("vm_page_requeue: page %p is not queued", m)); 1607 vpq = &vm_page_queues[queue]; 1608 TAILQ_REMOVE(&vpq->pl, m, pageq); 1609 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); 1610 } 1611 1612 /* 1613 * vm_page_queue_remove: 1614 * 1615 * Remove the given page from the specified queue. 1616 * 1617 * The page and page queues must be locked. 1618 */ 1619 static __inline void 1620 vm_page_queue_remove(int queue, vm_page_t m) 1621 { 1622 struct vpgqueues *pq; 1623 1624 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1625 vm_page_lock_assert(m, MA_OWNED); 1626 pq = &vm_page_queues[queue]; 1627 TAILQ_REMOVE(&pq->pl, m, pageq); 1628 (*pq->cnt)--; 1629 } 1630 1631 /* 1632 * vm_pageq_remove: 1633 * 1634 * Remove a page from its queue. 1635 * 1636 * The given page must be locked. 1637 * This routine may not block. 
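 *
 *	(Added note: unlike the static vm_page_queue_remove() above, this
 *	helper acquires and releases the page queues lock itself, so the
 *	caller need only hold the page lock.)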
1638 */ 1639 void 1640 vm_pageq_remove(vm_page_t m) 1641 { 1642 int queue; 1643 1644 vm_page_lock_assert(m, MA_OWNED); 1645 if ((queue = m->queue) != PQ_NONE) { 1646 vm_page_lock_queues(); 1647 m->queue = PQ_NONE; 1648 vm_page_queue_remove(queue, m); 1649 vm_page_unlock_queues(); 1650 } 1651 } 1652 1653 /* 1654 * vm_page_enqueue: 1655 * 1656 * Add the given page to the specified queue. 1657 * 1658 * The page queues must be locked. 1659 */ 1660 static void 1661 vm_page_enqueue(int queue, vm_page_t m) 1662 { 1663 struct vpgqueues *vpq; 1664 1665 vpq = &vm_page_queues[queue]; 1666 m->queue = queue; 1667 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); 1668 ++*vpq->cnt; 1669 } 1670 1671 /* 1672 * vm_page_activate: 1673 * 1674 * Put the specified page on the active list (if appropriate). 1675 * Ensure that act_count is at least ACT_INIT but do not otherwise 1676 * mess with it. 1677 * 1678 * The page must be locked. 1679 * This routine may not block. 1680 */ 1681 void 1682 vm_page_activate(vm_page_t m) 1683 { 1684 int queue; 1685 1686 vm_page_lock_assert(m, MA_OWNED); 1687 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1688 if ((queue = m->queue) != PQ_ACTIVE) { 1689 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { 1690 if (m->act_count < ACT_INIT) 1691 m->act_count = ACT_INIT; 1692 vm_page_lock_queues(); 1693 if (queue != PQ_NONE) 1694 vm_page_queue_remove(queue, m); 1695 vm_page_enqueue(PQ_ACTIVE, m); 1696 vm_page_unlock_queues(); 1697 } else 1698 KASSERT(queue == PQ_NONE, 1699 ("vm_page_activate: wired page %p is queued", m)); 1700 } else { 1701 if (m->act_count < ACT_INIT) 1702 m->act_count = ACT_INIT; 1703 } 1704 } 1705 1706 /* 1707 * vm_page_free_wakeup: 1708 * 1709 * Helper routine for vm_page_free_toq() and vm_page_cache(). This 1710 * routine is called when a page has been added to the cache or free 1711 * queues. 1712 * 1713 * The page queues must be locked. 1714 * This routine may not block. 1715 */ 1716 static inline void 1717 vm_page_free_wakeup(void) 1718 { 1719 1720 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1721 /* 1722 * if pageout daemon needs pages, then tell it that there are 1723 * some free. 1724 */ 1725 if (vm_pageout_pages_needed && 1726 cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) { 1727 wakeup(&vm_pageout_pages_needed); 1728 vm_pageout_pages_needed = 0; 1729 } 1730 /* 1731 * wakeup processes that are waiting on memory if we hit a 1732 * high water mark. And wakeup scheduler process if we have 1733 * lots of memory. this process will swapin processes. 1734 */ 1735 if (vm_pages_needed && !vm_page_count_min()) { 1736 vm_pages_needed = 0; 1737 wakeup(&cnt.v_free_count); 1738 } 1739 } 1740 1741 /* 1742 * vm_page_free_toq: 1743 * 1744 * Returns the given page to the free list, 1745 * disassociating it with any VM object. 1746 * 1747 * Object and page must be locked prior to entry. 1748 * This routine may not block. 1749 */ 1750 1751 void 1752 vm_page_free_toq(vm_page_t m) 1753 { 1754 1755 if ((m->oflags & VPO_UNMANAGED) == 0) { 1756 vm_page_lock_assert(m, MA_OWNED); 1757 KASSERT(!pmap_page_is_mapped(m), 1758 ("vm_page_free_toq: freeing mapped page %p", m)); 1759 } 1760 PCPU_INC(cnt.v_tfree); 1761 1762 if (VM_PAGE_IS_FREE(m)) 1763 panic("vm_page_free: freeing free page %p", m); 1764 else if (m->busy != 0) 1765 panic("vm_page_free: freeing busy page %p", m); 1766 1767 /* 1768 * unqueue, then remove page. 
Note that we cannot destroy 1769 * the page here because we do not want to call the pager's 1770 * callback routine until after we've put the page on the 1771 * appropriate free queue. 1772 */ 1773 if ((m->oflags & VPO_UNMANAGED) == 0) 1774 vm_pageq_remove(m); 1775 vm_page_remove(m); 1776 1777 /* 1778 * If fictitious remove object association and 1779 * return, otherwise delay object association removal. 1780 */ 1781 if ((m->flags & PG_FICTITIOUS) != 0) { 1782 return; 1783 } 1784 1785 m->valid = 0; 1786 vm_page_undirty(m); 1787 1788 if (m->wire_count != 0) 1789 panic("vm_page_free: freeing wired page %p", m); 1790 if (m->hold_count != 0) { 1791 m->flags &= ~PG_ZERO; 1792 vm_page_lock_queues(); 1793 vm_page_enqueue(PQ_HOLD, m); 1794 vm_page_unlock_queues(); 1795 } else { 1796 /* 1797 * Restore the default memory attribute to the page. 1798 */ 1799 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 1800 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 1801 1802 /* 1803 * Insert the page into the physical memory allocator's 1804 * cache/free page queues. 1805 */ 1806 mtx_lock(&vm_page_queue_free_mtx); 1807 m->flags |= PG_FREE; 1808 cnt.v_free_count++; 1809 #if VM_NRESERVLEVEL > 0 1810 if (!vm_reserv_free_page(m)) 1811 #else 1812 if (TRUE) 1813 #endif 1814 vm_phys_free_pages(m, 0); 1815 if ((m->flags & PG_ZERO) != 0) 1816 ++vm_page_zero_count; 1817 else 1818 vm_page_zero_idle_wakeup(); 1819 vm_page_free_wakeup(); 1820 mtx_unlock(&vm_page_queue_free_mtx); 1821 } 1822 } 1823 1824 /* 1825 * vm_page_wire: 1826 * 1827 * Mark this page as wired down by yet 1828 * another map, removing it from paging queues 1829 * as necessary. 1830 * 1831 * If the page is fictitious, then its wire count must remain one. 1832 * 1833 * The page must be locked. 1834 * This routine may not block. 1835 */ 1836 void 1837 vm_page_wire(vm_page_t m) 1838 { 1839 1840 /* 1841 * Only bump the wire statistics if the page is not already wired, 1842 * and only unqueue the page if it is on some queue (if it is unmanaged 1843 * it is already off the queues). 1844 */ 1845 vm_page_lock_assert(m, MA_OWNED); 1846 if ((m->flags & PG_FICTITIOUS) != 0) { 1847 KASSERT(m->wire_count == 1, 1848 ("vm_page_wire: fictitious page %p's wire count isn't one", 1849 m)); 1850 return; 1851 } 1852 if (m->wire_count == 0) { 1853 if ((m->oflags & VPO_UNMANAGED) == 0) 1854 vm_pageq_remove(m); 1855 atomic_add_int(&cnt.v_wire_count, 1); 1856 } 1857 m->wire_count++; 1858 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); 1859 } 1860 1861 /* 1862 * vm_page_unwire: 1863 * 1864 * Release one wiring of the specified page, potentially enabling it to be 1865 * paged again. If paging is enabled, then the value of the parameter 1866 * "activate" determines to which queue the page is added. If "activate" is 1867 * non-zero, then the page is added to the active queue. Otherwise, it is 1868 * added to the inactive queue. 1869 * 1870 * However, unless the page belongs to an object, it is not enqueued because 1871 * it cannot be paged out. 1872 * 1873 * If a page is fictitious, then its wire count must alway be one. 1874 * 1875 * A managed page must be locked. 
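 *
 *	Illustrative pairing (a sketch, not from the original comment):
 *
 *		vm_page_lock(m);
 *		vm_page_wire(m);
 *		vm_page_unlock(m);
 *		... the page cannot be paged out while wired ...
 *		vm_page_lock(m);
 *		vm_page_unwire(m, 0);	requeued as inactive on last unwire
 *		vm_page_unlock(m);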
1876 */ 1877 void 1878 vm_page_unwire(vm_page_t m, int activate) 1879 { 1880 1881 if ((m->oflags & VPO_UNMANAGED) == 0) 1882 vm_page_lock_assert(m, MA_OWNED); 1883 if ((m->flags & PG_FICTITIOUS) != 0) { 1884 KASSERT(m->wire_count == 1, 1885 ("vm_page_unwire: fictitious page %p's wire count isn't one", m)); 1886 return; 1887 } 1888 if (m->wire_count > 0) { 1889 m->wire_count--; 1890 if (m->wire_count == 0) { 1891 atomic_subtract_int(&cnt.v_wire_count, 1); 1892 if ((m->oflags & VPO_UNMANAGED) != 0 || 1893 m->object == NULL) 1894 return; 1895 vm_page_lock_queues(); 1896 if (activate) 1897 vm_page_enqueue(PQ_ACTIVE, m); 1898 else { 1899 m->flags &= ~PG_WINATCFLS; 1900 vm_page_enqueue(PQ_INACTIVE, m); 1901 } 1902 vm_page_unlock_queues(); 1903 } 1904 } else 1905 panic("vm_page_unwire: page %p's wire count is zero", m); 1906 } 1907 1908 /* 1909 * Move the specified page to the inactive queue. 1910 * 1911 * Many pages placed on the inactive queue should actually go 1912 * into the cache, but it is difficult to figure out which. What 1913 * we do instead, if the inactive target is well met, is to put 1914 * clean pages at the head of the inactive queue instead of the tail. 1915 * This will cause them to be moved to the cache more quickly and 1916 * if not actively re-referenced, reclaimed more quickly. If we just 1917 * stick these pages at the end of the inactive queue, heavy filesystem 1918 * meta-data accesses can cause an unnecessary paging load on memory bound 1919 * processes. This optimization causes one-time-use metadata to be 1920 * reused more quickly. 1921 * 1922 * Normally athead is 0 resulting in LRU operation. athead is set 1923 * to 1 if we want this page to be 'as if it were placed in the cache', 1924 * except without unmapping it from the process address space. 1925 * 1926 * This routine may not block. 1927 */ 1928 static inline void 1929 _vm_page_deactivate(vm_page_t m, int athead) 1930 { 1931 int queue; 1932 1933 vm_page_lock_assert(m, MA_OWNED); 1934 1935 /* 1936 * Ignore if already inactive. 1937 */ 1938 if ((queue = m->queue) == PQ_INACTIVE) 1939 return; 1940 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { 1941 vm_page_lock_queues(); 1942 m->flags &= ~PG_WINATCFLS; 1943 if (queue != PQ_NONE) 1944 vm_page_queue_remove(queue, m); 1945 if (athead) 1946 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, 1947 pageq); 1948 else 1949 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, 1950 pageq); 1951 m->queue = PQ_INACTIVE; 1952 cnt.v_inactive_count++; 1953 vm_page_unlock_queues(); 1954 } 1955 } 1956 1957 /* 1958 * Move the specified page to the inactive queue. 1959 * 1960 * The page must be locked. 1961 */ 1962 void 1963 vm_page_deactivate(vm_page_t m) 1964 { 1965 1966 _vm_page_deactivate(m, 0); 1967 } 1968 1969 /* 1970 * vm_page_try_to_cache: 1971 * 1972 * Returns 0 on failure, 1 on success 1973 */ 1974 int 1975 vm_page_try_to_cache(vm_page_t m) 1976 { 1977 1978 vm_page_lock_assert(m, MA_OWNED); 1979 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1980 if (m->dirty || m->hold_count || m->busy || m->wire_count || 1981 (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0) 1982 return (0); 1983 pmap_remove_all(m); 1984 if (m->dirty) 1985 return (0); 1986 vm_page_cache(m); 1987 return (1); 1988 } 1989 1990 /* 1991 * vm_page_try_to_free() 1992 * 1993 * Attempt to free the page. If we cannot free it, we do nothing. 1994 * 1 is returned on success, 0 on failure. 
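 *
 *	Illustrative use (a sketch): with the page lock, and the object lock
 *	if the page belongs to an object, held:
 *
 *		if (vm_page_try_to_free(m) == 0)
 *			... the page was dirty, busy, held, or wired and has
 *			    been left alone ...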
1995 */ 1996 int 1997 vm_page_try_to_free(vm_page_t m) 1998 { 1999 2000 vm_page_lock_assert(m, MA_OWNED); 2001 if (m->object != NULL) 2002 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2003 if (m->dirty || m->hold_count || m->busy || m->wire_count || 2004 (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0) 2005 return (0); 2006 pmap_remove_all(m); 2007 if (m->dirty) 2008 return (0); 2009 vm_page_free(m); 2010 return (1); 2011 } 2012 2013 /* 2014 * vm_page_cache 2015 * 2016 * Put the specified page onto the page cache queue (if appropriate). 2017 * 2018 * This routine may not block. 2019 */ 2020 void 2021 vm_page_cache(vm_page_t m) 2022 { 2023 vm_object_t object; 2024 vm_page_t root; 2025 2026 vm_page_lock_assert(m, MA_OWNED); 2027 object = m->object; 2028 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2029 if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy || 2030 m->hold_count || m->wire_count) 2031 panic("vm_page_cache: attempting to cache busy page"); 2032 pmap_remove_all(m); 2033 if (m->dirty != 0) 2034 panic("vm_page_cache: page %p is dirty", m); 2035 if (m->valid == 0 || object->type == OBJT_DEFAULT || 2036 (object->type == OBJT_SWAP && 2037 !vm_pager_has_page(object, m->pindex, NULL, NULL))) { 2038 /* 2039 * Hypothesis: A cache-eligible page belonging to a 2040 * default object or swap object but without a backing 2041 * store must be zero filled. 2042 */ 2043 vm_page_free(m); 2044 return; 2045 } 2046 KASSERT((m->flags & PG_CACHED) == 0, 2047 ("vm_page_cache: page %p is already cached", m)); 2048 PCPU_INC(cnt.v_tcached); 2049 2050 /* 2051 * Remove the page from the paging queues. 2052 */ 2053 vm_pageq_remove(m); 2054 2055 /* 2056 * Remove the page from the object's collection of resident 2057 * pages. 2058 */ 2059 if (m != object->root) 2060 vm_page_splay(m->pindex, object->root); 2061 if (m->left == NULL) 2062 root = m->right; 2063 else { 2064 root = vm_page_splay(m->pindex, m->left); 2065 root->right = m->right; 2066 } 2067 object->root = root; 2068 TAILQ_REMOVE(&object->memq, m, listq); 2069 object->resident_page_count--; 2070 2071 /* 2072 * Restore the default memory attribute to the page. 2073 */ 2074 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 2075 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 2076 2077 /* 2078 * Insert the page into the object's collection of cached pages 2079 * and the physical memory allocator's cache/free page queues. 2080 */ 2081 m->flags &= ~PG_ZERO; 2082 mtx_lock(&vm_page_queue_free_mtx); 2083 m->flags |= PG_CACHED; 2084 cnt.v_cache_count++; 2085 root = object->cache; 2086 if (root == NULL) { 2087 m->left = NULL; 2088 m->right = NULL; 2089 } else { 2090 root = vm_page_splay(m->pindex, root); 2091 if (m->pindex < root->pindex) { 2092 m->left = root->left; 2093 m->right = root; 2094 root->left = NULL; 2095 } else if (__predict_false(m->pindex == root->pindex)) 2096 panic("vm_page_cache: offset already cached"); 2097 else { 2098 m->right = root->right; 2099 m->left = root; 2100 root->right = NULL; 2101 } 2102 } 2103 object->cache = m; 2104 #if VM_NRESERVLEVEL > 0 2105 if (!vm_reserv_free_page(m)) { 2106 #else 2107 if (TRUE) { 2108 #endif 2109 vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0); 2110 vm_phys_free_pages(m, 0); 2111 } 2112 vm_page_free_wakeup(); 2113 mtx_unlock(&vm_page_queue_free_mtx); 2114 2115 /* 2116 * Increment the vnode's hold count if this is the object's only 2117 * cached page. Decrement the vnode's hold count if this was 2118 * the object's only resident page.
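 * (At this point "root" still holds the cache tree root as it was found
 * above, so root == NULL means this page has just become the object's
 * only cached page.)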
2119 */ 2120 if (object->type == OBJT_VNODE) { 2121 if (root == NULL && object->resident_page_count != 0) 2122 vhold(object->handle); 2123 else if (root != NULL && object->resident_page_count == 0) 2124 vdrop(object->handle); 2125 } 2126 } 2127 2128 /* 2129 * vm_page_dontneed 2130 * 2131 * Cache, deactivate, or do nothing as appropriate. This routine 2132 * is typically used by madvise() MADV_DONTNEED. 2133 * 2134 * Generally speaking we want to move the page into the cache so 2135 * it gets reused quickly. However, this can result in a silly syndrome 2136 * due to the page recycling too quickly. Small objects will not be 2137 * fully cached. On the other hand, if we move the page to the inactive 2138 * queue we wind up with a problem whereby very large objects 2139 * unnecessarily blow away our inactive and cache queues. 2140 * 2141 * The solution is to move the pages based on a fixed weighting. We 2142 * either leave them alone, deactivate them, or move them to the cache, 2143 * where moving them to the cache has the highest weighting. 2144 * By forcing some pages into other queues we eventually force the 2145 * system to balance the queues, potentially recovering other unrelated 2146 * space from active. The idea is to not force this to happen too 2147 * often. 2148 */ 2149 void 2150 vm_page_dontneed(vm_page_t m) 2151 { 2152 int dnw; 2153 int head; 2154 2155 vm_page_lock_assert(m, MA_OWNED); 2156 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2157 dnw = PCPU_GET(dnweight); 2158 PCPU_INC(dnweight); 2159 2160 /* 2161 * Occasionally leave the page alone. 2162 */ 2163 if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) { 2164 if (m->act_count >= ACT_INIT) 2165 --m->act_count; 2166 return; 2167 } 2168 2169 /* 2170 * Clear any references to the page. Otherwise, the page daemon will 2171 * immediately reactivate the page. 2172 * 2173 * Perform the pmap_clear_reference() first. Otherwise, a concurrent 2174 * pmap operation, such as pmap_remove(), could clear a reference in 2175 * the pmap and set PGA_REFERENCED on the page before the 2176 * pmap_clear_reference() had completed. Consequently, the page would 2177 * appear referenced based upon an old reference that occurred before 2178 * this function ran. 2179 */ 2180 pmap_clear_reference(m); 2181 vm_page_aflag_clear(m, PGA_REFERENCED); 2182 2183 if (m->dirty == 0 && pmap_is_modified(m)) 2184 vm_page_dirty(m); 2185 2186 if (m->dirty || (dnw & 0x0070) == 0) { 2187 /* 2188 * Deactivate the page 3 times out of 32. 2189 */ 2190 head = 0; 2191 } else { 2192 /* 2193 * Cache the page 28 times out of every 32. Note that 2194 * the page is deactivated instead of cached, but placed 2195 * at the head of the queue instead of the tail. 2196 */ 2197 head = 1; 2198 } 2199 _vm_page_deactivate(m, head); 2200 } 2201 2202 /* 2203 * Grab a page, waiting until we are awakened due to the page 2204 * changing state. We keep on waiting as long as the page continues 2205 * to be in the object. If the page doesn't exist, first allocate it 2206 * and then conditionally zero it. 2207 * 2208 * The caller must always specify the VM_ALLOC_RETRY flag. This is intended 2209 * to facilitate its eventual removal. 2210 * 2211 * This routine may block.
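 *
 * Illustrative sketch (not part of the original sources): with the object
 * locked, obtain a busied, wired page at the given index, which is zeroed
 * only when it had to be newly allocated:
 *
 *	VM_OBJECT_LOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	VM_OBJECT_UNLOCK(object);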
2212 */ 2213 vm_page_t 2214 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 2215 { 2216 vm_page_t m; 2217 2218 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2219 KASSERT((allocflags & VM_ALLOC_RETRY) != 0, 2220 ("vm_page_grab: VM_ALLOC_RETRY is required")); 2221 retrylookup: 2222 if ((m = vm_page_lookup(object, pindex)) != NULL) { 2223 if ((m->oflags & VPO_BUSY) != 0 || 2224 ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) { 2225 /* 2226 * Reference the page before unlocking and 2227 * sleeping so that the page daemon is less 2228 * likely to reclaim it. 2229 */ 2230 vm_page_aflag_set(m, PGA_REFERENCED); 2231 vm_page_sleep(m, "pgrbwt"); 2232 goto retrylookup; 2233 } else { 2234 if ((allocflags & VM_ALLOC_WIRED) != 0) { 2235 vm_page_lock(m); 2236 vm_page_wire(m); 2237 vm_page_unlock(m); 2238 } 2239 if ((allocflags & VM_ALLOC_NOBUSY) == 0) 2240 vm_page_busy(m); 2241 return (m); 2242 } 2243 } 2244 m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY | 2245 VM_ALLOC_IGN_SBUSY)); 2246 if (m == NULL) { 2247 VM_OBJECT_UNLOCK(object); 2248 VM_WAIT; 2249 VM_OBJECT_LOCK(object); 2250 goto retrylookup; 2251 } else if (m->valid != 0) 2252 return (m); 2253 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 2254 pmap_zero_page(m); 2255 return (m); 2256 } 2257 2258 /* 2259 * Mapping function for valid bits or for dirty bits in 2260 * a page. May not block. 2261 * 2262 * Inputs are required to range within a page. 2263 */ 2264 int 2265 vm_page_bits(int base, int size) 2266 { 2267 int first_bit; 2268 int last_bit; 2269 2270 KASSERT( 2271 base + size <= PAGE_SIZE, 2272 ("vm_page_bits: illegal base/size %d/%d", base, size) 2273 ); 2274 2275 if (size == 0) /* handle degenerate case */ 2276 return (0); 2277 2278 first_bit = base >> DEV_BSHIFT; 2279 last_bit = (base + size - 1) >> DEV_BSHIFT; 2280 2281 return ((2 << last_bit) - (1 << first_bit)); 2282 } 2283 2284 /* 2285 * vm_page_set_valid: 2286 * 2287 * Sets portions of a page valid. The arguments are expected 2288 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2289 * of any partial chunks touched by the range. The invalid portion of 2290 * such chunks will be zeroed. 2291 * 2292 * (base + size) must be less than or equal to PAGE_SIZE. 2293 */ 2294 void 2295 vm_page_set_valid(vm_page_t m, int base, int size) 2296 { 2297 int endoff, frag; 2298 2299 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2300 if (size == 0) /* handle degenerate case */ 2301 return; 2302 2303 /* 2304 * If the base is not DEV_BSIZE aligned and the valid 2305 * bit is clear, we have to zero out a portion of the 2306 * first block. 2307 */ 2308 if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2309 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 2310 pmap_zero_page_area(m, frag, base - frag); 2311 2312 /* 2313 * If the ending offset is not DEV_BSIZE aligned and the 2314 * valid bit is clear, we have to zero out a portion of 2315 * the last block. 2316 */ 2317 endoff = base + size; 2318 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2319 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 2320 pmap_zero_page_area(m, endoff, 2321 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2322 2323 /* 2324 * Assert that no previously invalid block that is now being validated 2325 * is already dirty. 2326 */ 2327 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 2328 ("vm_page_set_valid: page %p is dirty", m)); 2329 2330 /* 2331 * Set valid bits inclusive of any overlap.
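	 *
	 * Worked example (illustrative, assuming DEV_BSIZE == 512): for
	 * base == 600 and size == 1000 the range touches blocks 1 through 3,
	 * so vm_page_bits() returns (2 << 3) - (1 << 1) == 0x0e.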
2332 */ 2333 m->valid |= vm_page_bits(base, size); 2334 } 2335 2336 /* 2337 * Clear the given bits from the specified page's dirty field. 2338 */ 2339 static __inline void 2340 vm_page_clear_dirty_mask(vm_page_t m, int pagebits) 2341 { 2342 uintptr_t addr; 2343 #if PAGE_SIZE < 16384 2344 int shift; 2345 #endif 2346 2347 /* 2348 * If the object is locked and the page is neither VPO_BUSY nor 2349 * PGA_WRITEABLE, then the page's dirty field cannot possibly be 2350 * set by a concurrent pmap operation. 2351 */ 2352 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2353 if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) 2354 m->dirty &= ~pagebits; 2355 else { 2356 /* 2357 * The pmap layer can call vm_page_dirty() without 2358 * holding a distinguished lock. The combination of 2359 * the object's lock and an atomic operation suffice 2360 * to guarantee consistency of the page dirty field. 2361 * 2362 * For the PAGE_SIZE == 32768 case, the compiler already 2363 * properly aligns the dirty field, so no forcible 2364 * alignment is needed. Only require existence of 2365 * atomic_clear_64 when page size is 32768. 2366 */ 2367 addr = (uintptr_t)&m->dirty; 2368 #if PAGE_SIZE == 32768 2369 2370 atomic_clear_64((uint64_t *)addr, pagebits); 2371 #elif PAGE_SIZE == 16384 2372 atomic_clear_32((uint32_t *)addr, pagebits); 2373 #else /* PAGE_SIZE <= 8192 */ 2374 /* 2375 * Use a trick to perform a 32-bit atomic on the 2376 * containing aligned word, to not depend on the existence 2377 * of atomic_clear_{8, 16}. 2378 */ 2379 shift = addr & (sizeof(uint32_t) - 1); 2380 #if BYTE_ORDER == BIG_ENDIAN 2381 shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY; 2382 #else 2383 shift *= NBBY; 2384 #endif 2385 addr &= ~(sizeof(uint32_t) - 1); 2386 atomic_clear_32((uint32_t *)addr, pagebits << shift); 2387 #endif /* PAGE_SIZE */ 2388 } 2389 } 2390 2391 /* 2392 * vm_page_set_validclean: 2393 * 2394 * Sets portions of a page valid and clean. The arguments are expected 2395 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2396 * of any partial chunks touched by the range. The invalid portion of 2397 * such chunks will be zeroed. 2398 * 2399 * This routine may not block. 2400 * 2401 * (base + size) must be less than or equal to PAGE_SIZE. 2402 */ 2403 void 2404 vm_page_set_validclean(vm_page_t m, int base, int size) 2405 { 2406 u_long oldvalid; 2407 int endoff, frag, pagebits; 2408 2409 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2410 if (size == 0) /* handle degenerate case */ 2411 return; 2412 2413 /* 2414 * If the base is not DEV_BSIZE aligned and the valid 2415 * bit is clear, we have to zero out a portion of the 2416 * first block. 2417 */ 2418 if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2419 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 2420 pmap_zero_page_area(m, frag, base - frag); 2421 2422 /* 2423 * If the ending offset is not DEV_BSIZE aligned and the 2424 * valid bit is clear, we have to zero out a portion of 2425 * the last block. 2426 */ 2427 endoff = base + size; 2428 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2429 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 2430 pmap_zero_page_area(m, endoff, 2431 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2432 2433 /* 2434 * Set valid, clear dirty bits. If validating the entire 2435 * page we can safely clear the pmap modify bit. We also 2436 * use this opportunity to clear the VPO_NOSYNC flag. If a process 2437 * takes a write fault on a MAP_NOSYNC memory area the flag will 2438 * be set again.
2439 * 2440 * We set valid bits inclusive of any overlap, but we can only 2441 * clear dirty bits for DEV_BSIZE chunks that are fully within 2442 * the range. 2443 */ 2444 oldvalid = m->valid; 2445 pagebits = vm_page_bits(base, size); 2446 m->valid |= pagebits; 2447 #if 0 /* NOT YET */ 2448 if ((frag = base & (DEV_BSIZE - 1)) != 0) { 2449 frag = DEV_BSIZE - frag; 2450 base += frag; 2451 size -= frag; 2452 if (size < 0) 2453 size = 0; 2454 } 2455 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 2456 #endif 2457 if (base == 0 && size == PAGE_SIZE) { 2458 /* 2459 * The page can only be modified within the pmap if it is 2460 * mapped, and it can only be mapped if it was previously 2461 * fully valid. 2462 */ 2463 if (oldvalid == VM_PAGE_BITS_ALL) 2464 /* 2465 * Perform the pmap_clear_modify() first. Otherwise, 2466 * a concurrent pmap operation, such as 2467 * pmap_protect(), could clear a modification in the 2468 * pmap and set the dirty field on the page before 2469 * pmap_clear_modify() had begun and after the dirty 2470 * field was cleared here. 2471 */ 2472 pmap_clear_modify(m); 2473 m->dirty = 0; 2474 m->oflags &= ~VPO_NOSYNC; 2475 } else if (oldvalid != VM_PAGE_BITS_ALL) 2476 m->dirty &= ~pagebits; 2477 else 2478 vm_page_clear_dirty_mask(m, pagebits); 2479 } 2480 2481 void 2482 vm_page_clear_dirty(vm_page_t m, int base, int size) 2483 { 2484 2485 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 2486 } 2487 2488 /* 2489 * vm_page_set_invalid: 2490 * 2491 * Invalidates DEV_BSIZE'd chunks within a page. Both the 2492 * valid and dirty bits for the affected areas are cleared. 2493 * 2494 * May not block. 2495 */ 2496 void 2497 vm_page_set_invalid(vm_page_t m, int base, int size) 2498 { 2499 int bits; 2500 2501 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2502 KASSERT((m->oflags & VPO_BUSY) == 0, 2503 ("vm_page_set_invalid: page %p is busy", m)); 2504 bits = vm_page_bits(base, size); 2505 if (m->valid == VM_PAGE_BITS_ALL && bits != 0) 2506 pmap_remove_all(m); 2507 KASSERT(!pmap_page_is_mapped(m), 2508 ("vm_page_set_invalid: page %p is mapped", m)); 2509 m->valid &= ~bits; 2510 m->dirty &= ~bits; 2511 } 2512 2513 /* 2514 * vm_page_zero_invalid() 2515 * 2516 * The kernel assumes that the invalid portions of a page contain 2517 * garbage, but such pages can be mapped into memory by user code. 2518 * When this occurs, we must zero out the non-valid portions of the 2519 * page so user code sees what it expects. 2520 * 2521 * Pages are most often semi-valid when the end of a file is mapped 2522 * into memory and the file's size is not page aligned. 2523 */ 2524 void 2525 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 2526 { 2527 int b; 2528 int i; 2529 2530 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2531 /* 2532 * Scan the valid bits looking for invalid sections that 2533 * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the 2534 * valid bit may be set ) have already been zeroed by 2535 * vm_page_set_validclean(). 2536 */ 2537 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 2538 if (i == (PAGE_SIZE / DEV_BSIZE) || 2539 (m->valid & (1 << i)) 2540 ) { 2541 if (i > b) { 2542 pmap_zero_page_area(m, 2543 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 2544 } 2545 b = i + 1; 2546 } 2547 } 2548 2549 /* 2550 * setvalid is TRUE when we can safely set the zeroed areas 2551 * as being valid. We can do this if there are no cache consistency 2552 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS.
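 * (With NFS, for instance, another client may change the file on the
 * server, so locally zeroed ranges cannot simply be declared valid; with
 * a purely local filesystem such as UFS no such external writer exists.)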
2553 */ 2554 if (setvalid) 2555 m->valid = VM_PAGE_BITS_ALL; 2556 } 2557 2558 /* 2559 * vm_page_is_valid: 2560 * 2561 * Is (partial) page valid? Note that the case where size == 0 2562 * will return FALSE in the degenerate case where the page is 2563 * entirely invalid, and TRUE otherwise. 2564 * 2565 * May not block. 2566 */ 2567 int 2568 vm_page_is_valid(vm_page_t m, int base, int size) 2569 { 2570 int bits = vm_page_bits(base, size); 2571 2572 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2573 if (m->valid && ((m->valid & bits) == bits)) 2574 return 1; 2575 else 2576 return 0; 2577 } 2578 2579 /* 2580 * update dirty bits from pmap/mmu. May not block. 2581 */ 2582 void 2583 vm_page_test_dirty(vm_page_t m) 2584 { 2585 2586 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2587 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 2588 vm_page_dirty(m); 2589 } 2590 2591 int so_zerocp_fullpage = 0; 2592 2593 /* 2594 * Replace the given page with a copy. The copied page assumes 2595 * the portion of the given page's "wire_count" that is not the 2596 * responsibility of this copy-on-write mechanism. 2597 * 2598 * The object containing the given page must have a non-zero 2599 * paging-in-progress count and be locked. 2600 */ 2601 void 2602 vm_page_cowfault(vm_page_t m) 2603 { 2604 vm_page_t mnew; 2605 vm_object_t object; 2606 vm_pindex_t pindex; 2607 2608 mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED); 2609 vm_page_lock_assert(m, MA_OWNED); 2610 object = m->object; 2611 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2612 KASSERT(object->paging_in_progress != 0, 2613 ("vm_page_cowfault: object %p's paging-in-progress count is zero.", 2614 object)); 2615 pindex = m->pindex; 2616 2617 retry_alloc: 2618 pmap_remove_all(m); 2619 vm_page_remove(m); 2620 mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY); 2621 if (mnew == NULL) { 2622 vm_page_insert(m, object, pindex); 2623 vm_page_unlock(m); 2624 VM_OBJECT_UNLOCK(object); 2625 VM_WAIT; 2626 VM_OBJECT_LOCK(object); 2627 if (m == vm_page_lookup(object, pindex)) { 2628 vm_page_lock(m); 2629 goto retry_alloc; 2630 } else { 2631 /* 2632 * Page disappeared during the wait. 2633 */ 2634 return; 2635 } 2636 } 2637 2638 if (m->cow == 0) { 2639 /* 2640 * check to see if we raced with an xmit complete when 2641 * waiting to allocate a page. 
If so, put things back 2642 * the way they were 2643 */ 2644 vm_page_unlock(m); 2645 vm_page_lock(mnew); 2646 vm_page_free(mnew); 2647 vm_page_unlock(mnew); 2648 vm_page_insert(m, object, pindex); 2649 } else { /* clear COW & copy page */ 2650 if (!so_zerocp_fullpage) 2651 pmap_copy_page(m, mnew); 2652 mnew->valid = VM_PAGE_BITS_ALL; 2653 vm_page_dirty(mnew); 2654 mnew->wire_count = m->wire_count - m->cow; 2655 m->wire_count = m->cow; 2656 vm_page_unlock(m); 2657 } 2658 } 2659 2660 void 2661 vm_page_cowclear(vm_page_t m) 2662 { 2663 2664 vm_page_lock_assert(m, MA_OWNED); 2665 if (m->cow) { 2666 m->cow--; 2667 /* 2668 * let vm_fault add back write permission lazily 2669 */ 2670 } 2671 /* 2672 * sf_buf_free() will free the page, so we needn't do it here 2673 */ 2674 } 2675 2676 int 2677 vm_page_cowsetup(vm_page_t m) 2678 { 2679 2680 vm_page_lock_assert(m, MA_OWNED); 2681 if ((m->flags & PG_FICTITIOUS) != 0 || 2682 (m->oflags & VPO_UNMANAGED) != 0 || 2683 m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object)) 2684 return (EBUSY); 2685 m->cow++; 2686 pmap_remove_write(m); 2687 VM_OBJECT_UNLOCK(m->object); 2688 return (0); 2689 } 2690 2691 #ifdef INVARIANTS 2692 void 2693 vm_page_object_lock_assert(vm_page_t m) 2694 { 2695 2696 /* 2697 * Certain of the page's fields may only be modified by the 2698 * holder of the containing object's lock or the setter of the 2699 * page's VPO_BUSY flag. Unfortunately, the setter of the 2700 * VPO_BUSY flag is not recorded, and thus cannot be checked 2701 * here. 2702 */ 2703 if (m->object != NULL && (m->oflags & VPO_BUSY) == 0) 2704 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2705 } 2706 #endif 2707 2708 #include "opt_ddb.h" 2709 #ifdef DDB 2710 #include <sys/kernel.h> 2711 2712 #include <ddb/ddb.h> 2713 2714 DB_SHOW_COMMAND(page, vm_page_print_page_info) 2715 { 2716 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count); 2717 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count); 2718 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count); 2719 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count); 2720 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count); 2721 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved); 2722 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min); 2723 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target); 2724 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min); 2725 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target); 2726 } 2727 2728 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 2729 { 2730 2731 db_printf("PQ_FREE:"); 2732 db_printf(" %d", cnt.v_free_count); 2733 db_printf("\n"); 2734 2735 db_printf("PQ_CACHE:"); 2736 db_printf(" %d", cnt.v_cache_count); 2737 db_printf("\n"); 2738 2739 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n", 2740 *vm_page_queues[PQ_ACTIVE].cnt, 2741 *vm_page_queues[PQ_INACTIVE].cnt); 2742 } 2743 #endif /* DDB */ 2744
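
/*
 * Usage note (illustrative, not part of the original file): the DDB
 * commands defined above are run from the kernel debugger prompt as
 *
 *	db> show page
 *	db> show pageq
 *
 * printing the vmmeter counters and the page queue lengths, respectively.
 */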