/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- A page queue lock is required when adding or removing a page from a
 *	  page queue regardless of other locks or the busy state of a page.
 *
 *		* In general, no thread besides the page daemon can acquire or
 *		  hold more than one page queue lock at a time.
 *
 *		* The page daemon can acquire and hold any pair of page queue
 *		  locks in any order.
 *
 *	- The object lock is required when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
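 *
 *	  A sketch of the common pattern under these rules (illustrative
 *	  only; error handling omitted):
 *
 *		VM_OBJECT_WLOCK(object);
 *		m = vm_page_lookup(object, pindex);
 *		if (m != NULL) {
 *			vm_page_lock(m);
 *			vm_page_remove(m);
 *			vm_page_unlock(m);
 *		}
 *		VM_OBJECT_WUNLOCK(object);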
 *
 */

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct vm_domain vm_dom[MAXMEMDOM];
struct mtx_padalign vm_page_queue_free_mtx;

struct mtx_padalign pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;
int vm_page_zero_count;

static int boot_pages = UMA_BOOT_PAGES;
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &boot_pages, 0,
    "number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_cache_turn_free(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(uint8_t queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

static void
vm_page_init_fakepg(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
}

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
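 *
 * A sketch of the caller-side retry pattern, as used by the various
 * pmap_extract_and_hold() implementations (illustrative only;
 * PTE_TO_PHYS() stands in for the MD extraction of the physical
 * address from a PTE read under the pmap lock):
 *
 *	pa = 0;
 *retry:
 *	pte = ...;
 *	if (vm_page_pa_tryrelock(pmap, PTE_TO_PHYS(pte), &pa))
 *		goto retry;
 *	...
 *	PA_UNLOCK_COND(pa);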
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}

/*
 *	vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
static void
vm_page_blacklist_check(char *list, char *end)
{
	vm_paddr_t pa;
	vm_page_t m;
	char *next;
	int ret;

	next = list;
	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		m = vm_phys_paddr_to_vm_page(pa);
		if (m == NULL)
			continue;
		mtx_lock(&vm_page_queue_free_mtx);
		ret = vm_phys_unfree_page(m);
		mtx_unlock(&vm_page_queue_free_mtx);
		if (ret == TRUE) {
			TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
			if (bootverbose)
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
		}
	}
}

/*
 *	vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
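 *
 *	Given the parsing rules in vm_page_blacklist_next(), the file is
 *	simply a list of physical addresses separated by space, comma,
 *	or newline, e.g. (hypothetical contents):
 *
 *		0x20c7e000,0x20c7f000
 *		0x4a3b2000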
 */
static void
vm_page_blacklist_load(char **list, char **end)
{
	void *mod;
	u_char *ptr;
	u_int len;

	mod = NULL;
	ptr = NULL;

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	*list = ptr;
	if (ptr != NULL)
		*end = ptr + len;
	else
		*end = NULL;
	return;
}

static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
{
	vm_page_t m;
	struct sbuf sbuf;
	int error, first;

	first = 1;
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static void
vm_page_domain_init(struct vm_domain *vmd)
{
	struct vm_pagequeue *pq;
	int i;

	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
	    &vm_cnt.v_inactive_count;
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
	    &vm_cnt.v_active_count;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	vmd->vmd_pass = 0;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
	}
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	char *list, *listend;
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

#ifdef XEN
	/*
	 * There is no obvious reason why i386 PV Xen needs vm_page structs
	 * created for these pseudo-physical addresses.  XXX
	 */
	vm_phys_add_seg(0, phys_avail[0]);
#endif

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_water)
			low_water = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_water)
			high_water = vm_phys_segs[i].end;
	}
	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(&vm_dom[i]);

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.
	 *
	 * CTLFLAG_RDTUN doesn't work during the early boot process, so we must
	 * manually fetch the value.
	 */
	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
    defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; i < vm_phys_nsegs; i++) {
		page_range += atop(vm_phys_segs[i].end -
		    vm_phys_segs[i].start);
	}
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
	 * like i386, so the pages must be tracked for a crashdump to include
	 * this data.  This includes the vm_page_array and the early UMA
	 * bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical pages.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	vm_cnt.v_page_count = 0;
	vm_cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}

	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;

	vm_page_assert_xbusied(m);

	for (;;) {
		x = m->busy_lock;
		x &= VPB_BIT_WAITERS;
		if (atomic_cmpset_rel_int(&m->busy_lock,
		    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1) | x))
			break;
	}
}

/*
 *	vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = m->busy_lock;
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}

/*
 *	vm_page_sunbusy:
 *
 *	Shared unbusy a page.
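 *
 *	A shared busy is typically paired as follows (sketch only; a
 *	reader that fails the trysbusy would instead block or retry):
 *
 *		if (vm_page_trysbusy(m)) {
 *			... examine the page's contents ...
 *			vm_page_sunbusy(m);
 *		}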
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	for (;;) {
		x = m->busy_lock;
		if (VPB_SHARERS(x) > 1) {
			if (atomic_cmpset_int(&m->busy_lock, x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		if ((x & VPB_BIT_WAITERS) == 0) {
			KASSERT(x == VPB_SHARERS_WORD(1),
			    ("vm_page_sunbusy: invalid lock state"));
			if (atomic_cmpset_int(&m->busy_lock,
			    VPB_SHARERS_WORD(1), VPB_UNBUSIED))
				break;
			continue;
		}
		KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
		    ("vm_page_sunbusy: invalid lock state for waiters"));

		vm_page_lock(m);
		if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
			vm_page_unlock(m);
			continue;
		}
		wakeup(m);
		vm_page_unlock(m);
		break;
	}
}

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep and release the page lock, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	The given page must be locked.
 */
void
vm_page_busy_sleep(vm_page_t m, const char *wmesg)
{
	u_int x;

	vm_page_lock_assert(m, MA_OWNED);

	x = m->busy_lock;
	if (x == VPB_UNBUSIED) {
		vm_page_unlock(m);
		return;
	}
	if ((x & VPB_BIT_WAITERS) == 0 &&
	    !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS)) {
		vm_page_unlock(m);
		return;
	}
	msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
}

/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	Returns 1 if the operation succeeds, otherwise 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	u_int x;

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);
		if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
			return (1);
	}
}

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called when the first attempt at the exclusive unbusy of a page failed.
 *	It is assumed that the waiters bit is on.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{

	vm_page_assert_xbusied(m);

	vm_page_lock(m);
	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	wakeup(m);
	vm_page_unlock(m);
}

/*
 *	vm_page_flash:
 *
 *	Wakeup anyone waiting for the page.
 *	The ownership bits do not change.
 *
 *	The given page must be locked.
 */
void
vm_page_flash(vm_page_t m)
{
	u_int x;

	vm_page_lock_assert(m, MA_OWNED);

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_WAITERS) == 0)
			return;
		if (atomic_cmpset_int(&m->busy_lock, x,
		    x & (~VPB_BIT_WAITERS)))
			break;
	}
	wakeup(m);
}

/*
 * Keep the page from being freed by the page daemon.  This has much
 * the same effect as wiring, except with much lower overhead, and it
 * should be used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
	--mem->hold_count;
	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that are referenced by the given array.
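 *
 *	A sketch of typical use, assuming the array was filled and the
 *	pages held by a helper such as vm_fault_quick_hold_pages():
 *
 *		count = vm_fault_quick_hold_pages(map, addr, len,
 *		    VM_PROT_READ, ma, nitems(ma));
 *		if (count != -1) {
 *			... access the held pages ...
 *			vm_page_unhold_pages(ma, count);
 *		}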
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	m->wire_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
 * array which is not the request page.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	if (m->valid != 0) {
		/*
		 * Since the page is not the requested page, whether
		 * it should be activated or deactivated is not
		 * obvious.  Empirical results have shown that
		 * deactivating the page is usually the best choice,
		 * unless the page is wanted by another thread.
		 */
		vm_page_lock(m);
		if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
			vm_page_activate(m);
		else
			vm_page_deactivate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	} else {
		/*
		 * Free the completely invalid page.  Such a page state
		 * occurs when a short read operation did not cover our
		 * page at all, or when a read error happened.
		 */
		vm_page_lock(m);
		vm_page_free(m);
		vm_page_unlock(m);
	}
}

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if the page is busied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and the object containing it must
 *	be locked.
 */
int
vm_page_sleep_if_busy(vm_page_t m, const char *msg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	if (vm_page_busied(m)) {
		/*
		 * The page-specific object must be cached because page
		 * identity can change during the sleep, causing the
		 * re-lock of a different object.
		 * It is assumed that the callers already hold a
		 * reference to the object.
		 */
		obj = m->object;
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(obj);
		vm_page_busy_sleep(m, msg);
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* These assertions refer to this operation by its public name. */
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));
}

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
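 *
 *	Callers obtain "mpred" from the object's radix trie; for example,
 *	vm_page_insert() above uses:
 *
 *		mpred = vm_radix_lookup_le(&object->rtree, pindex);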
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	vm_pindex_t sidx;
	vm_object_t sobj;
	vm_page_t msucc;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page
	 */
	sobj = m->object;
	sidx = m->pindex;
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {
		m->object = sobj;
		m->pindex = sidx;
		return (1);
	}
	vm_page_insert_radixdone(m, object, mpred);
	return (0);
}

/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object must be locked.  The page must be locked if it is managed.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	boolean_t lockacq;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (vm_page_xbusied(m)) {
		lockacq = FALSE;
		if ((m->oflags & VPO_UNMANAGED) != 0 &&
		    !mtx_owned(vm_page_lockptr(m))) {
			lockacq = TRUE;
			vm_page_lock(m);
		}
		vm_page_flash(m);
		atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
		if (lockacq)
			vm_page_unlock(m);
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	vm_radix_remove(&object->rtree, m->pindex);
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->pindex != m->pindex - 1)
		prev = NULL;
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex which must be already present in the object.
 *
 * The existing page must not be on a paging queue.
 */
vm_page_t
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mold, mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */
	mpred = vm_radix_lookup(&object->rtree, pindex);
	KASSERT(mpred != NULL,
	    ("vm_page_replace: replacing page not present with pindex"));
	mpred = TAILQ_PREV(mpred, respgs, listq);
	if (mpred != NULL)
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_replace: mpred doesn't precede pindex"));

	mnew->object = object;
	mnew->pindex = pindex;
	mold = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mold->queue == PQ_NONE,
	    ("vm_page_replace: mold is on a paging queue"));

	/* Detach the old page from the resident tailq. */
	TAILQ_REMOVE(&object->memq, mold, listq);

	mold->object = NULL;
	vm_page_xunbusy(mold);

	/* Insert the new page in the resident tailq. */
	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, mnew, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, mnew, listq);
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	return (mold);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons:  (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on m_prev and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_lock(m);
	vm_page_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;
	vm_page_unlock(m);
	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	return (0);
}

/*
 * Convert all of the given object's cached pages that have a
 * pindex within the given range into free pages.  If the value
 * zero is given for "end", then the range's upper bound is
 * infinity.  If the given object is backed by a vnode and it
 * transitions from having one or more cached pages to none, the
 * vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m;
	boolean_t empty;

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(vm_radix_is_empty(&object->cache))) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return;
	}
	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
		if (end != 0 && m->pindex >= end)
			break;
		vm_radix_remove(&object->cache, m->pindex);
		vm_page_cache_turn_free(m);
	}
	empty = vm_radix_is_empty(&object->cache);
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);
}

/*
 * Returns the cached page that is associated with the given
 * object and offset.  If, however, none exists, returns NULL.
 *
 * The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	return (vm_radix_lookup(&object->cache, pindex));
}

/*
 * Remove the given cached page from its containing object's
 * collection of cached pages.
 *
 * The free page queue must be locked.
 */
static void
vm_page_cache_remove(vm_page_t m)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	vm_radix_remove(&m->object->cache, m->pindex);
	m->object = NULL;
	vm_cnt.v_cache_count--;
}

/*
 * Transfer all of the cached pages with offset greater than or
 * equal to 'offidxstart' from the original object's cache to the
 * new object's cache.  However, any cached pages with offset
 * greater than or equal to the new object's size are kept in the
 * original object.  Initially, the new object's cache must be
 * empty.  Offset 'offidxstart' in the original object must
 * correspond to offset zero in the new object.
 *
 * The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
	vm_page_t m;

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_ASSERT_WLOCKED(new_object);
	KASSERT(vm_radix_is_empty(&new_object->cache),
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	while ((m = vm_radix_lookup_ge(&orig_object->cache,
	    offidxstart)) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		if ((m->pindex - offidxstart) >= new_object->size)
			break;
		vm_radix_remove(&orig_object->cache, m->pindex);
		/* Update the page's object and offset. */
		m->object = new_object;
		m->pindex -= offidxstart;
		if (vm_radix_insert(&new_object->cache, m))
			vm_page_cache_turn_free(m);
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Returns TRUE if a cached page is associated with the given object and
 * offset, and FALSE otherwise.
 *
 * The object must be locked.
 */
boolean_t
vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Insertion into an object's collection of cached pages requires the
	 * object to be locked.  Therefore, if the object is locked and the
	 * object's collection is empty, there is no need to acquire the free
	 * page queues lock in order to prove that the specified page doesn't
	 * exist.
	 */
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (__predict_true(vm_object_cache_is_empty(object)))
		return (FALSE);
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_page_cache_lookup(object, pindex);
	mtx_unlock(&vm_page_queue_free_mtx);
	return (m != NULL);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_IFCACHED	return page only if it is cached
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	struct vnode *vp = NULL;
	vm_object_t m_object;
	vm_page_t m, mpred;
	int flags, req_class;

	mpred = 0;	/* XXX: pacify gcc */
	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
	    req));
	if (object != NULL)
		VM_OBJECT_ASSERT_WLOCKED(object);

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	if (object != NULL) {
		mpred = vm_radix_lookup_le(&object->rtree, pindex);
		KASSERT(mpred == NULL || mpred->pindex != pindex,
		    ("vm_page_alloc: pindex already allocated"));
	}

	/*
	 * The page allocation request can come from consumers which already
	 * hold the free page queue mutex, like vm_page_insert() in
	 * vm_page_cache().
	 */
	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
				return (NULL);
			}
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
#else
			else
#endif
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
			return (NULL);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || (object->flags & (OBJ_COLORED |
		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
#else
		} else {
#endif
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(!vm_page_sbusied(m),
	    ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			vm_cnt.v_reactivated++;
		else
			m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE &&
		    vm_object_cache_is_empty(m_object))
			vp = m_object->handle;
	} else {
		KASSERT(m->valid == 0,
		    ("vm_page_alloc: free page %p is valid", m));
		vm_phys_freecnt_adj(m, -1);
		if ((m->flags & PG_ZERO) != 0)
			vm_page_zero_count--;
	}
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	flags &= m->flags;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	m->flags = flags;
	m->aflags = 0;
	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
	    VPO_UNMANAGED : 0;
	m->busy_lock = VPB_UNBUSIED;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	if ((req & VM_ALLOC_SBUSY) != 0)
		m->busy_lock = VPB_SHARERS_WORD(1);
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&vm_cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		if (vm_page_insert_after(m, object, pindex, mpred)) {
			/* See the comment below about hold count. */
			if (vp != NULL)
				vdrop(vp);
			pagedaemon_wakeup();
			if (req & VM_ALLOC_WIRED) {
				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
				m->wire_count = 0;
			}
			m->object = NULL;
			vm_page_free(m);
			return (NULL);
		}

		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    (object->flags & OBJ_FICTITIOUS) == 0)
			pmap_page_set_memattr(m, object->memattr);
	} else
		m->pindex = pindex;

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */
	if (vp != NULL)
		vdrop(vp);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}

static void
vm_page_alloc_contig_vdrop(struct spglist *lst)
{

	while (!SLIST_EMPTY(lst)) {
		vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
		SLIST_REMOVE_HEAD(lst, plinks.s.ss);
	}
}

/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.  Both "alignment"
 *	and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)
{
	struct vnode *drop;
	struct spglist deferred_vdrop_list;
	vm_page_t m, m_tmp, m_ret;
	u_int flags;
	int req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
	    req));
	if (object != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(object);
		KASSERT(object->type == OBJT_PHYS,
		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
		    object));
	}
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	SLIST_INIT(&deferred_vdrop_list);
	mtx_lock(&vm_page_queue_free_mtx);
	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
	    vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
	    vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages)) {
#if VM_NRESERVLEVEL > 0
retry:
		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
		    low, high, alignment, boundary)) == NULL)
#endif
			m_ret = vm_phys_alloc_contig(npages, low, high,
			    alignment, boundary);
	} else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, npages);
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m_ret != NULL)
		for (m = m_ret; m < &m_ret[npages]; m++) {
			drop = vm_page_alloc_init(m);
			if (drop != NULL) {
				/*
				 * Enqueue the vnode for deferred vdrop().
				 */
				m->plinks.s.pv = drop;
				SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
				    plinks.s.ss);
			}
		}
	else {
#if VM_NRESERVLEVEL > 0
		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
		    boundary))
			goto retry;
#endif
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	if (m_ret == NULL)
		return (NULL);

	/*
	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	if ((req & VM_ALLOC_WIRED) != 0)
		atomic_add_int(&vm_cnt.v_wire_count, npages);
	if (object != NULL) {
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    memattr == VM_MEMATTR_DEFAULT)
			memattr = object->memattr;
	}
	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->aflags = 0;
		m->flags = (m->flags | PG_NODUMP) & flags;
		m->busy_lock = VPB_UNBUSIED;
		if (object != NULL) {
			if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
				m->busy_lock = VPB_SINGLE_EXCLUSIVER;
			if ((req & VM_ALLOC_SBUSY) != 0)
				m->busy_lock = VPB_SHARERS_WORD(1);
		}
		if ((req & VM_ALLOC_WIRED) != 0)
			m->wire_count = 1;
		/* Unmanaged pages don't use "act_count". */
		m->oflags = VPO_UNMANAGED;
		if (object != NULL) {
			if (vm_page_insert(m, object, pindex)) {
				vm_page_alloc_contig_vdrop(
				    &deferred_vdrop_list);
				if (vm_paging_needed())
					pagedaemon_wakeup();
				if ((req & VM_ALLOC_WIRED) != 0)
					atomic_subtract_int(&vm_cnt.v_wire_count,
					    npages);
				for (m_tmp = m, m = m_ret;
				    m < &m_ret[npages]; m++) {
					if ((req & VM_ALLOC_WIRED) != 0)
						m->wire_count = 0;
					if (m >= m_tmp)
						m->object = NULL;
					vm_page_free(m);
				}
				return (NULL);
			}
		} else
			m->pindex = pindex;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
		pindex++;
	}
	vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m_ret);
}

/*
 * Initialize a page that has been freshly dequeued from a freelist.
 * The caller has to drop the vnode returned, if it is not NULL.
 *
 * This function may only be used to initialize unmanaged pages.
 *
 * To be called with vm_page_queue_free_mtx held.
 */
static struct vnode *
vm_page_alloc_init(vm_page_t m)
{
	struct vnode *drop;
	vm_object_t m_object;

	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc_init: page %p has unexpected queue %d",
	    m, m->queue));
	KASSERT(m->wire_count == 0,
	    ("vm_page_alloc_init: page %p is wired", m));
	KASSERT(m->hold_count == 0,
	    ("vm_page_alloc_init: page %p is held", m));
	KASSERT(!vm_page_sbusied(m),
	    ("vm_page_alloc_init: page %p is busy", m));
	KASSERT(m->dirty == 0,
	    ("vm_page_alloc_init: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	drop = NULL;
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
		m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE &&
		    vm_object_cache_is_empty(m_object))
			drop = m_object->handle;
	} else {
		KASSERT(m->valid == 0,
		    ("vm_page_alloc_init: free page %p is valid", m));
		vm_phys_freecnt_adj(m, -1);
		if ((m->flags & PG_ZERO) != 0)
			vm_page_zero_count--;
	}
	return (drop);
}

/*
 *	vm_page_alloc_freelist:
 *
 *	Allocate a physical page from the specified free page list.
 *
 *	The caller must always specify an allocation class.
/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Sleep until free pages are available for allocation.
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
	    "pfault", 0);
}

struct vm_pagequeue *
vm_page_pagequeue(vm_page_t m)
{

	return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]);
}

/*
 *	vm_page_dequeue:
 *
 *	Remove the given page from its current page queue.
 *
 *	The page must be locked.
 */
void
vm_page_dequeue(vm_page_t m)
{
	struct vm_pagequeue *pq;

	vm_page_assert_locked(m);
	KASSERT(m->queue < PQ_COUNT, ("vm_page_dequeue: page %p is not queued",
	    m));
	pq = vm_page_pagequeue(m);
	vm_pagequeue_lock(pq);
	m->queue = PQ_NONE;
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
	vm_pagequeue_unlock(pq);
}

/*
 *	vm_page_dequeue_locked:
 *
 *	Remove the given page from its current page queue.
 *
 *	The page and page queue must be locked.
 */
void
vm_page_dequeue_locked(vm_page_t m)
{
	struct vm_pagequeue *pq;

	vm_page_lock_assert(m, MA_OWNED);
	pq = vm_page_pagequeue(m);
	vm_pagequeue_assert_locked(pq);
	m->queue = PQ_NONE;
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

/*
 *	vm_page_enqueue:
 *
 *	Add the given page to the specified page queue.
 *
 *	The page must be locked.
 */
static void
vm_page_enqueue(uint8_t queue, vm_page_t m)
{
	struct vm_pagequeue *pq;

	vm_page_lock_assert(m, MA_OWNED);
	KASSERT(queue < PQ_COUNT,
	    ("vm_page_enqueue: invalid queue %u request for page %p",
	    queue, m));
	pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	m->queue = queue;
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_inc(pq);
	vm_pagequeue_unlock(pq);
}

/*
 *	vm_page_requeue:
 *
 *	Move the given page to the tail of its current page queue.
 *
 *	The page must be locked.
 */
void
vm_page_requeue(vm_page_t m)
{
	struct vm_pagequeue *pq;

	vm_page_lock_assert(m, MA_OWNED);
	KASSERT(m->queue != PQ_NONE,
	    ("vm_page_requeue: page %p is not queued", m));
	pq = vm_page_pagequeue(m);
	vm_pagequeue_lock(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_unlock(pq);
}

/*
 *	vm_page_requeue_locked:
 *
 *	Move the given page to the tail of its current page queue.
 *
 *	The page queue must be locked.
 */
void
vm_page_requeue_locked(vm_page_t m)
{
	struct vm_pagequeue *pq;

	KASSERT(m->queue != PQ_NONE,
	    ("vm_page_requeue_locked: page %p is not queued", m));
	pq = vm_page_pagequeue(m);
	vm_pagequeue_assert_locked(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
}
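/*
 * Illustrative sketch: per the locking rules at the top of this file, a
 * caller refreshing a page's LRU position holds only the page lock and
 * lets vm_page_requeue() take the page queue lock internally:
 *
 *	vm_page_lock(m);
 *	if (m->queue != PQ_NONE)
 *		vm_page_requeue(m);
 *	vm_page_unlock(m);
 */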
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page must be locked.
 */
void
vm_page_activate(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	if ((queue = m->queue) != PQ_ACTIVE) {
		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			if (queue != PQ_NONE)
				vm_page_dequeue(m);
			vm_page_enqueue(PQ_ACTIVE, m);
		} else
			KASSERT(queue == PQ_NONE,
			    ("vm_page_activate: wired page %p is queued", m));
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
}
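/*
 * Illustrative sketch: a fault handler or pager that has just referenced
 * a managed page typically marks it active under the page lock:
 *
 *	vm_page_lock(m);
 *	vm_page_activate(m);
 *	vm_page_unlock(m);
 */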
/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	The page queues must be locked.
 */
static inline void
vm_page_free_wakeup(void)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/*
	 * If the pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vm_cnt.v_cache_count + vm_cnt.v_free_count >=
	    vm_cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * Wake up processes that are waiting on memory if we hit a
	 * high water mark, and wake up the scheduler process if we have
	 * lots of memory; that process will swap in other processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&vm_cnt.v_free_count);
	}
}

/*
 * Turn a cached page into a free page, by changing its attributes.
 * Keep the statistics up-to-date.
 *
 * The free page queue must be locked.
 */
static void
vm_page_cache_turn_free(vm_page_t m)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	m->object = NULL;
	m->valid = 0;
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_turn_free: page %p is not cached", m));
	m->flags &= ~PG_CACHED;
	vm_cnt.v_cache_count--;
	vm_phys_freecnt_adj(m, 1);
}

/*
 *	vm_page_free_toq:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	The object must be locked.  The page must be locked if it is managed.
 */
void
vm_page_free_toq(vm_page_t m)
{

	if ((m->oflags & VPO_UNMANAGED) == 0) {
		vm_page_lock_assert(m, MA_OWNED);
		KASSERT(!pmap_page_is_mapped(m),
		    ("vm_page_free_toq: freeing mapped page %p", m));
	} else
		KASSERT(m->queue == PQ_NONE,
		    ("vm_page_free_toq: unmanaged page %p is queued", m));
	PCPU_INC(cnt.v_tfree);

	if (vm_page_sbusied(m))
		panic("vm_page_free: freeing busy page %p", m);

	/*
	 * Unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_remque(m);
	vm_page_remove(m);

	/*
	 * If the page is fictitious, the object association has already
	 * been removed above and we are done; otherwise, continue tearing
	 * the page down.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0)
		panic("vm_page_free: freeing wired page %p", m);
	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
		m->flags |= PG_UNHOLDFREE;
	} else {
		/*
		 * Restore the default memory attribute to the page.
		 */
		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

		/*
		 * Insert the page into the physical memory allocator's
		 * cache/free page queues.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		vm_phys_freecnt_adj(m, 1);
#if VM_NRESERVLEVEL > 0
		if (!vm_reserv_free_page(m))
#else
		if (TRUE)
#endif
			vm_phys_free_pages(m, 0);
		if ((m->flags & PG_ZERO) != 0)
			++vm_page_zero_count;
		else
			vm_page_zero_idle_wakeup();
		vm_page_free_wakeup();
		mtx_unlock(&vm_page_queue_free_mtx);
	}
}
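/*
 * Illustrative sketch: freeing a managed page that is no longer in use
 * (vm_page_free() wraps vm_page_free_toq()); the caller holds the object
 * lock and has already torn down any mappings, per the assertions above:
 *
 *	vm_page_lock(m);
 *	vm_page_free(m);
 *	vm_page_unlock(m);
 */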
/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	If the page is fictitious, then its wire count must remain one.
 *
 *	The page must be locked.
 */
void
vm_page_wire(vm_page_t m)
{

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	vm_page_lock_assert(m, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		KASSERT(m->wire_count == 1,
		    ("vm_page_wire: fictitious page %p's wire count isn't one",
		    m));
		return;
	}
	if (m->wire_count == 0) {
		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
		    m->queue == PQ_NONE,
		    ("vm_page_wire: unmanaged page %p is queued", m));
		vm_page_remque(m);
		atomic_add_int(&vm_cnt.v_wire_count, 1);
	}
	m->wire_count++;
	KASSERT(m->wire_count != 0,
	    ("vm_page_wire: wire_count overflow m=%p", m));
}

/*
 * vm_page_unwire:
 *
 * Release one wiring of the specified page, potentially enabling it to be
 * paged again.  If paging is enabled, then the value of the parameter
 * "queue" determines the queue to which the page is added.
 *
 * However, unless the page belongs to an object, it is not enqueued because
 * it cannot be paged out.
 *
 * If a page is fictitious, then its wire count must always be one.
 *
 * A managed page must be locked.
 */
void
vm_page_unwire(vm_page_t m, uint8_t queue)
{

	KASSERT(queue < PQ_COUNT,
	    ("vm_page_unwire: invalid queue %u request for page %p",
	    queue, m));
	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		KASSERT(m->wire_count == 1,
		    ("vm_page_unwire: fictitious page %p's wire count isn't one",
		    m));
		return;
	}
	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
			if ((m->oflags & VPO_UNMANAGED) != 0 ||
			    m->object == NULL)
				return;
			if (queue == PQ_INACTIVE)
				m->flags &= ~PG_WINATCFLS;
			vm_page_enqueue(queue, m);
		}
	} else
		panic("vm_page_unwire: page %p's wire count is zero", m);
}
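/*
 * Illustrative sketch: code that must keep a page resident across an
 * operation (e.g., an I/O) brackets it with a wire/unwire pair, releasing
 * the page to the inactive queue afterwards:
 *
 *	vm_page_lock(m);
 *	vm_page_wire(m);
 *	vm_page_unlock(m);
 *	... use the page ...
 *	vm_page_lock(m);
 *	vm_page_unwire(m, PQ_INACTIVE);
 *	vm_page_unlock(m);
 */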
/*
 * Move the specified page to the inactive queue.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, reclaimed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * Normally "athead" is 0, resulting in LRU operation.  "athead" is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * The page must be locked.
 */
static inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	struct vm_pagequeue *pq;
	int queue;

	vm_page_lock_assert(m, MA_OWNED);

	/*
	 * Ignore if already inactive.
	 */
	if ((queue = m->queue) == PQ_INACTIVE)
		return;
	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
		if (queue != PQ_NONE)
			vm_page_dequeue(m);
		m->flags &= ~PG_WINATCFLS;
		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
		vm_pagequeue_lock(pq);
		m->queue = PQ_INACTIVE;
		if (athead)
			TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
		else
			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
		vm_pagequeue_cnt_inc(pq);
		vm_pagequeue_unlock(pq);
	}
}

/*
 * Move the specified page to the inactive queue.
 *
 * The page must be locked.
 */
void
vm_page_deactivate(vm_page_t m)
{

	_vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success.
 */
int
vm_page_try_to_cache(vm_page_t m)
{

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty || m->hold_count || m->wire_count ||
	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
		return (0);
	pmap_remove_all(m);
	if (m->dirty)
		return (0);
	vm_page_cache(m);
	return (1);
}

/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{

	vm_page_lock_assert(m, MA_OWNED);
	if (m->object != NULL)
		VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty || m->hold_count || m->wire_count ||
	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
		return (0);
	pmap_remove_all(m);
	if (m->dirty)
		return (0);
	vm_page_free(m);
	return (1);
}
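/*
 * Illustrative sketch: an opportunistic reclaim path, with the page's
 * object write-locked, checks the return value and falls back to
 * deactivation when the page cannot be freed:
 *
 *	vm_page_lock(m);
 *	if (!vm_page_try_to_free(m))
 *		vm_page_deactivate(m);
 *	vm_page_unlock(m);
 */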
/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * The object and page must be locked.
 */
void
vm_page_cache(vm_page_t m)
{
	vm_object_t object;
	boolean_t cache_was_empty;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (vm_page_busied(m) || (m->oflags & VPO_UNMANAGED) ||
	    m->hold_count || m->wire_count)
		panic("vm_page_cache: attempting to cache busy page");
	KASSERT(!pmap_page_is_mapped(m),
	    ("vm_page_cache: page %p is mapped", m));
	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
	    (object->type == OBJT_SWAP &&
	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
		/*
		 * Hypothesis: A cache-eligible page belonging to a
		 * default object or swap object but without a backing
		 * store must be zero filled.
		 */
		vm_page_free(m);
		return;
	}
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_cache: page %p is already cached", m));

	/*
	 * Remove the page from the paging queues.
	 */
	vm_page_remque(m);

	/*
	 * Remove the page from the object's collection of resident
	 * pages.
	 */
	vm_radix_remove(&object->rtree, m->pindex);
	TAILQ_REMOVE(&object->memq, m, listq);
	object->resident_page_count--;

	/*
	 * Restore the default memory attribute to the page.
	 */
	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

	/*
	 * Insert the page into the object's collection of cached pages
	 * and the physical memory allocator's cache/free page queues.
	 */
	m->flags &= ~PG_ZERO;
	mtx_lock(&vm_page_queue_free_mtx);
	cache_was_empty = vm_radix_is_empty(&object->cache);
	if (vm_radix_insert(&object->cache, m)) {
		mtx_unlock(&vm_page_queue_free_mtx);
		if (object->resident_page_count == 0)
			vdrop(object->handle);
		m->object = NULL;
		vm_page_free(m);
		return;
	}

	/*
	 * The above call to vm_radix_insert() could reclaim the one pre-
	 * existing cached page from this object, resulting in a call to
	 * vdrop().
	 */
	if (!cache_was_empty)
		cache_was_empty = vm_radix_is_singleton(&object->cache);

	m->flags |= PG_CACHED;
	vm_cnt.v_cache_count++;
	PCPU_INC(cnt.v_tcached);
#if VM_NRESERVLEVEL > 0
	if (!vm_reserv_free_page(m)) {
#else
	if (TRUE) {
#endif
		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
		vm_phys_free_pages(m, 0);
	}
	vm_page_free_wakeup();
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Increment the vnode's hold count if this is the object's only
	 * cached page.  Decrement the vnode's hold count if this was
	 * the object's only resident page.
	 */
	if (object->type == OBJT_VNODE) {
		if (cache_was_empty && object->resident_page_count != 0)
			vhold(object->handle);
		else if (!cache_was_empty && object->resident_page_count == 0)
			vdrop(object->handle);
	}
}
/*
 * vm_page_advise
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is used by madvise().
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 *
 *	The object and page must be locked.
 */
void
vm_page_advise(vm_page_t m, int advice)
{
	int dnw, head;

	vm_page_assert_locked(m);
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (advice == MADV_FREE) {
		/*
		 * Mark the page clean.  This will allow the page to be freed
		 * up by the system.  However, such pages are often reused
		 * quickly by malloc() so we do not do anything that would
		 * cause a page fault if we can help it.
		 *
		 * Specifically, we do not try to actually free the page now
		 * nor do we try to put it in the cache (which would cause a
		 * page fault on reuse).
		 *
		 * But we do make the page as freeable as we can without
		 * actually taking the step of unmapping it.
		 */
		m->dirty = 0;
		m->act_count = 0;
	} else if (advice != MADV_DONTNEED)
		return;
	dnw = PCPU_GET(dnweight);
	PCPU_INC(dnweight);

	/*
	 * Occasionally leave the page alone.
	 */
	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		return;
	}

	/*
	 * Clear any references to the page.  Otherwise, the page daemon will
	 * immediately reactivate the page.
	 */
	vm_page_aflag_clear(m, PGA_REFERENCED);

	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
		vm_page_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
}

/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting if the page continues
 * to be busy in the object.  If the page doesn't exist, first allocate
 * it and then conditionally zero it.
 *
 * This routine may sleep.
 *
 * The object must be locked on entry.  The lock will, however, be released
 * and reacquired if the routine sleeps.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int sleep;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
		    vm_page_xbusied(m) : vm_page_busied(m);
		if (sleep) {
			if ((allocflags & VM_ALLOC_NOWAIT) != 0)
				return (NULL);
			/*
			 * Reference the page before unlocking and
			 * sleeping so that the page daemon is less
			 * likely to reclaim it.
			 */
			vm_page_aflag_set(m, PGA_REFERENCED);
			vm_page_lock(m);
			VM_OBJECT_WUNLOCK(object);
			vm_page_busy_sleep(m, "pgrbwt");
			VM_OBJECT_WLOCK(object);
			goto retrylookup;
		} else {
			if ((allocflags & VM_ALLOC_WIRED) != 0) {
				vm_page_lock(m);
				vm_page_wire(m);
				vm_page_unlock(m);
			}
			if ((allocflags &
			    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
				vm_page_xbusy(m);
			if ((allocflags & VM_ALLOC_SBUSY) != 0)
				vm_page_sbusy(m);
			return (m);
		}
	}
	m = vm_page_alloc(object, pindex, allocflags);
	if (m == NULL) {
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (NULL);
		VM_OBJECT_WUNLOCK(object);
		VM_WAIT;
		VM_OBJECT_WLOCK(object);
		goto retrylookup;
	} else if (m->valid != 0)
		return (m);
	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	return (m);
}
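/*
 * Illustrative sketch: grabbing a wired, zero-filled page at index "pidx"
 * of a locked object; the returned page is exclusive busied by default,
 * so the caller unbusies it when initialization is complete:
 *
 *	VM_OBJECT_WLOCK(obj);
 *	m = vm_page_grab(obj, pidx,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	... initialize the page ...
 *	vm_page_xunbusy(m);
 *	VM_OBJECT_WUNLOCK(obj);
 */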
/*
 * Mapping function for valid or dirty bits in a page.
 *
 * Inputs are required to range within a page.
 */
vm_page_bits_t
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size));

	if (size == 0)		/* handle degenerate case */
		return (0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
}

/*
 *	vm_page_set_valid_range:
 *
 *	Sets portions of a page valid.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_valid_range(vm_page_t m, int base, int size)
{
	int endoff, frag;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)		/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Assert that no previously invalid block that is now being validated
	 * is already dirty.
	 */
	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
	    ("vm_page_set_valid_range: page %p is dirty", m));

	/*
	 * Set valid bits inclusive of any overlap.
	 */
	m->valid |= vm_page_bits(base, size);
}
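/*
 * Worked example for vm_page_bits() (assuming the usual DEV_BSIZE of 512,
 * i.e., DEV_BSHIFT of 9): vm_page_bits(512, 1024) computes first_bit = 1
 * and last_bit = (512 + 1024 - 1) >> 9 = 2, yielding
 * (2 << 2) - (1 << 1) = 0x6, the mask covering DEV_BSIZE blocks 1 and 2
 * of the page.
 */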
/*
 * Clear the given bits from the specified page's dirty field.
 */
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
{
	uintptr_t addr;
#if PAGE_SIZE < 16384
	int shift;
#endif

	/*
	 * If the object is locked and the page is neither exclusive busy nor
	 * write mapped, then the page's dirty field cannot possibly be
	 * set by a concurrent pmap operation.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
		m->dirty &= ~pagebits;
	else {
		/*
		 * The pmap layer can call vm_page_dirty() without
		 * holding a distinguished lock.  The combination of
		 * the object's lock and an atomic operation suffices
		 * to guarantee consistency of the page dirty field.
		 *
		 * For the PAGE_SIZE == 32768 case, the compiler already
		 * properly aligns the dirty field, so no forcible
		 * alignment is needed.  Only require the existence of
		 * atomic_clear_64 when the page size is 32768.
		 */
		addr = (uintptr_t)&m->dirty;
#if PAGE_SIZE == 32768
		atomic_clear_64((uint64_t *)addr, pagebits);
#elif PAGE_SIZE == 16384
		atomic_clear_32((uint32_t *)addr, pagebits);
#else		/* PAGE_SIZE <= 8192 */
		/*
		 * Use a trick to perform a 32-bit atomic on the
		 * containing aligned word, to not depend on the existence
		 * of atomic_clear_{8, 16}.
		 */
		shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
#else
		shift *= NBBY;
#endif
		addr &= ~(sizeof(uint32_t) - 1);
		atomic_clear_32((uint32_t *)addr, pagebits << shift);
#endif		/* PAGE_SIZE */
	}
}

/*
 *	vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zero'd.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	vm_page_bits_t oldvalid, pagebits;
	int endoff, frag;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)		/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	oldvalid = m->valid;
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	if (base == 0 && size == PAGE_SIZE) {
		/*
		 * The page can only be modified within the pmap if it is
		 * mapped, and it can only be mapped if it was previously
		 * fully valid.
		 */
		if (oldvalid == VM_PAGE_BITS_ALL)
			/*
			 * Perform the pmap_clear_modify() first.  Otherwise,
			 * a concurrent pmap operation, such as
			 * pmap_protect(), could clear a modification in the
			 * pmap and set the dirty field on the page before
			 * pmap_clear_modify() had begun and after the dirty
			 * field was cleared here.
			 */
			pmap_clear_modify(m);
		m->dirty = 0;
		m->oflags &= ~VPO_NOSYNC;
	} else if (oldvalid != VM_PAGE_BITS_ALL)
		m->dirty &= ~pagebits;
	else
		vm_page_clear_dirty_mask(m, pagebits);
}
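/*
 * Worked example for the 32-bit atomic trick above (assuming PAGE_SIZE of
 * 4096, so "dirty" is an 8-bit field, and a hypothetical layout where
 * "dirty" lives at byte offset 2 within its aligned 32-bit word): on a
 * little-endian machine, shift is 2 * NBBY = 16, so clearing bit 0x01 of
 * the field becomes atomic_clear_32(word, 0x01 << 16) on the containing
 * word.
 */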
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{

	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
}

/*
 *	vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	vm_page_bits_t bits;
	vm_object_t object;

	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
	    size >= object->un_pager.vnp.vnp_size)
		bits = VM_PAGE_BITS_ALL;
	else
		bits = vm_page_bits(base, size);
	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
		pmap_remove_all(m);
	KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
	    !pmap_page_is_mapped(m),
	    ("vm_page_set_invalid: page %p is mapped", m));
	m->valid &= ~bits;
	m->dirty &= ~bits;
}

/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
	 * valid bit may be set) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & ((vm_page_bits_t)1 << i))) {
			if (i > b) {
				pmap_zero_page_area(m,
				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  E.g., it is OK to do with UFS, but not OK to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}
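/*
 * Illustrative sketch: a filesystem that has produced a partially valid
 * page (e.g., the last page of a file whose size is not page aligned)
 * zeroes the invalid portions before exposing the page; with a local
 * filesystem such as UFS it may also mark the whole page valid:
 *
 *	vm_page_zero_invalid(m, TRUE);
 */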
/*
 *	vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	vm_page_bits_t bits;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	bits = vm_page_bits(base, size);
	return (m->valid != 0 && (m->valid & bits) == bits);
}

/*
 *	vm_page_ps_is_valid:
 *
 *	Returns TRUE if the entire (super)page is valid and FALSE otherwise.
 */
boolean_t
vm_page_ps_is_valid(vm_page_t m)
{
	int i, npages;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	npages = atop(pagesizes[m->psind]);

	/*
	 * The physically contiguous pages that make up a superpage, i.e., a
	 * page with a page size index ("psind") greater than zero, will
	 * occupy adjacent entries in vm_page_array[].
	 */
	for (i = 0; i < npages; i++) {
		if (m[i].valid != VM_PAGE_BITS_ALL)
			return (FALSE);
	}
	return (TRUE);
}

/*
 * Set the page's dirty bits if the page is modified.
 */
void
vm_page_test_dirty(vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
		vm_page_dirty(m);
}

void
vm_page_lock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
}

void
vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
{

	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
}

int
vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
{

	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
}

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void
vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
{

	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
}

void
vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
{

	mtx_assert_(vm_page_lockptr(m), a, file, line);
}
#endif

#ifdef INVARIANTS
void
vm_page_object_lock_assert(vm_page_t m)
{

	/*
	 * Certain of the page's fields may only be modified by the
	 * holder of the containing object's lock or the exclusive busy
	 * holder.  Unfortunately, the holder of the exclusive busy is
	 * not recorded, and thus cannot be checked here.
	 */
	if (m->object != NULL && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_WLOCKED(m->object);
}

void
vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
{

	if ((bits & PGA_WRITEABLE) == 0)
		return;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is
	 * managed, is exclusively busied or the object is locked.
	 * Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("PGA_WRITEABLE on unmanaged page"));
	if (!vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);
}
#endif
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count);
	db_printf("vm_cnt.v_cache_count: %d\n", vm_cnt.v_cache_count);
	db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count);
	db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count);
	db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count);
	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
	db_printf("vm_cnt.v_cache_min: %d\n", vm_cnt.v_cache_min);
	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int dom;

	db_printf("pq_free %d pq_cache %d\n",
	    vm_cnt.v_free_count, vm_cnt.v_cache_count);
	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf(
	    "dom %d page_cnt %d free %d pq_act %d pq_inact %d pass %d\n",
		    dom,
		    vm_dom[dom].vmd_page_count,
		    vm_dom[dom].vmd_free_count,
		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pass);
	}
}

DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
{
	vm_page_t m;
	boolean_t phys;

	if (!have_addr) {
		db_printf("show pginfo addr\n");
		return;
	}

	phys = strchr(modif, 'p') != NULL;
	if (phys)
		m = PHYS_TO_VM_PAGE(addr);
	else
		m = (vm_page_t)addr;
	db_printf(
	    "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
	    " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
	    m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
}
#endif /* DDB */
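/*
 * Illustrative DDB usage for the commands above (the addresses shown are
 * hypothetical):
 *
 *	db> show page
 *	db> show pageq
 *	db> show pginfo 0xfffff80002a3c000
 *	db> show pginfo/p 0x12345000	(treat the address as physical)
 */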