/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.125 1998/08/24 08:39:38 dfr Exp $
 */

/*
 * The proverbial page-out daemon.
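 *
 * It sleeps on vm_pages_needed and, once woken via pagedaemon_wakeup(),
 * runs vm_pageout_scan() to free clean pages and launder dirty ones
 * until the free and cache page counts reach their targets again.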
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm_lru = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

static int max_page_launder = 100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "");


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);
void pmap_collect(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (object->backing_object) {
		vm_object_collapse(object);
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if the page is NOT clean, wired, busy, held,
	 * or mapped into a buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
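			 * Note that page index 0 itself is still examined
			 * on this pass; clearing backward_okay only stops
			 * the scan from running past the start of the
			 * object.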
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	for (i = 0; i < count; i++) {
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
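 *
 * The walk below descends the backing_object chain, using
 * pmap_ts_referenced() to age each resident page's act_count, and
 * stops once the pmap's resident count drops to the desired target.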
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
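	 * Entries are simply visited in map order, so mappings at the low
	 * end of the address space are deactivated before any others.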
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			break;
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

void
vm_pageout_page_free(vm_page_t m)
{
	struct vnode *vp;
	vm_object_t object;

	object = m->object;
	object->ref_count++;

	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vp->v_usecount++;
		if (VSHOULDBUSY(vp))
			vbusy(vp);
	}

	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, addl_page_shortage, maxscan, pcount;
	int maxlaunder;
	int pages_freed;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	/*
	 * Start scanning the inactive queue for pages we can free.  We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	pages_freed = 0;
	addl_page_shortage = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;
	maxlaunder = (cnt.v_inactive_target > max_page_launder) ?
	    max_page_launder : cnt.v_inactive_target;

rescan0:
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queue_inactive);

	     (m != NULL) && (maxscan-- > 0) &&
	     ((cnt.v_cache_count + cnt.v_free_count) <
	      (cnt.v_cache_min + cnt.v_free_target));

	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
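		 * pmap_ts_referenced() reports how many mappings had the
		 * page's referenced bit set, clearing those bits as a side
		 * effect.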
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			m->dirty = VM_PAGE_BITS_ALL;
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			pages_freed++;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			pages_freed++;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			if ((object->type == OBJT_VNODE) &&
			    (object->flags & OBJ_DEAD) == 0) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the page
				 * back onto the end of the queue so that
				 * statistics are more correct.
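				 * The busy state must be rechecked because
				 * vget() can sleep, giving other consumers a
				 * chance to busy the page in the meantime.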
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */
	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage;
	if (page_shortage <= 0) {
		page_shortage = 0;
	}

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();
	/*
	 * We try to maintain some *really* free pages; this allows
	 * interrupt code to be guaranteed space.
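	 * Pages come from the PQ_CACHE queues; cache_rover steps through
	 * the cache queue colors with a PQ_PRIME2 stride so that no single
	 * color queue is drained preferentially.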
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover);
		if (!m)
			break;
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}


	/*
	 * Make sure that we have swap space -- if we are low on memory
	 * and swap, then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods of time when there is no paging, some statistics
 * accumulation still occurs.  This code helps the situation where paging
 * just starts to occur.
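 *
 * Unlike vm_pageout_scan(), nothing is freed or laundered here; idle
 * pages are at most deactivated after their reference bits have been
 * sampled.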
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}


/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
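	 *
	 * v_free_min and v_free_reserved are computed from the physical
	 * page count by vm_pageout_free_page_calc(); the free, cache, and
	 * inactive targets below are then scaled from those values.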
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int inactive_target;
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pager_sync();
		vm_pageout_scan();
		vm_pageout_deficit = 0;
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
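			 * (P_SYSTEM marks kernel-owned processes; P_WEXIT
			 * marks processes that are already exiting.)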
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out; set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_pindex_t)(limit >> PAGE_SHIFT));
			}
		}
	}
}
#endif