/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.87 1996/11/28 23:15:06 dyson Exp $
 */

/*
 * The proverbial page-out daemon.
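 *
 * It tries to keep the sum of the free and cache page counts above the
 * free and cache targets by scanning the inactive queue: clean pages
 * are freed or moved to the cache, dirty pages are laundered through
 * their pager, and, when a shortage remains, pages are aged out of the
 * active queue.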
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
    "pagedaemon",
    vm_pageout,
    &pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
    "vmdaemon",
    vm_daemon,
    &vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif

int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
int vm_pageout_algorithm_lru = 0;
#if defined(NO_SWAPPING)
int vm_swapping_enabled = 0;
#else
int vm_swapping_enabled = 1;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
    CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
    CTLFLAG_RD, &vm_swapping_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
    CTLFLAG_RW, &vm_swapping_enabled, 0, "");
#endif

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
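 *
 * The routine also tries to build a cluster around the target page:
 * mc[] holds up to 2*vm_pageout_page_count entries with the target
 * page at index vm_pageout_page_count, and the forward/backward scans
 * below fill in neighboring pages (by object index) that are dirty,
 * unwired, unheld and not busy.  The contiguous run
 * mc[page_base .. page_base + pageout_count - 1] is then handed to
 * vm_pageout_flush().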
 */
static int
vm_pageout_clean(m, sync)
    vm_page_t m;
    int sync;
{
    register vm_object_t object;
    vm_page_t mc[2*vm_pageout_page_count];
    int pageout_count;
    int i, forward_okay, backward_okay, page_base;
    vm_pindex_t pindex = m->pindex;

    object = m->object;

    /*
     * If not OBJT_SWAP, additional memory may be needed to do the pageout.
     * Try to avoid the deadlock.
     */
    if ((sync != VM_PAGEOUT_FORCE) &&
        (object->type == OBJT_DEFAULT) &&
        ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
        return 0;

    /*
     * Don't mess with the page if it's busy.
     */
    if ((!sync && m->hold_count != 0) ||
        ((m->busy != 0) || (m->flags & PG_BUSY)))
        return 0;

#if defined(OLD_COLLAPSE_CODE)
    /*
     * Try collapsing before it's too late.
     */
    if (!sync && object->backing_object) {
        vm_object_collapse(object);
    }
#endif

    mc[vm_pageout_page_count] = m;
    pageout_count = 1;
    page_base = vm_pageout_page_count;
    forward_okay = TRUE;
    if (pindex != 0)
        backward_okay = TRUE;
    else
        backward_okay = FALSE;
    /*
     * Scan the object for clusterable pages.
     *
     * We can cluster ONLY if the page is NOT clean, wired, busy,
     * held, or mapped into a buffer, and one of the following:
     * 1) The page is inactive, or a seldom used active page.
     * -or-
     * 2) we force the issue.
     */
    for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
        vm_page_t p;

        /*
         * See if forward page is clusterable.
         */
        if (forward_okay) {
            /*
             * Stop forward scan at end of object.
             */
            if ((pindex + i) > object->size) {
                forward_okay = FALSE;
                goto do_backward;
            }
            p = vm_page_lookup(object, pindex + i);
            if (p) {
                if (((p->queue - p->pc) == PQ_CACHE) ||
                    (p->flags & PG_BUSY) || p->busy) {
                    forward_okay = FALSE;
                    goto do_backward;
                }
                vm_page_test_dirty(p);
                if ((p->dirty & p->valid) != 0 &&
                    ((p->queue == PQ_INACTIVE) ||
                     (sync == VM_PAGEOUT_FORCE)) &&
                    (p->wire_count == 0) &&
                    (p->hold_count == 0)) {
                    mc[vm_pageout_page_count + i] = p;
                    pageout_count++;
                    if (pageout_count == vm_pageout_page_count)
                        break;
                } else {
                    forward_okay = FALSE;
                }
            } else {
                forward_okay = FALSE;
            }
        }
do_backward:
        /*
         * See if backward page is clusterable.
         */
        if (backward_okay) {
            /*
             * Stop backward scan at beginning of object.
             */
            if ((pindex - i) == 0) {
                backward_okay = FALSE;
            }
            p = vm_page_lookup(object, pindex - i);
            if (p) {
                if (((p->queue - p->pc) == PQ_CACHE) ||
                    (p->flags & PG_BUSY) || p->busy) {
                    backward_okay = FALSE;
                    continue;
                }
                vm_page_test_dirty(p);
                if ((p->dirty & p->valid) != 0 &&
                    ((p->queue == PQ_INACTIVE) ||
                     (sync == VM_PAGEOUT_FORCE)) &&
                    (p->wire_count == 0) &&
                    (p->hold_count == 0)) {
                    mc[vm_pageout_page_count - i] = p;
                    pageout_count++;
                    page_base--;
                    if (pageout_count == vm_pageout_page_count)
                        break;
                } else {
                    backward_okay = FALSE;
                }
            } else {
                backward_okay = FALSE;
            }
        }
    }

    /*
     * we allow reads during pageouts...
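     * Each page in the cluster is marked PG_BUSY, so that faults on it
     * block until the pageout completes, and its mappings are
     * downgraded to read-only so the page cannot be re-dirtied while
     * the write is in flight.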
     */
    for (i = page_base; i < (page_base + pageout_count); i++) {
        mc[i]->flags |= PG_BUSY;
        vm_page_protect(mc[i], VM_PROT_READ);
    }

    return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}

int
vm_pageout_flush(mc, count, sync)
    vm_page_t *mc;
    int count;
    int sync;
{
    register vm_object_t object;
    int pageout_status[count];
    int anyok = 0;
    int i;

    object = mc[0]->object;
    object->paging_in_progress += count;

    vm_pager_put_pages(object, mc, count,
        ((sync || (object == kernel_object)) ? TRUE : FALSE),
        pageout_status);

    for (i = 0; i < count; i++) {
        vm_page_t mt = mc[i];

        switch (pageout_status[i]) {
        case VM_PAGER_OK:
            ++anyok;
            break;
        case VM_PAGER_PEND:
            ++anyok;
            break;
        case VM_PAGER_BAD:
            /*
             * Page outside of range of object.  Right now we
             * essentially lose the changes by pretending it
             * worked.
             */
            pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
            mt->dirty = 0;
            break;
        case VM_PAGER_ERROR:
        case VM_PAGER_FAIL:
            /*
             * If the page couldn't be paged out, then reactivate
             * it so that it doesn't clog the inactive list.  (We
             * will try paging it out again later.)
             */
            if (mt->queue == PQ_INACTIVE)
                vm_page_activate(mt);
            break;
        case VM_PAGER_AGAIN:
            break;
        }

        /*
         * If the operation is still going, leave the page busy to
         * block all other accesses.  Also, leave the paging in
         * progress indicator set so that we don't attempt an object
         * collapse.
         */
        if (pageout_status[i] != VM_PAGER_PEND) {
            vm_object_pip_wakeup(object);
            PAGE_WAKEUP(mt);
        }
    }
    return anyok;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
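 *
 * The scan walks the object and its backing objects, stopping as soon
 * as the pmap's resident page count has dropped to "desired".
 * Referenced pages are kept active and have their act_count credited;
 * unreferenced active pages are aged by ACT_DECLINE and deactivated
 * once their act_count reaches zero (unless remove_mode is set, in
 * which case they are only requeued).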
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
    vm_map_t map;
    vm_object_t object;
    vm_pindex_t desired;
    int map_remove_only;
{
    register vm_page_t p, next;
    int rcount;
    int remove_mode;
    int s;

    if (object->type == OBJT_DEVICE)
        return;

    while (object) {
        if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
            return;
        if (object->paging_in_progress)
            return;

        remove_mode = map_remove_only;
        if (object->shadow_count > 1)
            remove_mode = 1;
        /*
         * scan the object's entire memory queue
         */
        rcount = object->resident_page_count;
        p = TAILQ_FIRST(&object->memq);
        while (p && (rcount-- > 0)) {
            int refcount;

            if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
                return;
            next = TAILQ_NEXT(p, listq);
            cnt.v_pdpages++;
            if (p->wire_count != 0 ||
                p->hold_count != 0 ||
                p->busy != 0 ||
                (p->flags & PG_BUSY) ||
                !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
                p = next;
                continue;
            }

            refcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
            if (refcount) {
                p->flags |= PG_REFERENCED;
            } else if (p->flags & PG_REFERENCED) {
                refcount = 1;
            }

            if ((p->queue != PQ_ACTIVE) &&
                (p->flags & PG_REFERENCED)) {
                vm_page_activate(p);
                p->act_count += refcount;
                p->flags &= ~PG_REFERENCED;
            } else if (p->queue == PQ_ACTIVE) {
                if ((p->flags & PG_REFERENCED) == 0) {
                    p->act_count -= min(p->act_count, ACT_DECLINE);
                    if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
                        vm_page_protect(p, VM_PROT_NONE);
                        vm_page_deactivate(p);
                    } else {
                        s = splvm();
                        TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                        splx(s);
                    }
                } else {
                    p->flags &= ~PG_REFERENCED;
                    if (p->act_count < (ACT_MAX - ACT_ADVANCE))
                        p->act_count += ACT_ADVANCE;
                    s = splvm();
                    TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                    TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                    splx(s);
                }
            } else if (p->queue == PQ_INACTIVE) {
                vm_page_protect(p, VM_PROT_NONE);
            }
            p = next;
        }
        object = object->backing_object;
    }
    return;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
    vm_map_t map;
    vm_pindex_t desired;
{
    vm_map_entry_t tmpe;
    vm_object_t obj, bigobj;

    vm_map_reference(map);
    if (!lock_try_write(&map->lock)) {
        vm_map_deallocate(map);
        return;
    }

    bigobj = NULL;

    /*
     * first, search out the biggest object, and try to free pages from
     * that.
     */
    tmpe = map->header.next;
    while (tmpe != &map->header) {
        if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
            obj = tmpe->object.vm_object;
            if ((obj != NULL) && (obj->shadow_count <= 1) &&
                ((bigobj == NULL) ||
                 (bigobj->resident_page_count < obj->resident_page_count))) {
                bigobj = obj;
            }
        }
        tmpe = tmpe->next;
    }

    if (bigobj)
        vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

    /*
     * Next, hunt around for other pages to deactivate.  We actually
     * do this search sort of wrong -- .text first is not the best idea.
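     * (The map entries are walked in address order, so low-address
     * regions, typically text, are deactivated before data and stack.)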
     */
    tmpe = map->header.next;
    while (tmpe != &map->header) {
        if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
            break;
        if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
            obj = tmpe->object.vm_object;
            if (obj)
                vm_pageout_object_deactivate_pages(map, obj, desired, 0);
        }
        tmpe = tmpe->next;
    }

    /*
     * Remove all mappings if a process is swapped out; this will free
     * page table pages.
     */
    if (desired == 0)
        pmap_remove(vm_map_pmap(map),
            VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
    vm_map_unlock(map);
    vm_map_deallocate(map);
    return;
}
#endif

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
    vm_page_t m, next;
    int page_shortage, addl_page_shortage, maxscan, maxlaunder, pcount;
    int pages_freed;
    struct proc *p, *bigproc;
    vm_offset_t size, bigsize;
    vm_object_t object;
    int force_wakeup = 0;
    int vnodes_skipped = 0;
    int s;

    /*
     * Start scanning the inactive queue for pages we can free.  We keep
     * scanning until we have enough free pages or we have scanned through
     * the entire queue.  If we encounter dirty pages, we start cleaning
     * them.
     */

    pages_freed = 0;
    addl_page_shortage = 0;

    maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
        MAXLAUNDER : cnt.v_inactive_target;
rescan0:
    maxscan = cnt.v_inactive_count;
    for (m = TAILQ_FIRST(&vm_page_queue_inactive);
        (m != NULL) && (maxscan-- > 0) &&
            ((cnt.v_cache_count + cnt.v_free_count) <
            (cnt.v_cache_min + cnt.v_free_target));
        m = next) {

        cnt.v_pdpages++;

        if (m->queue != PQ_INACTIVE) {
            goto rescan0;
        }

        next = TAILQ_NEXT(m, pageq);

        if (m->hold_count) {
            s = splvm();
            TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
            splx(s);
            addl_page_shortage++;
            continue;
        }
        /*
         * Don't mess with busy pages; leave them at the front of the
         * queue, since they are most likely being paged out.
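         * They are counted in addl_page_shortage so that the deficit
         * is made up from the active queue instead.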
         */
        if (m->busy || (m->flags & PG_BUSY)) {
            addl_page_shortage++;
            continue;
        }

        if (m->object->ref_count == 0) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
        } else if (((m->flags & PG_REFERENCED) == 0) &&
            pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
            vm_page_activate(m);
            continue;
        }

        if ((m->flags & PG_REFERENCED) != 0) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
            vm_page_activate(m);
            continue;
        }

        if (m->dirty == 0) {
            vm_page_test_dirty(m);
        } else if (m->dirty != 0) {
            m->dirty = VM_PAGE_BITS_ALL;
        }

        if (m->valid == 0) {
            vm_page_protect(m, VM_PROT_NONE);
            vm_page_free(m);
            cnt.v_dfree++;
            ++pages_freed;
        } else if (m->dirty == 0) {
            vm_page_cache(m);
            ++pages_freed;
        } else if (maxlaunder > 0) {
            int written;
            struct vnode *vp = NULL;

            object = m->object;
            if (object->flags & OBJ_DEAD) {
                s = splvm();
                TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
                TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
                splx(s);
                continue;
            }

            if (object->type == OBJT_VNODE) {
                vp = object->handle;
                if (VOP_ISLOCKED(vp) || vget(vp, 1)) {
                    if ((m->queue == PQ_INACTIVE) &&
                        (m->hold_count == 0) &&
                        (m->busy == 0) &&
                        (m->flags & PG_BUSY) == 0) {
                        s = splvm();
                        TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
                        splx(s);
                    }
                    if (object->flags & OBJ_MIGHTBEDIRTY)
                        ++vnodes_skipped;
                    continue;
                }

                /*
                 * The page might have been moved to another queue
                 * during potential blocking in vget() above.
                 */
                if (m->queue != PQ_INACTIVE) {
                    if (object->flags & OBJ_MIGHTBEDIRTY)
                        ++vnodes_skipped;
                    vput(vp);
                    continue;
                }

                /*
                 * The page may have been busied during the blocking in
                 * vget().  We don't move the page back onto the end of
                 * the queue, so that the statistics stay more correct.
                 */
                if (m->busy || (m->flags & PG_BUSY)) {
                    vput(vp);
                    continue;
                }

                /*
                 * If the page has become held, then skip it.
                 */
                if (m->hold_count) {
                    s = splvm();
                    TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
                    TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
                    splx(s);
                    if (object->flags & OBJ_MIGHTBEDIRTY)
                        ++vnodes_skipped;
                    vput(vp);
                    continue;
                }
            }

            /*
             * If a page is dirty, then it is either being washed
             * (but not yet cleaned) or it is still in the
             * laundry.  If it is still in the laundry, then we
             * start the cleaning operation.
             */
            written = vm_pageout_clean(m, 0);

            if (vp)
                vput(vp);

            maxlaunder -= written;
        }
    }

    /*
     * Compute the page shortage.  If we are still very low on memory,
     * be sure that we will move a minimal amount of pages from active
     * to inactive.
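     *
     * The shortage is the amount by which the inactive and cache
     * targets exceed what is currently free, inactive, or cached:
     *
     *   page_shortage = (v_inactive_target + v_cache_min) -
     *       (v_free_count + v_inactive_count + v_cache_count)
     *
     * plus addl_page_shortage for the busy and held inactive pages
     * that were skipped above.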
     */

    page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
        (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
    if (page_shortage <= 0) {
        if (pages_freed == 0) {
            page_shortage = cnt.v_free_min - cnt.v_free_count;
        } else {
            page_shortage = 1;
        }
    }
    if (addl_page_shortage) {
        if (page_shortage < 0)
            page_shortage = 0;
        page_shortage += addl_page_shortage;
    }

    pcount = cnt.v_active_count;
    m = TAILQ_FIRST(&vm_page_queue_active);
    while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
        int refcount;

        if (m->queue != PQ_ACTIVE) {
            break;
        }

        next = TAILQ_NEXT(m, pageq);
        /*
         * Don't deactivate pages that are busy.
         */
        if ((m->busy != 0) ||
            (m->flags & PG_BUSY) ||
            (m->hold_count != 0)) {
            s = splvm();
            TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
            splx(s);
            m = next;
            continue;
        }

        /*
         * The count for pagedaemon pages is done after checking the
         * page for eligibility...
         */
        cnt.v_pdpages++;

        refcount = 0;
        if (m->object->ref_count != 0) {
            if (m->flags & PG_REFERENCED) {
                refcount += 1;
            }
            refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
            if (refcount) {
                m->act_count += ACT_ADVANCE + refcount;
                if (m->act_count > ACT_MAX)
                    m->act_count = ACT_MAX;
            }
        }

        m->flags &= ~PG_REFERENCED;

        if (refcount && (m->object->ref_count != 0)) {
            s = splvm();
            TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
            splx(s);
        } else {
            m->act_count -= min(m->act_count, ACT_DECLINE);
            if (vm_pageout_algorithm_lru ||
                (m->object->ref_count == 0) || (m->act_count == 0)) {
                --page_shortage;
                vm_page_protect(m, VM_PROT_NONE);
                if ((m->dirty == 0) &&
                    (m->object->ref_count == 0)) {
                    vm_page_cache(m);
                } else {
                    vm_page_deactivate(m);
                }
            } else {
                s = splvm();
                TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
                TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
                splx(s);
            }
        }
        m = next;
    }

    s = splvm();
    /*
     * We try to maintain some *really* free pages, so that interrupt
     * code is guaranteed space.
     */
    while (cnt.v_free_count < cnt.v_free_reserved) {
        static int cache_rover = 0;

        m = vm_page_list_find(PQ_CACHE, cache_rover);
        if (!m)
            break;
        cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
        vm_page_free(m);
        cnt.v_dfree++;
    }
    splx(s);

    /*
     * If we didn't get enough free pages, and we have skipped a vnode
     * in a writeable object, wakeup the sync daemon.  And kick swapout
     * if we did not get enough free pages.
     */
    if ((cnt.v_cache_count + cnt.v_free_count) <
        (cnt.v_free_target + cnt.v_cache_min)) {
        if (vnodes_skipped &&
            (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
            if (!vfs_update_wakeup) {
                vfs_update_wakeup = 1;
                wakeup(&vfs_update_wakeup);
            }
        }
#if !defined(NO_SWAPPING)
        if (vm_swapping_enabled &&
            (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
            vm_req_vmdaemon();
            vm_pageout_req_swapout = 1;
        }
#endif
    }

    /*
     * Make sure that we have swap space -- if we are low on memory and
     * swap, then kill the biggest process.
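     * The victim is the runnable or sleeping process with the largest
     * resident set; system processes, init, and (while any swap space
     * remains) pids below 48 are exempt.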
     */
    if ((vm_swap_size == 0 || swap_pager_full) &&
        ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
        bigproc = NULL;
        bigsize = 0;
        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
            /*
             * if this is a system process, skip it
             */
            if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
                ((p->p_pid < 48) && (vm_swap_size != 0))) {
                continue;
            }
            /*
             * if the process is in a non-running type state,
             * don't touch it.
             */
            if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                continue;
            }
            /*
             * get the process size
             */
            size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
            /*
             * if this process is bigger than the biggest one,
             * remember it.
             */
            if (size > bigsize) {
                bigproc = p;
                bigsize = size;
            }
        }
        if (bigproc != NULL) {
            killproc(bigproc, "out of swap space");
            bigproc->p_estcpu = 0;
            bigproc->p_nice = PRIO_MIN;
            resetpriority(bigproc);
            wakeup(&cnt.v_free_count);
        }
    }
    return force_wakeup;
}

static int
vm_pageout_free_page_calc(count)
    vm_size_t count;
{
    if (count < cnt.v_page_count)
        return 0;
    /*
     * free_reserved needs to include enough for the largest swap pager
     * structures plus enough for any pv_entry structs when paging.
     */
    if (cnt.v_page_count > 1024)
        cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
    else
        cnt.v_free_min = 4;
    cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
        cnt.v_interrupt_free_min;
    cnt.v_free_reserved = vm_pageout_page_count +
        cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
    cnt.v_free_min += cnt.v_free_reserved;
    return 1;
}

#ifdef unused
int
vm_pageout_free_pages(object, add)
    vm_object_t object;
    int add;
{
    return vm_pageout_free_page_calc(object->size);
}
#endif

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
    (void) spl0();

    /*
     * Initialize some paging parameters.
     */

    cnt.v_interrupt_free_min = 2;
    if (cnt.v_page_count < 2000)
        vm_pageout_page_count = 8;

    vm_pageout_free_page_calc(cnt.v_page_count);
    /*
     * free_reserved needs to include enough for the largest swap pager
     * structures plus enough for any pv_entry structs when paging.
     */
    cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;

    if (cnt.v_free_count > 1024) {
        cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
        cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
        cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
    } else {
        cnt.v_cache_min = 0;
        cnt.v_cache_max = 0;
        cnt.v_inactive_target = cnt.v_free_count / 4;
    }

    /* XXX does not really belong here */
    if (vm_page_max_wired == 0)
        vm_page_max_wired = cnt.v_free_count / 3;

    swap_pager_swap_init();
    /*
     * The pageout daemon is never done, so loop forever.
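     *
     * Each iteration sleeps on vm_pages_needed (skipping the sleep when
     * a wakeup is already pending and free + cache memory is at or
     * below v_free_min), recomputes v_inactive_target from the number
     * of unwired pages, and then runs vm_pageout_scan() bracketed by
     * vm_pager_sync() calls.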
     */
    while (TRUE) {
        int inactive_target;
        int s = splvm();

        if (!vm_pages_needed ||
            ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
            vm_pages_needed = 0;
            tsleep(&vm_pages_needed, PVM, "psleep", 0);
        } else if (!vm_pages_needed) {
            tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
        }
        inactive_target =
            (cnt.v_page_count - cnt.v_wire_count) / 4;
        if (inactive_target < 2*cnt.v_free_min)
            inactive_target = 2*cnt.v_free_min;
        cnt.v_inactive_target = inactive_target;
        if (vm_pages_needed)
            cnt.v_pdwakeups++;
        vm_pages_needed = 0;
        splx(s);
        vm_pager_sync();
        vm_pageout_scan();
        vm_pager_sync();
        wakeup(&cnt.v_free_count);
    }
}

void
pagedaemon_wakeup()
{
    if (!vm_pages_needed && curproc != pageproc) {
        vm_pages_needed++;
        wakeup(&vm_pages_needed);
    }
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
    static int lastrun = 0;

    if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
        wakeup(&vm_daemon_needed);
        lastrun = ticks;
    }
}

static void
vm_daemon()
{
    vm_object_t object;
    struct proc *p;

    (void) spl0();

    while (TRUE) {
        tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
        if (vm_pageout_req_swapout) {
            swapout_procs();
            vm_pageout_req_swapout = 0;
        }
        /*
         * Scan the processes for exceeding their rlimits, or if a
         * process is swapped out -- deactivate pages.
         */

        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
            quad_t limit;
            vm_offset_t size;

            /*
             * if this is a system process or if we have already
             * looked at this process, skip it.
             */
            if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
                continue;
            }
            /*
             * if the process is in a non-running type state,
             * don't touch it.
             */
            if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                continue;
            }
            /*
             * get a limit
             */
            limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
                p->p_rlimit[RLIMIT_RSS].rlim_max);

            /*
             * Let processes that are swapped out really be
             * swapped out: set the limit to nothing (this will
             * force a swap-out).
             */
            if ((p->p_flag & P_INMEM) == 0)
                limit = 0;	/* XXX */

            size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
            if (limit >= 0 && size >= limit) {
                vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
                    (vm_pindex_t)(limit >> PAGE_SHIFT));
            }
        }

        /*
         * We remove cached objects that have no RSS...
         */
restart:
        object = TAILQ_FIRST(&vm_object_cached_list);
        while (object) {
            /*
             * if there are no resident pages -- get rid of the object
             */
            if (object->resident_page_count == 0) {
                vm_object_reference(object);
                pager_cache(object, FALSE);
                goto restart;
            }
            object = TAILQ_NEXT(object, cached_list);
        }
    }
}
#endif