/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.95 1997/02/22 09:48:33 peter Exp $
 */

/*
 * The proverbial page-out daemon.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
    "pagedaemon",
    vm_pageout,
    &pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
    "vmdaemon",
    vm_daemon,
    &vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
int vm_pageout_algorithm_lru = 0;
#if defined(NO_SWAPPING)
int vm_swapping_enabled = 0;
#else
int vm_swapping_enabled = 1;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
    CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
    CTLFLAG_RD, &vm_swapping_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
    CTLFLAG_RW, &vm_swapping_enabled, 0, "");
#endif

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
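/*
 * Added explanatory note on the clustering buffer used below: mc[] has
 * room for 2 * vm_pageout_page_count entries and the target page is
 * placed at its midpoint (index vm_pageout_page_count).  Clusterable
 * pages that follow the target in the object fill the slots above the
 * midpoint, pages that precede it fill the slots below, and page_base
 * tracks the lowest occupied slot.  The contiguous run
 * mc[page_base .. page_base + pageout_count - 1] is what finally gets
 * handed to vm_pageout_flush().
 */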
static int
vm_pageout_clean(m, sync)
    vm_page_t m;
    int sync;
{
    register vm_object_t object;
    vm_page_t mc[2*vm_pageout_page_count];
    int pageout_count;
    int i, forward_okay, backward_okay, page_base;
    vm_pindex_t pindex = m->pindex;

    object = m->object;

    /*
     * If not OBJT_SWAP, additional memory may be needed to do the pageout.
     * Try to avoid the deadlock.
     */
    if ((sync != VM_PAGEOUT_FORCE) &&
        (object->type == OBJT_DEFAULT) &&
        ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
        return 0;

    /*
     * Don't mess with the page if it's busy.
     */
    if ((!sync && m->hold_count != 0) ||
        ((m->busy != 0) || (m->flags & PG_BUSY)))
        return 0;

    /*
     * Try collapsing before it's too late.
     */
    if (!sync && object->backing_object) {
        vm_object_collapse(object);
    }

    mc[vm_pageout_page_count] = m;
    pageout_count = 1;
    page_base = vm_pageout_page_count;
    forward_okay = TRUE;
    if (pindex != 0)
        backward_okay = TRUE;
    else
        backward_okay = FALSE;
    /*
     * Scan object for clusterable pages.
     *
     * We can cluster ONLY if: ->> the page is NOT
     * clean, wired, busy, held, or mapped into a
     * buffer, and one of the following:
     * 1) The page is inactive, or a seldom used
     *    active page.
     * -or-
     * 2) we force the issue.
     */
    for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
        vm_page_t p;

        /*
         * See if forward page is clusterable.
         */
        if (forward_okay) {
            /*
             * Stop forward scan at end of object.
             */
            if ((pindex + i) > object->size) {
                forward_okay = FALSE;
                goto do_backward;
            }
            p = vm_page_lookup(object, pindex + i);
            if (p) {
                if (((p->queue - p->pc) == PQ_CACHE) ||
                    (p->flags & PG_BUSY) || p->busy) {
                    forward_okay = FALSE;
                    goto do_backward;
                }
                vm_page_test_dirty(p);
                if ((p->dirty & p->valid) != 0 &&
                    ((p->queue == PQ_INACTIVE) ||
                     (sync == VM_PAGEOUT_FORCE)) &&
                    (p->wire_count == 0) &&
                    (p->hold_count == 0)) {
                    mc[vm_pageout_page_count + i] = p;
                    pageout_count++;
                    if (pageout_count == vm_pageout_page_count)
                        break;
                } else {
                    forward_okay = FALSE;
                }
            } else {
                forward_okay = FALSE;
            }
        }
do_backward:
        /*
         * See if backward page is clusterable.
         */
        if (backward_okay) {
            /*
             * Stop backward scan at beginning of object.
             */
            if ((pindex - i) == 0) {
                backward_okay = FALSE;
            }
            p = vm_page_lookup(object, pindex - i);
            if (p) {
                if (((p->queue - p->pc) == PQ_CACHE) ||
                    (p->flags & PG_BUSY) || p->busy) {
                    backward_okay = FALSE;
                    continue;
                }
                vm_page_test_dirty(p);
                if ((p->dirty & p->valid) != 0 &&
                    ((p->queue == PQ_INACTIVE) ||
                     (sync == VM_PAGEOUT_FORCE)) &&
                    (p->wire_count == 0) &&
                    (p->hold_count == 0)) {
                    mc[vm_pageout_page_count - i] = p;
                    pageout_count++;
                    page_base--;
                    if (pageout_count == vm_pageout_page_count)
                        break;
                } else {
                    backward_okay = FALSE;
                }
            } else {
                backward_okay = FALSE;
            }
        }
    }

    /*
     * we allow reads during pageouts...
     */
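    /*
     * Added note: each page in the cluster is marked busy and has its
     * mappings downgraded to read-only before being handed to the pager.
     * (Presumably write permission is revoked so that any store issued
     * while the pageout is in flight faults and re-dirties the page
     * instead of being lost.)
     */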
    for (i = page_base; i < (page_base + pageout_count); i++) {
        mc[i]->flags |= PG_BUSY;
        vm_page_protect(mc[i], VM_PROT_READ);
    }

    return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}

int
vm_pageout_flush(mc, count, sync)
    vm_page_t *mc;
    int count;
    int sync;
{
    register vm_object_t object;
    int pageout_status[count];
    int anyok = 0;
    int i;

    object = mc[0]->object;
    object->paging_in_progress += count;

    vm_pager_put_pages(object, mc, count,
        ((sync || (object == kernel_object)) ? TRUE : FALSE),
        pageout_status);

    for (i = 0; i < count; i++) {
        vm_page_t mt = mc[i];

        switch (pageout_status[i]) {
        case VM_PAGER_OK:
            ++anyok;
            break;
        case VM_PAGER_PEND:
            ++anyok;
            break;
        case VM_PAGER_BAD:
            /*
             * Page outside of range of object.  Right now we
             * essentially lose the changes by pretending it
             * worked.
             */
            pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
            mt->dirty = 0;
            break;
        case VM_PAGER_ERROR:
        case VM_PAGER_FAIL:
            /*
             * If the page couldn't be paged out, then reactivate
             * it so it doesn't clog the inactive list.  (We will
             * try paging it out again later.)
             */
            if (mt->queue == PQ_INACTIVE)
                vm_page_activate(mt);
            break;
        case VM_PAGER_AGAIN:
            break;
        }

        /*
         * If the operation is still going, leave the page busy to
         * block all other accesses.  Also, leave the paging in
         * progress indicator set so that we don't attempt an object
         * collapse.
         */
        if (pageout_status[i] != VM_PAGER_PEND) {
            vm_object_pip_wakeup(object);
            PAGE_WAKEUP(mt);
        }
    }
    return anyok;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
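/*
 * Added note on the policy implemented below: the loop walks the object
 * and then each of its backing objects, stopping as soon as the pmap's
 * resident count has dropped to the desired value.  Referenced active
 * pages gain ACT_ADVANCE (never exceeding ACT_MAX); unreferenced ones
 * lose ACT_DECLINE and, once act_count reaches zero (or immediately
 * under the pure-LRU policy), are unmapped and deactivated -- unless
 * remove_mode is set, in which case they are merely requeued.  Pages
 * already on the inactive queue just have their mappings removed.
 */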
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
    vm_map_t map;
    vm_object_t object;
    vm_pindex_t desired;
    int map_remove_only;
{
    register vm_page_t p, next;
    int rcount;
    int remove_mode;
    int s;

    if (object->type == OBJT_DEVICE)
        return;

    while (object) {
        if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
            return;
        if (object->paging_in_progress)
            return;

        remove_mode = map_remove_only;
        if (object->shadow_count > 1)
            remove_mode = 1;
        /*
         * scan the object's entire memory queue
         */
        rcount = object->resident_page_count;
        p = TAILQ_FIRST(&object->memq);
        while (p && (rcount-- > 0)) {
            int refcount;
            if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
                return;
            next = TAILQ_NEXT(p, listq);
            cnt.v_pdpages++;
            if (p->wire_count != 0 ||
                p->hold_count != 0 ||
                p->busy != 0 ||
                (p->flags & PG_BUSY) ||
                !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
                p = next;
                continue;
            }

            refcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
            if (refcount) {
                p->flags |= PG_REFERENCED;
            } else if (p->flags & PG_REFERENCED) {
                refcount = 1;
            }

            if ((p->queue != PQ_ACTIVE) &&
                (p->flags & PG_REFERENCED)) {
                vm_page_activate(p);
                p->act_count += refcount;
                p->flags &= ~PG_REFERENCED;
            } else if (p->queue == PQ_ACTIVE) {
                if ((p->flags & PG_REFERENCED) == 0) {
                    p->act_count -= min(p->act_count, ACT_DECLINE);
                    if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
                        vm_page_protect(p, VM_PROT_NONE);
                        vm_page_deactivate(p);
                    } else {
                        s = splvm();
                        TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                        splx(s);
                    }
                } else {
                    p->flags &= ~PG_REFERENCED;
                    if (p->act_count < (ACT_MAX - ACT_ADVANCE))
                        p->act_count += ACT_ADVANCE;
                    s = splvm();
                    TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                    TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                    splx(s);
                }
            } else if (p->queue == PQ_INACTIVE) {
                vm_page_protect(p, VM_PROT_NONE);
            }
            p = next;
        }
        object = object->backing_object;
    }
    return;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
    vm_map_t map;
    vm_pindex_t desired;
{
    vm_map_entry_t tmpe;
    vm_object_t obj, bigobj;

    vm_map_reference(map);
    if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
        vm_map_deallocate(map);
        return;
    }

    bigobj = NULL;

    /*
     * first, search out the biggest object, and try to free pages from
     * that.
     */
    tmpe = map->header.next;
    while (tmpe != &map->header) {
        if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
            obj = tmpe->object.vm_object;
            if ((obj != NULL) && (obj->shadow_count <= 1) &&
                ((bigobj == NULL) ||
                 (bigobj->resident_page_count < obj->resident_page_count))) {
                bigobj = obj;
            }
        }
        tmpe = tmpe->next;
    }

    if (bigobj)
        vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

    /*
     * Next, hunt around for other pages to deactivate.  We actually
     * do this search sort of wrong -- .text first is not the best idea.
     */
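    /*
     * Added note: this second pass simply visits every remaining
     * top-level map entry in address order and deactivates from each
     * object in turn, stopping early once the pmap's resident count has
     * fallen to the desired target.
     */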
    tmpe = map->header.next;
    while (tmpe != &map->header) {
        if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
            break;
        if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
            obj = tmpe->object.vm_object;
            if (obj)
                vm_pageout_object_deactivate_pages(map, obj, desired, 0);
        }
        tmpe = tmpe->next;
    }

    /*
     * Remove all mappings if a process is swapped out; this will free
     * page table pages.
     */
    if (desired == 0)
        pmap_remove(vm_map_pmap(map),
            VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
    vm_map_unlock(map);
    vm_map_deallocate(map);
    return;
}
#endif

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
    vm_page_t m, next;
    int page_shortage, addl_page_shortage, maxscan, maxlaunder, pcount;
    int pages_freed;
    struct proc *p, *bigproc;
    vm_offset_t size, bigsize;
    vm_object_t object;
    int force_wakeup = 0;
    int vnodes_skipped = 0;
    int s;

    /*
     * Start scanning the inactive queue for pages we can free.  We keep
     * scanning until we have enough free pages or we have scanned through
     * the entire queue.  If we encounter dirty pages, we start cleaning
     * them.
     */

    pages_freed = 0;
    addl_page_shortage = 0;

    maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
        MAXLAUNDER : cnt.v_inactive_target;
rescan0:
    maxscan = cnt.v_inactive_count;
    for (m = TAILQ_FIRST(&vm_page_queue_inactive);
        (m != NULL) && (maxscan-- > 0) &&
        ((cnt.v_cache_count + cnt.v_free_count) <
         (cnt.v_cache_min + cnt.v_free_target));
        m = next) {

        cnt.v_pdpages++;

        if (m->queue != PQ_INACTIVE) {
            goto rescan0;
        }

        next = TAILQ_NEXT(m, pageq);

        if (m->hold_count) {
            s = splvm();
            TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
            splx(s);
            addl_page_shortage++;
            continue;
        }
        /*
         * Don't mess with busy pages; keep them at the front of the
         * queue, they are most likely being paged out.
         */
        if (m->busy || (m->flags & PG_BUSY)) {
            addl_page_shortage++;
            continue;
        }

        if (m->object->ref_count == 0) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
        } else if (((m->flags & PG_REFERENCED) == 0) &&
            pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
            vm_page_activate(m);
            continue;
        }

        if ((m->flags & PG_REFERENCED) != 0) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
            vm_page_activate(m);
            continue;
        }

        if (m->dirty == 0) {
            vm_page_test_dirty(m);
        } else if (m->dirty != 0) {
            m->dirty = VM_PAGE_BITS_ALL;
        }

        if (m->valid == 0) {
            vm_page_protect(m, VM_PROT_NONE);
            vm_page_free(m);
            cnt.v_dfree++;
            ++pages_freed;
        } else if (m->dirty == 0) {
            vm_page_cache(m);
            ++pages_freed;
        } else if (maxlaunder > 0) {
            int written;
            struct vnode *vp = NULL;

            object = m->object;
            if (object->flags & OBJ_DEAD) {
                s = splvm();
                TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
                TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
                splx(s);
                continue;
            }

            if (object->type == OBJT_VNODE) {
                vp = object->handle;
                if (VOP_ISLOCKED(vp) ||
                    vget(vp, LK_EXCLUSIVE, curproc)) {
                    if ((m->queue == PQ_INACTIVE) &&
                        (m->hold_count == 0) &&
                        (m->busy == 0) &&
                        (m->flags & PG_BUSY) == 0) {
                        s = splvm();
                        TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
                        splx(s);
                    }
                    if (object->flags & OBJ_MIGHTBEDIRTY)
                        ++vnodes_skipped;
                    continue;
                }

                /*
                 * The page might have been moved to another queue
                 * during potential blocking in vget() above.
                 */
                if (m->queue != PQ_INACTIVE) {
                    if (object->flags & OBJ_MIGHTBEDIRTY)
                        ++vnodes_skipped;
                    vput(vp);
                    continue;
                }

                /*
                 * The page may have been busied while we were blocked
                 * in vget().  We don't move the page back onto the end
                 * of the queue, so that the statistics are more
                 * correct.
                 */
                if (m->busy || (m->flags & PG_BUSY)) {
                    vput(vp);
                    continue;
                }

                /*
                 * If the page has become held, then skip it
                 */
                if (m->hold_count) {
                    s = splvm();
                    TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
                    TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
                    splx(s);
                    if (object->flags & OBJ_MIGHTBEDIRTY)
                        ++vnodes_skipped;
                    vput(vp);
                    continue;
                }
            }

            /*
             * If a page is dirty, then it is either being washed
             * (but not yet cleaned) or it is still in the
             * laundry.  If it is still in the laundry, then we
             * start the cleaning operation.
             */
            written = vm_pageout_clean(m, 0);

            if (vp)
                vput(vp);

            maxlaunder -= written;
        }
    }

    /*
     * Compute the page shortage.  If we are still very low on memory be
     * sure that we will move a minimal amount of pages from active to
     * inactive.
     */
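    /*
     * Added illustrative example (hypothetical numbers): with an
     * inactive target of 512 pages, a cache minimum of 256, and 100
     * free, 400 inactive and 200 cached pages on hand, the shortage
     * below works out to (512 + 256) - (100 + 400 + 200) = 68 pages,
     * which the active-queue scan that follows will try to deactivate.
     */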
    page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
        (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
    if (page_shortage <= 0) {
        if (pages_freed == 0) {
            page_shortage = cnt.v_free_min - cnt.v_free_count;
        } else {
            page_shortage = 1;
        }
    }
    if (addl_page_shortage) {
        if (page_shortage < 0)
            page_shortage = 0;
        page_shortage += addl_page_shortage;
    }

    pcount = cnt.v_active_count;
    m = TAILQ_FIRST(&vm_page_queue_active);
    while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
        int refcount;

        if (m->queue != PQ_ACTIVE) {
            break;
        }

        next = TAILQ_NEXT(m, pageq);
        /*
         * Don't deactivate pages that are busy.
         */
        if ((m->busy != 0) ||
            (m->flags & PG_BUSY) ||
            (m->hold_count != 0)) {
            s = splvm();
            TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
            splx(s);
            m = next;
            continue;
        }

        /*
         * The count for pagedaemon pages is done after checking the
         * page for eligibility...
         */
        cnt.v_pdpages++;

        refcount = 0;
        if (m->object->ref_count != 0) {
            if (m->flags & PG_REFERENCED) {
                refcount += 1;
            }
            refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
            if (refcount) {
                m->act_count += ACT_ADVANCE + refcount;
                if (m->act_count > ACT_MAX)
                    m->act_count = ACT_MAX;
            }
        }

        m->flags &= ~PG_REFERENCED;

        if (refcount && (m->object->ref_count != 0)) {
            s = splvm();
            TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
            splx(s);
        } else {
            m->act_count -= min(m->act_count, ACT_DECLINE);
            if (vm_pageout_algorithm_lru ||
                (m->object->ref_count == 0) || (m->act_count == 0)) {
                --page_shortage;
                if (m->object->ref_count == 0) {
                    vm_page_protect(m, VM_PROT_NONE);
                    if (m->dirty == 0)
                        vm_page_cache(m);
                    else
                        vm_page_deactivate(m);
                } else {
                    vm_page_deactivate(m);
                }
            } else {
                s = splvm();
                TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
                TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
                splx(s);
            }
        }
        m = next;
    }

    s = splvm();
    /*
     * We try to maintain some *really* free pages; this allows
     * interrupt code to be guaranteed space.
     */
    while (cnt.v_free_count < cnt.v_free_reserved) {
        static int cache_rover = 0;
        m = vm_page_list_find(PQ_CACHE, cache_rover);
        if (!m)
            break;
        cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
        vm_page_free(m);
        cnt.v_dfree++;
    }
    splx(s);

    /*
     * If we didn't get enough free pages, and we have skipped a vnode
     * in a writeable object, wake up the sync daemon.  And kick swapout
     * if we did not get enough free pages.
     */
    if ((cnt.v_cache_count + cnt.v_free_count) <
        (cnt.v_free_target + cnt.v_cache_min)) {
        if (vnodes_skipped &&
            (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
            if (!vfs_update_wakeup) {
                vfs_update_wakeup = 1;
                wakeup(&vfs_update_wakeup);
            }
        }
#if !defined(NO_SWAPPING)
        if (vm_swapping_enabled &&
            (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
            vm_req_vmdaemon();
            vm_pageout_req_swapout = 1;
        }
#endif
    }

    /*
     * Make sure that we have swap space -- if we are low on memory and
     * swap, then kill the biggest process.
     */
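    /*
     * Added note on victim selection below: the runnable (or sleeping)
     * non-system process with the largest resident set is chosen.  init
     * (pid 1) is never chosen, and low-pid processes (pid < 48) are
     * spared as long as some swap space remains.
     */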
    if ((vm_swap_size == 0 || swap_pager_full) &&
        ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
        bigproc = NULL;
        bigsize = 0;
        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
            /*
             * if this is a system process, skip it
             */
            if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
                ((p->p_pid < 48) && (vm_swap_size != 0))) {
                continue;
            }
            /*
             * if the process is in a non-running type state,
             * don't touch it.
             */
            if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                continue;
            }
            /*
             * get the process size
             */
            size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
            /*
             * if this process is bigger than the biggest one so far,
             * remember it.
             */
            if (size > bigsize) {
                bigproc = p;
                bigsize = size;
            }
        }
        if (bigproc != NULL) {
            killproc(bigproc, "out of swap space");
            bigproc->p_estcpu = 0;
            bigproc->p_nice = PRIO_MIN;
            resetpriority(bigproc);
            wakeup(&cnt.v_free_count);
        }
    }
    return force_wakeup;
}

static int
vm_pageout_free_page_calc(count)
    vm_size_t count;
{
    if (count < cnt.v_page_count)
        return 0;
    /*
     * free_reserved needs to include enough for the largest swap pager
     * structures plus enough for any pv_entry structs when paging.
     */
    if (cnt.v_page_count > 1024)
        cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
    else
        cnt.v_free_min = 4;
    cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
        cnt.v_interrupt_free_min;
    cnt.v_free_reserved = vm_pageout_page_count +
        cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
    cnt.v_free_min += cnt.v_free_reserved;
    return 1;
}


#ifdef unused
int
vm_pageout_free_pages(object, add)
    vm_object_t object;
    int add;
{
    return vm_pageout_free_page_calc(object->size);
}
#endif

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
    /*
     * Initialize some paging parameters.
     */

    cnt.v_interrupt_free_min = 2;
    if (cnt.v_page_count < 2000)
        vm_pageout_page_count = 8;

    vm_pageout_free_page_calc(cnt.v_page_count);
    /*
     * free_reserved needs to include enough for the largest swap pager
     * structures plus enough for any pv_entry structs when paging.
     */
    cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;

    if (cnt.v_free_count > 1024) {
        cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
        cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
        cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
    } else {
        cnt.v_cache_min = 0;
        cnt.v_cache_max = 0;
        cnt.v_inactive_target = cnt.v_free_count / 4;
    }

    /* XXX does not really belong here */
    if (vm_page_max_wired == 0)
        vm_page_max_wired = cnt.v_free_count / 3;

    swap_pager_swap_init();
    /*
     * The pageout daemon is never done, so loop forever.
     */
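    /*
     * Added note on the handshake with the rest of the VM system:
     * pagedaemon_wakeup() sets vm_pages_needed and wakes us on that
     * channel; each pass below ends with a wakeup on &cnt.v_free_count
     * so that anyone sleeping for free pages can retry its allocation.
     */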
    while (TRUE) {
        int inactive_target;
        int s = splvm();
        if (!vm_pages_needed ||
            ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
            vm_pages_needed = 0;
            tsleep(&vm_pages_needed, PVM, "psleep", 0);
        } else if (!vm_pages_needed) {
            tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
        }
        inactive_target =
            (cnt.v_page_count - cnt.v_wire_count) / 4;
        if (inactive_target < 2*cnt.v_free_min)
            inactive_target = 2*cnt.v_free_min;
        cnt.v_inactive_target = inactive_target;
        if (vm_pages_needed)
            cnt.v_pdwakeups++;
        vm_pages_needed = 0;
        splx(s);
        vm_pager_sync();
        vm_pageout_scan();
        vm_pager_sync();
        wakeup(&cnt.v_free_count);
    }
}

void
pagedaemon_wakeup()
{
    if (!vm_pages_needed && curproc != pageproc) {
        vm_pages_needed++;
        wakeup(&vm_pages_needed);
    }
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
    static int lastrun = 0;

    if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
        wakeup(&vm_daemon_needed);
        lastrun = ticks;
    }
}

static void
vm_daemon()
{
    vm_object_t object;
    struct proc *p;

    while (TRUE) {
        tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
        if (vm_pageout_req_swapout) {
            swapout_procs();
            vm_pageout_req_swapout = 0;
        }
        /*
         * scan the processes for exceeding their rlimits or if the
         * process is swapped out -- deactivate pages
         */

        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
            quad_t limit;
            vm_offset_t size;

            /*
             * if this is a system process or if we have already
             * looked at this process, skip it.
             */
            if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
                continue;
            }
            /*
             * if the process is in a non-running type state,
             * don't touch it.
             */
            if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                continue;
            }
            /*
             * get a limit
             */
            limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
                p->p_rlimit[RLIMIT_RSS].rlim_max);

            /*
             * let processes that are swapped out really be
             * swapped out: set the limit to nothing to force a
             * swap-out.
             */
            if ((p->p_flag & P_INMEM) == 0)
                limit = 0;	/* XXX */

            size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
            if (limit >= 0 && size >= limit) {
                vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
                    (vm_pindex_t)(limit >> PAGE_SHIFT));
            }
        }

        /*
         * we remove cached objects that have no RSS...
         */
restart:
        object = TAILQ_FIRST(&vm_object_cached_list);
        while (object) {
            /*
             * if there are no resident pages -- get rid of the
             * object
             */
            if (object->resident_page_count == 0) {
                vm_object_reference(object);
                pager_cache(object, FALSE);
                goto restart;
            }
            object = TAILQ_NEXT(object, cached_list);
        }
    }
}
#endif