/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.142 1999/06/26 14:56:58 peter Exp $
 */

/*
 * The proverbial page-out daemon.
 */
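/*
 * Overview (editorial summary, not from the original header): this file
 * implements two kernel threads.  The "pagedaemon" (vm_pageout) replenishes
 * the free and cache queues by scanning the inactive and active queues,
 * laundering dirty pages as needed and, as a last resort, killing the
 * largest process when both memory and swap are exhausted.  The "vmdaemon"
 * (vm_daemon, compiled in unless NO_SWAPPING) enforces RSS limits and
 * assists with whole-process swapout.
 */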

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm_lru = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

static int max_page_launder = 100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif
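
/*
 * Usage sketch (illustrative, not from the original source): the knobs
 * declared in this file appear under the "vm" sysctl tree and are normally
 * adjusted at run time, e.g.
 *
 *	sysctl -w vm.swap_idle_enabled=1
 *	sysctl -w vm.pageout_stats_interval=10
 *
 * subject to the CTLFLAG_RW/CTLFLAG_RD flag given in each declaration.
 */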

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and until then we cannot do anything that would mess with the
 * page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

#if 0
	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;
#endif

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

#if 0
	/*
	 * XXX REMOVED XXX.  vm_object_collapse() can block, which can
	 * change the page state.  Calling vm_object_collapse() might also
	 * destroy or rename the page because we have not busied it yet!!!
	 * So this code segment is removed.
	 */
	/*
	 * Try collapsing before it's too late.  XXX huh?  Why are we doing
	 * this here?
	 */
	if (object->backing_object) {
		vm_object_collapse(object);
	}
#endif

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
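	/*
	 * Layout sketch of the cluster window (illustrative): the target
	 * page m sits at mc[vm_pageout_page_count]; forward-clusterable
	 * pages fill the slots above it and backward-clusterable pages the
	 * slots below it, with page_base tracking the lowest slot in use:
	 *
	 *	mc: [ .. backward pages .. | m | .. forward pages .. ]
	 *	      ^page_base
	 *
	 * The final vm_pageout_flush() call is handed the contiguous run
	 * starting at mc[page_base].
	 */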
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 */

	for (i = 0; i < count; i++) {
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}
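
/*
 * Note (editorial summary): the splvm()-protected TAILQ_REMOVE /
 * TAILQ_INSERT_TAIL pairs above implement a simple rotation.  A page that
 * is still referenced, or whose activity count has not yet decayed to
 * zero, is moved to the tail of the active queue for another trip around
 * before it is reconsidered for deactivation.
 */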

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	int launder_loop = 0;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */

	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage_init;

	/*
	 * Figure out what to do with dirty pages when they are encountered.
	 * Assume that 1/3 of the pages on the inactive list are clean.  If
	 * we think we can reach our target, disable laundering (do not
	 * clean any dirty pages).  If we miss the target we will loop back
	 * up and do a laundering run.
	 */
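	/*
	 * Worked example (illustrative): with 3000 inactive pages the
	 * heuristic assumes roughly 1000 are clean.  A page_shortage under
	 * 1000 disables laundering for this pass (maxlaunder = 0);
	 * otherwise maxlaunder is clamped by max_page_launder so a single
	 * pass cannot initiate an unbounded amount of write I/O.
	 */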
	if (cnt.v_inactive_count / 3 > page_shortage) {
		maxlaunder = 0;
		launder_loop = 0;
	} else {
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		launder_loop = 1;
	}

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (
	    m = TAILQ_FIRST(&vm_page_queue_inactive);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next
	) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), because the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed.
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;
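
		/*
		 * Recap of the ladder so far (editorial): invalid pages were
		 * freed outright and clean pages were moved to the cache
		 * queue; only dirty pages, handled next, require pageout I/O.
		 */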
		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * For now we protect against potential memory
			 * deadlocks by requiring significant memory to be
			 * free if the object is not OBJT_DEFAULT or OBJT_SWAP.
			 * We do not 'trust' any other object type to operate
			 * with low memory, not even OBJT_DEVICE.  The VM
			 * allocator will special case allocations done by
			 * the pageout daemon so the check below actually
			 * does have some hysteresis in it.  It isn't the best
			 * solution, though.
			 */

			if (
			    object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP &&
			    cnt.v_free_count < cnt.v_free_reserved
			) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * Presumably we have sufficient free memory to do
			 * the more sophisticated checks and locking required
			 * for vnodes.
			 *
			 * The object is already known NOT to be dead.  The
			 * vget() may still block, though, because
			 * VOP_ISLOCKED() doesn't check to see if an inode
			 * (v_data) is associated with the vnode.  If it isn't,
			 * vget() will load it in from disk.  Worse, vget()
			 * may actually get stuck waiting on "inode" if another
			 * process is in the process of bringing the inode in.
			 * This is bad news for us either way.
			 *
			 * So for the moment we check v_data == NULL as a
			 * workaround.  This means that vnodes which do not
			 * use v_data in the way we expect probably will not
			 * wind up being paged out by the pager and it will be
			 * up to the syncer to get them.  That's better than
			 * us blocking here.
			 *
			 * This whole code section is bogus - we need to fix
			 * the vnode pager to handle vm_page_t's without us
			 * having to do any sophisticated VOP tests.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				if (VOP_ISLOCKED(vp) ||
				    vp->v_data == NULL ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the page
				 * back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it.
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * If we still have a page shortage and we didn't launder anything,
	 * run the inactive scan again and launder something this time.
	 */

	if (launder_loop == 0 && page_shortage > 0) {
		launder_loop = 1;
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		goto rescan0;
	}

	/*
	 * Compute the page shortage from the point of view of having to
	 * move pages from the active queue to the inactive queue.
	 */

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);
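
		/*
		 * Rough dynamics (illustrative): each referenced scan adds
		 * at least ACT_ADVANCE to act_count (saturating at ACT_MAX),
		 * while each unreferenced scan below subtracts ACT_DECLINE,
		 * so a formerly busy page survives on the order of
		 * ACT_MAX/ACT_DECLINE idle scans before being deactivated.
		 */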
		/*
		 * Only if an object is currently being used do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			(void) speedup_syncer();
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * Make sure that we have swap space -- if we are low on both memory
	 * and swap, then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * If this is a system process, skip it.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * Get the process size.
			 */
			size = vmspace_resident_count(p->p_vmspace);
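			/*
			 * (Editorial summary: at this point only runnable or
			 * sleeping, non-system processes remain, with PID 1
			 * and low-numbered PIDs spared while any swap is
			 * left, so the selection below is purely by resident
			 * set size.)
			 */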
			/*
			 * If this process is bigger than the biggest one so
			 * far, remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods in which there is no paging, some statistics
 * accumulation still occurs.  This helps in the situation where paging
 * is just starting to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
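	/*
	 * Worked example (illustrative; assumes 4K pages and
	 * cnt.v_page_count == 8192, i.e. a 32MB machine): the computation
	 * below first yields v_free_min = 4 + (8192 - 1024) / 200 = 39
	 * pages, before v_free_reserved is added in.  The reserve itself
	 * scales with vm_pageout_page_count, v_pageout_free_min,
	 * count / 768, and PQ_L2_SIZE.
	 */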
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}


/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass.
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	swap_pager_swap_init();
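	/*
	 * Sleep/wakeup protocol (summary): the daemon sleeps on
	 * &vm_pages_needed; pagedaemon_wakeup() sets the flag and wakes it
	 * when memory runs low.  Absent wakeups, the tsleep() below times
	 * out every vm_pageout_stats_interval seconds and only the cheap
	 * vm_pageout_page_stats() pass runs.
	 */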
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pageout_scan();
		vm_pageout_deficit = 0;
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes for exceeding their rlimits, or if a
		 * process is swapped out, deactivate pages.
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * Get a limit.
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			        p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
	}
}
#endif