/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 * The proverbial page-out daemon.
 */
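/*
 * Overview (a summary of the code below): the daemon tries to keep
 * cnt.v_free_count + cnt.v_cache_count above cnt.v_free_target +
 * cnt.v_cache_min.  Each pass of vm_pageout_scan() computes the
 * shortage, roughly:
 *
 *	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
 *	    (cnt.v_free_count + cnt.v_cache_count);
 *
 * and then frees, caches, or launders pages taken from the inactive
 * queue, refilling the inactive queue from the active queue as needed.
 */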

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm_lru = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

static int max_page_launder = 100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set
 * until late, and we cannot do anything that will mess with the page
 * before then.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
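		 *
		 * (Illustration of how the cluster is assembled: mc[] has
		 * 2*vm_pageout_page_count slots, the target page sits at
		 * index vm_pageout_page_count, forward candidates fill the
		 * slots above it, backward candidates fill the slots below
		 * it, and page_base tracks the lowest slot in use, so the
		 * final flush is vm_pageout_flush(&mc[page_base],
		 * pageout_count, 0).)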
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we setup for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 */

	for (i = 0; i < count; i++) {
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or, if vm_page_proc_limit is set, deactivate
 * all of the pages in the object and its backing_objects.
 *
 * The object and map must be locked.
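 *
 * (Sketch of the aging policy applied below: a page whose pmap
 * reference bit is set is kept active and gains act_count; an
 * unreferenced active page loses ACT_DECLINE per visit and is
 * deactivated once act_count reaches zero, or unconditionally when
 * vm_pageout_algorithm_lru is set, unless remove_mode suppresses
 * deactivation and the page is simply requeued.)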
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
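	 *
	 * (In other words, this is a two-pass strategy: shrink the single
	 * biggest unshared object first, then sweep the remaining map
	 * entries in address order until the resident count drops to
	 * "desired".)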
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	int launder_loop = 0;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */

	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage_init;

	/*
	 * Figure out what to do with dirty pages when they are encountered.
	 * Assume that 1/3 of the pages on the inactive list are clean.  If
	 * we think we can reach our target, disable laundering (do not
	 * clean any dirty pages).  If we miss the target we will loop back
	 * up and do a laundering run.
	 */

	if (cnt.v_inactive_count / 3 > page_shortage) {
		maxlaunder = 0;
		launder_loop = 0;
	} else {
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		launder_loop = 1;
	}

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.
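	 *
	 * (Worked example of the laundering heuristic above: with 300
	 * inactive pages and a shortage of 50, 300 / 3 = 100 > 50, so the
	 * first pass assumes enough clean pages exist and laundering stays
	 * off; with a shortage of 150 the test fails, and maxlaunder is set
	 * to the smaller of cnt.v_inactive_target and max_page_launder.)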
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queue_inactive);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
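			 *
			 * (For reference, the swap_pageouts_ok computation
			 * above reduces to: swap-backed pageouts proceed
			 * unless vm.defer_swapspace_pageouts or
			 * vm.disable_swapspace_pageouts is set; a deferral,
			 * but not a disable, is overridden once free + cache
			 * memory falls below cnt.v_free_min.)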
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * For now we protect against potential memory
			 * deadlocks by requiring significant memory to be
			 * free if the object is not OBJT_DEFAULT or OBJT_SWAP.
			 * We do not 'trust' any other object type to operate
			 * with low memory, not even OBJT_DEVICE.  The VM
			 * allocator will special case allocations done by
			 * the pageout daemon so the check below actually
			 * does have some hysteresis in it.  It isn't the best
			 * solution, though.
			 */

			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP &&
			    cnt.v_free_count < cnt.v_free_reserved) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m,
				    pageq);
				splx(s);
				continue;
			}

			/*
			 * Presumably we have sufficient free memory to do
			 * the more sophisticated checks and locking required
			 * for vnodes.
			 *
			 * The object is already known NOT to be dead.  The
			 * vget() may still block, though, because
			 * VOP_ISLOCKED() doesn't check to see if an inode
			 * (v_data) is associated with the vnode.  If it isn't,
			 * vget() will load it in from disk.  Worse, vget()
			 * may actually get stuck waiting on "inode" if another
			 * process is in the process of bringing the inode in.
			 * This is bad news for us either way.
			 *
			 * So for the moment we check v_data == NULL as a
			 * workaround.  This means that vnodes which do not
			 * use v_data in the way we expect probably will not
			 * wind up being paged out by the pager and it will be
			 * up to the syncer to get them.  That's better than
			 * us blocking here.
			 *
			 * This whole code section is bogus - we need to fix
			 * the vnode pager to handle vm_page_t's without us
			 * having to do any sophisticated VOP tests.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				if (VOP_ISLOCKED(vp) ||
				    vp->v_data == NULL ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another queue
				 * during potential blocking in vget() above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget(); we don't move the page
				 * back onto the end of the queue, so that
				 * statistics are more correct.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * If we still have a page shortage and we didn't launder anything,
	 * run the inactive scan again and launder something this time.
	 */

	if (launder_loop == 0 && page_shortage > 0) {
		launder_loop = 1;
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		goto rescan0;
	}

	/*
	 * Compute the page shortage from the point of view of having to
	 * move pages from the active queue to the inactive queue.
	 */

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
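		 *
		 * (Example of the act_count arithmetic above: a page found
		 * with PG_REFERENCED set and two referencing pmap entries
		 * gets act_count += ACT_ADVANCE + 3, clamped at ACT_MAX; an
		 * unreferenced page below loses ACT_DECLINE per pass and
		 * becomes a deactivation candidate when it reaches zero.)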
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			(void) speedup_syncer();
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
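			 *
			 * (The victim chosen here is simply the eligible
			 * process with the largest resident set; after
			 * killproc() below, its priority is boosted so that
			 * it can run, exit, and release its pages quickly.)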
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * some statistics accumulation still occurs during long periods when
 * there is no paging.  This helps the situation where paging just
 * starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM-wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
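	 *
	 * (Worked example, assuming an illustrative 8192-page machine with
	 * 4 KB pages, MAXBSIZE of 64 KB, v_interrupt_free_min of 2, and
	 * vm_pageout_page_count of 16: v_free_min starts at
	 * 4 + (8192 - 1024) / 200 = 39; v_pageout_free_min becomes
	 * 2 * 65536 / 4096 + 2 = 34; v_free_reserved becomes
	 * 16 + 34 + 8192 / 768 + PQ_L2_SIZE, where PQ_L2_SIZE is
	 * configuration-dependent; and v_free_min then has v_free_reserved
	 * added in.)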
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}


/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * The free and cache targets scale with the amount of memory
	 * in the machine.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	curproc->p_flag |= P_BUFEXHAUST;
	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
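	 *
	 * (Sketch of the handshake implemented below: pagedaemon_wakeup()
	 * sets vm_pages_needed and wakeup()s on it; here the daemon
	 * tsleep()s on the same channel for vm_pageout_stats_interval
	 * seconds and, if the timeout expires with no demand, runs
	 * vm_pageout_page_stats() instead of a full vm_pageout_scan().)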
	 */
	while (TRUE) {
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pageout_scan();
		vm_pageout_deficit = 0;
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits; if a
		 * process is swapped out, deactivate its pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or it is exiting,
			 * skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
	}
}
#endif