/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
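 *
 * The daemon runs as the kernel process "pagedaemon".  It sleeps until
 * awakened by a free-page shortage, then scans the inactive and active
 * queues, freeing clean pages, laundering dirty ones, and rebalancing
 * the queues until the paging target is met (see vm_pageout_scan()
 * below).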
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);

static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_FICTITIOUS | PG_MARKER;
	marker->oflags = VPO_BUSY;
	marker->queue = queue;
	marker->wire_count = 1;
}

/*
 * vm_pageout_fallback_object_lock:
 *
 *	Lock vm object currently associated with `m'.  VM_OBJECT_TRYLOCK is
 *	known to have failed and page queue must be either PQ_ACTIVE or
 *	PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 *	while locking the vm object.  Use marker page to detect page queue
 *	changes and maintain notion of next page on page queue.  Return
 *	TRUE if no changes were detected, FALSE otherwise.
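 *	(In the lock order the vm object lock precedes the page queue
 *	lock, so the object lock cannot be acquired while the queue lock
 *	is held; dropping the queue lock opens a window in which the
 *	queue can change, hence the marker page.)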
 *	The vm object is locked on return.
 *
 *	This function depends on both the lock portion of struct vm_object
 *	and normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	object = m->object;

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
	    m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_unlock(m);
	VM_OBJECT_LOCK(object);
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl,
	    &marker, pageq);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use marker page
 * to detect page queue changes and maintain notion of next page on
 * page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
	return (unchanged);
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_NOTOWNED);
	vm_page_lock(m);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->oflags & VPO_BUSY))) {
		vm_page_unlock(m);
		return (0);
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
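	 * (With the default vm_pageout_page_count of 16, the scans below
	 * try to build an aligned 16-page cluster around the target
	 * page.)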
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if ((p->oflags & VPO_BUSY) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if ((p->oflags & VPO_BUSY) || p->busy) {
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	vm_page_unlock(m);
	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
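	 * (The dirty mask tracks DEV_BSIZE-sized chunks of the page, so
	 * a write(2) covering only some chunks, or a file whose last
	 * block only partially covers its page, can leave the mask
	 * partially set.)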
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    (mt->flags & PG_WRITEABLE) == 0,
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe()) {
				vm_page_lock(mt);
				vm_page_try_to_cache(mt);
				vm_page_unlock(mt);
			}
		}
	}
	return (numpagedout);
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p, next;
	int actcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE ||
	    first_object->type == OBJT_SG)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		if (object->type == OBJT_PHYS || object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		p = TAILQ_FIRST(&object->memq);
		while (p != NULL) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			next = TAILQ_NEXT(p, listq);
			if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
				p = next;
				continue;
			}
			vm_page_lock(p);
			vm_page_lock_queues();
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock_queues();
				vm_page_unlock(p);
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode &&
					    (vm_pageout_algorithm ||
					    (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_page_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			vm_page_unlock_queues();
			vm_page_unlock(p);
			p = next;
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
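	 * The object with the most resident pages presumably yields the
	 * most reclaimable memory per scan, so shrinking it first gets
	 * us closest to the target with the least work.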
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	vm_pageout_init_marker(&marker, PQ_INACTIVE);

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
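	 * (Pass 0 is limited to vm_max_launder, 32 by default; any later
	 * pass effectively removes the limit by raising it to 10000.)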
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * Lock the page.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count || (object = m->object) == NULL) {
			vm_page_unlock(m);
			vm_page_requeue(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (!VM_OBJECT_TRYLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		    m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}
		if (m->busy || (m->oflags & VPO_BUSY)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE);
			vm_page_unlock(m);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			vm_page_unlock(m);
			continue;
		}

		/*
		 * If the upper level VM system does not believe that the page
		 * is fully dirty, but it is mapped for write access, then we
		 * consult the pmap to see if the page's dirty status should
		 * be updated.
		 */
		if (m->dirty != VM_PAGE_BITS_ALL &&
		    (m->flags & PG_WRITEABLE) != 0) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.
			 * In principle, we could eliminate just write
			 * access here rather than all access.  In the
			 * expected case, when there are no last instant
			 * modifications to the page, removing all access
			 * will be cheaper overall.
			 */
			if (pmap_is_modified(m))
				vm_page_dirty(m);
			else if (m->dirty == 0)
				pmap_remove_all(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_page_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok, vfslocked = 0;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_page_unlock(m);
				VM_OBJECT_UNLOCK(object);
				vm_page_requeue(m);
				continue;
			}

			/*
			 * Following operations may unlock
			 * vm_page_queue_mtx, invalidating the 'next'
			 * pointer.  To prevent an inordinate number
			 * of restarts we use our marker to remember
			 * our place.
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
			    m, &marker, pageq);
			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.
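			 * (Hence the LK_TIMELOCK flag on the vget() call
			 * below.)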
			 * We skip the vnode if we can't get it in a
			 * reasonable amount of time.
			 */
			if (object->type == OBJT_VNODE) {
				vm_page_unlock_queues();
				vm_page_unlock(m);
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vm_page_lock_queues();
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock(m);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, pageq) != &marker) {
					vm_page_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue;
				 * the statistics are more correct if we
				 * don't.
				 */
				if (m->busy || (m->oflags & VPO_BUSY)) {
					vm_page_unlock(m);
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it.
				 */
				if (m->hold_count) {
					vm_page_unlock(m);
					vm_page_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}
			vm_page_unlock(m);

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * Decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_unlock_queues();
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			vm_page_lock_queues();
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_UNLOCK(object);
			if (mp != NULL) {
				vm_page_unlock_queues();
				if (vp != NULL)
					vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
			    &marker, pageq);
			vm_page_lock_assert(m, MA_NOTOWNED);
			continue;
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.
	 * We nominally track the per-page activity counter and use it to
	 * locate deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		object = m->object;
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		if (!vm_pageout_page_lock(m, &next) ||
		    (object = m->object) == NULL) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (object->ref_count != 0)) {
			vm_page_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (object->ref_count == 0) {
					KASSERT(!pmap_page_is_mapped(m),
					    ("vm_pageout_scan: page %p is mapped", m));
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target())
			vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.
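	 * The kill frees both the victim's resident pages and its swap
	 * space, relieving whichever resource is exhausted.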
	 * However, we avoid doing this on the first pass in order to give
	 * ourselves a chance to flush out dirty vnode-backed pages and to
	 * allow active pages to be moved to the inactive queue and
	 * reclaimed.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)))
		vm_pageout_oom(VM_OOM_MEM);
}


void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if ((p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * get the process size
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * If this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&cnt.v_free_count);
	}
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
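 * Without it, act_count would go stale across long idle periods and the
 * first burst of paging would have no usage history with which to rank
 * pages.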
 */
static void
vm_pageout_page_stats(void)
{
	vm_object_t object;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
		    cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
		    ("vm_pageout_page_stats: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		vm_page_lock_assert(m, MA_NOTOWNED);
		if (!vm_pageout_page_lock(m, &next) ||
		    (object = m->object) == NULL) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
	int error, pass;

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
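	 *
	 * For a sense of scale: with 4KB pages on a 4GB machine
	 * (v_page_count of roughly one million), the formula below gives
	 * v_free_min = 4 + (1048576 - 1024) / 200, about 5240 pages
	 * (~20MB), before v_free_reserved is added in.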
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed,
			    &vm_page_queue_free_mtx, PVM, "psleep",
			    vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				mtx_unlock(&vm_page_queue_free_mtx);
				pass = 0;
				vm_page_lock_queues();
				vm_pageout_page_stats();
				vm_page_unlock_queues();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup(void)
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags;

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif			/* !defined(NO_SWAPPING) */