/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);

static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_FICTITIOUS | PG_MARKER;
	marker->oflags = VPO_BUSY;
	marker->queue = queue;
	marker->wire_count = 1;
}

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'.  VM_OBJECT_TRYLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	object = m->object;

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
	    m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_unlock(m);
	VM_OBJECT_LOCK(object);
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl,
	    &marker, pageq);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use marker page
 * to detect page queue changes and maintain notion of next page on
 * page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
	return (unchanged);
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however, the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count], pb, ps;
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
	    ("vm_pageout_clean: page %p is busy", m));
	KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_prev(pb)) == NULL ||
		    (p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_next(ps)) == NULL ||
		    (p->oflags & VPO_BUSY) != 0 || p->busy != 0)
			break;
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    (mt->flags & PG_WRITEABLE) == 0,
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe()) {
				vm_page_lock(mt);
				vm_page_try_to_cache(mt);
				vm_page_unlock(mt);
			}
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int actcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE ||
	    first_object->type == OBJT_SG)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		if (object->type == OBJT_PHYS || object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
				continue;
			PCPU_INC(cnt.v_pdpages);
			vm_page_lock(p);
			if (p->wire_count != 0 || p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if ((p->flags & PG_REFERENCED) != 0) {
				if (actcount == 0)
					actcount = 1;
				vm_page_lock_queues();
				vm_page_flag_clear(p, PG_REFERENCED);
				vm_page_unlock_queues();
			}
			if (p->queue != PQ_ACTIVE && actcount != 0) {
				vm_page_activate(p);
				p->act_count += actcount;
			} else if (p->queue == PQ_ACTIVE) {
				if (actcount == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode &&
					    (vm_pageout_algorithm ||
					    p->act_count == 0)) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_page_lock_queues();
						vm_page_requeue(p);
						vm_page_unlock_queues();
					}
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_lock_queues();
					vm_page_requeue(p);
					vm_page_unlock_queues();
				}
			} else if (p->queue == PQ_INACTIVE)
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	long desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0 && nothingwired) {
		tmpe = map->header.next;
		while (tmpe != &map->header) {
			pmap_remove(vm_map_pmap(map), tmpe->start, tmpe->end);
			tmpe = tmpe->next;
		}
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	vm_pageout_init_marker(&marker, PQ_INACTIVE);

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE)
			goto rescan0;

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * Lock the page.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_page_unlock(m);
			vm_page_requeue(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * Don't mess with busy pages, keep them in the front of the
		 * queue; they are most likely being paged out.
		 */
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		    m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}
		if (m->busy || (m->oflags & VPO_BUSY)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			vm_page_unlock(m);
			m->act_count += actcount + ACT_ADVANCE;
			VM_OBJECT_UNLOCK(object);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			vm_page_unlock(m);
			m->act_count += actcount + ACT_ADVANCE + 1;
			VM_OBJECT_UNLOCK(object);
			continue;
		}

		/*
		 * If the upper level VM system does not believe that the page
		 * is fully dirty, but it is mapped for write access, then we
		 * consult the pmap to see if the page's dirty status should
		 * be updated.
		 */
		if (m->dirty != VM_PAGE_BITS_ALL &&
		    (m->flags & PG_WRITEABLE) != 0) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.  In principle, we could eliminate just write
			 * access here rather than all access.  In the expected
			 * case, when there are no last instant modifications
			 * to the page, removing all access will be cheaper
			 * overall.
			 */
			if (pmap_is_modified(m))
				vm_page_dirty(m);
			else if (m->dirty == 0)
				pmap_remove_all(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_page_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok, vfslocked = 0;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_page_unlock(m);
				VM_OBJECT_UNLOCK(object);
				vm_page_requeue(m);
				continue;
			}

			/*
			 * Following operations may unlock
			 * vm_page_queue_mtx, invalidating the 'next'
			 * pointer.  To prevent an inordinate number
			 * of restarts we use our marker to remember
			 * our place.
			 *
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
			    m, &marker, pageq);
			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vm_page_unlock_queues();
				vm_page_unlock(m);
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vm_page_lock_queues();
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock(m);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, pageq) != &marker) {
					vm_page_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct.
				 */
				if (m->busy || (m->oflags & VPO_BUSY)) {
					vm_page_unlock(m);
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_page_unlock(m);
					vm_page_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_unlock_queues();
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			vm_page_lock_queues();
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_UNLOCK(object);
			if (mp != NULL) {
				vm_page_unlock_queues();
				if (vp != NULL)
					vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
			    &marker, pageq);
			vm_page_lock_assert(m, MA_NOTOWNED);
			continue;
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
		cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (object->ref_count != 0)) {
			vm_page_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (object->ref_count == 0) {
					KASSERT(!pmap_page_is_mapped(m),
					    ("vm_pageout_scan: page %p is mapped", m));
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target())
			vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)))
		vm_pageout_oom(VM_OOM_MEM);
}


void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if (p->p_state != PRS_NORMAL ||
		    (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td) &&
			    !TD_IS_SUSPENDED(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * get the process size
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * if this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&cnt.v_free_count);
	}
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * that some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	vm_object_t object;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
		    cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_page_stats: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		vm_page_lock_assert(m, MA_NOTOWNED);
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int error, pass;

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed,
			    &vm_page_queue_free_mtx, PVM, "psleep",
			    vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				mtx_unlock(&vm_page_queue_free_mtx);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup()
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon()
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
	uint64_t rsize, ravailable;

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
#ifdef RACCT
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", hz);
#else
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
#endif
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out set the limit to nothing (will force a
			 * swap-out.)
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
			rsize = IDX_TO_OFF(size);
			PROC_LOCK(p);
			racct_set(p, RACCT_RSS, rsize);
			ravailable = racct_get_available(p, RACCT_RSS);
			PROC_UNLOCK(p);
			if (rsize > ravailable) {
				/*
				 * Don't be overly aggressive; this might be
				 * an innocent process, and the limit could've
				 * been exceeded by some memory hog.  Don't
				 * try to deactivate more than 1/4th of process'
				 * resident set size.
				 */
				if (attempts <= 8) {
					if (ravailable < rsize - (rsize / 4))
						ravailable = rsize - (rsize / 4);
				}
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, OFF_TO_IDX(ravailable));
				/* Update RSS usage after paging out. */
				size = vmspace_resident_count(vm);
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				racct_set(p, RACCT_RSS, rsize);
				PROC_UNLOCK(p);
				if (rsize > ravailable)
					tryagain = 1;
			}
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10)
			goto again;
	}
}
#endif			/* !defined(NO_SWAPPING) */