/*-
 * Copyright (c) 1991 Regents of the University of California.
 *	All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 *	All rights reserved.
 * Copyright (c) 1994 David Greenman
 *	All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 *	All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(struct vm_domain *vmd, int pass);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
int vm_pageout_wakeup_thresh;

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/*
 * Allow for use by vm_pageout before vm_daemon is initialized.
 */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_update_period;
static int defer_swap_pageouts;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static int lowmem_ticks;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
    CTLFLAG_RW, &vm_pageout_wakeup_thresh, 0,
    "free page threshold for waking up the pageout daemon");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
    CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
    CTLFLAG_RW, &vm_pageout_update_period, 0,
    "Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
    "Low memory callback period");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
    CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
    CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
    CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
    CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
    CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
    CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
    CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
    CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
static boolean_t vm_pageout_launder(struct vm_pagequeue *pq, int, vm_paddr_t,
    vm_paddr_t);
#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);

/*
 * Initialize a dummy page for marking the caller's place in the specified
 * paging queue.  In principle, this function only needs to set the flag
 * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
 * to one as safety precautions.
 */
static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
	marker->queue = queue;
	marker->hold_count = 1;
}

/*
 * vm_pageout_fallback_object_lock:
 *
 *	Lock vm object currently associated with `m'.
 *	VM_OBJECT_TRYWLOCK is known to have failed and the page queue must
 *	be either PQ_ACTIVE or PQ_INACTIVE.  To avoid lock order violation,
 *	unlock the page queues while locking the vm object.  Use the marker
 *	page to detect page queue changes and maintain the notion of the next
 *	page on the page queue.  Return TRUE if no changes were detected,
 *	FALSE otherwise.  The vm object is locked on return.
 *
 *	This function depends on both the lock portion of struct vm_object
 *	and normal struct vm_page being type stable.
 */
static boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	struct vm_pagequeue *pq;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	pq = vm_page_pagequeue(m);
	object = m->object;

	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
	vm_pagequeue_unlock(pq);
	vm_page_unlock(m);
	VM_OBJECT_WLOCK(object);
	vm_page_lock(m);
	vm_pagequeue_lock(pq);

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, plinks.q);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, plinks.q));
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use the marker page
 * to detect page queue changes and maintain the notion of the next page
 * on the page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
static boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	struct vm_pagequeue *pq;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	pq = vm_page_pagequeue(m);

	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
	vm_pagequeue_unlock(pq);
	vm_page_lock(m);
	vm_pagequeue_lock(pq);

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, plinks.q);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, plinks.q));
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
	return (unchanged);
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count], pb, ps;
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	vm_page_assert_unbusied(m);
	KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;
		/*
		 * We are at an alignment boundary.  Stop here and switch
		 * directions.  Do not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
			break;
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
	    NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e. busy the page), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	The returned runlen is the count of pages between mreq and the first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
 *	VM_PAGER_FAIL for any page in the runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.
	 * Bump the vm_page_t->busy counter and mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_sbusy(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
			if (vm_page_count_severe()) {
				vm_page_lock(mt);
				vm_page_try_to_cache(mt);
				vm_page_unlock(mt);
			}
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

static boolean_t
vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low,
    vm_paddr_t high)
{
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_paddr_t pa;
	vm_page_t m, m_tmp, next;
	int lockmode;

	vm_pagequeue_lock(pq);
	TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
		if ((m->flags & PG_MARKER) != 0)
			continue;
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + PAGE_SIZE > high)
			continue;
		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
			vm_page_unlock(m);
			continue;
		}
		object = m->object;
		if ((!VM_OBJECT_TRYWLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		    m->hold_count != 0)) || vm_page_busied(m)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			continue;
		}
		vm_page_test_dirty(m);
		if (m->dirty == 0 && object->ref_count != 0)
			pmap_remove_all(m);
		if (m->dirty != 0) {
			vm_page_unlock(m);
			if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
				VM_OBJECT_WUNLOCK(object);
				continue;
			}
			if (object->type == OBJT_VNODE) {
				vm_pagequeue_unlock(pq);
				vp = object->handle;
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				(void)vn_start_write(vp, &mp, V_WAIT);
				lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
				    LK_SHARED : LK_EXCLUSIVE;
				vn_lock(vp, lockmode | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				VM_OBJECT_WUNLOCK(object);
				VOP_UNLOCK(vp, 0);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				return (TRUE);
			} else if (object->type == OBJT_SWAP ||
			    object->type == OBJT_DEFAULT) {
				vm_pagequeue_unlock(pq);
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
				    0, NULL, NULL);
				VM_OBJECT_WUNLOCK(object);
				return (TRUE);
			}
		} else {
			/*
			 * Dequeue here to prevent lock recursion in
			 * vm_page_cache().
			 */
			vm_page_dequeue_locked(m);
			vm_page_cache(m);
			vm_page_unlock(m);
		}
		VM_OBJECT_WUNLOCK(object);
	}
	vm_pagequeue_unlock(pq);
	return (FALSE);
}

/*
 * Increase the number of cached pages.  The specified value, "tries",
 * determines which categories of pages are cached:
 *
 *	0: All clean, inactive pages within the specified physical address
 *	   range are cached.  Will not sleep.
 *	1: The vm_lowmem handlers are called.  All inactive pages within
 *	   the specified physical address range are cached.  May sleep.
 *	2: The vm_lowmem handlers are called.  All inactive and active pages
 *	   within the specified physical address range are cached.  May sleep.
 */
void
vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
{
	int actl, actmax, inactl, inactmax, dom, initial_dom;
	static int start_dom = 0;

	if (tries > 0) {
		/*
		 * Decrease registered cache sizes.  The vm_lowmem handlers
		 * may acquire locks and/or sleep, so they can only be invoked
		 * when "tries" is greater than zero.
		 */
		EVENTHANDLER_INVOKE(vm_lowmem, 0);

		/*
		 * We do this explicitly after the caches have been drained
		 * above.
		 */
		uma_reclaim();
	}

	/*
	 * Make the next scan start on the next domain.
	 */
	initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;

	inactl = 0;
	inactmax = cnt.v_inactive_count;
	actl = 0;
	actmax = tries < 2 ? 0 : cnt.v_active_count;
	dom = initial_dom;

	/*
	 * Scan domains in round-robin order, first inactive queues,
	 * then active.  Since a domain usually owns a large physically
	 * contiguous chunk of memory, it makes sense to completely
	 * exhaust one domain before switching to the next, while growing
	 * the pool of contiguous physical pages.
	 *
	 * Do not even start laundering a domain which cannot contain
	 * the specified address range, as indicated by the segments
	 * constituting the domain.
	 */
again:
	if (inactl < inactmax) {
		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
		    low, high) &&
		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_INACTIVE],
		    tries, low, high)) {
			inactl++;
			goto again;
		}
		if (++dom == vm_ndomains)
			dom = 0;
		if (dom != initial_dom)
			goto again;
	}
	if (actl < actmax) {
		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
		    low, high) &&
		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_ACTIVE],
		    tries, low, high)) {
			actl++;
			goto again;
		}
		if (++dom == vm_ndomains)
			dom = 0;
		if (dom != initial_dom)
			goto again;
	}
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * Deactivate enough pages to satisfy the inactive target
 * requirements.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int act_delta, remove_mode;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    object->paging_in_progress != 0)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (vm_page_busied(p))
				continue;
			PCPU_INC(cnt.v_pdpages);
			vm_page_lock(p);
			if (p->wire_count != 0 || p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			act_delta = pmap_ts_referenced(p);
			if ((p->aflags & PGA_REFERENCED) != 0) {
				if (act_delta == 0)
					act_delta = 1;
				vm_page_aflag_clear(p, PGA_REFERENCED);
			}
			if (p->queue != PQ_ACTIVE && act_delta != 0) {
				vm_page_activate(p);
				p->act_count += act_delta;
			} else if (p->queue == PQ_ACTIVE) {
				if (act_delta == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode && p->act_count == 0) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else
						vm_page_requeue(p);
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE)
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * First, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	pass 0 - Update active LRU/deactivate pages
 *	pass 1 - Move inactive to cache or free
 *	pass 2 - Launder dirty pages
 */
static void
vm_pageout_scan(struct vm_domain *vmd, int pass)
{
	vm_page_t m, next;
	struct vm_pagequeue *pq;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage;
	vm_object_t object;
	int act_delta;
	int vnodes_skipped = 0;
	int maxlaunder;
	int lockmode;
	boolean_t queues_locked;

	/*
	 * If we need to reclaim memory ask kernel caches to return
	 * some.  We rate limit to avoid thrashing.
	 */
	if (vmd == &vm_dom[0] && pass > 0 &&
	    lowmem_ticks + (lowmem_period * hz) < ticks) {
		/*
		 * Decrease registered cache sizes.
		 */
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
		/*
		 * We do this explicitly after the caches have been
		 * drained above.
		 */
		uma_reclaim();
		lowmem_ticks = ticks;
	}

	/*
	 * The addl_page_shortage is the number of temporarily
	 * stuck pages in the inactive queue.  In other words, the
	 * number of pages from the inactive count that should be
	 * discounted in setting the target for the active queue scan.
	 */
	addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage;

	/*
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass > 1)
		maxlaunder = 10000;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 */
	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
	maxscan = pq->pq_cnt;
	vm_pagequeue_lock(pq);
	queues_locked = TRUE;
	for (m = TAILQ_FIRST(&pq->pq_pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {
		vm_pagequeue_assert_locked(pq);
		KASSERT(queues_locked, ("unlocked queues"));
		KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));

		PCPU_INC(cnt.v_pdpages);
		next = TAILQ_NEXT(m, plinks.q);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("Fictitious page %p cannot be in inactive queue", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("Unmanaged page %p cannot be in inactive queue", m));

		/*
		 * The page or object lock acquisitions fail if the
		 * page was removed from the queue or moved to a
		 * different position within the queue.  In either
		 * case, addl_page_shortage should not be incremented.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYWLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			continue;
		}

		/*
		 * Don't mess with busy pages; keep them at the
		 * front of the queue, most likely they are being
		 * paged out.  Increment addl_page_shortage for busy
		 * pages, because they may leave the inactive queue
		 * shortly after the page scan is finished.
		 */
		if (vm_page_busied(m)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * We unlock the inactive page queue, invalidating the
		 * 'next' pointer.  Use our marker to remember our
		 * place.
		 */
		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
		vm_pagequeue_unlock(pq);
		queues_locked = FALSE;

		/*
		 * We bump the activation count if the page has been
		 * referenced while in the inactive queue.  This makes
		 * it less likely that the page will be added back to the
		 * inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		act_delta = 0;
		if ((m->aflags & PGA_REFERENCED) != 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta = 1;
		}
		if (object->ref_count != 0) {
			act_delta += pmap_ts_referenced(m);
		} else {
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we reactivate the page or requeue it.
		 */
		if (act_delta != 0) {
			if (object->ref_count) {
				vm_page_activate(m);
				m->act_count += act_delta + ACT_ADVANCE;
			} else {
				vm_pagequeue_lock(pq);
				queues_locked = TRUE;
				vm_page_requeue_locked(m);
			}
			VM_OBJECT_WUNLOCK(object);
			vm_page_unlock(m);
			goto relock_queues;
		}

		if (m->hold_count != 0) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);

			/*
			 * Held pages are essentially stuck in the
			 * queue.  So, they ought to be discounted
			 * from the inactive count.  See the
			 * calculation of the page_shortage for the
			 * loop over the active queue below.
			 */
			addl_page_shortage++;
			goto relock_queues;
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of placing it onto the cache queue.  If,
		 * however, any of the page's mappings allow write access,
		 * then the page may still be modified until the last of those
		 * mappings are removed.
		 */
		vm_page_test_dirty(m);
		if (m->dirty == 0 && object->ref_count != 0)
			pmap_remove_all(m);

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed.
			 */
			vm_page_free(m);
			PCPU_INC(cnt.v_dfree);
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass < 2) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			m->flags |= PG_WINATCFLS;
			vm_pagequeue_lock(pq);
			queues_locked = TRUE;
			vm_page_requeue_locked(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) &&
			    (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts ||
				    disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts &&
				    defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pagequeue_lock(pq);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(object);
				queues_locked = TRUE;
				vm_page_requeue_locked(m);
				goto relock_queues;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vm_page_unlock(m);
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
				    LK_SHARED : LK_EXCLUSIVE;
				if (vget(vp, lockmode | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_WLOCK(object);
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_WLOCK(object);
				vm_page_lock(m);
				vm_pagequeue_lock(pq);
				queues_locked = TRUE;
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, plinks.q) != &vmd->vmd_marker) {
					vm_page_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (vm_page_busied(m)) {
					vm_page_unlock(m);
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it.
				 */
				if (m->hold_count) {
					vm_page_unlock(m);
					vm_page_requeue_locked(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
				vm_pagequeue_unlock(pq);
				queues_locked = FALSE;
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * Decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_WUNLOCK(object);
			if (mp != NULL) {
				if (queues_locked) {
					vm_pagequeue_unlock(pq);
					queues_locked = FALSE;
				}
				if (vp != NULL)
					vput(vp);
				vm_object_deallocate(object);
				vn_finished_write(mp);
			}
			vm_page_lock_assert(m, MA_NOTOWNED);
			goto relock_queues;
		}
		vm_page_unlock(m);
		VM_OBJECT_WUNLOCK(object);
relock_queues:
		if (!queues_locked) {
			vm_pagequeue_lock(pq);
			queues_locked = TRUE;
		}
		next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
	}
	vm_pagequeue_unlock(pq);

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
	vm_pagequeue_lock(pq);
	pcount = pq->pq_cnt;
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;
	/*
	 * If we're just idle polling attempt to visit every
	 * active page within 'update_period' seconds.
	 */
	if (pass == 0 && vm_pageout_update_period != 0) {
		pcount /= vm_pageout_update_period;
		page_shortage = pcount;
	}

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	m = TAILQ_FIRST(&pq->pq_pl);
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, plinks.q);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("Fictitious page %p cannot be in active queue", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("Unmanaged page %p cannot be in active queue", m));
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		PCPU_INC(cnt.v_pdpages);

		/*
		 * Check to see "how much" the page has been used.
		 */
		act_delta = 0;
		if (m->aflags & PGA_REFERENCED) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta += 1;
		}
		/*
		 * Unlocked object ref count check.  Two races are possible.
		 * 1) The ref was transitioning to zero and we saw non-zero;
		 *    the pmap bits will be checked unnecessarily.
		 * 2) The ref was transitioning to one and we saw zero.
		 *    The page lock prevents a new reference to this page so
		 *    we need not check the reference bits.
		 */
		if (m->object->ref_count != 0)
			act_delta += pmap_ts_referenced(m);

		/*
		 * Advance or decay the act_count based on recent usage.
		 */
		if (act_delta) {
			m->act_count += ACT_ADVANCE + act_delta;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			act_delta = m->act_count;
		}

		/*
		 * Move this page to the tail of the active or inactive
		 * queue depending on usage.
		 */
		if (act_delta == 0) {
			/* Dequeue to avoid later lock recursion. */
			vm_page_dequeue_locked(m);
			vm_page_deactivate(m);
			page_shortage--;
		} else
			vm_page_requeue_locked(m);
		vm_page_unlock(m);
		m = next;
	}
	vm_pagequeue_unlock(pq);
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target())
			vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 */
	vm_pageout_mightbe_oom(vmd, pass);
}

static int vm_pageout_oom_vote;

/*
 * The pagedaemon threads randomly select one to perform the
 * OOM.  Trying to kill processes before all pagedaemons
 * have failed to reach the free target is premature.
 */
static void
vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
{
	int old_vote;

	if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0))) {
		if (vmd->vmd_oom) {
			vmd->vmd_oom = FALSE;
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
		}
		return;
	}

	if (vmd->vmd_oom)
		return;

	vmd->vmd_oom = TRUE;
	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
	if (old_vote != vm_ndomains - 1)
		return;

	/*
	 * The current pagedaemon thread is the last in the quorum to
	 * start OOM.  Initiate the selection and signaling of the
	 * victim.
	 */
	vm_pageout_oom(VM_OOM_MEM);

	/*
	 * After one round of OOM terror, recall our vote.  On the
	 * next pass, the current pagedaemon would vote again if the
	 * low memory condition is still there, due to vmd_oom being
	 * false.
	 */
	vmd->vmd_oom = FALSE;
	atomic_subtract_int(&vm_pageout_oom_vote, 1);
}

void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if (p->p_state != PRS_NORMAL ||
		    (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td) &&
			    !TD_IS_SUSPENDED(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Get the process size.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * If this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&cnt.v_free_count);
	}
}

static void
vm_pageout_worker(void *arg)
{
	struct vm_domain *domain;
	int domidx;

	domidx = (uintptr_t)arg;
	domain = &vm_dom[domidx];

	/*
	 * XXXKIB It could be useful to bind pageout daemon threads to
	 * the cores belonging to the domain, from which vm_page_array
	 * is allocated.
	 */

	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);

	/*
	 * The pageout daemon worker is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			if (domain->vmd_pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep until required to refresh
			 * stats.
			 */
			domain->vmd_pass = 0;
			msleep(&vm_pages_needed, &vm_page_queue_free_mtx,
			    PVM, "psleep", hz);

		}
		if (vm_pages_needed) {
			cnt.v_pdwakeups++;
			domain->vmd_pass++;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(domain, domain->vmd_pass);
	}
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
#if MAXMEMDOM > 1
	int error, i;
#endif

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/*
	 * Set the default wakeup threshold to be 10% above the minimum
	 * page limit.  This keeps the steady state out of shortfall.
	 */
	vm_pageout_wakeup_thresh = (cnt.v_free_min / 10) * 11;

	/*
	 * Set the interval in seconds for the active scan.
	 * We want to visit each page at least once every ten minutes.
	 * This is to prevent worst case paging behaviors with stale
	 * active LRU.
	 */
	if (vm_pageout_update_period == 0)
		vm_pageout_update_period = 600;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	swap_pager_swap_init();
#if MAXMEMDOM > 1
	for (i = 1; i < vm_ndomains; i++) {
		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
		    curproc, NULL, 0, 0, "dom%d", i);
		if (error != 0) {
			panic("starting pageout for domain %d, error %d\n",
			    i, error);
		}
	}
#endif
	vm_pageout_worker((uintptr_t)0);
}

/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup(void)
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
#ifdef RACCT
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", hz);
#else
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
#endif
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * Scan the processes for exceeding their rlimits, or if a
		 * process is swapped out -- deactivate pages.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * Get a limit.
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
#ifdef RACCT
			rsize = IDX_TO_OFF(size);
			PROC_LOCK(p);
			racct_set(p, RACCT_RSS, rsize);
			ravailable = racct_get_available(p, RACCT_RSS);
			PROC_UNLOCK(p);
			if (rsize > ravailable) {
				/*
				 * Don't be overly aggressive; this might be
				 * an innocent process, and the limit could've
				 * been exceeded by some memory hog.  Don't
				 * try to deactivate more than 1/4th of the
				 * process' resident set size.
				 */
				if (attempts <= 8) {
					if (ravailable < rsize - (rsize / 4))
						ravailable = rsize - (rsize / 4);
				}
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, OFF_TO_IDX(ravailable));
				/* Update RSS usage after paging out. */
				size = vmspace_resident_count(vm);
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				racct_set(p, RACCT_RSS, rsize);
				PROC_UNLOCK(p);
				if (rsize > ravailable)
					tryagain = 1;
			}
#endif
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10)
			goto again;
	}
}
#endif			/* !defined(NO_SWAPPING) */