/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(struct vm_domain *vmd, int pass);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif

int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
int vm_pageout_wakeup_thresh;

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_update_period;
static int defer_swap_pageouts;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static int lowmem_ticks;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
    CTLFLAG_RW, &vm_pageout_wakeup_thresh, 0,
    "free page threshold for waking up the pageout daemon");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
    CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
    CTLFLAG_RW, &vm_pageout_update_period, 0,
    "Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
    "Low memory callback period");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
    CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
    CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
    CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
    CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
    CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
    CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
    CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
    CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
static boolean_t vm_pageout_launder(struct vm_pagequeue *pq, int, vm_paddr_t,
    vm_paddr_t);
#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);

/*
 * Initialize a dummy page for marking the caller's place in the specified
 * paging queue.  In principle, this function only needs to set the flag
 * PG_MARKER.  Nonetheless, it write busies the page and initializes the
 * hold count to one as safety precautions.
 */
static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
	marker->queue = queue;
	marker->hold_count = 1;
}
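
/*
 * Note on the marker technique used below: vm_pageout_fallback_object_lock()
 * and vm_pageout_page_lock() insert a marker immediately after the page of
 * interest, drop and reacquire the contended locks, and then compare the
 * page's queue, object, and queue neighbor against the remembered values.
 * Because the marker has PG_MARKER set, every queue scan in this file skips
 * over it, so a stalled scan position is never mistaken for a real page.
 */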

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'.  VM_OBJECT_TRYWLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
static boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	struct vm_pagequeue *pq;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	pq = vm_page_pagequeue(m);
	object = m->object;

	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
	vm_pagequeue_unlock(pq);
	vm_page_unlock(m);
	VM_OBJECT_WLOCK(object);
	vm_page_lock(m);
	vm_pagequeue_lock(pq);

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, plinks.q);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, plinks.q));
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use marker page
 * to detect page queue changes and maintain notion of next page on
 * page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
static boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	struct vm_pagequeue *pq;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	pq = vm_page_pagequeue(m);

	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
	vm_pagequeue_unlock(pq);
	vm_page_lock(m);
	vm_pagequeue_lock(pq);

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, plinks.q);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, plinks.q));
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
	return (unchanged);
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2 * vm_pageout_page_count], pb, ps;
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	vm_page_assert_unbusied(m);
	KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;
		/*
		 * We are at an alignment boundary.  Stop here and switch
		 * directions.  Do not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
			break;
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
	    NULL));
}
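
/*
 * Illustration of the cluster array above (with the default
 * vm_pageout_page_count of 16): mc[] has 32 slots and the target page is
 * first placed at mc[16].  The backward scan fills mc[15], mc[14], ...,
 * while the forward scan fills mc[17], mc[18], ..., so the flush call
 * submits the pindex-contiguous run mc[page_base .. page_base +
 * pageout_count - 1] in ascending order.
 */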

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 *	for any page in runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_sbusy(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
			if (vm_page_count_severe()) {
				vm_page_lock(mt);
				vm_page_try_to_cache(mt);
				vm_page_unlock(mt);
			}
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}
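
/*
 * Within this file, vm_pageout_clean() and vm_pageout_launder() both pass
 * mreq == 0 and NULL for prunlen and eio; the run-length and error
 * reporting above exists for external callers that need to know whether
 * the requested page and its neighbors actually made it to the pager.
 */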

static boolean_t
vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low,
    vm_paddr_t high)
{
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_paddr_t pa;
	vm_page_t m, m_tmp, next;
	int lockmode;

	vm_pagequeue_lock(pq);
	TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
		if ((m->flags & PG_MARKER) != 0)
			continue;
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + PAGE_SIZE > high)
			continue;
		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
			vm_page_unlock(m);
			continue;
		}
		object = m->object;
		if ((!VM_OBJECT_TRYWLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		    m->hold_count != 0)) || vm_page_busied(m)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			continue;
		}
		vm_page_test_dirty(m);
		if (m->dirty == 0 && object->ref_count != 0)
			pmap_remove_all(m);
		if (m->dirty != 0) {
			vm_page_unlock(m);
			if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
				VM_OBJECT_WUNLOCK(object);
				continue;
			}
			if (object->type == OBJT_VNODE) {
				vm_pagequeue_unlock(pq);
				vp = object->handle;
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				(void)vn_start_write(vp, &mp, V_WAIT);
				lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
				    LK_SHARED : LK_EXCLUSIVE;
				vn_lock(vp, lockmode | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				VM_OBJECT_WUNLOCK(object);
				VOP_UNLOCK(vp, 0);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				return (TRUE);
			} else if (object->type == OBJT_SWAP ||
			    object->type == OBJT_DEFAULT) {
				vm_pagequeue_unlock(pq);
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
				    0, NULL, NULL);
				VM_OBJECT_WUNLOCK(object);
				return (TRUE);
			}
		} else {
			/*
			 * Dequeue here to prevent lock recursion in
			 * vm_page_cache().
			 */
			vm_page_dequeue_locked(m);
			vm_page_cache(m);
			vm_page_unlock(m);
		}
		VM_OBJECT_WUNLOCK(object);
	}
	vm_pagequeue_unlock(pq);
	return (FALSE);
}

/*
 * Increase the number of cached pages.  The specified value, "tries",
 * determines which categories of pages are cached:
 *
 *  0: All clean, inactive pages within the specified physical address range
 *     are cached.  Will not sleep.
 *  1: The vm_lowmem handlers are called.  All inactive pages within
 *     the specified physical address range are cached.  May sleep.
 *  2: The vm_lowmem handlers are called.  All inactive and active pages
 *     within the specified physical address range are cached.  May sleep.
 */
void
vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
{
	int actl, actmax, inactl, inactmax, dom, initial_dom;
	static int start_dom = 0;

	if (tries > 0) {
		/*
		 * Decrease registered cache sizes.  The vm_lowmem handlers
		 * may acquire locks and/or sleep, so they can only be invoked
		 * when "tries" is greater than zero.
		 */
		SDT_PROBE0(vm, , , vm__lowmem_cache);
		EVENTHANDLER_INVOKE(vm_lowmem, 0);

		/*
		 * We do this explicitly after the caches have been drained
		 * above.
		 */
		uma_reclaim();
	}

	/*
	 * Make the next scan start on the next domain.
	 */
	initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;

	inactl = 0;
	inactmax = vm_cnt.v_inactive_count;
	actl = 0;
	actmax = tries < 2 ? 0 : vm_cnt.v_active_count;
	dom = initial_dom;

	/*
	 * Scan domains in round-robin order, first inactive queues,
	 * then active.  Since a domain usually owns a large physically
	 * contiguous chunk of memory, it makes sense to completely
	 * exhaust one domain before switching to the next, while growing
	 * the pool of contiguous physical pages.
	 *
	 * Do not even start laundering a domain which cannot contain
	 * the specified address range, as indicated by the segments
	 * constituting the domain.
	 */
again:
	if (inactl < inactmax) {
		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
		    low, high) &&
		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_INACTIVE],
		    tries, low, high)) {
			inactl++;
			goto again;
		}
		if (++dom == vm_ndomains)
			dom = 0;
		if (dom != initial_dom)
			goto again;
	}
	if (actl < actmax) {
		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
		    low, high) &&
		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_ACTIVE],
		    tries, low, high)) {
			actl++;
			goto again;
		}
		if (++dom == vm_ndomains)
			dom = 0;
		if (dom != initial_dom)
			goto again;
	}
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * Deactivate enough pages to satisfy the inactive target
 * requirements.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int act_delta, remove_mode;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    object->paging_in_progress != 0)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (vm_page_busied(p))
				continue;
			PCPU_INC(cnt.v_pdpages);
			vm_page_lock(p);
			if (p->wire_count != 0 || p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			act_delta = pmap_ts_referenced(p);
			if ((p->aflags & PGA_REFERENCED) != 0) {
				if (act_delta == 0)
					act_delta = 1;
				vm_page_aflag_clear(p, PGA_REFERENCED);
			}
			if (p->queue != PQ_ACTIVE && act_delta != 0) {
				vm_page_activate(p);
				p->act_count += act_delta;
			} else if (p->queue == PQ_ACTIVE) {
				if (act_delta == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode && p->act_count == 0) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else
						vm_page_requeue(p);
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE)
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}
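
/*
 * The act_count arithmetic above mirrors the active queue scan in
 * vm_pageout_scan(): a referenced page gains ACT_ADVANCE (clamped below
 * ACT_MAX), an unreferenced one loses ACT_DECLINE, and only a page whose
 * count has decayed to zero is unmapped and deactivated.  The difference
 * here is that the walk is bounded by the pmap's resident count reaching
 * "desired" rather than by a page shortage.
 */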

/*
 * Deactivate some number of pages in a map.  Try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * First, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count <
				    obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap,
				    obj, desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	pass 0 - Update active LRU/deactivate pages
 *	pass 1 - Move inactive to cache or free
 *	pass 2 - Launder dirty pages
 */
static void
vm_pageout_scan(struct vm_domain *vmd, int pass)
{
	vm_page_t m, next;
	struct vm_pagequeue *pq;
	vm_object_t object;
	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
	int vnodes_skipped = 0;
	int maxlaunder;
	int lockmode;
	boolean_t queues_locked;

	/*
	 * If we need to reclaim memory, ask kernel caches to return
	 * some.  We rate limit to avoid thrashing.
	 */
	if (vmd == &vm_dom[0] && pass > 0 &&
	    (ticks - lowmem_ticks) / hz >= lowmem_period) {
		/*
		 * Decrease registered cache sizes.
		 */
		SDT_PROBE0(vm, , , vm__lowmem_scan);
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
		/*
		 * We do this explicitly after the caches have been
		 * drained above.
		 */
		uma_reclaim();
		lowmem_ticks = ticks;
	}

	/*
	 * The addl_page_shortage is the number of temporarily
	 * stuck pages in the inactive queue.  In other words, the
	 * number of pages from the inactive count that should be
	 * discounted in setting the target for the active queue scan.
	 */
	addl_page_shortage = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	if (pass > 0) {
		deficit = atomic_readandclear_int(&vm_pageout_deficit);
		page_shortage = vm_paging_target() + deficit;
	} else
		page_shortage = deficit = 0;

	/*
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass > 1)
		maxlaunder = 10000;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 */
	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
	maxscan = pq->pq_cnt;
	vm_pagequeue_lock(pq);
	queues_locked = TRUE;
	for (m = TAILQ_FIRST(&pq->pq_pl);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next) {
		vm_pagequeue_assert_locked(pq);
		KASSERT(queues_locked, ("unlocked queues"));
		KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));

		PCPU_INC(cnt.v_pdpages);
		next = TAILQ_NEXT(m, plinks.q);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("Fictitious page %p cannot be in inactive queue", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("Unmanaged page %p cannot be in inactive queue", m));

		/*
		 * The page or object lock acquisitions fail if the
		 * page was removed from the queue or moved to a
		 * different position within the queue.  In either
		 * case, addl_page_shortage should not be incremented.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYWLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			continue;
		}

		/*
		 * Don't mess with busy pages, keep them at the
		 * front of the queue; most likely they are being
		 * paged out.  Increment addl_page_shortage for busy
		 * pages, because they may leave the inactive queue
		 * shortly after the page scan is finished.
		 */
		if (vm_page_busied(m)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * We unlock the inactive page queue, invalidating the
		 * 'next' pointer.  Use our marker to remember our
		 * place.
		 */
		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
		vm_pagequeue_unlock(pq);
		queues_locked = FALSE;

		/*
		 * We bump the activation count if the page has been
		 * referenced while in the inactive queue.  This makes
		 * it less likely that the page will be added back to the
		 * inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), because the
		 * upper level VM system does not know anything about
		 * existing references.
		 */
		if ((m->aflags & PGA_REFERENCED) != 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta = 1;
		} else
			act_delta = 0;
		if (object->ref_count != 0) {
			act_delta += pmap_ts_referenced(m);
		} else {
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we reactivate the page or requeue it.
		 */
		if (act_delta != 0) {
			if (object->ref_count != 0) {
				vm_page_activate(m);
				m->act_count += act_delta + ACT_ADVANCE;
			} else {
				vm_pagequeue_lock(pq);
				queues_locked = TRUE;
				vm_page_requeue_locked(m);
			}
			VM_OBJECT_WUNLOCK(object);
			vm_page_unlock(m);
			goto relock_queues;
		}

		if (m->hold_count != 0) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);

			/*
			 * Held pages are essentially stuck in the
			 * queue.  So, they ought to be discounted
			 * from the inactive count.  See the
			 * calculation of the page_shortage for the
			 * loop over the active queue below.
			 */
			addl_page_shortage++;
			goto relock_queues;
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of placing it onto the cache queue.  If,
		 * however, any of the page's mappings allow write access,
		 * then the page may still be modified until the last of those
		 * mappings are removed.
		 */
		vm_page_test_dirty(m);
		if (m->dirty == 0 && object->ref_count != 0)
			pmap_remove_all(m);

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed.
			 */
			vm_page_free(m);
			PCPU_INC(cnt.v_dfree);
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass < 2) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			m->flags |= PG_WINATCFLS;
			vm_pagequeue_lock(pq);
			queues_locked = TRUE;
			vm_page_requeue_locked(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) &&
			    (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts ||
				    disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts &&
				    defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pagequeue_lock(pq);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(object);
				queues_locked = TRUE;
				vm_page_requeue_locked(m);
				goto relock_queues;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vm_page_unlock(m);
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
				    LK_SHARED : LK_EXCLUSIVE;
				if (vget(vp, lockmode | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_WLOCK(object);
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_WLOCK(object);
				vm_page_lock(m);
				vm_pagequeue_lock(pq);
				queues_locked = TRUE;
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, plinks.q) != &vmd->vmd_marker) {
					vm_page_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We do not move the
				 * page back onto the end of the queue;
				 * leaving it in place keeps the statistics
				 * more accurate.
				 */
				if (vm_page_busied(m)) {
					vm_page_unlock(m);
					addl_page_shortage++;
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it.
				 */
				if (m->hold_count != 0) {
					vm_page_unlock(m);
					addl_page_shortage++;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
				vm_pagequeue_unlock(pq);
				queues_locked = FALSE;
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * Decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_WUNLOCK(object);
			if (mp != NULL) {
				if (queues_locked) {
					vm_pagequeue_unlock(pq);
					queues_locked = FALSE;
				}
				if (vp != NULL)
					vput(vp);
				vm_object_deallocate(object);
				vn_finished_write(mp);
			}
			vm_page_lock_assert(m, MA_NOTOWNED);
			goto relock_queues;
		}
		vm_page_unlock(m);
		VM_OBJECT_WUNLOCK(object);
relock_queues:
		if (!queues_locked) {
			vm_pagequeue_lock(pq);
			queues_locked = TRUE;
		}
		next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
	}
	vm_pagequeue_unlock(pq);

#if !defined(NO_SWAPPING)
	/*
	 * Wakeup the swapout daemon if we didn't cache or free the targeted
	 * number of pages.
	 */
	if (vm_swap_enabled && page_shortage > 0)
		vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif

	/*
	 * Wakeup the sync daemon if we skipped a vnode in a writeable object
	 * and we didn't cache or free enough pages.
	 */
	if (vnodes_skipped > 0 && page_shortage > vm_cnt.v_free_target -
	    vm_cnt.v_free_min)
		(void)speedup_syncer();

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_cnt.v_inactive_target - vm_cnt.v_inactive_count +
	    vm_paging_target() + deficit + addl_page_shortage;

	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
	vm_pagequeue_lock(pq);
	maxscan = pq->pq_cnt;

	/*
	 * If we're just idle polling, attempt to visit every
	 * active page within 'update_period' seconds.
	 */
	if (pass == 0 && vm_pageout_update_period != 0) {
		maxscan /= vm_pageout_update_period;
		page_shortage = maxscan;
	}

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	m = TAILQ_FIRST(&pq->pq_pl);
	while (m != NULL && maxscan-- > 0 && page_shortage > 0) {

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, plinks.q);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("Fictitious page %p cannot be in active queue", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("Unmanaged page %p cannot be in active queue", m));
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		PCPU_INC(cnt.v_pdpages);

		/*
		 * Check to see "how much" the page has been used.
		 */
		if ((m->aflags & PGA_REFERENCED) != 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta = 1;
		} else
			act_delta = 0;

		/*
		 * Unlocked object ref count check.  Two races are possible.
		 * 1) The ref was transitioning to zero and we saw non-zero,
		 *    the pmap bits will be checked unnecessarily.
		 * 2) The ref was transitioning to one and we saw zero.
		 *    The page lock prevents a new reference to this page so
		 *    we need not check the reference bits.
		 */
		if (m->object->ref_count != 0)
			act_delta += pmap_ts_referenced(m);

		/*
		 * Advance or decay the act_count based on recent usage.
		 */
		if (act_delta != 0) {
			m->act_count += ACT_ADVANCE + act_delta;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
		} else
			m->act_count -= min(m->act_count, ACT_DECLINE);

		/*
		 * Move this page to the tail of the active or inactive
		 * queue depending on usage.
		 */
		if (m->act_count == 0) {
			/* Dequeue to avoid later lock recursion. */
			vm_page_dequeue_locked(m);
			vm_page_deactivate(m);
			page_shortage--;
		} else
			vm_page_requeue_locked(m);
		vm_page_unlock(m);
		m = next;
	}
	vm_pagequeue_unlock(pq);
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;

		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 */
	vm_pageout_mightbe_oom(vmd, pass);
}

static int vm_pageout_oom_vote;

/*
 * The pagedaemon threads randomly select one to perform the
 * OOM.  Trying to kill processes before all pagedaemons have
 * failed to reach the free page target would be premature.
 */
static void
vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
{
	int old_vote;

	if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0))) {
		if (vmd->vmd_oom) {
			vmd->vmd_oom = FALSE;
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
		}
		return;
	}

	if (vmd->vmd_oom)
		return;

	vmd->vmd_oom = TRUE;
	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
	if (old_vote != vm_ndomains - 1)
		return;

	/*
	 * The current pagedaemon thread is the last in the quorum to
	 * start OOM.  Initiate the selection and signaling of the
	 * victim.
	 */
	vm_pageout_oom(VM_OOM_MEM);

	/*
	 * After one round of OOM terror, recall our vote.  On the
	 * next pass, the current pagedaemon will vote again if the
	 * low memory condition persists, since vmd_oom is now false.
	 */
	vmd->vmd_oom = FALSE;
	atomic_subtract_int(&vm_pageout_oom_vote, 1);
}
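
/*
 * The vote counter above effectively implements a barrier across the
 * per-domain pagedaemon threads: each thread that observes a severe
 * shortage registers one vote, and only the last voter (old_vote ==
 * vm_ndomains - 1) actually calls vm_pageout_oom().  A shortage that is
 * confined to a single domain therefore never kills a process on its own.
 */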

void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if (p->p_state != PRS_NORMAL ||
		    (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td) &&
			    !TD_IS_SUSPENDED(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Get the process size.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * If this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&vm_cnt.v_free_count);
	}
}

static void
vm_pageout_worker(void *arg)
{
	struct vm_domain *domain;
	int domidx;

	domidx = (uintptr_t)arg;
	domain = &vm_dom[domidx];

	/*
	 * XXXKIB It could be useful to bind pageout daemon threads to
	 * the cores belonging to the domain, from which vm_page_array
	 * is allocated.
	 */

	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);

	/*
	 * The pageout daemon worker is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&vm_cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			if (domain->vmd_pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep until required to refresh
			 * stats.
			 */
			domain->vmd_pass = 0;
			msleep(&vm_pages_needed, &vm_page_queue_free_mtx,
			    PVM, "psleep", hz);
		}
		if (vm_pages_needed) {
			vm_cnt.v_pdwakeups++;
			domain->vmd_pass++;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(domain, domain->vmd_pass);
	}
}
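
/*
 * The vmd_pass value maintained above selects the behavior of
 * vm_pageout_scan(): pass 0 only updates the active LRU, pass 1 frees or
 * caches inactive pages with laundering capped at vm_max_launder, and
 * pass 2 and beyond launder dirty pages essentially without limit (see
 * the maxlaunder logic in vm_pageout_scan()).
 */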

/*
 * vm_pageout_init initialises basic pageout daemon settings.
 */
static void
vm_pageout_init(void)
{
	/*
	 * Initialize some paging parameters.
	 */
	vm_cnt.v_interrupt_free_min = 2;
	if (vm_cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (vm_cnt.v_page_count > 1024)
		vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
	else
		vm_cnt.v_free_min = 4;
	vm_cnt.v_pageout_free_min = (2 * MAXBSIZE) / PAGE_SIZE +
	    vm_cnt.v_interrupt_free_min;
	vm_cnt.v_free_reserved = vm_pageout_page_count +
	    vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
	vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
	vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
	vm_cnt.v_free_min += vm_cnt.v_free_reserved;
	vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
	vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
	if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
		vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;

	/*
	 * Set the default wakeup threshold to be 10% above the minimum
	 * page limit.  This keeps the steady state out of shortfall.
	 */
	vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;

	/*
	 * Set interval in seconds for active scan.  We want to visit each
	 * page at least once every ten minutes.  This is to prevent worst
	 * case paging behaviors with stale active LRU.
	 */
	if (vm_pageout_update_period == 0)
		vm_pageout_update_period = 600;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = vm_cnt.v_free_count / 3;
}
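
/*
 * Worked example of the thresholds above, for a hypothetical machine with
 * v_page_count = 262144 (1GB of 4KB pages) and MAXBSIZE = 64KB:
 *
 *	v_free_min               = 4 + (262144 - 1024) / 200  = 1309
 *	v_pageout_free_min       = (2 * 65536) / 4096 + 2     = 34
 *	v_free_reserved          = 16 + 34 + 262144 / 768     = 391
 *	v_free_severe            = 1309 / 2 + 391             = 1045
 *	v_free_target            = 4 * 1309 + 391             = 5627
 *	v_free_min (final)       = 1309 + 391                 = 1700
 *	vm_pageout_wakeup_thresh = (1700 / 10) * 11           = 1870
 *
 * The numbers are illustrative only; the actual values depend on the
 * machine's page count and tunables.
 */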

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
#if MAXMEMDOM > 1
	int error, i;
#endif

	swap_pager_swap_init();
#if MAXMEMDOM > 1
	for (i = 1; i < vm_ndomains; i++) {
		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
		    curproc, NULL, 0, 0, "dom%d", i);
		if (error != 0) {
			panic("starting pageout for domain %d, error %d\n",
			    i, error);
		}
	}
#endif
	vm_pageout_worker((void *)(uintptr_t)0);
}

/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &vm_cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup(void)
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
#ifdef RACCT
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", hz);
#else
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
#endif
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * Scan the processes for exceeding their rlimits; if a
		 * process is swapped out, deactivate its pages.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * Get a limit.
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
#ifdef RACCT
			rsize = IDX_TO_OFF(size);
			PROC_LOCK(p);
			racct_set(p, RACCT_RSS, rsize);
			ravailable = racct_get_available(p, RACCT_RSS);
			PROC_UNLOCK(p);
			if (rsize > ravailable) {
				/*
				 * Don't be overly aggressive; this might be
				 * an innocent process, and the limit could've
				 * been exceeded by some memory hog.  Don't
				 * try to deactivate more than 1/4th of the
				 * process' resident set size.
				 */
				if (attempts <= 8) {
					if (ravailable < rsize - (rsize / 4))
						ravailable = rsize - (rsize / 4);
				}
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, OFF_TO_IDX(ravailable));
				/* Update RSS usage after paging out. */
				size = vmspace_resident_count(vm);
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				racct_set(p, RACCT_RSS, rsize);
				PROC_UNLOCK(p);
				if (rsize > ravailable)
					tryagain = 1;
			}
#endif
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10)
			goto again;
	}
}
#endif			/* !defined(NO_SWAPPING) */