1 /*- 2 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * Copyright (c) 2005 Yahoo! Technologies Norway AS 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * The Mach Operating System project at Carnegie-Mellon University. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. All advertising materials mentioning features or use of this software 25 * must display the following acknowledgement: 26 * This product includes software developed by the University of 27 * California, Berkeley and its contributors. 28 * 4. Neither the name of the University nor the names of its contributors 29 * may be used to endorse or promote products derived from this software 30 * without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 42 * SUCH DAMAGE. 43 * 44 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 45 * 46 * 47 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 48 * All rights reserved. 49 * 50 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 51 * 52 * Permission to use, copy, modify and distribute this software and 53 * its documentation is hereby granted, provided that both the copyright 54 * notice and this permission notice appear in all copies of the 55 * software, derivative works or modified versions, and any portions 56 * thereof, and that both notices appear in supporting documentation. 57 * 58 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 59 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 60 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 61 * 62 * Carnegie Mellon requests users of this software to return to 63 * 64 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 65 * School of Computer Science 66 * Carnegie Mellon University 67 * Pittsburgh PA 15213-3890 68 * 69 * any improvements or extensions that they make and grant Carnegie the 70 * rights to redistribute these changes. 71 */ 72 73 /* 74 * The proverbial page-out daemon. 
75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_vm.h" 81 82 #include <sys/param.h> 83 #include <sys/systm.h> 84 #include <sys/kernel.h> 85 #include <sys/eventhandler.h> 86 #include <sys/lock.h> 87 #include <sys/mutex.h> 88 #include <sys/proc.h> 89 #include <sys/kthread.h> 90 #include <sys/ktr.h> 91 #include <sys/mount.h> 92 #include <sys/racct.h> 93 #include <sys/resourcevar.h> 94 #include <sys/sched.h> 95 #include <sys/sdt.h> 96 #include <sys/signalvar.h> 97 #include <sys/smp.h> 98 #include <sys/time.h> 99 #include <sys/vnode.h> 100 #include <sys/vmmeter.h> 101 #include <sys/rwlock.h> 102 #include <sys/sx.h> 103 #include <sys/sysctl.h> 104 105 #include <vm/vm.h> 106 #include <vm/vm_param.h> 107 #include <vm/vm_object.h> 108 #include <vm/vm_page.h> 109 #include <vm/vm_map.h> 110 #include <vm/vm_pageout.h> 111 #include <vm/vm_pager.h> 112 #include <vm/vm_phys.h> 113 #include <vm/swap_pager.h> 114 #include <vm/vm_extern.h> 115 #include <vm/uma.h> 116 117 /* 118 * System initialization 119 */ 120 121 /* the kernel process "vm_pageout"*/ 122 static void vm_pageout(void); 123 static void vm_pageout_init(void); 124 static int vm_pageout_clean(vm_page_t m, int *numpagedout); 125 static int vm_pageout_cluster(vm_page_t m); 126 static bool vm_pageout_scan(struct vm_domain *vmd, int pass); 127 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 128 int starting_page_shortage); 129 130 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init, 131 NULL); 132 133 struct proc *pageproc; 134 135 static struct kproc_desc page_kp = { 136 "pagedaemon", 137 vm_pageout, 138 &pageproc 139 }; 140 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, 141 &page_kp); 142 143 SDT_PROVIDER_DEFINE(vm); 144 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan); 145 146 /* Pagedaemon activity rates, in subdivisions of one second. */ 147 #define VM_LAUNDER_RATE 10 148 #define VM_INACT_SCAN_RATE 2 149 150 int vm_pageout_deficit; /* Estimated number of pages deficit */ 151 u_int vm_pageout_wakeup_thresh; 152 static int vm_pageout_oom_seq = 12; 153 bool vm_pageout_wanted; /* Event on which pageout daemon sleeps */ 154 bool vm_pages_needed; /* Are threads waiting for free pages? */ 155 156 /* Pending request for dirty page laundering. 
*/ 157 static enum { 158 VM_LAUNDRY_IDLE, 159 VM_LAUNDRY_BACKGROUND, 160 VM_LAUNDRY_SHORTFALL 161 } vm_laundry_request = VM_LAUNDRY_IDLE; 162 163 static int vm_pageout_update_period; 164 static int disable_swap_pageouts; 165 static int lowmem_period = 10; 166 static time_t lowmem_uptime; 167 static int swapdev_enabled; 168 169 static int vm_panic_on_oom = 0; 170 171 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom, 172 CTLFLAG_RWTUN, &vm_panic_on_oom, 0, 173 "panic on out of memory instead of killing the largest process"); 174 175 SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh, 176 CTLFLAG_RWTUN, &vm_pageout_wakeup_thresh, 0, 177 "free page threshold for waking up the pageout daemon"); 178 179 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period, 180 CTLFLAG_RWTUN, &vm_pageout_update_period, 0, 181 "Maximum active LRU update period"); 182 183 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0, 184 "Low memory callback period"); 185 186 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts, 187 CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages"); 188 189 static int pageout_lock_miss; 190 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, 191 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout"); 192 193 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq, 194 CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0, 195 "back-to-back calls to oom detector to start OOM"); 196 197 static int act_scan_laundry_weight = 3; 198 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN, 199 &act_scan_laundry_weight, 0, 200 "weight given to clean vs. dirty pages in active queue scans"); 201 202 static u_int vm_background_launder_target; 203 SYSCTL_UINT(_vm, OID_AUTO, background_launder_target, CTLFLAG_RWTUN, 204 &vm_background_launder_target, 0, 205 "background laundering target, in pages"); 206 207 static u_int vm_background_launder_rate = 4096; 208 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN, 209 &vm_background_launder_rate, 0, 210 "background laundering rate, in kilobytes per second"); 211 212 static u_int vm_background_launder_max = 20 * 1024; 213 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN, 214 &vm_background_launder_max, 0, "background laundering cap, in kilobytes"); 215 216 int vm_pageout_page_count = 32; 217 218 int vm_page_max_wired; /* XXX max # of wired pages system-wide */ 219 SYSCTL_INT(_vm, OID_AUTO, max_wired, 220 CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count"); 221 222 static u_int isqrt(u_int num); 223 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *); 224 static int vm_pageout_launder(struct vm_domain *vmd, int launder, 225 bool in_shortfall); 226 static void vm_pageout_laundry_worker(void *arg); 227 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *); 228 229 /* 230 * Initialize a dummy page for marking the caller's place in the specified 231 * paging queue. In principle, this function only needs to set the flag 232 * PG_MARKER. Nonetheless, it write busies and initializes the hold count 233 * to one as safety precautions. 234 */ 235 static void 236 vm_pageout_init_marker(vm_page_t marker, u_short queue) 237 { 238 239 bzero(marker, sizeof(*marker)); 240 marker->flags = PG_MARKER; 241 marker->busy_lock = VPB_SINGLE_EXCLUSIVER; 242 marker->queue = queue; 243 marker->hold_count = 1; 244 } 245 246 /* 247 * vm_pageout_fallback_object_lock: 248 * 249 * Lock vm object currently associated with `m'. 
VM_OBJECT_TRYWLOCK is 250 * known to have failed and page queue must be either PQ_ACTIVE or 251 * PQ_INACTIVE. To avoid lock order violation, unlock the page queue 252 * while locking the vm object. Use marker page to detect page queue 253 * changes and maintain notion of next page on page queue. Return 254 * TRUE if no changes were detected, FALSE otherwise. vm object is 255 * locked on return. 256 * 257 * This function depends on both the lock portion of struct vm_object 258 * and normal struct vm_page being type stable. 259 */ 260 static boolean_t 261 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next) 262 { 263 struct vm_page marker; 264 struct vm_pagequeue *pq; 265 boolean_t unchanged; 266 u_short queue; 267 vm_object_t object; 268 269 queue = m->queue; 270 vm_pageout_init_marker(&marker, queue); 271 pq = vm_page_pagequeue(m); 272 object = m->object; 273 274 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q); 275 vm_pagequeue_unlock(pq); 276 vm_page_unlock(m); 277 VM_OBJECT_WLOCK(object); 278 vm_page_lock(m); 279 vm_pagequeue_lock(pq); 280 281 /* 282 * The page's object might have changed, and/or the page might 283 * have moved from its original position in the queue. If the 284 * page's object has changed, then the caller should abandon 285 * processing the page because the wrong object lock was 286 * acquired. Use the marker's plinks.q, not the page's, to 287 * determine if the page has been moved. The state of the 288 * page's plinks.q can be indeterminate; whereas, the marker's 289 * plinks.q must be valid. 290 */ 291 *next = TAILQ_NEXT(&marker, plinks.q); 292 unchanged = m->object == object && 293 m == TAILQ_PREV(&marker, pglist, plinks.q); 294 KASSERT(!unchanged || m->queue == queue, 295 ("page %p queue %d %d", m, queue, m->queue)); 296 TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q); 297 return (unchanged); 298 } 299 300 /* 301 * Lock the page while holding the page queue lock. Use marker page 302 * to detect page queue changes and maintain notion of next page on 303 * page queue. Return TRUE if no changes were detected, FALSE 304 * otherwise. The page is locked on return. The page queue lock might 305 * be dropped and reacquired. 306 * 307 * This function depends on normal struct vm_page being type stable. 308 */ 309 static boolean_t 310 vm_pageout_page_lock(vm_page_t m, vm_page_t *next) 311 { 312 struct vm_page marker; 313 struct vm_pagequeue *pq; 314 boolean_t unchanged; 315 u_short queue; 316 317 vm_page_lock_assert(m, MA_NOTOWNED); 318 if (vm_page_trylock(m)) 319 return (TRUE); 320 321 queue = m->queue; 322 vm_pageout_init_marker(&marker, queue); 323 pq = vm_page_pagequeue(m); 324 325 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q); 326 vm_pagequeue_unlock(pq); 327 vm_page_lock(m); 328 vm_pagequeue_lock(pq); 329 330 /* Page queue might have changed. */ 331 *next = TAILQ_NEXT(&marker, plinks.q); 332 unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q); 333 KASSERT(!unchanged || m->queue == queue, 334 ("page %p queue %d %d", m, queue, m->queue)); 335 TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q); 336 return (unchanged); 337 } 338 339 /* 340 * Scan for pages at adjacent offsets within the given page's object that are 341 * eligible for laundering, form a cluster of these pages and the given page, 342 * and launder that cluster. 
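 *
 * As a rough illustration of the scan order (a hedged, userspace-only
 * sketch, not part of the kernel build; CLUSTER, cluster_around() and the
 * dirty bitmap are hypothetical stand-ins for vm_pageout_page_count and the
 * real page/object state): the reverse scan runs first so that the cluster
 * tends to begin on an aligned boundary, and the forward scan then fills
 * whatever room remains.
 *
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *	#include <stdio.h>
 *
 *	#define	CLUSTER	8	// stands in for vm_pageout_page_count
 *
 *	static void
 *	cluster_around(const bool *dirty, size_t npages, size_t pindex)
 *	{
 *		size_t lo, hi;
 *
 *		// Reverse scan: stop at a clean page, the cluster size
 *		// limit, or an alignment boundary.
 *		lo = pindex;
 *		while (lo > 0 && dirty[lo - 1] && pindex - lo + 1 < CLUSTER) {
 *			lo--;
 *			if (lo % CLUSTER == 0)
 *				break;
 *		}
 *		// Forward scan: fill whatever room remains in the cluster.
 *		hi = pindex;
 *		while (hi + 1 < npages && dirty[hi + 1] &&
 *		    hi - lo + 1 < CLUSTER)
 *			hi++;
 *		printf("would flush pages [%zu, %zu]\n", lo, hi);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		bool dirty[32];
 *
 *		for (size_t i = 0; i < 32; i++)
 *			dirty[i] = (i >= 5 && i <= 20);
 *		cluster_around(dirty, 32, 13);	// prints [8, 15]
 *		return (0);
 *	}
 *
 * Unlike this sketch, the code below also resumes the reverse scan past the
 * alignment boundary when the forward scan leaves room in the cluster.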
 */
static int
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
	vm_pindex_t pindex;
	int ib, is, page_base, pageout_count;

	vm_page_assert_locked(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	pindex = m->pindex;

	/*
	 * We can't clean the page if it is busy or held.
	 */
	vm_page_assert_unbusied(m);
	KASSERT(m->hold_count == 0, ("page %p is held", m));

	pmap_remove_write(m);
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * We can cluster only if the page is not clean, busy, or held, and
	 * the page is in the laundry queue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib != 0 && pageout_count < vm_pageout_page_count) {
		if (ib > pindex) {
			ib = 0;
			break;
		}
		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		if (!vm_page_in_laundry(p) ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		pmap_remove_write(p);
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;

		/*
		 * We are at an alignment boundary.  Stop here, and switch
		 * directions.  Do not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}
	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
			break;
		vm_page_test_dirty(p);
		if (p->dirty == 0)
			break;
		vm_page_lock(p);
		if (!vm_page_in_laundry(p) ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		pmap_remove_write(p);
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
	if (ib != 0 && pageout_count < vm_pageout_page_count)
		goto more;

	return (vm_pageout_flush(&mc[page_base], pageout_count,
	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 *	for any page in runlen set.
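 *
 *	For example (illustrative numbers): with count = 5, mreq = 0 and
 *	per-page statuses { OK, PEND, AGAIN, OK, ERROR } against a vnode
 *	object, runlen starts at 5 and is cut to 2 by the VM_PAGER_AGAIN at
 *	index 2; the VM_PAGER_ERROR at index 4 falls outside that run, so
 *	*eio stays FALSE, and the function returns 3 for the OK, PEND and
 *	OK pages.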
464 */ 465 int 466 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen, 467 boolean_t *eio) 468 { 469 vm_object_t object = mc[0]->object; 470 int pageout_status[count]; 471 int numpagedout = 0; 472 int i, runlen; 473 474 VM_OBJECT_ASSERT_WLOCKED(object); 475 476 /* 477 * Initiate I/O. Mark the pages busy and verify that they're valid 478 * and read-only. 479 * 480 * We do not have to fixup the clean/dirty bits here... we can 481 * allow the pager to do it after the I/O completes. 482 * 483 * NOTE! mc[i]->dirty may be partial or fragmented due to an 484 * edge case with file fragments. 485 */ 486 for (i = 0; i < count; i++) { 487 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, 488 ("vm_pageout_flush: partially invalid page %p index %d/%d", 489 mc[i], i, count)); 490 KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0, 491 ("vm_pageout_flush: writeable page %p", mc[i])); 492 vm_page_sbusy(mc[i]); 493 } 494 vm_object_pip_add(object, count); 495 496 vm_pager_put_pages(object, mc, count, flags, pageout_status); 497 498 runlen = count - mreq; 499 if (eio != NULL) 500 *eio = FALSE; 501 for (i = 0; i < count; i++) { 502 vm_page_t mt = mc[i]; 503 504 KASSERT(pageout_status[i] == VM_PAGER_PEND || 505 !pmap_page_is_write_mapped(mt), 506 ("vm_pageout_flush: page %p is not write protected", mt)); 507 switch (pageout_status[i]) { 508 case VM_PAGER_OK: 509 vm_page_lock(mt); 510 if (vm_page_in_laundry(mt)) 511 vm_page_deactivate_noreuse(mt); 512 vm_page_unlock(mt); 513 /* FALLTHROUGH */ 514 case VM_PAGER_PEND: 515 numpagedout++; 516 break; 517 case VM_PAGER_BAD: 518 /* 519 * The page is outside the object's range. We pretend 520 * that the page out worked and clean the page, so the 521 * changes will be lost if the page is reclaimed by 522 * the page daemon. 523 */ 524 vm_page_undirty(mt); 525 vm_page_lock(mt); 526 if (vm_page_in_laundry(mt)) 527 vm_page_deactivate_noreuse(mt); 528 vm_page_unlock(mt); 529 break; 530 case VM_PAGER_ERROR: 531 case VM_PAGER_FAIL: 532 /* 533 * If the page couldn't be paged out to swap because the 534 * pager wasn't able to find space, place the page in 535 * the PQ_UNSWAPPABLE holding queue. This is an 536 * optimization that prevents the page daemon from 537 * wasting CPU cycles on pages that cannot be reclaimed 538 * becase no swap device is configured. 539 * 540 * Otherwise, reactivate the page so that it doesn't 541 * clog the laundry and inactive queues. (We will try 542 * paging it out again later.) 543 */ 544 vm_page_lock(mt); 545 if (object->type == OBJT_SWAP && 546 pageout_status[i] == VM_PAGER_FAIL) { 547 vm_page_unswappable(mt); 548 numpagedout++; 549 } else 550 vm_page_activate(mt); 551 vm_page_unlock(mt); 552 if (eio != NULL && i >= mreq && i - mreq < runlen) 553 *eio = TRUE; 554 break; 555 case VM_PAGER_AGAIN: 556 if (i >= mreq && i - mreq < runlen) 557 runlen = i - mreq; 558 break; 559 } 560 561 /* 562 * If the operation is still going, leave the page busy to 563 * block all other accesses. Also, leave the paging in 564 * progress indicator set so that we don't attempt an object 565 * collapse. 
566 */ 567 if (pageout_status[i] != VM_PAGER_PEND) { 568 vm_object_pip_wakeup(object); 569 vm_page_sunbusy(mt); 570 } 571 } 572 if (prunlen != NULL) 573 *prunlen = runlen; 574 return (numpagedout); 575 } 576 577 static void 578 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused) 579 { 580 581 atomic_store_rel_int(&swapdev_enabled, 1); 582 } 583 584 static void 585 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused) 586 { 587 588 if (swap_pager_nswapdev() == 1) 589 atomic_store_rel_int(&swapdev_enabled, 0); 590 } 591 592 /* 593 * Attempt to acquire all of the necessary locks to launder a page and 594 * then call through the clustering layer to PUTPAGES. Wait a short 595 * time for a vnode lock. 596 * 597 * Requires the page and object lock on entry, releases both before return. 598 * Returns 0 on success and an errno otherwise. 599 */ 600 static int 601 vm_pageout_clean(vm_page_t m, int *numpagedout) 602 { 603 struct vnode *vp; 604 struct mount *mp; 605 vm_object_t object; 606 vm_pindex_t pindex; 607 int error, lockmode; 608 609 vm_page_assert_locked(m); 610 object = m->object; 611 VM_OBJECT_ASSERT_WLOCKED(object); 612 error = 0; 613 vp = NULL; 614 mp = NULL; 615 616 /* 617 * The object is already known NOT to be dead. It 618 * is possible for the vget() to block the whole 619 * pageout daemon, but the new low-memory handling 620 * code should prevent it. 621 * 622 * We can't wait forever for the vnode lock, we might 623 * deadlock due to a vn_read() getting stuck in 624 * vm_wait while holding this vnode. We skip the 625 * vnode if we can't get it in a reasonable amount 626 * of time. 627 */ 628 if (object->type == OBJT_VNODE) { 629 vm_page_unlock(m); 630 vp = object->handle; 631 if (vp->v_type == VREG && 632 vn_start_write(vp, &mp, V_NOWAIT) != 0) { 633 mp = NULL; 634 error = EDEADLK; 635 goto unlock_all; 636 } 637 KASSERT(mp != NULL, 638 ("vp %p with NULL v_mount", vp)); 639 vm_object_reference_locked(object); 640 pindex = m->pindex; 641 VM_OBJECT_WUNLOCK(object); 642 lockmode = MNT_SHARED_WRITES(vp->v_mount) ? 643 LK_SHARED : LK_EXCLUSIVE; 644 if (vget(vp, lockmode | LK_TIMELOCK, curthread)) { 645 vp = NULL; 646 error = EDEADLK; 647 goto unlock_mp; 648 } 649 VM_OBJECT_WLOCK(object); 650 651 /* 652 * Ensure that the object and vnode were not disassociated 653 * while locks were dropped. 654 */ 655 if (vp->v_object != object) { 656 error = ENOENT; 657 goto unlock_all; 658 } 659 vm_page_lock(m); 660 661 /* 662 * While the object and page were unlocked, the page 663 * may have been: 664 * (1) moved to a different queue, 665 * (2) reallocated to a different object, 666 * (3) reallocated to a different offset, or 667 * (4) cleaned. 668 */ 669 if (!vm_page_in_laundry(m) || m->object != object || 670 m->pindex != pindex || m->dirty == 0) { 671 vm_page_unlock(m); 672 error = ENXIO; 673 goto unlock_all; 674 } 675 676 /* 677 * The page may have been busied or held while the object 678 * and page locks were released. 679 */ 680 if (vm_page_busied(m) || m->hold_count != 0) { 681 vm_page_unlock(m); 682 error = EBUSY; 683 goto unlock_all; 684 } 685 } 686 687 /* 688 * If a page is dirty, then it is either being washed 689 * (but not yet cleaned) or it is still in the 690 * laundry. If it is still in the laundry, then we 691 * start the cleaning operation. 
692 */ 693 if ((*numpagedout = vm_pageout_cluster(m)) == 0) 694 error = EIO; 695 696 unlock_all: 697 VM_OBJECT_WUNLOCK(object); 698 699 unlock_mp: 700 vm_page_lock_assert(m, MA_NOTOWNED); 701 if (mp != NULL) { 702 if (vp != NULL) 703 vput(vp); 704 vm_object_deallocate(object); 705 vn_finished_write(mp); 706 } 707 708 return (error); 709 } 710 711 /* 712 * Attempt to launder the specified number of pages. 713 * 714 * Returns the number of pages successfully laundered. 715 */ 716 static int 717 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall) 718 { 719 struct vm_pagequeue *pq; 720 vm_object_t object; 721 vm_page_t m, next; 722 int act_delta, error, maxscan, numpagedout, starting_target; 723 int vnodes_skipped; 724 bool pageout_ok, queue_locked; 725 726 starting_target = launder; 727 vnodes_skipped = 0; 728 729 /* 730 * Scan the laundry queues for pages eligible to be laundered. We stop 731 * once the target number of dirty pages have been laundered, or once 732 * we've reached the end of the queue. A single iteration of this loop 733 * may cause more than one page to be laundered because of clustering. 734 * 735 * maxscan ensures that we don't re-examine requeued pages. Any 736 * additional pages written as part of a cluster are subtracted from 737 * maxscan since they must be taken from the laundry queue. 738 * 739 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no 740 * swap devices are configured. 741 */ 742 if (atomic_load_acq_int(&swapdev_enabled)) 743 pq = &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]; 744 else 745 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; 746 747 scan: 748 vm_pagequeue_lock(pq); 749 maxscan = pq->pq_cnt; 750 queue_locked = true; 751 for (m = TAILQ_FIRST(&pq->pq_pl); 752 m != NULL && maxscan-- > 0 && launder > 0; 753 m = next) { 754 vm_pagequeue_assert_locked(pq); 755 KASSERT(queue_locked, ("unlocked laundry queue")); 756 KASSERT(vm_page_in_laundry(m), 757 ("page %p has an inconsistent queue", m)); 758 next = TAILQ_NEXT(m, plinks.q); 759 if ((m->flags & PG_MARKER) != 0) 760 continue; 761 KASSERT((m->flags & PG_FICTITIOUS) == 0, 762 ("PG_FICTITIOUS page %p cannot be in laundry queue", m)); 763 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 764 ("VPO_UNMANAGED page %p cannot be in laundry queue", m)); 765 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) { 766 vm_page_unlock(m); 767 continue; 768 } 769 object = m->object; 770 if ((!VM_OBJECT_TRYWLOCK(object) && 771 (!vm_pageout_fallback_object_lock(m, &next) || 772 m->hold_count != 0)) || vm_page_busied(m)) { 773 VM_OBJECT_WUNLOCK(object); 774 vm_page_unlock(m); 775 continue; 776 } 777 778 /* 779 * Unlock the laundry queue, invalidating the 'next' pointer. 780 * Use a marker to remember our place in the laundry queue. 781 */ 782 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_laundry_marker, 783 plinks.q); 784 vm_pagequeue_unlock(pq); 785 queue_locked = false; 786 787 /* 788 * Invalid pages can be easily freed. They cannot be 789 * mapped; vm_page_free() asserts this. 790 */ 791 if (m->valid == 0) 792 goto free_page; 793 794 /* 795 * If the page has been referenced and the object is not dead, 796 * reactivate or requeue the page depending on whether the 797 * object is mapped. 
798 */ 799 if ((m->aflags & PGA_REFERENCED) != 0) { 800 vm_page_aflag_clear(m, PGA_REFERENCED); 801 act_delta = 1; 802 } else 803 act_delta = 0; 804 if (object->ref_count != 0) 805 act_delta += pmap_ts_referenced(m); 806 else { 807 KASSERT(!pmap_page_is_mapped(m), 808 ("page %p is mapped", m)); 809 } 810 if (act_delta != 0) { 811 if (object->ref_count != 0) { 812 VM_CNT_INC(v_reactivated); 813 vm_page_activate(m); 814 815 /* 816 * Increase the activation count if the page 817 * was referenced while in the laundry queue. 818 * This makes it less likely that the page will 819 * be returned prematurely to the inactive 820 * queue. 821 */ 822 m->act_count += act_delta + ACT_ADVANCE; 823 824 /* 825 * If this was a background laundering, count 826 * activated pages towards our target. The 827 * purpose of background laundering is to ensure 828 * that pages are eventually cycled through the 829 * laundry queue, and an activation is a valid 830 * way out. 831 */ 832 if (!in_shortfall) 833 launder--; 834 goto drop_page; 835 } else if ((object->flags & OBJ_DEAD) == 0) 836 goto requeue_page; 837 } 838 839 /* 840 * If the page appears to be clean at the machine-independent 841 * layer, then remove all of its mappings from the pmap in 842 * anticipation of freeing it. If, however, any of the page's 843 * mappings allow write access, then the page may still be 844 * modified until the last of those mappings are removed. 845 */ 846 if (object->ref_count != 0) { 847 vm_page_test_dirty(m); 848 if (m->dirty == 0) 849 pmap_remove_all(m); 850 } 851 852 /* 853 * Clean pages are freed, and dirty pages are paged out unless 854 * they belong to a dead object. Requeueing dirty pages from 855 * dead objects is pointless, as they are being paged out and 856 * freed by the thread that destroyed the object. 857 */ 858 if (m->dirty == 0) { 859 free_page: 860 vm_page_free(m); 861 VM_CNT_INC(v_dfree); 862 } else if ((object->flags & OBJ_DEAD) == 0) { 863 if (object->type != OBJT_SWAP && 864 object->type != OBJT_DEFAULT) 865 pageout_ok = true; 866 else if (disable_swap_pageouts) 867 pageout_ok = false; 868 else 869 pageout_ok = true; 870 if (!pageout_ok) { 871 requeue_page: 872 vm_pagequeue_lock(pq); 873 queue_locked = true; 874 vm_page_requeue_locked(m); 875 goto drop_page; 876 } 877 878 /* 879 * Form a cluster with adjacent, dirty pages from the 880 * same object, and page out that entire cluster. 881 * 882 * The adjacent, dirty pages must also be in the 883 * laundry. However, their mappings are not checked 884 * for new references. Consequently, a recently 885 * referenced page may be paged out. However, that 886 * page will not be prematurely reclaimed. After page 887 * out, the page will be placed in the inactive queue, 888 * where any new references will be detected and the 889 * page reactivated. 
890 */ 891 error = vm_pageout_clean(m, &numpagedout); 892 if (error == 0) { 893 launder -= numpagedout; 894 maxscan -= numpagedout - 1; 895 } else if (error == EDEADLK) { 896 pageout_lock_miss++; 897 vnodes_skipped++; 898 } 899 goto relock_queue; 900 } 901 drop_page: 902 vm_page_unlock(m); 903 VM_OBJECT_WUNLOCK(object); 904 relock_queue: 905 if (!queue_locked) { 906 vm_pagequeue_lock(pq); 907 queue_locked = true; 908 } 909 next = TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q); 910 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q); 911 } 912 vm_pagequeue_unlock(pq); 913 914 if (launder > 0 && pq == &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]) { 915 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; 916 goto scan; 917 } 918 919 /* 920 * Wakeup the sync daemon if we skipped a vnode in a writeable object 921 * and we didn't launder enough pages. 922 */ 923 if (vnodes_skipped > 0 && launder > 0) 924 (void)speedup_syncer(); 925 926 return (starting_target - launder); 927 } 928 929 /* 930 * Compute the integer square root. 931 */ 932 static u_int 933 isqrt(u_int num) 934 { 935 u_int bit, root, tmp; 936 937 bit = 1u << ((NBBY * sizeof(u_int)) - 2); 938 while (bit > num) 939 bit >>= 2; 940 root = 0; 941 while (bit != 0) { 942 tmp = root + bit; 943 root >>= 1; 944 if (num >= tmp) { 945 num -= tmp; 946 root += bit; 947 } 948 bit >>= 2; 949 } 950 return (root); 951 } 952 953 /* 954 * Perform the work of the laundry thread: periodically wake up and determine 955 * whether any pages need to be laundered. If so, determine the number of pages 956 * that need to be laundered, and launder them. 957 */ 958 static void 959 vm_pageout_laundry_worker(void *arg) 960 { 961 struct vm_domain *domain; 962 struct vm_pagequeue *pq; 963 uint64_t nclean, ndirty; 964 u_int last_launder, wakeups; 965 int domidx, last_target, launder, shortfall, shortfall_cycle, target; 966 bool in_shortfall; 967 968 domidx = (uintptr_t)arg; 969 domain = &vm_dom[domidx]; 970 pq = &domain->vmd_pagequeues[PQ_LAUNDRY]; 971 KASSERT(domain->vmd_segs != 0, ("domain without segments")); 972 vm_pageout_init_marker(&domain->vmd_laundry_marker, PQ_LAUNDRY); 973 974 shortfall = 0; 975 in_shortfall = false; 976 shortfall_cycle = 0; 977 target = 0; 978 last_launder = 0; 979 980 /* 981 * Calls to these handlers are serialized by the swap syscall lock. 982 */ 983 (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, domain, 984 EVENTHANDLER_PRI_ANY); 985 (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, domain, 986 EVENTHANDLER_PRI_ANY); 987 988 /* 989 * The pageout laundry worker is never done, so loop forever. 990 */ 991 for (;;) { 992 KASSERT(target >= 0, ("negative target %d", target)); 993 KASSERT(shortfall_cycle >= 0, 994 ("negative cycle %d", shortfall_cycle)); 995 launder = 0; 996 wakeups = VM_CNT_FETCH(v_pdwakeups); 997 998 /* 999 * First determine whether we need to launder pages to meet a 1000 * shortage of free pages. 1001 */ 1002 if (shortfall > 0) { 1003 in_shortfall = true; 1004 shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE; 1005 target = shortfall; 1006 } else if (!in_shortfall) 1007 goto trybackground; 1008 else if (shortfall_cycle == 0 || vm_laundry_target() <= 0) { 1009 /* 1010 * We recently entered shortfall and began laundering 1011 * pages. If we have completed that laundering run 1012 * (and we are no longer in shortfall) or we have met 1013 * our laundry target through other activity, then we 1014 * can stop laundering pages. 
1015 */ 1016 in_shortfall = false; 1017 target = 0; 1018 goto trybackground; 1019 } 1020 last_launder = wakeups; 1021 launder = target / shortfall_cycle--; 1022 goto dolaundry; 1023 1024 /* 1025 * There's no immediate need to launder any pages; see if we 1026 * meet the conditions to perform background laundering: 1027 * 1028 * 1. The ratio of dirty to clean inactive pages exceeds the 1029 * background laundering threshold and the pagedaemon has 1030 * been woken up to reclaim pages since our last 1031 * laundering, or 1032 * 2. we haven't yet reached the target of the current 1033 * background laundering run. 1034 * 1035 * The background laundering threshold is not a constant. 1036 * Instead, it is a slowly growing function of the number of 1037 * page daemon wakeups since the last laundering. Thus, as the 1038 * ratio of dirty to clean inactive pages grows, the amount of 1039 * memory pressure required to trigger laundering decreases. 1040 */ 1041 trybackground: 1042 nclean = vm_cnt.v_inactive_count + vm_cnt.v_free_count; 1043 ndirty = vm_cnt.v_laundry_count; 1044 if (target == 0 && wakeups != last_launder && 1045 ndirty * isqrt(wakeups - last_launder) >= nclean) { 1046 target = vm_background_launder_target; 1047 } 1048 1049 /* 1050 * We have a non-zero background laundering target. If we've 1051 * laundered up to our maximum without observing a page daemon 1052 * wakeup, just stop. This is a safety belt that ensures we 1053 * don't launder an excessive amount if memory pressure is low 1054 * and the ratio of dirty to clean pages is large. Otherwise, 1055 * proceed at the background laundering rate. 1056 */ 1057 if (target > 0) { 1058 if (wakeups != last_launder) { 1059 last_launder = wakeups; 1060 last_target = target; 1061 } else if (last_target - target >= 1062 vm_background_launder_max * PAGE_SIZE / 1024) { 1063 target = 0; 1064 } 1065 launder = vm_background_launder_rate * PAGE_SIZE / 1024; 1066 launder /= VM_LAUNDER_RATE; 1067 if (launder > target) 1068 launder = target; 1069 } 1070 1071 dolaundry: 1072 if (launder > 0) { 1073 /* 1074 * Because of I/O clustering, the number of laundered 1075 * pages could exceed "target" by the maximum size of 1076 * a cluster minus one. 1077 */ 1078 target -= min(vm_pageout_launder(domain, launder, 1079 in_shortfall), target); 1080 pause("laundp", hz / VM_LAUNDER_RATE); 1081 } 1082 1083 /* 1084 * If we're not currently laundering pages and the page daemon 1085 * hasn't posted a new request, sleep until the page daemon 1086 * kicks us. 1087 */ 1088 vm_pagequeue_lock(pq); 1089 if (target == 0 && vm_laundry_request == VM_LAUNDRY_IDLE) 1090 (void)mtx_sleep(&vm_laundry_request, 1091 vm_pagequeue_lockptr(pq), PVM, "launds", 0); 1092 1093 /* 1094 * If the pagedaemon has indicated that it's in shortfall, start 1095 * a shortfall laundering unless we're already in the middle of 1096 * one. This may preempt a background laundering. 1097 */ 1098 if (vm_laundry_request == VM_LAUNDRY_SHORTFALL && 1099 (!in_shortfall || shortfall_cycle == 0)) { 1100 shortfall = vm_laundry_target() + vm_pageout_deficit; 1101 target = 0; 1102 } else 1103 shortfall = 0; 1104 1105 if (target == 0) 1106 vm_laundry_request = VM_LAUNDRY_IDLE; 1107 vm_pagequeue_unlock(pq); 1108 } 1109 } 1110 1111 /* 1112 * vm_pageout_scan does the dirty work for the pageout daemon. 1113 * 1114 * pass == 0: Update active LRU/deactivate pages 1115 * pass >= 1: Free inactive pages 1116 * 1117 * Returns true if pass was zero or enough pages were freed by the inactive 1118 * queue scan to meet the target. 
1119 */ 1120 static bool 1121 vm_pageout_scan(struct vm_domain *vmd, int pass) 1122 { 1123 vm_page_t m, next; 1124 struct vm_pagequeue *pq; 1125 vm_object_t object; 1126 long min_scan; 1127 int act_delta, addl_page_shortage, deficit, inactq_shortage, maxscan; 1128 int page_shortage, scan_tick, scanned, starting_page_shortage; 1129 boolean_t queue_locked; 1130 1131 /* 1132 * If we need to reclaim memory ask kernel caches to return 1133 * some. We rate limit to avoid thrashing. 1134 */ 1135 if (vmd == &vm_dom[0] && pass > 0 && 1136 (time_uptime - lowmem_uptime) >= lowmem_period) { 1137 /* 1138 * Decrease registered cache sizes. 1139 */ 1140 SDT_PROBE0(vm, , , vm__lowmem_scan); 1141 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES); 1142 /* 1143 * We do this explicitly after the caches have been 1144 * drained above. 1145 */ 1146 uma_reclaim(); 1147 lowmem_uptime = time_uptime; 1148 } 1149 1150 /* 1151 * The addl_page_shortage is the number of temporarily 1152 * stuck pages in the inactive queue. In other words, the 1153 * number of pages from the inactive count that should be 1154 * discounted in setting the target for the active queue scan. 1155 */ 1156 addl_page_shortage = 0; 1157 1158 /* 1159 * Calculate the number of pages that we want to free. This number 1160 * can be negative if many pages are freed between the wakeup call to 1161 * the page daemon and this calculation. 1162 */ 1163 if (pass > 0) { 1164 deficit = atomic_readandclear_int(&vm_pageout_deficit); 1165 page_shortage = vm_paging_target() + deficit; 1166 } else 1167 page_shortage = deficit = 0; 1168 starting_page_shortage = page_shortage; 1169 1170 /* 1171 * Start scanning the inactive queue for pages that we can free. The 1172 * scan will stop when we reach the target or we have scanned the 1173 * entire queue. (Note that m->act_count is not used to make 1174 * decisions for the inactive queue, only for the active queue.) 1175 */ 1176 pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; 1177 maxscan = pq->pq_cnt; 1178 vm_pagequeue_lock(pq); 1179 queue_locked = TRUE; 1180 for (m = TAILQ_FIRST(&pq->pq_pl); 1181 m != NULL && maxscan-- > 0 && page_shortage > 0; 1182 m = next) { 1183 vm_pagequeue_assert_locked(pq); 1184 KASSERT(queue_locked, ("unlocked inactive queue")); 1185 KASSERT(vm_page_inactive(m), ("Inactive queue %p", m)); 1186 1187 VM_CNT_INC(v_pdpages); 1188 next = TAILQ_NEXT(m, plinks.q); 1189 1190 /* 1191 * skip marker pages 1192 */ 1193 if (m->flags & PG_MARKER) 1194 continue; 1195 1196 KASSERT((m->flags & PG_FICTITIOUS) == 0, 1197 ("Fictitious page %p cannot be in inactive queue", m)); 1198 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1199 ("Unmanaged page %p cannot be in inactive queue", m)); 1200 1201 /* 1202 * The page or object lock acquisitions fail if the 1203 * page was removed from the queue or moved to a 1204 * different position within the queue. In either 1205 * case, addl_page_shortage should not be incremented. 1206 */ 1207 if (!vm_pageout_page_lock(m, &next)) 1208 goto unlock_page; 1209 else if (m->hold_count != 0) { 1210 /* 1211 * Held pages are essentially stuck in the 1212 * queue. So, they ought to be discounted 1213 * from the inactive count. See the 1214 * calculation of inactq_shortage before the 1215 * loop over the active queue below. 
1216 */ 1217 addl_page_shortage++; 1218 goto unlock_page; 1219 } 1220 object = m->object; 1221 if (!VM_OBJECT_TRYWLOCK(object)) { 1222 if (!vm_pageout_fallback_object_lock(m, &next)) 1223 goto unlock_object; 1224 else if (m->hold_count != 0) { 1225 addl_page_shortage++; 1226 goto unlock_object; 1227 } 1228 } 1229 if (vm_page_busied(m)) { 1230 /* 1231 * Don't mess with busy pages. Leave them at 1232 * the front of the queue. Most likely, they 1233 * are being paged out and will leave the 1234 * queue shortly after the scan finishes. So, 1235 * they ought to be discounted from the 1236 * inactive count. 1237 */ 1238 addl_page_shortage++; 1239 unlock_object: 1240 VM_OBJECT_WUNLOCK(object); 1241 unlock_page: 1242 vm_page_unlock(m); 1243 continue; 1244 } 1245 KASSERT(m->hold_count == 0, ("Held page %p", m)); 1246 1247 /* 1248 * Dequeue the inactive page and unlock the inactive page 1249 * queue, invalidating the 'next' pointer. Dequeueing the 1250 * page here avoids a later reacquisition (and release) of 1251 * the inactive page queue lock when vm_page_activate(), 1252 * vm_page_free(), or vm_page_launder() is called. Use a 1253 * marker to remember our place in the inactive queue. 1254 */ 1255 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q); 1256 vm_page_dequeue_locked(m); 1257 vm_pagequeue_unlock(pq); 1258 queue_locked = FALSE; 1259 1260 /* 1261 * Invalid pages can be easily freed. They cannot be 1262 * mapped, vm_page_free() asserts this. 1263 */ 1264 if (m->valid == 0) 1265 goto free_page; 1266 1267 /* 1268 * If the page has been referenced and the object is not dead, 1269 * reactivate or requeue the page depending on whether the 1270 * object is mapped. 1271 */ 1272 if ((m->aflags & PGA_REFERENCED) != 0) { 1273 vm_page_aflag_clear(m, PGA_REFERENCED); 1274 act_delta = 1; 1275 } else 1276 act_delta = 0; 1277 if (object->ref_count != 0) { 1278 act_delta += pmap_ts_referenced(m); 1279 } else { 1280 KASSERT(!pmap_page_is_mapped(m), 1281 ("vm_pageout_scan: page %p is mapped", m)); 1282 } 1283 if (act_delta != 0) { 1284 if (object->ref_count != 0) { 1285 VM_CNT_INC(v_reactivated); 1286 vm_page_activate(m); 1287 1288 /* 1289 * Increase the activation count if the page 1290 * was referenced while in the inactive queue. 1291 * This makes it less likely that the page will 1292 * be returned prematurely to the inactive 1293 * queue. 1294 */ 1295 m->act_count += act_delta + ACT_ADVANCE; 1296 goto drop_page; 1297 } else if ((object->flags & OBJ_DEAD) == 0) { 1298 vm_pagequeue_lock(pq); 1299 queue_locked = TRUE; 1300 m->queue = PQ_INACTIVE; 1301 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); 1302 vm_pagequeue_cnt_inc(pq); 1303 goto drop_page; 1304 } 1305 } 1306 1307 /* 1308 * If the page appears to be clean at the machine-independent 1309 * layer, then remove all of its mappings from the pmap in 1310 * anticipation of freeing it. If, however, any of the page's 1311 * mappings allow write access, then the page may still be 1312 * modified until the last of those mappings are removed. 1313 */ 1314 if (object->ref_count != 0) { 1315 vm_page_test_dirty(m); 1316 if (m->dirty == 0) 1317 pmap_remove_all(m); 1318 } 1319 1320 /* 1321 * Clean pages can be freed, but dirty pages must be sent back 1322 * to the laundry, unless they belong to a dead object. 1323 * Requeueing dirty pages from dead objects is pointless, as 1324 * they are being paged out and freed by the thread that 1325 * destroyed the object. 
1326 */ 1327 if (m->dirty == 0) { 1328 free_page: 1329 vm_page_free(m); 1330 VM_CNT_INC(v_dfree); 1331 --page_shortage; 1332 } else if ((object->flags & OBJ_DEAD) == 0) 1333 vm_page_launder(m); 1334 drop_page: 1335 vm_page_unlock(m); 1336 VM_OBJECT_WUNLOCK(object); 1337 if (!queue_locked) { 1338 vm_pagequeue_lock(pq); 1339 queue_locked = TRUE; 1340 } 1341 next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q); 1342 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q); 1343 } 1344 vm_pagequeue_unlock(pq); 1345 1346 /* 1347 * Wake up the laundry thread so that it can perform any needed 1348 * laundering. If we didn't meet our target, we're in shortfall and 1349 * need to launder more aggressively. If PQ_LAUNDRY is empty and no 1350 * swap devices are configured, the laundry thread has no work to do, so 1351 * don't bother waking it up. 1352 */ 1353 if (vm_laundry_request == VM_LAUNDRY_IDLE && 1354 starting_page_shortage > 0) { 1355 pq = &vm_dom[0].vmd_pagequeues[PQ_LAUNDRY]; 1356 vm_pagequeue_lock(pq); 1357 if (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled)) { 1358 if (page_shortage > 0) { 1359 vm_laundry_request = VM_LAUNDRY_SHORTFALL; 1360 VM_CNT_INC(v_pdshortfalls); 1361 } else if (vm_laundry_request != VM_LAUNDRY_SHORTFALL) 1362 vm_laundry_request = VM_LAUNDRY_BACKGROUND; 1363 wakeup(&vm_laundry_request); 1364 } 1365 vm_pagequeue_unlock(pq); 1366 } 1367 1368 /* 1369 * Wakeup the swapout daemon if we didn't free the targeted number of 1370 * pages. 1371 */ 1372 if (page_shortage > 0) 1373 vm_swapout_run(); 1374 1375 /* 1376 * If the inactive queue scan fails repeatedly to meet its 1377 * target, kill the largest process. 1378 */ 1379 vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage); 1380 1381 /* 1382 * Compute the number of pages we want to try to move from the 1383 * active queue to either the inactive or laundry queue. 1384 * 1385 * When scanning active pages, we make clean pages count more heavily 1386 * towards the page shortage than dirty pages. This is because dirty 1387 * pages must be laundered before they can be reused and thus have less 1388 * utility when attempting to quickly alleviate a shortage. However, 1389 * this weighting also causes the scan to deactivate dirty pages more 1390 * more aggressively, improving the effectiveness of clustering and 1391 * ensuring that they can eventually be reused. 1392 */ 1393 inactq_shortage = vm_cnt.v_inactive_target - (vm_cnt.v_inactive_count + 1394 vm_cnt.v_laundry_count / act_scan_laundry_weight) + 1395 vm_paging_target() + deficit + addl_page_shortage; 1396 page_shortage *= act_scan_laundry_weight; 1397 1398 pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; 1399 vm_pagequeue_lock(pq); 1400 maxscan = pq->pq_cnt; 1401 1402 /* 1403 * If we're just idle polling attempt to visit every 1404 * active page within 'update_period' seconds. 1405 */ 1406 scan_tick = ticks; 1407 if (vm_pageout_update_period != 0) { 1408 min_scan = pq->pq_cnt; 1409 min_scan *= scan_tick - vmd->vmd_last_active_scan; 1410 min_scan /= hz * vm_pageout_update_period; 1411 } else 1412 min_scan = 0; 1413 if (min_scan > 0 || (inactq_shortage > 0 && maxscan > 0)) 1414 vmd->vmd_last_active_scan = scan_tick; 1415 1416 /* 1417 * Scan the active queue for pages that can be deactivated. Update 1418 * the per-page activity counter and use it to identify deactivation 1419 * candidates. Held pages may be deactivated. 
1420 */ 1421 for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned < 1422 min_scan || (inactq_shortage > 0 && scanned < maxscan)); m = next, 1423 scanned++) { 1424 KASSERT(m->queue == PQ_ACTIVE, 1425 ("vm_pageout_scan: page %p isn't active", m)); 1426 next = TAILQ_NEXT(m, plinks.q); 1427 if ((m->flags & PG_MARKER) != 0) 1428 continue; 1429 KASSERT((m->flags & PG_FICTITIOUS) == 0, 1430 ("Fictitious page %p cannot be in active queue", m)); 1431 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1432 ("Unmanaged page %p cannot be in active queue", m)); 1433 if (!vm_pageout_page_lock(m, &next)) { 1434 vm_page_unlock(m); 1435 continue; 1436 } 1437 1438 /* 1439 * The count for page daemon pages is updated after checking 1440 * the page for eligibility. 1441 */ 1442 VM_CNT_INC(v_pdpages); 1443 1444 /* 1445 * Check to see "how much" the page has been used. 1446 */ 1447 if ((m->aflags & PGA_REFERENCED) != 0) { 1448 vm_page_aflag_clear(m, PGA_REFERENCED); 1449 act_delta = 1; 1450 } else 1451 act_delta = 0; 1452 1453 /* 1454 * Perform an unsynchronized object ref count check. While 1455 * the page lock ensures that the page is not reallocated to 1456 * another object, in particular, one with unmanaged mappings 1457 * that cannot support pmap_ts_referenced(), two races are, 1458 * nonetheless, possible: 1459 * 1) The count was transitioning to zero, but we saw a non- 1460 * zero value. pmap_ts_referenced() will return zero 1461 * because the page is not mapped. 1462 * 2) The count was transitioning to one, but we saw zero. 1463 * This race delays the detection of a new reference. At 1464 * worst, we will deactivate and reactivate the page. 1465 */ 1466 if (m->object->ref_count != 0) 1467 act_delta += pmap_ts_referenced(m); 1468 1469 /* 1470 * Advance or decay the act_count based on recent usage. 1471 */ 1472 if (act_delta != 0) { 1473 m->act_count += ACT_ADVANCE + act_delta; 1474 if (m->act_count > ACT_MAX) 1475 m->act_count = ACT_MAX; 1476 } else 1477 m->act_count -= min(m->act_count, ACT_DECLINE); 1478 1479 /* 1480 * Move this page to the tail of the active, inactive or laundry 1481 * queue depending on usage. 1482 */ 1483 if (m->act_count == 0) { 1484 /* Dequeue to avoid later lock recursion. */ 1485 vm_page_dequeue_locked(m); 1486 1487 /* 1488 * When not short for inactive pages, let dirty pages go 1489 * through the inactive queue before moving to the 1490 * laundry queues. This gives them some extra time to 1491 * be reactivated, potentially avoiding an expensive 1492 * pageout. During a page shortage, the inactive queue 1493 * is necessarily small, so we may move dirty pages 1494 * directly to the laundry queue. 1495 */ 1496 if (inactq_shortage <= 0) 1497 vm_page_deactivate(m); 1498 else { 1499 /* 1500 * Calling vm_page_test_dirty() here would 1501 * require acquisition of the object's write 1502 * lock. However, during a page shortage, 1503 * directing dirty pages into the laundry 1504 * queue is only an optimization and not a 1505 * requirement. Therefore, we simply rely on 1506 * the opportunistic updates to the page's 1507 * dirty field by the pmap. 
1508 */ 1509 if (m->dirty == 0) { 1510 vm_page_deactivate(m); 1511 inactq_shortage -= 1512 act_scan_laundry_weight; 1513 } else { 1514 vm_page_launder(m); 1515 inactq_shortage--; 1516 } 1517 } 1518 } else 1519 vm_page_requeue_locked(m); 1520 vm_page_unlock(m); 1521 } 1522 vm_pagequeue_unlock(pq); 1523 if (pass > 0) 1524 vm_swapout_run_idle(); 1525 return (page_shortage <= 0); 1526 } 1527 1528 static int vm_pageout_oom_vote; 1529 1530 /* 1531 * The pagedaemon threads randlomly select one to perform the 1532 * OOM. Trying to kill processes before all pagedaemons 1533 * failed to reach free target is premature. 1534 */ 1535 static void 1536 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 1537 int starting_page_shortage) 1538 { 1539 int old_vote; 1540 1541 if (starting_page_shortage <= 0 || starting_page_shortage != 1542 page_shortage) 1543 vmd->vmd_oom_seq = 0; 1544 else 1545 vmd->vmd_oom_seq++; 1546 if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { 1547 if (vmd->vmd_oom) { 1548 vmd->vmd_oom = FALSE; 1549 atomic_subtract_int(&vm_pageout_oom_vote, 1); 1550 } 1551 return; 1552 } 1553 1554 /* 1555 * Do not follow the call sequence until OOM condition is 1556 * cleared. 1557 */ 1558 vmd->vmd_oom_seq = 0; 1559 1560 if (vmd->vmd_oom) 1561 return; 1562 1563 vmd->vmd_oom = TRUE; 1564 old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); 1565 if (old_vote != vm_ndomains - 1) 1566 return; 1567 1568 /* 1569 * The current pagedaemon thread is the last in the quorum to 1570 * start OOM. Initiate the selection and signaling of the 1571 * victim. 1572 */ 1573 vm_pageout_oom(VM_OOM_MEM); 1574 1575 /* 1576 * After one round of OOM terror, recall our vote. On the 1577 * next pass, current pagedaemon would vote again if the low 1578 * memory condition is still there, due to vmd_oom being 1579 * false. 1580 */ 1581 vmd->vmd_oom = FALSE; 1582 atomic_subtract_int(&vm_pageout_oom_vote, 1); 1583 } 1584 1585 /* 1586 * The OOM killer is the page daemon's action of last resort when 1587 * memory allocation requests have been stalled for a prolonged period 1588 * of time because it cannot reclaim memory. This function computes 1589 * the approximate number of physical pages that could be reclaimed if 1590 * the specified address space is destroyed. 1591 * 1592 * Private, anonymous memory owned by the address space is the 1593 * principal resource that we expect to recover after an OOM kill. 1594 * Since the physical pages mapped by the address space's COW entries 1595 * are typically shared pages, they are unlikely to be released and so 1596 * they are not counted. 1597 * 1598 * To get to the point where the page daemon runs the OOM killer, its 1599 * efforts to write-back vnode-backed pages may have stalled. This 1600 * could be caused by a memory allocation deadlock in the write path 1601 * that might be resolved by an OOM kill. Therefore, physical pages 1602 * belonging to vnode-backed objects are counted, because they might 1603 * be freed without being written out first if the address space holds 1604 * the last reference to an unlinked vnode. 1605 * 1606 * Similarly, physical pages belonging to OBJT_PHYS objects are 1607 * counted because the address space might hold the last reference to 1608 * the object. 
1609 */ 1610 static long 1611 vm_pageout_oom_pagecount(struct vmspace *vmspace) 1612 { 1613 vm_map_t map; 1614 vm_map_entry_t entry; 1615 vm_object_t obj; 1616 long res; 1617 1618 map = &vmspace->vm_map; 1619 KASSERT(!map->system_map, ("system map")); 1620 sx_assert(&map->lock, SA_LOCKED); 1621 res = 0; 1622 for (entry = map->header.next; entry != &map->header; 1623 entry = entry->next) { 1624 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 1625 continue; 1626 obj = entry->object.vm_object; 1627 if (obj == NULL) 1628 continue; 1629 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 1630 obj->ref_count != 1) 1631 continue; 1632 switch (obj->type) { 1633 case OBJT_DEFAULT: 1634 case OBJT_SWAP: 1635 case OBJT_PHYS: 1636 case OBJT_VNODE: 1637 res += obj->resident_page_count; 1638 break; 1639 } 1640 } 1641 return (res); 1642 } 1643 1644 void 1645 vm_pageout_oom(int shortage) 1646 { 1647 struct proc *p, *bigproc; 1648 vm_offset_t size, bigsize; 1649 struct thread *td; 1650 struct vmspace *vm; 1651 bool breakout; 1652 1653 /* 1654 * We keep the process bigproc locked once we find it to keep anyone 1655 * from messing with it; however, there is a possibility of 1656 * deadlock if process B is bigproc and one of its child processes 1657 * attempts to propagate a signal to B while we are waiting for A's 1658 * lock while walking this list. To avoid this, we don't block on 1659 * the process lock but just skip a process if it is already locked. 1660 */ 1661 bigproc = NULL; 1662 bigsize = 0; 1663 sx_slock(&allproc_lock); 1664 FOREACH_PROC_IN_SYSTEM(p) { 1665 PROC_LOCK(p); 1666 1667 /* 1668 * If this is a system, protected or killed process, skip it. 1669 */ 1670 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 1671 P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 1672 p->p_pid == 1 || P_KILLED(p) || 1673 (p->p_pid < 48 && swap_pager_avail != 0)) { 1674 PROC_UNLOCK(p); 1675 continue; 1676 } 1677 /* 1678 * If the process is in a non-running type state, 1679 * don't touch it. Check all the threads individually. 1680 */ 1681 breakout = false; 1682 FOREACH_THREAD_IN_PROC(p, td) { 1683 thread_lock(td); 1684 if (!TD_ON_RUNQ(td) && 1685 !TD_IS_RUNNING(td) && 1686 !TD_IS_SLEEPING(td) && 1687 !TD_IS_SUSPENDED(td) && 1688 !TD_IS_SWAPPED(td)) { 1689 thread_unlock(td); 1690 breakout = true; 1691 break; 1692 } 1693 thread_unlock(td); 1694 } 1695 if (breakout) { 1696 PROC_UNLOCK(p); 1697 continue; 1698 } 1699 /* 1700 * get the process size 1701 */ 1702 vm = vmspace_acquire_ref(p); 1703 if (vm == NULL) { 1704 PROC_UNLOCK(p); 1705 continue; 1706 } 1707 _PHOLD_LITE(p); 1708 PROC_UNLOCK(p); 1709 sx_sunlock(&allproc_lock); 1710 if (!vm_map_trylock_read(&vm->vm_map)) { 1711 vmspace_free(vm); 1712 sx_slock(&allproc_lock); 1713 PRELE(p); 1714 continue; 1715 } 1716 size = vmspace_swap_count(vm); 1717 if (shortage == VM_OOM_MEM) 1718 size += vm_pageout_oom_pagecount(vm); 1719 vm_map_unlock_read(&vm->vm_map); 1720 vmspace_free(vm); 1721 sx_slock(&allproc_lock); 1722 1723 /* 1724 * If this process is bigger than the biggest one, 1725 * remember it. 
1726 */ 1727 if (size > bigsize) { 1728 if (bigproc != NULL) 1729 PRELE(bigproc); 1730 bigproc = p; 1731 bigsize = size; 1732 } else { 1733 PRELE(p); 1734 } 1735 } 1736 sx_sunlock(&allproc_lock); 1737 if (bigproc != NULL) { 1738 if (vm_panic_on_oom != 0) 1739 panic("out of swap space"); 1740 PROC_LOCK(bigproc); 1741 killproc(bigproc, "out of swap space"); 1742 sched_nice(bigproc, PRIO_MIN); 1743 _PRELE(bigproc); 1744 PROC_UNLOCK(bigproc); 1745 wakeup(&vm_cnt.v_free_count); 1746 } 1747 } 1748 1749 static void 1750 vm_pageout_worker(void *arg) 1751 { 1752 struct vm_domain *domain; 1753 int domidx, pass; 1754 bool target_met; 1755 1756 domidx = (uintptr_t)arg; 1757 domain = &vm_dom[domidx]; 1758 pass = 0; 1759 target_met = true; 1760 1761 /* 1762 * XXXKIB It could be useful to bind pageout daemon threads to 1763 * the cores belonging to the domain, from which vm_page_array 1764 * is allocated. 1765 */ 1766 1767 KASSERT(domain->vmd_segs != 0, ("domain without segments")); 1768 domain->vmd_last_active_scan = ticks; 1769 vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE); 1770 vm_pageout_init_marker(&domain->vmd_inacthead, PQ_INACTIVE); 1771 TAILQ_INSERT_HEAD(&domain->vmd_pagequeues[PQ_INACTIVE].pq_pl, 1772 &domain->vmd_inacthead, plinks.q); 1773 1774 /* 1775 * The pageout daemon worker is never done, so loop forever. 1776 */ 1777 while (TRUE) { 1778 mtx_lock(&vm_page_queue_free_mtx); 1779 1780 /* 1781 * Generally, after a level >= 1 scan, if there are enough 1782 * free pages to wakeup the waiters, then they are already 1783 * awake. A call to vm_page_free() during the scan awakened 1784 * them. However, in the following case, this wakeup serves 1785 * to bound the amount of time that a thread might wait. 1786 * Suppose a thread's call to vm_page_alloc() fails, but 1787 * before that thread calls VM_WAIT, enough pages are freed by 1788 * other threads to alleviate the free page shortage. The 1789 * thread will, nonetheless, wait until another page is freed 1790 * or this wakeup is performed. 1791 */ 1792 if (vm_pages_needed && !vm_page_count_min()) { 1793 vm_pages_needed = false; 1794 wakeup(&vm_cnt.v_free_count); 1795 } 1796 1797 /* 1798 * Do not clear vm_pageout_wanted until we reach our free page 1799 * target. Otherwise, we may be awakened over and over again, 1800 * wasting CPU time. 1801 */ 1802 if (vm_pageout_wanted && target_met) 1803 vm_pageout_wanted = false; 1804 1805 /* 1806 * Might the page daemon receive a wakeup call? 1807 */ 1808 if (vm_pageout_wanted) { 1809 /* 1810 * No. Either vm_pageout_wanted was set by another 1811 * thread during the previous scan, which must have 1812 * been a level 0 scan, or vm_pageout_wanted was 1813 * already set and the scan failed to free enough 1814 * pages. If we haven't yet performed a level >= 1 1815 * (page reclamation) scan, then increase the level 1816 * and scan again now. Otherwise, sleep a bit and 1817 * try again later. 1818 */ 1819 mtx_unlock(&vm_page_queue_free_mtx); 1820 if (pass >= 1) 1821 pause("pwait", hz / VM_INACT_SCAN_RATE); 1822 pass++; 1823 } else { 1824 /* 1825 * Yes. Sleep until pages need to be reclaimed or 1826 * have their reference stats updated. 1827 */ 1828 if (mtx_sleep(&vm_pageout_wanted, 1829 &vm_page_queue_free_mtx, PDROP | PVM, "psleep", 1830 hz) == 0) { 1831 VM_CNT_INC(v_pdwakeups); 1832 pass = 1; 1833 } else 1834 pass = 0; 1835 } 1836 1837 target_met = vm_pageout_scan(domain, pass); 1838 } 1839 } 1840 1841 /* 1842 * vm_pageout_init initialises basic pageout daemon settings. 
1843 */ 1844 static void 1845 vm_pageout_init(void) 1846 { 1847 /* 1848 * Initialize some paging parameters. 1849 */ 1850 vm_cnt.v_interrupt_free_min = 2; 1851 if (vm_cnt.v_page_count < 2000) 1852 vm_pageout_page_count = 8; 1853 1854 /* 1855 * v_free_reserved needs to include enough for the largest 1856 * swap pager structures plus enough for any pv_entry structs 1857 * when paging. 1858 */ 1859 if (vm_cnt.v_page_count > 1024) 1860 vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200; 1861 else 1862 vm_cnt.v_free_min = 4; 1863 vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 1864 vm_cnt.v_interrupt_free_min; 1865 vm_cnt.v_free_reserved = vm_pageout_page_count + 1866 vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768); 1867 vm_cnt.v_free_severe = vm_cnt.v_free_min / 2; 1868 vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved; 1869 vm_cnt.v_free_min += vm_cnt.v_free_reserved; 1870 vm_cnt.v_free_severe += vm_cnt.v_free_reserved; 1871 vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2; 1872 if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3) 1873 vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3; 1874 1875 /* 1876 * Set the default wakeup threshold to be 10% above the minimum 1877 * page limit. This keeps the steady state out of shortfall. 1878 */ 1879 vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11; 1880 1881 /* 1882 * Set interval in seconds for active scan. We want to visit each 1883 * page at least once every ten minutes. This is to prevent worst 1884 * case paging behaviors with stale active LRU. 1885 */ 1886 if (vm_pageout_update_period == 0) 1887 vm_pageout_update_period = 600; 1888 1889 /* XXX does not really belong here */ 1890 if (vm_page_max_wired == 0) 1891 vm_page_max_wired = vm_cnt.v_free_count / 3; 1892 1893 /* 1894 * Target amount of memory to move out of the laundry queue during a 1895 * background laundering. This is proportional to the amount of system 1896 * memory. 1897 */ 1898 vm_background_launder_target = (vm_cnt.v_free_target - 1899 vm_cnt.v_free_min) / 10; 1900 } 1901 1902 /* 1903 * vm_pageout is the high level pageout daemon. 1904 */ 1905 static void 1906 vm_pageout(void) 1907 { 1908 int error; 1909 #ifdef VM_NUMA_ALLOC 1910 int i; 1911 #endif 1912 1913 swap_pager_swap_init(); 1914 error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL, 1915 0, 0, "laundry: dom0"); 1916 if (error != 0) 1917 panic("starting laundry for domain 0, error %d", error); 1918 #ifdef VM_NUMA_ALLOC 1919 for (i = 1; i < vm_ndomains; i++) { 1920 error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i, 1921 curproc, NULL, 0, 0, "dom%d", i); 1922 if (error != 0) { 1923 panic("starting pageout for domain %d, error %d\n", 1924 i, error); 1925 } 1926 } 1927 #endif 1928 error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL, 1929 0, 0, "uma"); 1930 if (error != 0) 1931 panic("starting uma_reclaim helper, error %d\n", error); 1932 vm_pageout_worker((void *)(uintptr_t)0); 1933 } 1934 1935 /* 1936 * Unless the free page queue lock is held by the caller, this function 1937 * should be regarded as advisory. Specifically, the caller should 1938 * not msleep() on &vm_cnt.v_free_count following this function unless 1939 * the free page queue lock is held until the msleep() is performed. 1940 */ 1941 void 1942 pagedaemon_wakeup(void) 1943 { 1944 1945 if (!vm_pageout_wanted && curthread->td_proc != pageproc) { 1946 vm_pageout_wanted = true; 1947 wakeup(&vm_pageout_wanted); 1948 } 1949 } 1950