1 /*- 2 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * Copyright (c) 2005 Yahoo! Technologies Norway AS 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * The Mach Operating System project at Carnegie-Mellon University. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. All advertising materials mentioning features or use of this software 25 * must display the following acknowledgement: 26 * This product includes software developed by the University of 27 * California, Berkeley and its contributors. 28 * 4. Neither the name of the University nor the names of its contributors 29 * may be used to endorse or promote products derived from this software 30 * without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 42 * SUCH DAMAGE. 43 * 44 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 45 * 46 * 47 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 48 * All rights reserved. 49 * 50 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 51 * 52 * Permission to use, copy, modify and distribute this software and 53 * its documentation is hereby granted, provided that both the copyright 54 * notice and this permission notice appear in all copies of the 55 * software, derivative works or modified versions, and any portions 56 * thereof, and that both notices appear in supporting documentation. 57 * 58 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 59 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 60 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 61 * 62 * Carnegie Mellon requests users of this software to return to 63 * 64 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 65 * School of Computer Science 66 * Carnegie Mellon University 67 * Pittsburgh PA 15213-3890 68 * 69 * any improvements or extensions that they make and grant Carnegie the 70 * rights to redistribute these changes. 71 */ 72 73 /* 74 * The proverbial page-out daemon. 
75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_vm.h" 81 82 #include <sys/param.h> 83 #include <sys/systm.h> 84 #include <sys/kernel.h> 85 #include <sys/eventhandler.h> 86 #include <sys/lock.h> 87 #include <sys/mutex.h> 88 #include <sys/proc.h> 89 #include <sys/kthread.h> 90 #include <sys/ktr.h> 91 #include <sys/mount.h> 92 #include <sys/racct.h> 93 #include <sys/resourcevar.h> 94 #include <sys/sched.h> 95 #include <sys/sdt.h> 96 #include <sys/signalvar.h> 97 #include <sys/smp.h> 98 #include <sys/time.h> 99 #include <sys/vnode.h> 100 #include <sys/vmmeter.h> 101 #include <sys/rwlock.h> 102 #include <sys/sx.h> 103 #include <sys/sysctl.h> 104 105 #include <vm/vm.h> 106 #include <vm/vm_param.h> 107 #include <vm/vm_object.h> 108 #include <vm/vm_page.h> 109 #include <vm/vm_map.h> 110 #include <vm/vm_pageout.h> 111 #include <vm/vm_pager.h> 112 #include <vm/vm_phys.h> 113 #include <vm/vm_pagequeue.h> 114 #include <vm/swap_pager.h> 115 #include <vm/vm_extern.h> 116 #include <vm/uma.h> 117 118 /* 119 * System initialization 120 */ 121 122 /* the kernel process "vm_pageout"*/ 123 static void vm_pageout(void); 124 static void vm_pageout_init(void); 125 static int vm_pageout_clean(vm_page_t m, int *numpagedout); 126 static int vm_pageout_cluster(vm_page_t m); 127 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 128 int starting_page_shortage); 129 130 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init, 131 NULL); 132 133 struct proc *pageproc; 134 135 static struct kproc_desc page_kp = { 136 "pagedaemon", 137 vm_pageout, 138 &pageproc 139 }; 140 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, 141 &page_kp); 142 143 SDT_PROVIDER_DEFINE(vm); 144 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan); 145 146 /* Pagedaemon activity rates, in subdivisions of one second. */ 147 #define VM_LAUNDER_RATE 10 148 #define VM_INACT_SCAN_RATE 10 149 150 static int vm_pageout_oom_seq = 12; 151 152 static int vm_pageout_update_period; 153 static int disable_swap_pageouts; 154 static int lowmem_period = 10; 155 static int swapdev_enabled; 156 157 static int vm_panic_on_oom = 0; 158 159 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom, 160 CTLFLAG_RWTUN, &vm_panic_on_oom, 0, 161 "Panic on the given number of out-of-memory errors instead of killing the largest process"); 162 163 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period, 164 CTLFLAG_RWTUN, &vm_pageout_update_period, 0, 165 "Maximum active LRU update period"); 166 167 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0, 168 "Low memory callback period"); 169 170 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts, 171 CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages"); 172 173 static int pageout_lock_miss; 174 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, 175 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout"); 176 177 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq, 178 CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0, 179 "back-to-back calls to oom detector to start OOM"); 180 181 static int act_scan_laundry_weight = 3; 182 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN, 183 &act_scan_laundry_weight, 0, 184 "weight given to clean vs. 
dirty pages in active queue scans"); 185 186 static u_int vm_background_launder_rate = 4096; 187 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN, 188 &vm_background_launder_rate, 0, 189 "background laundering rate, in kilobytes per second"); 190 191 static u_int vm_background_launder_max = 20 * 1024; 192 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN, 193 &vm_background_launder_max, 0, "background laundering cap, in kilobytes"); 194 195 int vm_pageout_page_count = 32; 196 197 u_long vm_page_max_user_wired; 198 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW, 199 &vm_page_max_user_wired, 0, 200 "system-wide limit to user-wired page count"); 201 202 static u_int isqrt(u_int num); 203 static int vm_pageout_launder(struct vm_domain *vmd, int launder, 204 bool in_shortfall); 205 static void vm_pageout_laundry_worker(void *arg); 206 207 struct scan_state { 208 struct vm_batchqueue bq; 209 struct vm_pagequeue *pq; 210 vm_page_t marker; 211 int maxscan; 212 int scanned; 213 }; 214 215 static void 216 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq, 217 vm_page_t marker, vm_page_t after, int maxscan) 218 { 219 220 vm_pagequeue_assert_locked(pq); 221 KASSERT((marker->a.flags & PGA_ENQUEUED) == 0, 222 ("marker %p already enqueued", marker)); 223 224 if (after == NULL) 225 TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q); 226 else 227 TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q); 228 vm_page_aflag_set(marker, PGA_ENQUEUED); 229 230 vm_batchqueue_init(&ss->bq); 231 ss->pq = pq; 232 ss->marker = marker; 233 ss->maxscan = maxscan; 234 ss->scanned = 0; 235 vm_pagequeue_unlock(pq); 236 } 237 238 static void 239 vm_pageout_end_scan(struct scan_state *ss) 240 { 241 struct vm_pagequeue *pq; 242 243 pq = ss->pq; 244 vm_pagequeue_assert_locked(pq); 245 KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0, 246 ("marker %p not enqueued", ss->marker)); 247 248 TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q); 249 vm_page_aflag_clear(ss->marker, PGA_ENQUEUED); 250 pq->pq_pdpages += ss->scanned; 251 } 252 253 /* 254 * Add a small number of queued pages to a batch queue for later processing 255 * without the corresponding queue lock held. The caller must have enqueued a 256 * marker page at the desired start point for the scan. Pages will be 257 * physically dequeued if the caller so requests. Otherwise, the returned 258 * batch may contain marker pages, and it is up to the caller to handle them. 259 * 260 * When processing the batch queue, vm_pageout_defer() must be used to 261 * determine whether the page has been logically dequeued since the batch was 262 * collected. 
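 *
 * For illustration only (with illustrative locals, using the scan helpers
 * defined below), the pattern followed by the queue scans later in this file
 * looks roughly like:
 *
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *	while ((m = vm_pageout_next(&ss, dequeue)) != NULL) {
 *		if (vm_pageout_defer(m, queue, !dequeue))
 *			continue;
 *		... examine and reclaim or requeue the page ...
 *	}
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_end_scan(&ss);
 *	vm_pagequeue_unlock(pq);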
263 */ 264 static __always_inline void 265 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue) 266 { 267 struct vm_pagequeue *pq; 268 vm_page_t m, marker, n; 269 270 marker = ss->marker; 271 pq = ss->pq; 272 273 KASSERT((marker->a.flags & PGA_ENQUEUED) != 0, 274 ("marker %p not enqueued", ss->marker)); 275 276 vm_pagequeue_lock(pq); 277 for (m = TAILQ_NEXT(marker, plinks.q); m != NULL && 278 ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE; 279 m = n, ss->scanned++) { 280 n = TAILQ_NEXT(m, plinks.q); 281 if ((m->flags & PG_MARKER) == 0) { 282 KASSERT((m->a.flags & PGA_ENQUEUED) != 0, 283 ("page %p not enqueued", m)); 284 KASSERT((m->flags & PG_FICTITIOUS) == 0, 285 ("Fictitious page %p cannot be in page queue", m)); 286 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 287 ("Unmanaged page %p cannot be in page queue", m)); 288 } else if (dequeue) 289 continue; 290 291 (void)vm_batchqueue_insert(&ss->bq, m); 292 if (dequeue) { 293 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 294 vm_page_aflag_clear(m, PGA_ENQUEUED); 295 } 296 } 297 TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q); 298 if (__predict_true(m != NULL)) 299 TAILQ_INSERT_BEFORE(m, marker, plinks.q); 300 else 301 TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q); 302 if (dequeue) 303 vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt); 304 vm_pagequeue_unlock(pq); 305 } 306 307 /* 308 * Return the next page to be scanned, or NULL if the scan is complete. 309 */ 310 static __always_inline vm_page_t 311 vm_pageout_next(struct scan_state *ss, const bool dequeue) 312 { 313 314 if (ss->bq.bq_cnt == 0) 315 vm_pageout_collect_batch(ss, dequeue); 316 return (vm_batchqueue_pop(&ss->bq)); 317 } 318 319 /* 320 * Determine whether processing of a page should be deferred and ensure that any 321 * outstanding queue operations are processed. 322 */ 323 static __always_inline bool 324 vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued) 325 { 326 vm_page_astate_t as; 327 328 as = vm_page_astate_load(m); 329 if (__predict_false(as.queue != queue || 330 ((as.flags & PGA_ENQUEUED) != 0) != enqueued)) 331 return (true); 332 if ((as.flags & PGA_QUEUE_OP_MASK) != 0) { 333 vm_page_pqbatch_submit(m, queue); 334 return (true); 335 } 336 return (false); 337 } 338 339 /* 340 * Scan for pages at adjacent offsets within the given page's object that are 341 * eligible for laundering, form a cluster of these pages and the given page, 342 * and launder that cluster. 343 */ 344 static int 345 vm_pageout_cluster(vm_page_t m) 346 { 347 vm_object_t object; 348 vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps; 349 vm_pindex_t pindex; 350 int ib, is, page_base, pageout_count; 351 352 object = m->object; 353 VM_OBJECT_ASSERT_WLOCKED(object); 354 pindex = m->pindex; 355 356 vm_page_assert_xbusied(m); 357 358 mc[vm_pageout_page_count] = pb = ps = m; 359 pageout_count = 1; 360 page_base = vm_pageout_page_count; 361 ib = 1; 362 is = 1; 363 364 /* 365 * We can cluster only if the page is not clean, busy, or held, and 366 * the page is in the laundry queue. 367 * 368 * During heavy mmap/modification loads the pageout 369 * daemon can really fragment the underlying file 370 * due to flushing pages out of order and not trying to 371 * align the clusters (which leaves sporadic out-of-order 372 * holes). To solve this problem we do the reverse scan 373 * first and attempt to align our cluster, then do a 374 * forward scan if room remains. 
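	 *
	 * For illustration (with the default vm_pageout_page_count of 32):
	 * the passed-in page starts at mc[32], the reverse scan grows the
	 * cluster downward by decrementing page_base, and the forward scan
	 * then appends at mc[page_base + pageout_count], so the cluster
	 * finally handed to vm_pageout_flush() is the contiguous slice
	 * mc[page_base .. page_base + pageout_count - 1].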
	 */
more:
	while (ib != 0 && pageout_count < vm_pageout_page_count) {
		if (ib > pindex) {
			ib = 0;
			break;
		}
		if ((p = vm_page_prev(pb)) == NULL ||
		    vm_page_tryxbusy(p) == 0) {
			ib = 0;
			break;
		}
		if (vm_page_wired(p)) {
			ib = 0;
			vm_page_xunbusy(p);
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			ib = 0;
			vm_page_xunbusy(p);
			break;
		}
		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
			vm_page_xunbusy(p);
			ib = 0;
			break;
		}
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;

		/*
		 * We are at an alignment boundary.  Stop here, and switch
		 * directions.  Do not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}
	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		if ((p = vm_page_next(ps)) == NULL ||
		    vm_page_tryxbusy(p) == 0)
			break;
		if (vm_page_wired(p)) {
			vm_page_xunbusy(p);
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			vm_page_xunbusy(p);
			break;
		}
		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
			vm_page_xunbusy(p);
			break;
		}
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
	if (ib != 0 && pageout_count < vm_pageout_page_count)
		goto more;

	return (vm_pageout_flush(&mc[page_base], pageout_count,
	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e., busy the page), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	The returned runlen is the count of pages between mreq and the first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
 *	VM_PAGER_FAIL for any page in the runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Mark the pages shared busy and verify that they're
	 * valid and read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
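	 *
	 * After vm_pager_put_pages() returns, the per-page status collected
	 * below is handled roughly as follows: VM_PAGER_OK and VM_PAGER_PEND
	 * count as successful pageouts, VM_PAGER_BAD marks a page beyond the
	 * object's end that is simply undirtied, VM_PAGER_ERROR and
	 * VM_PAGER_FAIL reactivate the page (or move it to PQ_UNSWAPPABLE
	 * when a swap-backed pageout failed for lack of swap space), and
	 * VM_PAGER_AGAIN truncates the reported runlen.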
	 */
	for (i = 0; i < count; i++) {
		KASSERT(vm_page_all_valid(mc[i]),
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
		    mc[i], i, count));
		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
		    ("vm_pageout_flush: writeable page %p", mc[i]));
		vm_page_busy_downgrade(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			/*
			 * The page may have moved since laundering started, in
			 * which case it should be left alone.
			 */
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			/* FALLTHROUGH */
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * The page is outside the object's range.  We pretend
			 * that the page out worked and clean the page, so the
			 * changes will be lost if the page is reclaimed by
			 * the page daemon.
			 */
			vm_page_undirty(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out to swap because the
			 * pager wasn't able to find space, place the page in
			 * the PQ_UNSWAPPABLE holding queue.  This is an
			 * optimization that prevents the page daemon from
			 * wasting CPU cycles on pages that cannot be reclaimed
			 * because no swap device is configured.
			 *
			 * Otherwise, reactivate the page so that it doesn't
			 * clog the laundry and inactive queues.  (We will try
			 * paging it out again later.)
			 */
			if (object->type == OBJT_SWAP &&
			    pageout_status[i] == VM_PAGER_FAIL) {
				vm_page_unswappable(mt);
				numpagedout++;
			} else
				vm_page_activate(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

	atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

	if (swap_pager_nswapdev() == 1)
		atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
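 *
 * As implemented below, the error is one of: EDEADLK when the vnode lock or
 * mount point cannot be acquired without risking a deadlock, ENOENT when the
 * vnode and object were disassociated while the locks were dropped, ENXIO
 * when the page was requeued, reused, or cleaned in the meantime, EBUSY when
 * the page cannot be exclusively busied or its writeable mappings cannot be
 * removed, and EIO when the clustered pageout itself fails.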
595 */ 596 static int 597 vm_pageout_clean(vm_page_t m, int *numpagedout) 598 { 599 struct vnode *vp; 600 struct mount *mp; 601 vm_object_t object; 602 vm_pindex_t pindex; 603 int error, lockmode; 604 605 object = m->object; 606 VM_OBJECT_ASSERT_WLOCKED(object); 607 error = 0; 608 vp = NULL; 609 mp = NULL; 610 611 /* 612 * The object is already known NOT to be dead. It 613 * is possible for the vget() to block the whole 614 * pageout daemon, but the new low-memory handling 615 * code should prevent it. 616 * 617 * We can't wait forever for the vnode lock, we might 618 * deadlock due to a vn_read() getting stuck in 619 * vm_wait while holding this vnode. We skip the 620 * vnode if we can't get it in a reasonable amount 621 * of time. 622 */ 623 if (object->type == OBJT_VNODE) { 624 vm_page_xunbusy(m); 625 vp = object->handle; 626 if (vp->v_type == VREG && 627 vn_start_write(vp, &mp, V_NOWAIT) != 0) { 628 mp = NULL; 629 error = EDEADLK; 630 goto unlock_all; 631 } 632 KASSERT(mp != NULL, 633 ("vp %p with NULL v_mount", vp)); 634 vm_object_reference_locked(object); 635 pindex = m->pindex; 636 VM_OBJECT_WUNLOCK(object); 637 lockmode = MNT_SHARED_WRITES(vp->v_mount) ? 638 LK_SHARED : LK_EXCLUSIVE; 639 if (vget(vp, lockmode | LK_TIMELOCK, curthread)) { 640 vp = NULL; 641 error = EDEADLK; 642 goto unlock_mp; 643 } 644 VM_OBJECT_WLOCK(object); 645 646 /* 647 * Ensure that the object and vnode were not disassociated 648 * while locks were dropped. 649 */ 650 if (vp->v_object != object) { 651 error = ENOENT; 652 goto unlock_all; 653 } 654 655 /* 656 * While the object was unlocked, the page may have been: 657 * (1) moved to a different queue, 658 * (2) reallocated to a different object, 659 * (3) reallocated to a different offset, or 660 * (4) cleaned. 661 */ 662 if (!vm_page_in_laundry(m) || m->object != object || 663 m->pindex != pindex || m->dirty == 0) { 664 error = ENXIO; 665 goto unlock_all; 666 } 667 668 /* 669 * The page may have been busied while the object lock was 670 * released. 671 */ 672 if (vm_page_tryxbusy(m) == 0) { 673 error = EBUSY; 674 goto unlock_all; 675 } 676 } 677 678 /* 679 * Remove all writeable mappings, failing if the page is wired. 680 */ 681 if (!vm_page_try_remove_write(m)) { 682 vm_page_xunbusy(m); 683 error = EBUSY; 684 goto unlock_all; 685 } 686 687 /* 688 * If a page is dirty, then it is either being washed 689 * (but not yet cleaned) or it is still in the 690 * laundry. If it is still in the laundry, then we 691 * start the cleaning operation. 692 */ 693 if ((*numpagedout = vm_pageout_cluster(m)) == 0) 694 error = EIO; 695 696 unlock_all: 697 VM_OBJECT_WUNLOCK(object); 698 699 unlock_mp: 700 if (mp != NULL) { 701 if (vp != NULL) 702 vput(vp); 703 vm_object_deallocate(object); 704 vn_finished_write(mp); 705 } 706 707 return (error); 708 } 709 710 /* 711 * Attempt to launder the specified number of pages. 712 * 713 * Returns the number of pages successfully laundered. 714 */ 715 static int 716 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall) 717 { 718 struct scan_state ss; 719 struct vm_pagequeue *pq; 720 vm_object_t object; 721 vm_page_t m, marker; 722 vm_page_astate_t new, old; 723 int act_delta, error, numpagedout, queue, refs, starting_target; 724 int vnodes_skipped; 725 bool pageout_ok; 726 727 object = NULL; 728 starting_target = launder; 729 vnodes_skipped = 0; 730 731 /* 732 * Scan the laundry queues for pages eligible to be laundered. 
We stop 733 * once the target number of dirty pages have been laundered, or once 734 * we've reached the end of the queue. A single iteration of this loop 735 * may cause more than one page to be laundered because of clustering. 736 * 737 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no 738 * swap devices are configured. 739 */ 740 if (atomic_load_acq_int(&swapdev_enabled)) 741 queue = PQ_UNSWAPPABLE; 742 else 743 queue = PQ_LAUNDRY; 744 745 scan: 746 marker = &vmd->vmd_markers[queue]; 747 pq = &vmd->vmd_pagequeues[queue]; 748 vm_pagequeue_lock(pq); 749 vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 750 while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) { 751 if (__predict_false((m->flags & PG_MARKER) != 0)) 752 continue; 753 754 /* 755 * Don't touch a page that was removed from the queue after the 756 * page queue lock was released. Otherwise, ensure that any 757 * pending queue operations, such as dequeues for wired pages, 758 * are handled. 759 */ 760 if (vm_pageout_defer(m, queue, true)) 761 continue; 762 763 /* 764 * Lock the page's object. 765 */ 766 if (object == NULL || object != m->object) { 767 if (object != NULL) 768 VM_OBJECT_WUNLOCK(object); 769 object = atomic_load_ptr(&m->object); 770 if (__predict_false(object == NULL)) 771 /* The page is being freed by another thread. */ 772 continue; 773 774 /* Depends on type-stability. */ 775 VM_OBJECT_WLOCK(object); 776 if (__predict_false(m->object != object)) { 777 VM_OBJECT_WUNLOCK(object); 778 object = NULL; 779 continue; 780 } 781 } 782 783 if (vm_page_tryxbusy(m) == 0) 784 continue; 785 786 /* 787 * Check for wirings now that we hold the object lock and have 788 * exclusively busied the page. If the page is mapped, it may 789 * still be wired by pmap lookups. The call to 790 * vm_page_try_remove_all() below atomically checks for such 791 * wirings and removes mappings. If the page is unmapped, the 792 * wire count is guaranteed not to increase after this check. 793 */ 794 if (__predict_false(vm_page_wired(m))) 795 goto skip_page; 796 797 /* 798 * Invalid pages can be easily freed. They cannot be 799 * mapped; vm_page_free() asserts this. 800 */ 801 if (vm_page_none_valid(m)) 802 goto free_page; 803 804 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; 805 806 for (old = vm_page_astate_load(m);;) { 807 /* 808 * Check to see if the page has been removed from the 809 * queue since the first such check. Leave it alone if 810 * so, discarding any references collected by 811 * pmap_ts_referenced(). 812 */ 813 if (__predict_false(_vm_page_queue(old) == PQ_NONE)) 814 goto skip_page; 815 816 new = old; 817 act_delta = refs; 818 if ((old.flags & PGA_REFERENCED) != 0) { 819 new.flags &= ~PGA_REFERENCED; 820 act_delta++; 821 } 822 if (act_delta == 0) { 823 ; 824 } else if (object->ref_count != 0) { 825 /* 826 * Increase the activation count if the page was 827 * referenced while in the laundry queue. This 828 * makes it less likely that the page will be 829 * returned prematurely to the laundry queue. 830 */ 831 new.act_count += ACT_ADVANCE + 832 act_delta; 833 if (new.act_count > ACT_MAX) 834 new.act_count = ACT_MAX; 835 836 new.flags &= ~PGA_QUEUE_OP_MASK; 837 new.flags |= PGA_REQUEUE; 838 new.queue = PQ_ACTIVE; 839 if (!vm_page_pqstate_commit(m, &old, new)) 840 continue; 841 842 /* 843 * If this was a background laundering, count 844 * activated pages towards our target. 
The 845 * purpose of background laundering is to ensure 846 * that pages are eventually cycled through the 847 * laundry queue, and an activation is a valid 848 * way out. 849 */ 850 if (!in_shortfall) 851 launder--; 852 VM_CNT_INC(v_reactivated); 853 goto skip_page; 854 } else if ((object->flags & OBJ_DEAD) == 0) { 855 new.flags |= PGA_REQUEUE; 856 if (!vm_page_pqstate_commit(m, &old, new)) 857 continue; 858 goto skip_page; 859 } 860 break; 861 } 862 863 /* 864 * If the page appears to be clean at the machine-independent 865 * layer, then remove all of its mappings from the pmap in 866 * anticipation of freeing it. If, however, any of the page's 867 * mappings allow write access, then the page may still be 868 * modified until the last of those mappings are removed. 869 */ 870 if (object->ref_count != 0) { 871 vm_page_test_dirty(m); 872 if (m->dirty == 0 && !vm_page_try_remove_all(m)) 873 goto skip_page; 874 } 875 876 /* 877 * Clean pages are freed, and dirty pages are paged out unless 878 * they belong to a dead object. Requeueing dirty pages from 879 * dead objects is pointless, as they are being paged out and 880 * freed by the thread that destroyed the object. 881 */ 882 if (m->dirty == 0) { 883 free_page: 884 /* 885 * Now we are guaranteed that no other threads are 886 * manipulating the page, check for a last-second 887 * reference. 888 */ 889 if (vm_pageout_defer(m, queue, true)) 890 goto skip_page; 891 vm_page_free(m); 892 VM_CNT_INC(v_dfree); 893 } else if ((object->flags & OBJ_DEAD) == 0) { 894 if (object->type != OBJT_SWAP && 895 object->type != OBJT_DEFAULT) 896 pageout_ok = true; 897 else if (disable_swap_pageouts) 898 pageout_ok = false; 899 else 900 pageout_ok = true; 901 if (!pageout_ok) { 902 vm_page_launder(m); 903 goto skip_page; 904 } 905 906 /* 907 * Form a cluster with adjacent, dirty pages from the 908 * same object, and page out that entire cluster. 909 * 910 * The adjacent, dirty pages must also be in the 911 * laundry. However, their mappings are not checked 912 * for new references. Consequently, a recently 913 * referenced page may be paged out. However, that 914 * page will not be prematurely reclaimed. After page 915 * out, the page will be placed in the inactive queue, 916 * where any new references will be detected and the 917 * page reactivated. 918 */ 919 error = vm_pageout_clean(m, &numpagedout); 920 if (error == 0) { 921 launder -= numpagedout; 922 ss.scanned += numpagedout; 923 } else if (error == EDEADLK) { 924 pageout_lock_miss++; 925 vnodes_skipped++; 926 } 927 object = NULL; 928 } else { 929 skip_page: 930 vm_page_xunbusy(m); 931 } 932 } 933 if (object != NULL) { 934 VM_OBJECT_WUNLOCK(object); 935 object = NULL; 936 } 937 vm_pagequeue_lock(pq); 938 vm_pageout_end_scan(&ss); 939 vm_pagequeue_unlock(pq); 940 941 if (launder > 0 && queue == PQ_UNSWAPPABLE) { 942 queue = PQ_LAUNDRY; 943 goto scan; 944 } 945 946 /* 947 * Wakeup the sync daemon if we skipped a vnode in a writeable object 948 * and we didn't launder enough pages. 949 */ 950 if (vnodes_skipped > 0 && launder > 0) 951 (void)speedup_syncer(); 952 953 return (starting_target - launder); 954 } 955 956 /* 957 * Compute the integer square root. 958 */ 959 static u_int 960 isqrt(u_int num) 961 { 962 u_int bit, root, tmp; 963 964 bit = num != 0 ? 
(1u << ((fls(num) - 1) & ~1)) : 0; 965 root = 0; 966 while (bit != 0) { 967 tmp = root + bit; 968 root >>= 1; 969 if (num >= tmp) { 970 num -= tmp; 971 root += bit; 972 } 973 bit >>= 2; 974 } 975 return (root); 976 } 977 978 /* 979 * Perform the work of the laundry thread: periodically wake up and determine 980 * whether any pages need to be laundered. If so, determine the number of pages 981 * that need to be laundered, and launder them. 982 */ 983 static void 984 vm_pageout_laundry_worker(void *arg) 985 { 986 struct vm_domain *vmd; 987 struct vm_pagequeue *pq; 988 uint64_t nclean, ndirty, nfreed; 989 int domain, last_target, launder, shortfall, shortfall_cycle, target; 990 bool in_shortfall; 991 992 domain = (uintptr_t)arg; 993 vmd = VM_DOMAIN(domain); 994 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; 995 KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 996 997 shortfall = 0; 998 in_shortfall = false; 999 shortfall_cycle = 0; 1000 last_target = target = 0; 1001 nfreed = 0; 1002 1003 /* 1004 * Calls to these handlers are serialized by the swap syscall lock. 1005 */ 1006 (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd, 1007 EVENTHANDLER_PRI_ANY); 1008 (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd, 1009 EVENTHANDLER_PRI_ANY); 1010 1011 /* 1012 * The pageout laundry worker is never done, so loop forever. 1013 */ 1014 for (;;) { 1015 KASSERT(target >= 0, ("negative target %d", target)); 1016 KASSERT(shortfall_cycle >= 0, 1017 ("negative cycle %d", shortfall_cycle)); 1018 launder = 0; 1019 1020 /* 1021 * First determine whether we need to launder pages to meet a 1022 * shortage of free pages. 1023 */ 1024 if (shortfall > 0) { 1025 in_shortfall = true; 1026 shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE; 1027 target = shortfall; 1028 } else if (!in_shortfall) 1029 goto trybackground; 1030 else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) { 1031 /* 1032 * We recently entered shortfall and began laundering 1033 * pages. If we have completed that laundering run 1034 * (and we are no longer in shortfall) or we have met 1035 * our laundry target through other activity, then we 1036 * can stop laundering pages. 1037 */ 1038 in_shortfall = false; 1039 target = 0; 1040 goto trybackground; 1041 } 1042 launder = target / shortfall_cycle--; 1043 goto dolaundry; 1044 1045 /* 1046 * There's no immediate need to launder any pages; see if we 1047 * meet the conditions to perform background laundering: 1048 * 1049 * 1. The ratio of dirty to clean inactive pages exceeds the 1050 * background laundering threshold, or 1051 * 2. we haven't yet reached the target of the current 1052 * background laundering run. 1053 * 1054 * The background laundering threshold is not a constant. 1055 * Instead, it is a slowly growing function of the number of 1056 * clean pages freed by the page daemon since the last 1057 * background laundering. Thus, as the ratio of dirty to 1058 * clean inactive pages grows, the amount of memory pressure 1059 * required to trigger laundering decreases. We ensure 1060 * that the threshold is non-zero after an inactive queue 1061 * scan, even if that scan failed to free a single clean page. 
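		 *
		 * As a hypothetical worked example: with vmd_free_target -
		 * vmd_free_min equal to 1000 pages and nfreed = 8999,
		 * howmany(nfreed + 1, 1000) is 9 and isqrt(9) is 3, so a
		 * background run is triggered once the laundry queue holds
		 * roughly one third as many pages as the clean count,
		 * whereas with nfreed = 0 the dirty count must instead
		 * reach the clean count before laundering starts.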
1062 */ 1063 trybackground: 1064 nclean = vmd->vmd_free_count + 1065 vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt; 1066 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt; 1067 if (target == 0 && ndirty * isqrt(howmany(nfreed + 1, 1068 vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) { 1069 target = vmd->vmd_background_launder_target; 1070 } 1071 1072 /* 1073 * We have a non-zero background laundering target. If we've 1074 * laundered up to our maximum without observing a page daemon 1075 * request, just stop. This is a safety belt that ensures we 1076 * don't launder an excessive amount if memory pressure is low 1077 * and the ratio of dirty to clean pages is large. Otherwise, 1078 * proceed at the background laundering rate. 1079 */ 1080 if (target > 0) { 1081 if (nfreed > 0) { 1082 nfreed = 0; 1083 last_target = target; 1084 } else if (last_target - target >= 1085 vm_background_launder_max * PAGE_SIZE / 1024) { 1086 target = 0; 1087 } 1088 launder = vm_background_launder_rate * PAGE_SIZE / 1024; 1089 launder /= VM_LAUNDER_RATE; 1090 if (launder > target) 1091 launder = target; 1092 } 1093 1094 dolaundry: 1095 if (launder > 0) { 1096 /* 1097 * Because of I/O clustering, the number of laundered 1098 * pages could exceed "target" by the maximum size of 1099 * a cluster minus one. 1100 */ 1101 target -= min(vm_pageout_launder(vmd, launder, 1102 in_shortfall), target); 1103 pause("laundp", hz / VM_LAUNDER_RATE); 1104 } 1105 1106 /* 1107 * If we're not currently laundering pages and the page daemon 1108 * hasn't posted a new request, sleep until the page daemon 1109 * kicks us. 1110 */ 1111 vm_pagequeue_lock(pq); 1112 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE) 1113 (void)mtx_sleep(&vmd->vmd_laundry_request, 1114 vm_pagequeue_lockptr(pq), PVM, "launds", 0); 1115 1116 /* 1117 * If the pagedaemon has indicated that it's in shortfall, start 1118 * a shortfall laundering unless we're already in the middle of 1119 * one. This may preempt a background laundering. 1120 */ 1121 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL && 1122 (!in_shortfall || shortfall_cycle == 0)) { 1123 shortfall = vm_laundry_target(vmd) + 1124 vmd->vmd_pageout_deficit; 1125 target = 0; 1126 } else 1127 shortfall = 0; 1128 1129 if (target == 0) 1130 vmd->vmd_laundry_request = VM_LAUNDRY_IDLE; 1131 nfreed += vmd->vmd_clean_pages_freed; 1132 vmd->vmd_clean_pages_freed = 0; 1133 vm_pagequeue_unlock(pq); 1134 } 1135 } 1136 1137 /* 1138 * Compute the number of pages we want to try to move from the 1139 * active queue to either the inactive or laundry queue. 1140 * 1141 * When scanning active pages during a shortage, we make clean pages 1142 * count more heavily towards the page shortage than dirty pages. 1143 * This is because dirty pages must be laundered before they can be 1144 * reused and thus have less utility when attempting to quickly 1145 * alleviate a free page shortage. However, this weighting also 1146 * causes the scan to deactivate dirty pages more aggressively, 1147 * improving the effectiveness of clustering. 1148 */ 1149 static int 1150 vm_pageout_active_target(struct vm_domain *vmd) 1151 { 1152 int shortage; 1153 1154 shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) - 1155 (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt + 1156 vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight); 1157 shortage *= act_scan_laundry_weight; 1158 return (shortage); 1159 } 1160 1161 /* 1162 * Scan the active queue. 
If there is no shortage of inactive pages, scan a 1163 * small portion of the queue in order to maintain quasi-LRU. 1164 */ 1165 static void 1166 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage) 1167 { 1168 struct scan_state ss; 1169 vm_object_t object; 1170 vm_page_t m, marker; 1171 struct vm_pagequeue *pq; 1172 vm_page_astate_t old, new; 1173 long min_scan; 1174 int act_delta, max_scan, ps_delta, refs, scan_tick; 1175 uint8_t nqueue; 1176 1177 marker = &vmd->vmd_markers[PQ_ACTIVE]; 1178 pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; 1179 vm_pagequeue_lock(pq); 1180 1181 /* 1182 * If we're just idle polling attempt to visit every 1183 * active page within 'update_period' seconds. 1184 */ 1185 scan_tick = ticks; 1186 if (vm_pageout_update_period != 0) { 1187 min_scan = pq->pq_cnt; 1188 min_scan *= scan_tick - vmd->vmd_last_active_scan; 1189 min_scan /= hz * vm_pageout_update_period; 1190 } else 1191 min_scan = 0; 1192 if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0)) 1193 vmd->vmd_last_active_scan = scan_tick; 1194 1195 /* 1196 * Scan the active queue for pages that can be deactivated. Update 1197 * the per-page activity counter and use it to identify deactivation 1198 * candidates. Held pages may be deactivated. 1199 * 1200 * To avoid requeuing each page that remains in the active queue, we 1201 * implement the CLOCK algorithm. To keep the implementation of the 1202 * enqueue operation consistent for all page queues, we use two hands, 1203 * represented by marker pages. Scans begin at the first hand, which 1204 * precedes the second hand in the queue. When the two hands meet, 1205 * they are moved back to the head and tail of the queue, respectively, 1206 * and scanning resumes. 1207 */ 1208 max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan; 1209 act_scan: 1210 vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan); 1211 while ((m = vm_pageout_next(&ss, false)) != NULL) { 1212 if (__predict_false(m == &vmd->vmd_clock[1])) { 1213 vm_pagequeue_lock(pq); 1214 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1215 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q); 1216 TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0], 1217 plinks.q); 1218 TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1], 1219 plinks.q); 1220 max_scan -= ss.scanned; 1221 vm_pageout_end_scan(&ss); 1222 goto act_scan; 1223 } 1224 if (__predict_false((m->flags & PG_MARKER) != 0)) 1225 continue; 1226 1227 /* 1228 * Don't touch a page that was removed from the queue after the 1229 * page queue lock was released. Otherwise, ensure that any 1230 * pending queue operations, such as dequeues for wired pages, 1231 * are handled. 1232 */ 1233 if (vm_pageout_defer(m, PQ_ACTIVE, true)) 1234 continue; 1235 1236 /* 1237 * A page's object pointer may be set to NULL before 1238 * the object lock is acquired. 1239 */ 1240 object = atomic_load_ptr(&m->object); 1241 if (__predict_false(object == NULL)) 1242 /* 1243 * The page has been removed from its object. 1244 */ 1245 continue; 1246 1247 /* Deferred free of swap space. */ 1248 if ((m->a.flags & PGA_SWAP_FREE) != 0 && 1249 VM_OBJECT_TRYWLOCK(object)) { 1250 if (m->object == object) 1251 vm_pager_page_unswapped(m); 1252 VM_OBJECT_WUNLOCK(object); 1253 } 1254 1255 /* 1256 * Check to see "how much" the page has been used. 1257 * 1258 * Test PGA_REFERENCED after calling pmap_ts_referenced() so 1259 * that a reference from a concurrently destroyed mapping is 1260 * observed here and now. 1261 * 1262 * Perform an unsynchronized object ref count check. 
While 1263 * the page lock ensures that the page is not reallocated to 1264 * another object, in particular, one with unmanaged mappings 1265 * that cannot support pmap_ts_referenced(), two races are, 1266 * nonetheless, possible: 1267 * 1) The count was transitioning to zero, but we saw a non- 1268 * zero value. pmap_ts_referenced() will return zero 1269 * because the page is not mapped. 1270 * 2) The count was transitioning to one, but we saw zero. 1271 * This race delays the detection of a new reference. At 1272 * worst, we will deactivate and reactivate the page. 1273 */ 1274 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; 1275 1276 old = vm_page_astate_load(m); 1277 do { 1278 /* 1279 * Check to see if the page has been removed from the 1280 * queue since the first such check. Leave it alone if 1281 * so, discarding any references collected by 1282 * pmap_ts_referenced(). 1283 */ 1284 if (__predict_false(_vm_page_queue(old) == PQ_NONE)) 1285 break; 1286 1287 /* 1288 * Advance or decay the act_count based on recent usage. 1289 */ 1290 new = old; 1291 act_delta = refs; 1292 if ((old.flags & PGA_REFERENCED) != 0) { 1293 new.flags &= ~PGA_REFERENCED; 1294 act_delta++; 1295 } 1296 if (act_delta != 0) { 1297 new.act_count += ACT_ADVANCE + act_delta; 1298 if (new.act_count > ACT_MAX) 1299 new.act_count = ACT_MAX; 1300 } else { 1301 new.act_count -= min(new.act_count, 1302 ACT_DECLINE); 1303 } 1304 1305 if (new.act_count > 0) { 1306 /* 1307 * Adjust the activation count and keep the page 1308 * in the active queue. The count might be left 1309 * unchanged if it is saturated. The page may 1310 * have been moved to a different queue since we 1311 * started the scan, in which case we move it 1312 * back. 1313 */ 1314 ps_delta = 0; 1315 if (old.queue != PQ_ACTIVE) { 1316 new.flags &= ~PGA_QUEUE_OP_MASK; 1317 new.flags |= PGA_REQUEUE; 1318 new.queue = PQ_ACTIVE; 1319 } 1320 } else { 1321 /* 1322 * When not short for inactive pages, let dirty 1323 * pages go through the inactive queue before 1324 * moving to the laundry queue. This gives them 1325 * some extra time to be reactivated, 1326 * potentially avoiding an expensive pageout. 1327 * However, during a page shortage, the inactive 1328 * queue is necessarily small, and so dirty 1329 * pages would only spend a trivial amount of 1330 * time in the inactive queue. Therefore, we 1331 * might as well place them directly in the 1332 * laundry queue to reduce queuing overhead. 1333 * 1334 * Calling vm_page_test_dirty() here would 1335 * require acquisition of the object's write 1336 * lock. However, during a page shortage, 1337 * directing dirty pages into the laundry queue 1338 * is only an optimization and not a 1339 * requirement. Therefore, we simply rely on 1340 * the opportunistic updates to the page's dirty 1341 * field by the pmap. 
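				 *
				 * In short, the decision below is: no
				 * inactive shortage -> PQ_INACTIVE with no
				 * credit toward the shortage; shortage and an
				 * apparently clean page -> PQ_INACTIVE,
				 * credited as act_scan_laundry_weight pages;
				 * shortage and an apparently dirty page ->
				 * PQ_LAUNDRY, credited as one page.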
1342 */ 1343 if (page_shortage <= 0) { 1344 nqueue = PQ_INACTIVE; 1345 ps_delta = 0; 1346 } else if (m->dirty == 0) { 1347 nqueue = PQ_INACTIVE; 1348 ps_delta = act_scan_laundry_weight; 1349 } else { 1350 nqueue = PQ_LAUNDRY; 1351 ps_delta = 1; 1352 } 1353 1354 new.flags &= ~PGA_QUEUE_OP_MASK; 1355 new.flags |= PGA_REQUEUE; 1356 new.queue = nqueue; 1357 } 1358 } while (!vm_page_pqstate_commit(m, &old, new)); 1359 1360 page_shortage -= ps_delta; 1361 } 1362 vm_pagequeue_lock(pq); 1363 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1364 TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q); 1365 vm_pageout_end_scan(&ss); 1366 vm_pagequeue_unlock(pq); 1367 } 1368 1369 static int 1370 vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker, 1371 vm_page_t m) 1372 { 1373 vm_page_astate_t as; 1374 1375 vm_pagequeue_assert_locked(pq); 1376 1377 as = vm_page_astate_load(m); 1378 if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0) 1379 return (0); 1380 vm_page_aflag_set(m, PGA_ENQUEUED); 1381 TAILQ_INSERT_BEFORE(marker, m, plinks.q); 1382 return (1); 1383 } 1384 1385 /* 1386 * Re-add stuck pages to the inactive queue. We will examine them again 1387 * during the next scan. If the queue state of a page has changed since 1388 * it was physically removed from the page queue in 1389 * vm_pageout_collect_batch(), don't do anything with that page. 1390 */ 1391 static void 1392 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq, 1393 vm_page_t m) 1394 { 1395 struct vm_pagequeue *pq; 1396 vm_page_t marker; 1397 int delta; 1398 1399 delta = 0; 1400 marker = ss->marker; 1401 pq = ss->pq; 1402 1403 if (m != NULL) { 1404 if (vm_batchqueue_insert(bq, m)) 1405 return; 1406 vm_pagequeue_lock(pq); 1407 delta += vm_pageout_reinsert_inactive_page(pq, marker, m); 1408 } else 1409 vm_pagequeue_lock(pq); 1410 while ((m = vm_batchqueue_pop(bq)) != NULL) 1411 delta += vm_pageout_reinsert_inactive_page(pq, marker, m); 1412 vm_pagequeue_cnt_add(pq, delta); 1413 vm_pagequeue_unlock(pq); 1414 vm_batchqueue_init(bq); 1415 } 1416 1417 /* 1418 * Attempt to reclaim the requested number of pages from the inactive queue. 1419 * Returns true if the shortage was addressed. 1420 */ 1421 static int 1422 vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage, 1423 int *addl_shortage) 1424 { 1425 struct scan_state ss; 1426 struct vm_batchqueue rq; 1427 vm_page_t m, marker; 1428 struct vm_pagequeue *pq; 1429 vm_object_t object; 1430 vm_page_astate_t old, new; 1431 int act_delta, addl_page_shortage, deficit, page_shortage, refs; 1432 int starting_page_shortage; 1433 1434 /* 1435 * The addl_page_shortage is an estimate of the number of temporarily 1436 * stuck pages in the inactive queue. In other words, the 1437 * number of pages from the inactive count that should be 1438 * discounted in setting the target for the active queue scan. 1439 */ 1440 addl_page_shortage = 0; 1441 1442 /* 1443 * vmd_pageout_deficit counts the number of pages requested in 1444 * allocations that failed because of a free page shortage. We assume 1445 * that the allocations will be reattempted and thus include the deficit 1446 * in our scan target. 1447 */ 1448 deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit); 1449 starting_page_shortage = page_shortage = shortage + deficit; 1450 1451 object = NULL; 1452 vm_batchqueue_init(&rq); 1453 1454 /* 1455 * Start scanning the inactive queue for pages that we can free. 
The 1456 * scan will stop when we reach the target or we have scanned the 1457 * entire queue. (Note that m->a.act_count is not used to make 1458 * decisions for the inactive queue, only for the active queue.) 1459 */ 1460 marker = &vmd->vmd_markers[PQ_INACTIVE]; 1461 pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; 1462 vm_pagequeue_lock(pq); 1463 vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 1464 while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) { 1465 KASSERT((m->flags & PG_MARKER) == 0, 1466 ("marker page %p was dequeued", m)); 1467 1468 /* 1469 * Don't touch a page that was removed from the queue after the 1470 * page queue lock was released. Otherwise, ensure that any 1471 * pending queue operations, such as dequeues for wired pages, 1472 * are handled. 1473 */ 1474 if (vm_pageout_defer(m, PQ_INACTIVE, false)) 1475 continue; 1476 1477 /* 1478 * Lock the page's object. 1479 */ 1480 if (object == NULL || object != m->object) { 1481 if (object != NULL) 1482 VM_OBJECT_WUNLOCK(object); 1483 object = atomic_load_ptr(&m->object); 1484 if (__predict_false(object == NULL)) 1485 /* The page is being freed by another thread. */ 1486 continue; 1487 1488 /* Depends on type-stability. */ 1489 VM_OBJECT_WLOCK(object); 1490 if (__predict_false(m->object != object)) { 1491 VM_OBJECT_WUNLOCK(object); 1492 object = NULL; 1493 goto reinsert; 1494 } 1495 } 1496 1497 if (vm_page_tryxbusy(m) == 0) { 1498 /* 1499 * Don't mess with busy pages. Leave them at 1500 * the front of the queue. Most likely, they 1501 * are being paged out and will leave the 1502 * queue shortly after the scan finishes. So, 1503 * they ought to be discounted from the 1504 * inactive count. 1505 */ 1506 addl_page_shortage++; 1507 goto reinsert; 1508 } 1509 1510 /* Deferred free of swap space. */ 1511 if ((m->a.flags & PGA_SWAP_FREE) != 0) 1512 vm_pager_page_unswapped(m); 1513 1514 /* 1515 * Check for wirings now that we hold the object lock and have 1516 * exclusively busied the page. If the page is mapped, it may 1517 * still be wired by pmap lookups. The call to 1518 * vm_page_try_remove_all() below atomically checks for such 1519 * wirings and removes mappings. If the page is unmapped, the 1520 * wire count is guaranteed not to increase after this check. 1521 */ 1522 if (__predict_false(vm_page_wired(m))) 1523 goto skip_page; 1524 1525 /* 1526 * Invalid pages can be easily freed. They cannot be 1527 * mapped, vm_page_free() asserts this. 1528 */ 1529 if (vm_page_none_valid(m)) 1530 goto free_page; 1531 1532 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; 1533 1534 for (old = vm_page_astate_load(m);;) { 1535 /* 1536 * Check to see if the page has been removed from the 1537 * queue since the first such check. Leave it alone if 1538 * so, discarding any references collected by 1539 * pmap_ts_referenced(). 1540 */ 1541 if (__predict_false(_vm_page_queue(old) == PQ_NONE)) 1542 goto skip_page; 1543 1544 new = old; 1545 act_delta = refs; 1546 if ((old.flags & PGA_REFERENCED) != 0) { 1547 new.flags &= ~PGA_REFERENCED; 1548 act_delta++; 1549 } 1550 if (act_delta == 0) { 1551 ; 1552 } else if (object->ref_count != 0) { 1553 /* 1554 * Increase the activation count if the 1555 * page was referenced while in the 1556 * inactive queue. This makes it less 1557 * likely that the page will be returned 1558 * prematurely to the inactive queue. 
1559 */ 1560 new.act_count += ACT_ADVANCE + 1561 act_delta; 1562 if (new.act_count > ACT_MAX) 1563 new.act_count = ACT_MAX; 1564 1565 new.flags &= ~PGA_QUEUE_OP_MASK; 1566 new.flags |= PGA_REQUEUE; 1567 new.queue = PQ_ACTIVE; 1568 if (!vm_page_pqstate_commit(m, &old, new)) 1569 continue; 1570 1571 VM_CNT_INC(v_reactivated); 1572 goto skip_page; 1573 } else if ((object->flags & OBJ_DEAD) == 0) { 1574 new.queue = PQ_INACTIVE; 1575 new.flags |= PGA_REQUEUE; 1576 if (!vm_page_pqstate_commit(m, &old, new)) 1577 continue; 1578 goto skip_page; 1579 } 1580 break; 1581 } 1582 1583 /* 1584 * If the page appears to be clean at the machine-independent 1585 * layer, then remove all of its mappings from the pmap in 1586 * anticipation of freeing it. If, however, any of the page's 1587 * mappings allow write access, then the page may still be 1588 * modified until the last of those mappings are removed. 1589 */ 1590 if (object->ref_count != 0) { 1591 vm_page_test_dirty(m); 1592 if (m->dirty == 0 && !vm_page_try_remove_all(m)) 1593 goto skip_page; 1594 } 1595 1596 /* 1597 * Clean pages can be freed, but dirty pages must be sent back 1598 * to the laundry, unless they belong to a dead object. 1599 * Requeueing dirty pages from dead objects is pointless, as 1600 * they are being paged out and freed by the thread that 1601 * destroyed the object. 1602 */ 1603 if (m->dirty == 0) { 1604 free_page: 1605 /* 1606 * Now we are guaranteed that no other threads are 1607 * manipulating the page, check for a last-second 1608 * reference that would save it from doom. 1609 */ 1610 if (vm_pageout_defer(m, PQ_INACTIVE, false)) 1611 goto skip_page; 1612 1613 /* 1614 * Because we dequeued the page and have already checked 1615 * for pending dequeue and enqueue requests, we can 1616 * safely disassociate the page from the inactive queue 1617 * without holding the queue lock. 1618 */ 1619 m->a.queue = PQ_NONE; 1620 vm_page_free(m); 1621 page_shortage--; 1622 continue; 1623 } 1624 if ((object->flags & OBJ_DEAD) == 0) 1625 vm_page_launder(m); 1626 skip_page: 1627 vm_page_xunbusy(m); 1628 continue; 1629 reinsert: 1630 vm_pageout_reinsert_inactive(&ss, &rq, m); 1631 } 1632 if (object != NULL) 1633 VM_OBJECT_WUNLOCK(object); 1634 vm_pageout_reinsert_inactive(&ss, &rq, NULL); 1635 vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL); 1636 vm_pagequeue_lock(pq); 1637 vm_pageout_end_scan(&ss); 1638 vm_pagequeue_unlock(pq); 1639 1640 VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage); 1641 1642 /* 1643 * Wake up the laundry thread so that it can perform any needed 1644 * laundering. If we didn't meet our target, we're in shortfall and 1645 * need to launder more aggressively. If PQ_LAUNDRY is empty and no 1646 * swap devices are configured, the laundry thread has no work to do, so 1647 * don't bother waking it up. 1648 * 1649 * The laundry thread uses the number of inactive queue scans elapsed 1650 * since the last laundering to determine whether to launder again, so 1651 * keep count. 
	 */
	if (starting_page_shortage > 0) {
		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
		vm_pagequeue_lock(pq);
		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
			if (page_shortage > 0) {
				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
				VM_CNT_INC(v_pdshortfalls);
			} else if (vmd->vmd_laundry_request !=
			    VM_LAUNDRY_SHORTFALL)
				vmd->vmd_laundry_request =
				    VM_LAUNDRY_BACKGROUND;
			wakeup(&vmd->vmd_laundry_request);
		}
		vmd->vmd_clean_pages_freed +=
		    starting_page_shortage - page_shortage;
		vm_pagequeue_unlock(pq);
	}

	/*
	 * Wake up the swapout daemon if we didn't free the targeted number of
	 * pages.
	 */
	if (page_shortage > 0)
		vm_swapout_run();

	/*
	 * If the inactive queue scan fails repeatedly to meet its
	 * target, kill the largest process.
	 */
	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);

	/*
	 * Reclaim pages by swapping out idle processes, if configured to do so.
	 */
	vm_swapout_run_idle();

	/*
	 * See the description of addl_page_shortage above.
	 */
	*addl_shortage = addl_page_shortage + deficit;

	return (page_shortage <= 0);
}

static int vm_pageout_oom_vote;

/*
 * The pagedaemon threads randomly select one to perform the OOM.
 * Trying to kill processes before all pagedaemons have failed to
 * reach the free target is premature.
 */
static void
vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage)
{
	int old_vote;

	if (starting_page_shortage <= 0 || starting_page_shortage !=
	    page_shortage)
		vmd->vmd_oom_seq = 0;
	else
		vmd->vmd_oom_seq++;
	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
		if (vmd->vmd_oom) {
			vmd->vmd_oom = FALSE;
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
		}
		return;
	}

	/*
	 * Do not follow the call sequence until the OOM condition is
	 * cleared.
	 */
	vmd->vmd_oom_seq = 0;

	if (vmd->vmd_oom)
		return;

	vmd->vmd_oom = TRUE;
	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
	if (old_vote != vm_ndomains - 1)
		return;

	/*
	 * The current pagedaemon thread is the last in the quorum to
	 * start OOM.  Initiate the selection and signaling of the
	 * victim.
	 */
	vm_pageout_oom(VM_OOM_MEM);

	/*
	 * After one round of OOM terror, recall our vote.  On the
	 * next pass, the current pagedaemon will vote again if the
	 * low memory condition is still there, due to vmd_oom being
	 * false.
	 */
	vmd->vmd_oom = FALSE;
	atomic_subtract_int(&vm_pageout_oom_vote, 1);
}

/*
 * The OOM killer is the page daemon's action of last resort when
 * memory allocation requests have been stalled for a prolonged period
 * of time because it cannot reclaim memory.  This function computes
 * the approximate number of physical pages that could be reclaimed if
 * the specified address space is destroyed.
 *
 * Private, anonymous memory owned by the address space is the
 * principal resource that we expect to recover after an OOM kill.
 * Since the physical pages mapped by the address space's COW entries
 * are typically shared pages, they are unlikely to be released and so
 * they are not counted.
1767 * 1768 * To get to the point where the page daemon runs the OOM killer, its 1769 * efforts to write-back vnode-backed pages may have stalled. This 1770 * could be caused by a memory allocation deadlock in the write path 1771 * that might be resolved by an OOM kill. Therefore, physical pages 1772 * belonging to vnode-backed objects are counted, because they might 1773 * be freed without being written out first if the address space holds 1774 * the last reference to an unlinked vnode. 1775 * 1776 * Similarly, physical pages belonging to OBJT_PHYS objects are 1777 * counted because the address space might hold the last reference to 1778 * the object. 1779 */ 1780 static long 1781 vm_pageout_oom_pagecount(struct vmspace *vmspace) 1782 { 1783 vm_map_t map; 1784 vm_map_entry_t entry; 1785 vm_object_t obj; 1786 long res; 1787 1788 map = &vmspace->vm_map; 1789 KASSERT(!map->system_map, ("system map")); 1790 sx_assert(&map->lock, SA_LOCKED); 1791 res = 0; 1792 VM_MAP_ENTRY_FOREACH(entry, map) { 1793 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 1794 continue; 1795 obj = entry->object.vm_object; 1796 if (obj == NULL) 1797 continue; 1798 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 1799 obj->ref_count != 1) 1800 continue; 1801 switch (obj->type) { 1802 case OBJT_DEFAULT: 1803 case OBJT_SWAP: 1804 case OBJT_PHYS: 1805 case OBJT_VNODE: 1806 res += obj->resident_page_count; 1807 break; 1808 } 1809 } 1810 return (res); 1811 } 1812 1813 static int vm_oom_ratelim_last; 1814 static int vm_oom_pf_secs = 10; 1815 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0, 1816 ""); 1817 static struct mtx vm_oom_ratelim_mtx; 1818 1819 void 1820 vm_pageout_oom(int shortage) 1821 { 1822 struct proc *p, *bigproc; 1823 vm_offset_t size, bigsize; 1824 struct thread *td; 1825 struct vmspace *vm; 1826 int now; 1827 bool breakout; 1828 1829 /* 1830 * For OOM requests originating from vm_fault(), there is a high 1831 * chance that a single large process faults simultaneously in 1832 * several threads. Also, on an active system running many 1833 * processes of middle-size, like buildworld, all of them 1834 * could fault almost simultaneously as well. 1835 * 1836 * To avoid killing too many processes, rate-limit OOMs 1837 * initiated by vm_fault() time-outs on the waits for free 1838 * pages. 1839 */ 1840 mtx_lock(&vm_oom_ratelim_mtx); 1841 now = ticks; 1842 if (shortage == VM_OOM_MEM_PF && 1843 (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) { 1844 mtx_unlock(&vm_oom_ratelim_mtx); 1845 return; 1846 } 1847 vm_oom_ratelim_last = now; 1848 mtx_unlock(&vm_oom_ratelim_mtx); 1849 1850 /* 1851 * We keep the process bigproc locked once we find it to keep anyone 1852 * from messing with it; however, there is a possibility of 1853 * deadlock if process B is bigproc and one of its child processes 1854 * attempts to propagate a signal to B while we are waiting for A's 1855 * lock while walking this list. To avoid this, we don't block on 1856 * the process lock but just skip a process if it is already locked. 1857 */ 1858 bigproc = NULL; 1859 bigsize = 0; 1860 sx_slock(&allproc_lock); 1861 FOREACH_PROC_IN_SYSTEM(p) { 1862 PROC_LOCK(p); 1863 1864 /* 1865 * If this is a system, protected or killed process, skip it. 

static int vm_oom_ratelim_last;
static int vm_oom_pf_secs = 10;
SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
    "Minimum interval in seconds between OOM kills initiated by page faults");
static struct mtx vm_oom_ratelim_mtx;

void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;
	int now;
	bool breakout;

	/*
	 * For OOM requests originating from vm_fault(), there is a high
	 * chance that a single large process faults simultaneously in
	 * several threads.  Also, on an active system running many
	 * medium-sized processes, like buildworld, all of them
	 * could fault almost simultaneously as well.
	 *
	 * To avoid killing too many processes, rate-limit OOMs
	 * initiated by vm_fault() time-outs on the waits for free
	 * pages.
	 */
	mtx_lock(&vm_oom_ratelim_mtx);
	now = ticks;
	if (shortage == VM_OOM_MEM_PF &&
	    (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
		mtx_unlock(&vm_oom_ratelim_mtx);
		return;
	}
	vm_oom_ratelim_last = now;
	mtx_unlock(&vm_oom_ratelim_mtx);

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for the
	 * lock of another process on the list.  To avoid this, we don't
	 * block on the process lock but just skip a process if it is
	 * already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);

		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
		    p->p_pid == 1 || P_KILLED(p) ||
		    (p->p_pid < 48 && swap_pager_avail != 0)) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = false;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td) &&
			    !TD_IS_SUSPENDED(td) &&
			    !TD_IS_SWAPPED(td)) {
				thread_unlock(td);
				breakout = true;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Get the process size.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		sx_sunlock(&allproc_lock);
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			sx_slock(&allproc_lock);
			PRELE(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
			size += vm_pageout_oom_pagecount(vm);
		vm_map_unlock_read(&vm->vm_map);
		vmspace_free(vm);
		sx_slock(&allproc_lock);

		/*
		 * If this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PRELE(bigproc);
			bigproc = p;
			bigsize = size;
		} else {
			PRELE(p);
		}
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
			panic("out of swap space");
		PROC_LOCK(bigproc);
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		_PRELE(bigproc);
		PROC_UNLOCK(bigproc);
	}
}
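
/*
 * Illustrative sketch (not part of the original code, not compiled): the
 * tick-based rate limit applied to VM_OOM_MEM_PF requests at the top of
 * vm_pageout_oom() above.  Because the global "ticks" counter wraps, the
 * comparison is done on the unsigned difference, which remains correct
 * across the wrap.  Locking of the shared timestamp is omitted here; the
 * identifiers "ratelim_last" and "ratelim_allow" are hypothetical and
 * exist only for this sketch.
 */
#if 0
static int ratelim_last;

static bool
ratelim_allow(int min_secs)
{
	int now;

	now = ticks;
	if ((u_int)(now - ratelim_last) < (u_int)(hz * min_secs))
		return (false);		/* Too soon since the last event. */
	ratelim_last = now;
	return (true);
}
#endif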

/*
 * Signal a free page shortage to subsystems that have registered an event
 * handler.  Reclaim memory from UMA in the event of a severe shortage.
 * Return true if the free page count should be re-evaluated.
 */
static bool
vm_pageout_lowmem(void)
{
	static int lowmem_ticks = 0;
	int last;
	bool ret;

	ret = false;

	last = atomic_load_int(&lowmem_ticks);
	while ((u_int)(ticks - last) / hz >= lowmem_period) {
		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
			continue;

		/*
		 * Decrease registered cache sizes.
		 */
		SDT_PROBE0(vm, , , vm__lowmem_scan);
		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);

		/*
		 * We do this explicitly after the caches have been
		 * drained above.
		 */
		uma_reclaim(UMA_RECLAIM_TRIM);
		ret = true;
		break;
	}

	/*
	 * Kick off an asynchronous reclaim of cached memory if one of the
	 * page daemons is failing to keep up with demand.  Use the "severe"
	 * threshold instead of "min" to ensure that we do not blow away the
	 * caches if a subset of the NUMA domains are depleted by kernel memory
	 * allocations; the domainset iterators automatically skip domains
	 * below the "min" threshold on the first pass.
	 *
	 * The UMA reclaim worker has its own rate-limiting mechanism, so
	 * don't worry about kicking it too often.
	 */
	if (vm_page_count_severe())
		uma_reclaim_wakeup();

	return (ret);
}

static void
vm_pageout_worker(void *arg)
{
	struct vm_domain *vmd;
	u_int ofree;
	int addl_shortage, domain, shortage;
	bool target_met;

	domain = (uintptr_t)arg;
	vmd = VM_DOMAIN(domain);
	shortage = 0;
	target_met = true;

	/*
	 * XXXKIB It could be useful to bind pageout daemon threads to
	 * the cores belonging to the domain, from which vm_page_array
	 * is allocated.
	 */

	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
	vmd->vmd_last_active_scan = ticks;

	/*
	 * The pageout daemon worker is never done, so loop forever.
	 */
	while (TRUE) {
		vm_domain_pageout_lock(vmd);

		/*
		 * We need to clear wanted before we check the limits.  This
		 * prevents races with wakers who will check wanted after they
		 * reach the limit.
		 */
		atomic_store_int(&vmd->vmd_pageout_wanted, 0);

		/*
		 * Might the page daemon need to run again?
		 */
		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
			/*
			 * Yes.  If the scan failed to produce enough free
			 * pages, sleep uninterruptibly for some time in the
			 * hope that the laundry thread will clean some pages.
			 */
			vm_domain_pageout_unlock(vmd);
			if (!target_met)
				pause("pwait", hz / VM_INACT_SCAN_RATE);
		} else {
			/*
			 * No, sleep until the next wakeup or until pages
			 * need to have their reference stats updated.
			 */
			if (mtx_sleep(&vmd->vmd_pageout_wanted,
			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
				VM_CNT_INC(v_pdwakeups);
		}

		/* Prevent spurious wakeups by ensuring that wanted is set. */
		atomic_store_int(&vmd->vmd_pageout_wanted, 1);

		/*
		 * Use the controller to calculate how many pages to free in
		 * this interval, and scan the inactive queue.  If the lowmem
		 * handlers appear to have freed up some pages, subtract the
		 * difference from the inactive queue scan target.
		 */
		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
		if (shortage > 0) {
			ofree = vmd->vmd_free_count;
			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
				shortage -= min(vmd->vmd_free_count - ofree,
				    (u_int)shortage);
			target_met = vm_pageout_scan_inactive(vmd, shortage,
			    &addl_shortage);
		} else
			addl_shortage = 0;

		/*
		 * Scan the active queue.  A positive value for shortage
		 * indicates that we must aggressively deactivate pages to
		 * avoid a shortfall.
		 */
		shortage = vm_pageout_active_target(vmd) + addl_shortage;
		vm_pageout_scan_active(vmd, shortage);
	}
}
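
/*
 * Illustrative sketch (not part of the original code, not compiled): the
 * pattern used by vm_pageout_lowmem() above to run the lowmem handlers at
 * most once per period even when several domain workers call in
 * concurrently.  Whoever wins the atomic_fcmpset_int() on the shared
 * timestamp does the work; losers observe the refreshed timestamp and
 * leave.  The identifiers "period_ticks" and "run_periodic" are
 * hypothetical and exist only for this sketch.
 */
#if 0
static int period_ticks;

static bool
run_periodic(int period_secs, void (*work)(void))
{
	int last;

	last = atomic_load_int(&period_ticks);
	while ((u_int)(ticks - last) / hz >= (u_int)period_secs) {
		/* On failure, "last" is reloaded and the check repeats. */
		if (atomic_fcmpset_int(&period_ticks, &last, ticks) == 0)
			continue;
		work();
		return (true);
	}
	return (false);
}
#endif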

/*
 * Initialize basic pageout daemon settings.  See the comment above the
 * definition of vm_domain for some explanation of how these thresholds are
 * used.
 */
static void
vm_pageout_init_domain(int domain)
{
	struct vm_domain *vmd;
	struct sysctl_oid *oid;

	vmd = VM_DOMAIN(domain);
	vmd->vmd_interrupt_free_min = 2;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
	    vmd->vmd_interrupt_free_min;
	vmd->vmd_free_reserved = vm_pageout_page_count +
	    vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
	vmd->vmd_free_min = vmd->vmd_page_count / 200;
	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
	vmd->vmd_free_min += vmd->vmd_free_reserved;
	vmd->vmd_free_severe += vmd->vmd_free_reserved;
	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;

	/*
	 * Set the default wakeup threshold to be 10% below the paging
	 * target.  This keeps the steady state out of shortfall.
	 */
	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;

	/*
	 * Target amount of memory to move out of the laundry queue during a
	 * background laundering.  This is proportional to the amount of system
	 * memory.
	 */
	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
	    vmd->vmd_free_min) / 10;

	/* Initialize the pageout daemon pid controller. */
	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
	    vmd->vmd_free_target, PIDCTRL_BOUND,
	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
	    "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
}

static void
vm_pageout_init(void)
{
	u_int freecount;
	int i;

	/*
	 * Initialize some paging parameters.
	 */
	if (vm_cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	freecount = 0;
	for (i = 0; i < vm_ndomains; i++) {
		struct vm_domain *vmd;

		vm_pageout_init_domain(i);
		vmd = VM_DOMAIN(i);
		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
		vm_cnt.v_free_target += vmd->vmd_free_target;
		vm_cnt.v_free_min += vmd->vmd_free_min;
		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
		vm_cnt.v_free_severe += vmd->vmd_free_severe;
		freecount += vmd->vmd_free_count;
	}

	/*
	 * Set the interval in seconds for the active scan.  We want to visit
	 * each page at least once every ten minutes.  This is to prevent
	 * worst case paging behaviors with stale active LRU.
	 */
	if (vm_pageout_update_period == 0)
		vm_pageout_update_period = 600;

	if (vm_page_max_user_wired == 0)
		vm_page_max_user_wired = freecount / 3;
}
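
/*
 * Worked example (not part of the original code) of the thresholds computed
 * by vm_pageout_init_domain() above, for a hypothetical single domain of
 * 1,048,576 pages (4 GiB with 4 KiB pages), assuming MAXBSIZE is 64 KiB and
 * that vm_pageout_page_count has its usual default of 32:
 *
 *	vmd_interrupt_free_min        = 2
 *	vmd_pageout_free_min          = 2 * 65536 / 4096 + 2     = 34
 *	vmd_free_reserved             = 32 + 34 + 1048576 / 768  = 1431
 *	vmd_free_min (before adjust)  = 1048576 / 200            = 5242
 *	vmd_free_target               = 4 * 5242 + 1431          = 22399
 *	vmd_free_min (final)          = 5242 + 1431              = 6673
 *	vmd_free_severe (final)       = 2621 + 1431              = 4052
 *	vmd_inactive_target           = 3 * 22399 / 2            = 33598
 *	    (clamped to vmd_free_count / 3 if that is smaller)
 *	vmd_pageout_wakeup_thresh     = (22399 / 10) * 9         = 20151
 *	vmd_background_launder_target = (22399 - 6673) / 10      = 1572
 *
 * That is, on such a domain the daemon wakes up when free memory drops to
 * roughly 79 MiB and aims to keep about 87 MiB free.
 */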

/*
 * vm_pageout is the high-level pageout daemon.
 */
static void
vm_pageout(void)
{
	struct proc *p;
	struct thread *td;
	int error, first, i;

	p = curproc;
	td = curthread;

	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
	swap_pager_swap_init();
	for (first = -1, i = 0; i < vm_ndomains; i++) {
		if (VM_DOMAIN_EMPTY(i)) {
			if (bootverbose)
				printf("domain %d empty; skipping pageout\n",
				    i);
			continue;
		}
		if (first == -1)
			first = i;
		else {
			error = kthread_add(vm_pageout_worker,
			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
			if (error != 0)
				panic("starting pageout for domain %d: %d\n",
				    i, error);
		}
		error = kthread_add(vm_pageout_laundry_worker,
		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
		if (error != 0)
			panic("starting laundry for domain %d: %d", i, error);
	}
	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
	if (error != 0)
		panic("starting uma_reclaim helper, error %d\n", error);

	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
	vm_pageout_worker((void *)(uintptr_t)first);
}

/*
 * Perform an advisory wakeup of the page daemon.
 */
void
pagedaemon_wakeup(int domain)
{
	struct vm_domain *vmd;

	vmd = VM_DOMAIN(domain);
	vm_domain_pageout_assert_unlocked(vmd);
	if (curproc == pageproc)
		return;

	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
		vm_domain_pageout_lock(vmd);
		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
		wakeup(&vmd->vmd_pageout_wanted);
		vm_domain_pageout_unlock(vmd);
	}
}
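
/*
 * Note on the handshake between pagedaemon_wakeup() above and
 * vm_pageout_worker(): the worker clears vmd_pageout_wanted while holding
 * the pageout lock and only then evaluates vm_paging_needed(), so a waker
 * that raises the flag after the check either is seen by the check or
 * issues a wakeup() that the worker's mtx_sleep() will receive.  The
 * atomic_fetchadd_int() in pagedaemon_wakeup() makes only the first of many
 * concurrent wakers take the lock and call wakeup(), keeping the common
 * path lock-free.  The fragment below is a condensed, non-compiled sketch
 * of the two sides, with statistics and the shortage handling omitted.
 */
#if 0
	/* Sleeper side (vm_pageout_worker). */
	vm_domain_pageout_lock(vmd);
	atomic_store_int(&vmd->vmd_pageout_wanted, 0);	/* Clear first. */
	if (!vm_paging_needed(vmd, vmd->vmd_free_count))
		mtx_sleep(&vmd->vmd_pageout_wanted,
		    vm_domain_pageout_lockptr(vmd), PDROP | PVM, "psleep",
		    hz / VM_INACT_SCAN_RATE);
	else
		vm_domain_pageout_unlock(vmd);

	/* Waker side (pagedaemon_wakeup). */
	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
		vm_domain_pageout_lock(vmd);
		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
		wakeup(&vmd->vmd_pageout_wanted);
		vm_domain_pageout_unlock(vmd);
	}
#endif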