1 /*- 2 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * Copyright (c) 2005 Yahoo! Technologies Norway AS 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * The Mach Operating System project at Carnegie-Mellon University. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. All advertising materials mentioning features or use of this software 25 * must display the following acknowledgement: 26 * This product includes software developed by the University of 27 * California, Berkeley and its contributors. 28 * 4. Neither the name of the University nor the names of its contributors 29 * may be used to endorse or promote products derived from this software 30 * without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 42 * SUCH DAMAGE. 43 * 44 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 45 * 46 * 47 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 48 * All rights reserved. 49 * 50 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 51 * 52 * Permission to use, copy, modify and distribute this software and 53 * its documentation is hereby granted, provided that both the copyright 54 * notice and this permission notice appear in all copies of the 55 * software, derivative works or modified versions, and any portions 56 * thereof, and that both notices appear in supporting documentation. 57 * 58 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 59 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 60 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 61 * 62 * Carnegie Mellon requests users of this software to return to 63 * 64 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 65 * School of Computer Science 66 * Carnegie Mellon University 67 * Pittsburgh PA 15213-3890 68 * 69 * any improvements or extensions that they make and grant Carnegie the 70 * rights to redistribute these changes. 71 */ 72 73 /* 74 * The proverbial page-out daemon. 
75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_vm.h" 81 82 #include <sys/param.h> 83 #include <sys/systm.h> 84 #include <sys/kernel.h> 85 #include <sys/eventhandler.h> 86 #include <sys/lock.h> 87 #include <sys/mutex.h> 88 #include <sys/proc.h> 89 #include <sys/kthread.h> 90 #include <sys/ktr.h> 91 #include <sys/mount.h> 92 #include <sys/racct.h> 93 #include <sys/resourcevar.h> 94 #include <sys/sched.h> 95 #include <sys/sdt.h> 96 #include <sys/signalvar.h> 97 #include <sys/smp.h> 98 #include <sys/time.h> 99 #include <sys/vnode.h> 100 #include <sys/vmmeter.h> 101 #include <sys/rwlock.h> 102 #include <sys/sx.h> 103 #include <sys/sysctl.h> 104 105 #include <vm/vm.h> 106 #include <vm/vm_param.h> 107 #include <vm/vm_object.h> 108 #include <vm/vm_page.h> 109 #include <vm/vm_map.h> 110 #include <vm/vm_pageout.h> 111 #include <vm/vm_pager.h> 112 #include <vm/vm_phys.h> 113 #include <vm/vm_pagequeue.h> 114 #include <vm/swap_pager.h> 115 #include <vm/vm_extern.h> 116 #include <vm/uma.h> 117 118 /* 119 * System initialization 120 */ 121 122 /* the kernel process "vm_pageout"*/ 123 static void vm_pageout(void); 124 static void vm_pageout_init(void); 125 static int vm_pageout_clean(vm_page_t m, int *numpagedout); 126 static int vm_pageout_cluster(vm_page_t m); 127 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 128 int starting_page_shortage); 129 130 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init, 131 NULL); 132 133 struct proc *pageproc; 134 135 static struct kproc_desc page_kp = { 136 "pagedaemon", 137 vm_pageout, 138 &pageproc 139 }; 140 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, 141 &page_kp); 142 143 SDT_PROVIDER_DEFINE(vm); 144 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan); 145 146 /* Pagedaemon activity rates, in subdivisions of one second. */ 147 #define VM_LAUNDER_RATE 10 148 #define VM_INACT_SCAN_RATE 10 149 150 static int vm_pageout_oom_seq = 12; 151 152 static int vm_pageout_update_period; 153 static int disable_swap_pageouts; 154 static int lowmem_period = 10; 155 static int swapdev_enabled; 156 157 static int vm_panic_on_oom = 0; 158 159 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom, 160 CTLFLAG_RWTUN, &vm_panic_on_oom, 0, 161 "panic on out of memory instead of killing the largest process"); 162 163 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period, 164 CTLFLAG_RWTUN, &vm_pageout_update_period, 0, 165 "Maximum active LRU update period"); 166 167 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0, 168 "Low memory callback period"); 169 170 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts, 171 CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages"); 172 173 static int pageout_lock_miss; 174 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, 175 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout"); 176 177 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq, 178 CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0, 179 "back-to-back calls to oom detector to start OOM"); 180 181 static int act_scan_laundry_weight = 3; 182 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN, 183 &act_scan_laundry_weight, 0, 184 "weight given to clean vs. 
dirty pages in active queue scans"); 185 186 static u_int vm_background_launder_rate = 4096; 187 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN, 188 &vm_background_launder_rate, 0, 189 "background laundering rate, in kilobytes per second"); 190 191 static u_int vm_background_launder_max = 20 * 1024; 192 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN, 193 &vm_background_launder_max, 0, "background laundering cap, in kilobytes"); 194 195 int vm_pageout_page_count = 32; 196 197 u_long vm_page_max_user_wired; 198 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW, 199 &vm_page_max_user_wired, 0, 200 "system-wide limit to user-wired page count"); 201 202 static u_int isqrt(u_int num); 203 static int vm_pageout_launder(struct vm_domain *vmd, int launder, 204 bool in_shortfall); 205 static void vm_pageout_laundry_worker(void *arg); 206 207 struct scan_state { 208 struct vm_batchqueue bq; 209 struct vm_pagequeue *pq; 210 vm_page_t marker; 211 int maxscan; 212 int scanned; 213 }; 214 215 static void 216 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq, 217 vm_page_t marker, vm_page_t after, int maxscan) 218 { 219 220 vm_pagequeue_assert_locked(pq); 221 KASSERT((marker->aflags & PGA_ENQUEUED) == 0, 222 ("marker %p already enqueued", marker)); 223 224 if (after == NULL) 225 TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q); 226 else 227 TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q); 228 vm_page_aflag_set(marker, PGA_ENQUEUED); 229 230 vm_batchqueue_init(&ss->bq); 231 ss->pq = pq; 232 ss->marker = marker; 233 ss->maxscan = maxscan; 234 ss->scanned = 0; 235 vm_pagequeue_unlock(pq); 236 } 237 238 static void 239 vm_pageout_end_scan(struct scan_state *ss) 240 { 241 struct vm_pagequeue *pq; 242 243 pq = ss->pq; 244 vm_pagequeue_assert_locked(pq); 245 KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0, 246 ("marker %p not enqueued", ss->marker)); 247 248 TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q); 249 vm_page_aflag_clear(ss->marker, PGA_ENQUEUED); 250 pq->pq_pdpages += ss->scanned; 251 } 252 253 /* 254 * Add a small number of queued pages to a batch queue for later processing 255 * without the corresponding queue lock held. The caller must have enqueued a 256 * marker page at the desired start point for the scan. Pages will be 257 * physically dequeued if the caller so requests. Otherwise, the returned 258 * batch may contain marker pages, and it is up to the caller to handle them. 259 * 260 * When processing the batch queue, vm_page_queue() must be used to 261 * determine whether the page has been logically dequeued by another thread. 262 * Once this check is performed, the page lock guarantees that the page will 263 * not be disassociated from the queue. 
264 */ 265 static __always_inline void 266 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue) 267 { 268 struct vm_pagequeue *pq; 269 vm_page_t m, marker, n; 270 271 marker = ss->marker; 272 pq = ss->pq; 273 274 KASSERT((marker->aflags & PGA_ENQUEUED) != 0, 275 ("marker %p not enqueued", ss->marker)); 276 277 vm_pagequeue_lock(pq); 278 for (m = TAILQ_NEXT(marker, plinks.q); m != NULL && 279 ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE; 280 m = n, ss->scanned++) { 281 n = TAILQ_NEXT(m, plinks.q); 282 if ((m->flags & PG_MARKER) == 0) { 283 KASSERT((m->aflags & PGA_ENQUEUED) != 0, 284 ("page %p not enqueued", m)); 285 KASSERT((m->flags & PG_FICTITIOUS) == 0, 286 ("Fictitious page %p cannot be in page queue", m)); 287 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 288 ("Unmanaged page %p cannot be in page queue", m)); 289 } else if (dequeue) 290 continue; 291 292 (void)vm_batchqueue_insert(&ss->bq, m); 293 if (dequeue) { 294 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); 295 vm_page_aflag_clear(m, PGA_ENQUEUED); 296 } 297 } 298 TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q); 299 if (__predict_true(m != NULL)) 300 TAILQ_INSERT_BEFORE(m, marker, plinks.q); 301 else 302 TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q); 303 if (dequeue) 304 vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt); 305 vm_pagequeue_unlock(pq); 306 } 307 308 /* Return the next page to be scanned, or NULL if the scan is complete. */ 309 static __always_inline vm_page_t 310 vm_pageout_next(struct scan_state *ss, const bool dequeue) 311 { 312 313 if (ss->bq.bq_cnt == 0) 314 vm_pageout_collect_batch(ss, dequeue); 315 return (vm_batchqueue_pop(&ss->bq)); 316 } 317 318 /* 319 * Scan for pages at adjacent offsets within the given page's object that are 320 * eligible for laundering, form a cluster of these pages and the given page, 321 * and launder that cluster. 322 */ 323 static int 324 vm_pageout_cluster(vm_page_t m) 325 { 326 vm_object_t object; 327 vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps; 328 vm_pindex_t pindex; 329 int ib, is, page_base, pageout_count; 330 331 vm_page_assert_locked(m); 332 object = m->object; 333 VM_OBJECT_ASSERT_WLOCKED(object); 334 pindex = m->pindex; 335 336 vm_page_assert_unbusied(m); 337 KASSERT(!vm_page_wired(m), ("page %p is wired", m)); 338 339 pmap_remove_write(m); 340 vm_page_unlock(m); 341 342 mc[vm_pageout_page_count] = pb = ps = m; 343 pageout_count = 1; 344 page_base = vm_pageout_page_count; 345 ib = 1; 346 is = 1; 347 348 /* 349 * We can cluster only if the page is not clean, busy, or held, and 350 * the page is in the laundry queue. 351 * 352 * During heavy mmap/modification loads the pageout 353 * daemon can really fragment the underlying file 354 * due to flushing pages out of order and not trying to 355 * align the clusters (which leaves sporadic out-of-order 356 * holes). To solve this problem we do the reverse scan 357 * first and attempt to align our cluster, then do a 358 * forward scan if room remains. 
359 */ 360 more: 361 while (ib != 0 && pageout_count < vm_pageout_page_count) { 362 if (ib > pindex) { 363 ib = 0; 364 break; 365 } 366 if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) { 367 ib = 0; 368 break; 369 } 370 vm_page_test_dirty(p); 371 if (p->dirty == 0) { 372 ib = 0; 373 break; 374 } 375 vm_page_lock(p); 376 if (vm_page_wired(p) || !vm_page_in_laundry(p)) { 377 vm_page_unlock(p); 378 ib = 0; 379 break; 380 } 381 pmap_remove_write(p); 382 vm_page_unlock(p); 383 mc[--page_base] = pb = p; 384 ++pageout_count; 385 ++ib; 386 387 /* 388 * We are at an alignment boundary. Stop here, and switch 389 * directions. Do not clear ib. 390 */ 391 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0) 392 break; 393 } 394 while (pageout_count < vm_pageout_page_count && 395 pindex + is < object->size) { 396 if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p)) 397 break; 398 vm_page_test_dirty(p); 399 if (p->dirty == 0) 400 break; 401 vm_page_lock(p); 402 if (vm_page_wired(p) || !vm_page_in_laundry(p)) { 403 vm_page_unlock(p); 404 break; 405 } 406 pmap_remove_write(p); 407 vm_page_unlock(p); 408 mc[page_base + pageout_count] = ps = p; 409 ++pageout_count; 410 ++is; 411 } 412 413 /* 414 * If we exhausted our forward scan, continue with the reverse scan 415 * when possible, even past an alignment boundary. This catches 416 * boundary conditions. 417 */ 418 if (ib != 0 && pageout_count < vm_pageout_page_count) 419 goto more; 420 421 return (vm_pageout_flush(&mc[page_base], pageout_count, 422 VM_PAGER_PUT_NOREUSE, 0, NULL, NULL)); 423 } 424 425 /* 426 * vm_pageout_flush() - launder the given pages 427 * 428 * The given pages are laundered. Note that we setup for the start of 429 * I/O ( i.e. busy the page ), mark it read-only, and bump the object 430 * reference count all in here rather then in the parent. If we want 431 * the parent to do more sophisticated things we may have to change 432 * the ordering. 433 * 434 * Returned runlen is the count of pages between mreq and first 435 * page after mreq with status VM_PAGER_AGAIN. 436 * *eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL 437 * for any page in runlen set. 438 */ 439 int 440 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen, 441 boolean_t *eio) 442 { 443 vm_object_t object = mc[0]->object; 444 int pageout_status[count]; 445 int numpagedout = 0; 446 int i, runlen; 447 448 VM_OBJECT_ASSERT_WLOCKED(object); 449 450 /* 451 * Initiate I/O. Mark the pages busy and verify that they're valid 452 * and read-only. 453 * 454 * We do not have to fixup the clean/dirty bits here... we can 455 * allow the pager to do it after the I/O completes. 456 * 457 * NOTE! mc[i]->dirty may be partial or fragmented due to an 458 * edge case with file fragments. 
459 */ 460 for (i = 0; i < count; i++) { 461 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, 462 ("vm_pageout_flush: partially invalid page %p index %d/%d", 463 mc[i], i, count)); 464 KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0, 465 ("vm_pageout_flush: writeable page %p", mc[i])); 466 vm_page_sbusy(mc[i]); 467 } 468 vm_object_pip_add(object, count); 469 470 vm_pager_put_pages(object, mc, count, flags, pageout_status); 471 472 runlen = count - mreq; 473 if (eio != NULL) 474 *eio = FALSE; 475 for (i = 0; i < count; i++) { 476 vm_page_t mt = mc[i]; 477 478 KASSERT(pageout_status[i] == VM_PAGER_PEND || 479 !pmap_page_is_write_mapped(mt), 480 ("vm_pageout_flush: page %p is not write protected", mt)); 481 switch (pageout_status[i]) { 482 case VM_PAGER_OK: 483 vm_page_lock(mt); 484 if (vm_page_in_laundry(mt)) 485 vm_page_deactivate_noreuse(mt); 486 vm_page_unlock(mt); 487 /* FALLTHROUGH */ 488 case VM_PAGER_PEND: 489 numpagedout++; 490 break; 491 case VM_PAGER_BAD: 492 /* 493 * The page is outside the object's range. We pretend 494 * that the page out worked and clean the page, so the 495 * changes will be lost if the page is reclaimed by 496 * the page daemon. 497 */ 498 vm_page_undirty(mt); 499 vm_page_lock(mt); 500 if (vm_page_in_laundry(mt)) 501 vm_page_deactivate_noreuse(mt); 502 vm_page_unlock(mt); 503 break; 504 case VM_PAGER_ERROR: 505 case VM_PAGER_FAIL: 506 /* 507 * If the page couldn't be paged out to swap because the 508 * pager wasn't able to find space, place the page in 509 * the PQ_UNSWAPPABLE holding queue. This is an 510 * optimization that prevents the page daemon from 511 * wasting CPU cycles on pages that cannot be reclaimed 512 * becase no swap device is configured. 513 * 514 * Otherwise, reactivate the page so that it doesn't 515 * clog the laundry and inactive queues. (We will try 516 * paging it out again later.) 517 */ 518 vm_page_lock(mt); 519 if (object->type == OBJT_SWAP && 520 pageout_status[i] == VM_PAGER_FAIL) { 521 vm_page_unswappable(mt); 522 numpagedout++; 523 } else 524 vm_page_activate(mt); 525 vm_page_unlock(mt); 526 if (eio != NULL && i >= mreq && i - mreq < runlen) 527 *eio = TRUE; 528 break; 529 case VM_PAGER_AGAIN: 530 if (i >= mreq && i - mreq < runlen) 531 runlen = i - mreq; 532 break; 533 } 534 535 /* 536 * If the operation is still going, leave the page busy to 537 * block all other accesses. Also, leave the paging in 538 * progress indicator set so that we don't attempt an object 539 * collapse. 540 */ 541 if (pageout_status[i] != VM_PAGER_PEND) { 542 vm_object_pip_wakeup(object); 543 vm_page_sunbusy(mt); 544 } 545 } 546 if (prunlen != NULL) 547 *prunlen = runlen; 548 return (numpagedout); 549 } 550 551 static void 552 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused) 553 { 554 555 atomic_store_rel_int(&swapdev_enabled, 1); 556 } 557 558 static void 559 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused) 560 { 561 562 if (swap_pager_nswapdev() == 1) 563 atomic_store_rel_int(&swapdev_enabled, 0); 564 } 565 566 /* 567 * Attempt to acquire all of the necessary locks to launder a page and 568 * then call through the clustering layer to PUTPAGES. Wait a short 569 * time for a vnode lock. 570 * 571 * Requires the page and object lock on entry, releases both before return. 572 * Returns 0 on success and an errno otherwise. 
573 */ 574 static int 575 vm_pageout_clean(vm_page_t m, int *numpagedout) 576 { 577 struct vnode *vp; 578 struct mount *mp; 579 vm_object_t object; 580 vm_pindex_t pindex; 581 int error, lockmode; 582 583 vm_page_assert_locked(m); 584 object = m->object; 585 VM_OBJECT_ASSERT_WLOCKED(object); 586 error = 0; 587 vp = NULL; 588 mp = NULL; 589 590 /* 591 * The object is already known NOT to be dead. It 592 * is possible for the vget() to block the whole 593 * pageout daemon, but the new low-memory handling 594 * code should prevent it. 595 * 596 * We can't wait forever for the vnode lock, we might 597 * deadlock due to a vn_read() getting stuck in 598 * vm_wait while holding this vnode. We skip the 599 * vnode if we can't get it in a reasonable amount 600 * of time. 601 */ 602 if (object->type == OBJT_VNODE) { 603 vm_page_unlock(m); 604 vp = object->handle; 605 if (vp->v_type == VREG && 606 vn_start_write(vp, &mp, V_NOWAIT) != 0) { 607 mp = NULL; 608 error = EDEADLK; 609 goto unlock_all; 610 } 611 KASSERT(mp != NULL, 612 ("vp %p with NULL v_mount", vp)); 613 vm_object_reference_locked(object); 614 pindex = m->pindex; 615 VM_OBJECT_WUNLOCK(object); 616 lockmode = MNT_SHARED_WRITES(vp->v_mount) ? 617 LK_SHARED : LK_EXCLUSIVE; 618 if (vget(vp, lockmode | LK_TIMELOCK, curthread)) { 619 vp = NULL; 620 error = EDEADLK; 621 goto unlock_mp; 622 } 623 VM_OBJECT_WLOCK(object); 624 625 /* 626 * Ensure that the object and vnode were not disassociated 627 * while locks were dropped. 628 */ 629 if (vp->v_object != object) { 630 error = ENOENT; 631 goto unlock_all; 632 } 633 vm_page_lock(m); 634 635 /* 636 * While the object and page were unlocked, the page 637 * may have been: 638 * (1) moved to a different queue, 639 * (2) reallocated to a different object, 640 * (3) reallocated to a different offset, or 641 * (4) cleaned. 642 */ 643 if (!vm_page_in_laundry(m) || m->object != object || 644 m->pindex != pindex || m->dirty == 0) { 645 vm_page_unlock(m); 646 error = ENXIO; 647 goto unlock_all; 648 } 649 650 /* 651 * The page may have been busied or referenced while the object 652 * and page locks were released. 653 */ 654 if (vm_page_busied(m) || vm_page_wired(m)) { 655 vm_page_unlock(m); 656 error = EBUSY; 657 goto unlock_all; 658 } 659 } 660 661 /* 662 * If a page is dirty, then it is either being washed 663 * (but not yet cleaned) or it is still in the 664 * laundry. If it is still in the laundry, then we 665 * start the cleaning operation. 666 */ 667 if ((*numpagedout = vm_pageout_cluster(m)) == 0) 668 error = EIO; 669 670 unlock_all: 671 VM_OBJECT_WUNLOCK(object); 672 673 unlock_mp: 674 vm_page_lock_assert(m, MA_NOTOWNED); 675 if (mp != NULL) { 676 if (vp != NULL) 677 vput(vp); 678 vm_object_deallocate(object); 679 vn_finished_write(mp); 680 } 681 682 return (error); 683 } 684 685 /* 686 * Attempt to launder the specified number of pages. 687 * 688 * Returns the number of pages successfully laundered. 689 */ 690 static int 691 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall) 692 { 693 struct scan_state ss; 694 struct vm_pagequeue *pq; 695 struct mtx *mtx; 696 vm_object_t object; 697 vm_page_t m, marker; 698 int act_delta, error, numpagedout, queue, starting_target; 699 int vnodes_skipped; 700 bool pageout_ok; 701 702 mtx = NULL; 703 object = NULL; 704 starting_target = launder; 705 vnodes_skipped = 0; 706 707 /* 708 * Scan the laundry queues for pages eligible to be laundered. 
We stop 709 * once the target number of dirty pages have been laundered, or once 710 * we've reached the end of the queue. A single iteration of this loop 711 * may cause more than one page to be laundered because of clustering. 712 * 713 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no 714 * swap devices are configured. 715 */ 716 if (atomic_load_acq_int(&swapdev_enabled)) 717 queue = PQ_UNSWAPPABLE; 718 else 719 queue = PQ_LAUNDRY; 720 721 scan: 722 marker = &vmd->vmd_markers[queue]; 723 pq = &vmd->vmd_pagequeues[queue]; 724 vm_pagequeue_lock(pq); 725 vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 726 while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) { 727 if (__predict_false((m->flags & PG_MARKER) != 0)) 728 continue; 729 730 vm_page_change_lock(m, &mtx); 731 732 recheck: 733 /* 734 * The page may have been disassociated from the queue 735 * while locks were dropped. 736 */ 737 if (vm_page_queue(m) != queue) 738 continue; 739 740 /* 741 * A requeue was requested, so this page gets a second 742 * chance. 743 */ 744 if ((m->aflags & PGA_REQUEUE) != 0) { 745 vm_page_pqbatch_submit(m, queue); 746 continue; 747 } 748 749 /* 750 * Wired pages may not be freed. Complete their removal 751 * from the queue now to avoid needless revisits during 752 * future scans. 753 */ 754 if (vm_page_wired(m)) { 755 vm_page_dequeue_deferred(m); 756 continue; 757 } 758 759 if (object != m->object) { 760 if (object != NULL) 761 VM_OBJECT_WUNLOCK(object); 762 object = m->object; 763 if (!VM_OBJECT_TRYWLOCK(object)) { 764 mtx_unlock(mtx); 765 /* Depends on type-stability. */ 766 VM_OBJECT_WLOCK(object); 767 mtx_lock(mtx); 768 goto recheck; 769 } 770 } 771 772 if (vm_page_busied(m)) 773 continue; 774 775 /* 776 * Invalid pages can be easily freed. They cannot be 777 * mapped; vm_page_free() asserts this. 778 */ 779 if (m->valid == 0) 780 goto free_page; 781 782 /* 783 * If the page has been referenced and the object is not dead, 784 * reactivate or requeue the page depending on whether the 785 * object is mapped. 786 * 787 * Test PGA_REFERENCED after calling pmap_ts_referenced() so 788 * that a reference from a concurrently destroyed mapping is 789 * observed here and now. 790 */ 791 if (object->ref_count != 0) 792 act_delta = pmap_ts_referenced(m); 793 else { 794 KASSERT(!pmap_page_is_mapped(m), 795 ("page %p is mapped", m)); 796 act_delta = 0; 797 } 798 if ((m->aflags & PGA_REFERENCED) != 0) { 799 vm_page_aflag_clear(m, PGA_REFERENCED); 800 act_delta++; 801 } 802 if (act_delta != 0) { 803 if (object->ref_count != 0) { 804 VM_CNT_INC(v_reactivated); 805 vm_page_activate(m); 806 807 /* 808 * Increase the activation count if the page 809 * was referenced while in the laundry queue. 810 * This makes it less likely that the page will 811 * be returned prematurely to the inactive 812 * queue. 813 */ 814 m->act_count += act_delta + ACT_ADVANCE; 815 816 /* 817 * If this was a background laundering, count 818 * activated pages towards our target. The 819 * purpose of background laundering is to ensure 820 * that pages are eventually cycled through the 821 * laundry queue, and an activation is a valid 822 * way out. 823 */ 824 if (!in_shortfall) 825 launder--; 826 continue; 827 } else if ((object->flags & OBJ_DEAD) == 0) { 828 vm_page_requeue(m); 829 continue; 830 } 831 } 832 833 /* 834 * If the page appears to be clean at the machine-independent 835 * layer, then remove all of its mappings from the pmap in 836 * anticipation of freeing it. 
If, however, any of the page's 837 * mappings allow write access, then the page may still be 838 * modified until the last of those mappings are removed. 839 */ 840 if (object->ref_count != 0) { 841 vm_page_test_dirty(m); 842 if (m->dirty == 0) 843 pmap_remove_all(m); 844 } 845 846 /* 847 * Clean pages are freed, and dirty pages are paged out unless 848 * they belong to a dead object. Requeueing dirty pages from 849 * dead objects is pointless, as they are being paged out and 850 * freed by the thread that destroyed the object. 851 */ 852 if (m->dirty == 0) { 853 free_page: 854 vm_page_free(m); 855 VM_CNT_INC(v_dfree); 856 } else if ((object->flags & OBJ_DEAD) == 0) { 857 if (object->type != OBJT_SWAP && 858 object->type != OBJT_DEFAULT) 859 pageout_ok = true; 860 else if (disable_swap_pageouts) 861 pageout_ok = false; 862 else 863 pageout_ok = true; 864 if (!pageout_ok) { 865 vm_page_requeue(m); 866 continue; 867 } 868 869 /* 870 * Form a cluster with adjacent, dirty pages from the 871 * same object, and page out that entire cluster. 872 * 873 * The adjacent, dirty pages must also be in the 874 * laundry. However, their mappings are not checked 875 * for new references. Consequently, a recently 876 * referenced page may be paged out. However, that 877 * page will not be prematurely reclaimed. After page 878 * out, the page will be placed in the inactive queue, 879 * where any new references will be detected and the 880 * page reactivated. 881 */ 882 error = vm_pageout_clean(m, &numpagedout); 883 if (error == 0) { 884 launder -= numpagedout; 885 ss.scanned += numpagedout; 886 } else if (error == EDEADLK) { 887 pageout_lock_miss++; 888 vnodes_skipped++; 889 } 890 mtx = NULL; 891 object = NULL; 892 } 893 } 894 if (mtx != NULL) { 895 mtx_unlock(mtx); 896 mtx = NULL; 897 } 898 if (object != NULL) { 899 VM_OBJECT_WUNLOCK(object); 900 object = NULL; 901 } 902 vm_pagequeue_lock(pq); 903 vm_pageout_end_scan(&ss); 904 vm_pagequeue_unlock(pq); 905 906 if (launder > 0 && queue == PQ_UNSWAPPABLE) { 907 queue = PQ_LAUNDRY; 908 goto scan; 909 } 910 911 /* 912 * Wakeup the sync daemon if we skipped a vnode in a writeable object 913 * and we didn't launder enough pages. 914 */ 915 if (vnodes_skipped > 0 && launder > 0) 916 (void)speedup_syncer(); 917 918 return (starting_target - launder); 919 } 920 921 /* 922 * Compute the integer square root. 923 */ 924 static u_int 925 isqrt(u_int num) 926 { 927 u_int bit, root, tmp; 928 929 bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0; 930 root = 0; 931 while (bit != 0) { 932 tmp = root + bit; 933 root >>= 1; 934 if (num >= tmp) { 935 num -= tmp; 936 root += bit; 937 } 938 bit >>= 2; 939 } 940 return (root); 941 } 942 943 /* 944 * Perform the work of the laundry thread: periodically wake up and determine 945 * whether any pages need to be laundered. If so, determine the number of pages 946 * that need to be laundered, and launder them. 947 */ 948 static void 949 vm_pageout_laundry_worker(void *arg) 950 { 951 struct vm_domain *vmd; 952 struct vm_pagequeue *pq; 953 uint64_t nclean, ndirty, nfreed; 954 int domain, last_target, launder, shortfall, shortfall_cycle, target; 955 bool in_shortfall; 956 957 domain = (uintptr_t)arg; 958 vmd = VM_DOMAIN(domain); 959 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; 960 KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 961 962 shortfall = 0; 963 in_shortfall = false; 964 shortfall_cycle = 0; 965 last_target = target = 0; 966 nfreed = 0; 967 968 /* 969 * Calls to these handlers are serialized by the swap syscall lock. 
970 */ 971 (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd, 972 EVENTHANDLER_PRI_ANY); 973 (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd, 974 EVENTHANDLER_PRI_ANY); 975 976 /* 977 * The pageout laundry worker is never done, so loop forever. 978 */ 979 for (;;) { 980 KASSERT(target >= 0, ("negative target %d", target)); 981 KASSERT(shortfall_cycle >= 0, 982 ("negative cycle %d", shortfall_cycle)); 983 launder = 0; 984 985 /* 986 * First determine whether we need to launder pages to meet a 987 * shortage of free pages. 988 */ 989 if (shortfall > 0) { 990 in_shortfall = true; 991 shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE; 992 target = shortfall; 993 } else if (!in_shortfall) 994 goto trybackground; 995 else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) { 996 /* 997 * We recently entered shortfall and began laundering 998 * pages. If we have completed that laundering run 999 * (and we are no longer in shortfall) or we have met 1000 * our laundry target through other activity, then we 1001 * can stop laundering pages. 1002 */ 1003 in_shortfall = false; 1004 target = 0; 1005 goto trybackground; 1006 } 1007 launder = target / shortfall_cycle--; 1008 goto dolaundry; 1009 1010 /* 1011 * There's no immediate need to launder any pages; see if we 1012 * meet the conditions to perform background laundering: 1013 * 1014 * 1. The ratio of dirty to clean inactive pages exceeds the 1015 * background laundering threshold, or 1016 * 2. we haven't yet reached the target of the current 1017 * background laundering run. 1018 * 1019 * The background laundering threshold is not a constant. 1020 * Instead, it is a slowly growing function of the number of 1021 * clean pages freed by the page daemon since the last 1022 * background laundering. Thus, as the ratio of dirty to 1023 * clean inactive pages grows, the amount of memory pressure 1024 * required to trigger laundering decreases. We ensure 1025 * that the threshold is non-zero after an inactive queue 1026 * scan, even if that scan failed to free a single clean page. 1027 */ 1028 trybackground: 1029 nclean = vmd->vmd_free_count + 1030 vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt; 1031 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt; 1032 if (target == 0 && ndirty * isqrt(howmany(nfreed + 1, 1033 vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) { 1034 target = vmd->vmd_background_launder_target; 1035 } 1036 1037 /* 1038 * We have a non-zero background laundering target. If we've 1039 * laundered up to our maximum without observing a page daemon 1040 * request, just stop. This is a safety belt that ensures we 1041 * don't launder an excessive amount if memory pressure is low 1042 * and the ratio of dirty to clean pages is large. Otherwise, 1043 * proceed at the background laundering rate. 1044 */ 1045 if (target > 0) { 1046 if (nfreed > 0) { 1047 nfreed = 0; 1048 last_target = target; 1049 } else if (last_target - target >= 1050 vm_background_launder_max * PAGE_SIZE / 1024) { 1051 target = 0; 1052 } 1053 launder = vm_background_launder_rate * PAGE_SIZE / 1024; 1054 launder /= VM_LAUNDER_RATE; 1055 if (launder > target) 1056 launder = target; 1057 } 1058 1059 dolaundry: 1060 if (launder > 0) { 1061 /* 1062 * Because of I/O clustering, the number of laundered 1063 * pages could exceed "target" by the maximum size of 1064 * a cluster minus one. 
1065 */ 1066 target -= min(vm_pageout_launder(vmd, launder, 1067 in_shortfall), target); 1068 pause("laundp", hz / VM_LAUNDER_RATE); 1069 } 1070 1071 /* 1072 * If we're not currently laundering pages and the page daemon 1073 * hasn't posted a new request, sleep until the page daemon 1074 * kicks us. 1075 */ 1076 vm_pagequeue_lock(pq); 1077 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE) 1078 (void)mtx_sleep(&vmd->vmd_laundry_request, 1079 vm_pagequeue_lockptr(pq), PVM, "launds", 0); 1080 1081 /* 1082 * If the pagedaemon has indicated that it's in shortfall, start 1083 * a shortfall laundering unless we're already in the middle of 1084 * one. This may preempt a background laundering. 1085 */ 1086 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL && 1087 (!in_shortfall || shortfall_cycle == 0)) { 1088 shortfall = vm_laundry_target(vmd) + 1089 vmd->vmd_pageout_deficit; 1090 target = 0; 1091 } else 1092 shortfall = 0; 1093 1094 if (target == 0) 1095 vmd->vmd_laundry_request = VM_LAUNDRY_IDLE; 1096 nfreed += vmd->vmd_clean_pages_freed; 1097 vmd->vmd_clean_pages_freed = 0; 1098 vm_pagequeue_unlock(pq); 1099 } 1100 } 1101 1102 /* 1103 * Compute the number of pages we want to try to move from the 1104 * active queue to either the inactive or laundry queue. 1105 * 1106 * When scanning active pages during a shortage, we make clean pages 1107 * count more heavily towards the page shortage than dirty pages. 1108 * This is because dirty pages must be laundered before they can be 1109 * reused and thus have less utility when attempting to quickly 1110 * alleviate a free page shortage. However, this weighting also 1111 * causes the scan to deactivate dirty pages more aggressively, 1112 * improving the effectiveness of clustering. 1113 */ 1114 static int 1115 vm_pageout_active_target(struct vm_domain *vmd) 1116 { 1117 int shortage; 1118 1119 shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) - 1120 (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt + 1121 vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight); 1122 shortage *= act_scan_laundry_weight; 1123 return (shortage); 1124 } 1125 1126 /* 1127 * Scan the active queue. If there is no shortage of inactive pages, scan a 1128 * small portion of the queue in order to maintain quasi-LRU. 1129 */ 1130 static void 1131 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage) 1132 { 1133 struct scan_state ss; 1134 struct mtx *mtx; 1135 vm_page_t m, marker; 1136 struct vm_pagequeue *pq; 1137 long min_scan; 1138 int act_delta, max_scan, scan_tick; 1139 1140 marker = &vmd->vmd_markers[PQ_ACTIVE]; 1141 pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; 1142 vm_pagequeue_lock(pq); 1143 1144 /* 1145 * If we're just idle polling attempt to visit every 1146 * active page within 'update_period' seconds. 1147 */ 1148 scan_tick = ticks; 1149 if (vm_pageout_update_period != 0) { 1150 min_scan = pq->pq_cnt; 1151 min_scan *= scan_tick - vmd->vmd_last_active_scan; 1152 min_scan /= hz * vm_pageout_update_period; 1153 } else 1154 min_scan = 0; 1155 if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0)) 1156 vmd->vmd_last_active_scan = scan_tick; 1157 1158 /* 1159 * Scan the active queue for pages that can be deactivated. Update 1160 * the per-page activity counter and use it to identify deactivation 1161 * candidates. Held pages may be deactivated. 1162 * 1163 * To avoid requeuing each page that remains in the active queue, we 1164 * implement the CLOCK algorithm. 
To keep the implementation of the 1165 * enqueue operation consistent for all page queues, we use two hands, 1166 * represented by marker pages. Scans begin at the first hand, which 1167 * precedes the second hand in the queue. When the two hands meet, 1168 * they are moved back to the head and tail of the queue, respectively, 1169 * and scanning resumes. 1170 */ 1171 max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan; 1172 mtx = NULL; 1173 act_scan: 1174 vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan); 1175 while ((m = vm_pageout_next(&ss, false)) != NULL) { 1176 if (__predict_false(m == &vmd->vmd_clock[1])) { 1177 vm_pagequeue_lock(pq); 1178 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1179 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q); 1180 TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0], 1181 plinks.q); 1182 TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1], 1183 plinks.q); 1184 max_scan -= ss.scanned; 1185 vm_pageout_end_scan(&ss); 1186 goto act_scan; 1187 } 1188 if (__predict_false((m->flags & PG_MARKER) != 0)) 1189 continue; 1190 1191 vm_page_change_lock(m, &mtx); 1192 1193 /* 1194 * The page may have been disassociated from the queue 1195 * while locks were dropped. 1196 */ 1197 if (vm_page_queue(m) != PQ_ACTIVE) 1198 continue; 1199 1200 /* 1201 * Wired pages are dequeued lazily. 1202 */ 1203 if (vm_page_wired(m)) { 1204 vm_page_dequeue_deferred(m); 1205 continue; 1206 } 1207 1208 /* 1209 * Check to see "how much" the page has been used. 1210 * 1211 * Test PGA_REFERENCED after calling pmap_ts_referenced() so 1212 * that a reference from a concurrently destroyed mapping is 1213 * observed here and now. 1214 * 1215 * Perform an unsynchronized object ref count check. While 1216 * the page lock ensures that the page is not reallocated to 1217 * another object, in particular, one with unmanaged mappings 1218 * that cannot support pmap_ts_referenced(), two races are, 1219 * nonetheless, possible: 1220 * 1) The count was transitioning to zero, but we saw a non- 1221 * zero value. pmap_ts_referenced() will return zero 1222 * because the page is not mapped. 1223 * 2) The count was transitioning to one, but we saw zero. 1224 * This race delays the detection of a new reference. At 1225 * worst, we will deactivate and reactivate the page. 1226 */ 1227 if (m->object->ref_count != 0) 1228 act_delta = pmap_ts_referenced(m); 1229 else 1230 act_delta = 0; 1231 if ((m->aflags & PGA_REFERENCED) != 0) { 1232 vm_page_aflag_clear(m, PGA_REFERENCED); 1233 act_delta++; 1234 } 1235 1236 /* 1237 * Advance or decay the act_count based on recent usage. 1238 */ 1239 if (act_delta != 0) { 1240 m->act_count += ACT_ADVANCE + act_delta; 1241 if (m->act_count > ACT_MAX) 1242 m->act_count = ACT_MAX; 1243 } else 1244 m->act_count -= min(m->act_count, ACT_DECLINE); 1245 1246 if (m->act_count == 0) { 1247 /* 1248 * When not short for inactive pages, let dirty pages go 1249 * through the inactive queue before moving to the 1250 * laundry queues. This gives them some extra time to 1251 * be reactivated, potentially avoiding an expensive 1252 * pageout. However, during a page shortage, the 1253 * inactive queue is necessarily small, and so dirty 1254 * pages would only spend a trivial amount of time in 1255 * the inactive queue. Therefore, we might as well 1256 * place them directly in the laundry queue to reduce 1257 * queuing overhead. 
1258 */ 1259 if (page_shortage <= 0) { 1260 vm_page_swapqueue(m, PQ_ACTIVE, PQ_INACTIVE); 1261 } else { 1262 /* 1263 * Calling vm_page_test_dirty() here would 1264 * require acquisition of the object's write 1265 * lock. However, during a page shortage, 1266 * directing dirty pages into the laundry 1267 * queue is only an optimization and not a 1268 * requirement. Therefore, we simply rely on 1269 * the opportunistic updates to the page's 1270 * dirty field by the pmap. 1271 */ 1272 if (m->dirty == 0) { 1273 vm_page_swapqueue(m, PQ_ACTIVE, 1274 PQ_INACTIVE); 1275 page_shortage -= 1276 act_scan_laundry_weight; 1277 } else { 1278 vm_page_swapqueue(m, PQ_ACTIVE, 1279 PQ_LAUNDRY); 1280 page_shortage--; 1281 } 1282 } 1283 } 1284 } 1285 if (mtx != NULL) { 1286 mtx_unlock(mtx); 1287 mtx = NULL; 1288 } 1289 vm_pagequeue_lock(pq); 1290 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1291 TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q); 1292 vm_pageout_end_scan(&ss); 1293 vm_pagequeue_unlock(pq); 1294 } 1295 1296 static int 1297 vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m) 1298 { 1299 struct vm_domain *vmd; 1300 1301 if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0) 1302 return (0); 1303 vm_page_aflag_set(m, PGA_ENQUEUED); 1304 if ((m->aflags & PGA_REQUEUE_HEAD) != 0) { 1305 vmd = vm_pagequeue_domain(m); 1306 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 1307 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD); 1308 } else if ((m->aflags & PGA_REQUEUE) != 0) { 1309 TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q); 1310 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD); 1311 } else 1312 TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q); 1313 return (1); 1314 } 1315 1316 /* 1317 * Re-add stuck pages to the inactive queue. We will examine them again 1318 * during the next scan. If the queue state of a page has changed since 1319 * it was physically removed from the page queue in 1320 * vm_pageout_collect_batch(), don't do anything with that page. 1321 */ 1322 static void 1323 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq, 1324 vm_page_t m) 1325 { 1326 struct vm_pagequeue *pq; 1327 int delta; 1328 1329 delta = 0; 1330 pq = ss->pq; 1331 1332 if (m != NULL) { 1333 if (vm_batchqueue_insert(bq, m)) 1334 return; 1335 vm_pagequeue_lock(pq); 1336 delta += vm_pageout_reinsert_inactive_page(ss, m); 1337 } else 1338 vm_pagequeue_lock(pq); 1339 while ((m = vm_batchqueue_pop(bq)) != NULL) 1340 delta += vm_pageout_reinsert_inactive_page(ss, m); 1341 vm_pagequeue_cnt_add(pq, delta); 1342 vm_pagequeue_unlock(pq); 1343 vm_batchqueue_init(bq); 1344 } 1345 1346 /* 1347 * Attempt to reclaim the requested number of pages from the inactive queue. 1348 * Returns true if the shortage was addressed. 1349 */ 1350 static int 1351 vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage, 1352 int *addl_shortage) 1353 { 1354 struct scan_state ss; 1355 struct vm_batchqueue rq; 1356 struct mtx *mtx; 1357 vm_page_t m, marker; 1358 struct vm_pagequeue *pq; 1359 vm_object_t object; 1360 int act_delta, addl_page_shortage, deficit, page_shortage; 1361 int starting_page_shortage; 1362 1363 /* 1364 * The addl_page_shortage is an estimate of the number of temporarily 1365 * stuck pages in the inactive queue. In other words, the 1366 * number of pages from the inactive count that should be 1367 * discounted in setting the target for the active queue scan. 
1368 */ 1369 addl_page_shortage = 0; 1370 1371 /* 1372 * vmd_pageout_deficit counts the number of pages requested in 1373 * allocations that failed because of a free page shortage. We assume 1374 * that the allocations will be reattempted and thus include the deficit 1375 * in our scan target. 1376 */ 1377 deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit); 1378 starting_page_shortage = page_shortage = shortage + deficit; 1379 1380 mtx = NULL; 1381 object = NULL; 1382 vm_batchqueue_init(&rq); 1383 1384 /* 1385 * Start scanning the inactive queue for pages that we can free. The 1386 * scan will stop when we reach the target or we have scanned the 1387 * entire queue. (Note that m->act_count is not used to make 1388 * decisions for the inactive queue, only for the active queue.) 1389 */ 1390 marker = &vmd->vmd_markers[PQ_INACTIVE]; 1391 pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; 1392 vm_pagequeue_lock(pq); 1393 vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 1394 while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) { 1395 KASSERT((m->flags & PG_MARKER) == 0, 1396 ("marker page %p was dequeued", m)); 1397 1398 vm_page_change_lock(m, &mtx); 1399 1400 recheck: 1401 /* 1402 * The page may have been disassociated from the queue 1403 * while locks were dropped. 1404 */ 1405 if (vm_page_queue(m) != PQ_INACTIVE) { 1406 addl_page_shortage++; 1407 continue; 1408 } 1409 1410 /* 1411 * The page was re-enqueued after the page queue lock was 1412 * dropped, or a requeue was requested. This page gets a second 1413 * chance. 1414 */ 1415 if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE | 1416 PGA_REQUEUE_HEAD)) != 0) 1417 goto reinsert; 1418 1419 /* 1420 * Wired pages may not be freed. Complete their removal 1421 * from the queue now to avoid needless revisits during 1422 * future scans. 1423 */ 1424 if (vm_page_wired(m)) { 1425 vm_page_dequeue_deferred(m); 1426 continue; 1427 } 1428 1429 if (object != m->object) { 1430 if (object != NULL) 1431 VM_OBJECT_WUNLOCK(object); 1432 object = m->object; 1433 if (!VM_OBJECT_TRYWLOCK(object)) { 1434 mtx_unlock(mtx); 1435 /* Depends on type-stability. */ 1436 VM_OBJECT_WLOCK(object); 1437 mtx_lock(mtx); 1438 goto recheck; 1439 } 1440 } 1441 1442 if (vm_page_busied(m)) { 1443 /* 1444 * Don't mess with busy pages. Leave them at 1445 * the front of the queue. Most likely, they 1446 * are being paged out and will leave the 1447 * queue shortly after the scan finishes. So, 1448 * they ought to be discounted from the 1449 * inactive count. 1450 */ 1451 addl_page_shortage++; 1452 goto reinsert; 1453 } 1454 1455 /* 1456 * Invalid pages can be easily freed. They cannot be 1457 * mapped, vm_page_free() asserts this. 1458 */ 1459 if (m->valid == 0) 1460 goto free_page; 1461 1462 /* 1463 * If the page has been referenced and the object is not dead, 1464 * reactivate or requeue the page depending on whether the 1465 * object is mapped. 1466 * 1467 * Test PGA_REFERENCED after calling pmap_ts_referenced() so 1468 * that a reference from a concurrently destroyed mapping is 1469 * observed here and now. 
1470 */ 1471 if (object->ref_count != 0) 1472 act_delta = pmap_ts_referenced(m); 1473 else { 1474 KASSERT(!pmap_page_is_mapped(m), 1475 ("page %p is mapped", m)); 1476 act_delta = 0; 1477 } 1478 if ((m->aflags & PGA_REFERENCED) != 0) { 1479 vm_page_aflag_clear(m, PGA_REFERENCED); 1480 act_delta++; 1481 } 1482 if (act_delta != 0) { 1483 if (object->ref_count != 0) { 1484 VM_CNT_INC(v_reactivated); 1485 vm_page_activate(m); 1486 1487 /* 1488 * Increase the activation count if the page 1489 * was referenced while in the inactive queue. 1490 * This makes it less likely that the page will 1491 * be returned prematurely to the inactive 1492 * queue. 1493 */ 1494 m->act_count += act_delta + ACT_ADVANCE; 1495 continue; 1496 } else if ((object->flags & OBJ_DEAD) == 0) { 1497 vm_page_aflag_set(m, PGA_REQUEUE); 1498 goto reinsert; 1499 } 1500 } 1501 1502 /* 1503 * If the page appears to be clean at the machine-independent 1504 * layer, then remove all of its mappings from the pmap in 1505 * anticipation of freeing it. If, however, any of the page's 1506 * mappings allow write access, then the page may still be 1507 * modified until the last of those mappings are removed. 1508 */ 1509 if (object->ref_count != 0) { 1510 vm_page_test_dirty(m); 1511 if (m->dirty == 0) 1512 pmap_remove_all(m); 1513 } 1514 1515 /* 1516 * Clean pages can be freed, but dirty pages must be sent back 1517 * to the laundry, unless they belong to a dead object. 1518 * Requeueing dirty pages from dead objects is pointless, as 1519 * they are being paged out and freed by the thread that 1520 * destroyed the object. 1521 */ 1522 if (m->dirty == 0) { 1523 free_page: 1524 /* 1525 * Because we dequeued the page and have already 1526 * checked for concurrent dequeue and enqueue 1527 * requests, we can safely disassociate the page 1528 * from the inactive queue. 1529 */ 1530 KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0, 1531 ("page %p has queue state", m)); 1532 m->queue = PQ_NONE; 1533 vm_page_free(m); 1534 page_shortage--; 1535 } else if ((object->flags & OBJ_DEAD) == 0) 1536 vm_page_launder(m); 1537 continue; 1538 reinsert: 1539 vm_pageout_reinsert_inactive(&ss, &rq, m); 1540 } 1541 if (mtx != NULL) 1542 mtx_unlock(mtx); 1543 if (object != NULL) 1544 VM_OBJECT_WUNLOCK(object); 1545 vm_pageout_reinsert_inactive(&ss, &rq, NULL); 1546 vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL); 1547 vm_pagequeue_lock(pq); 1548 vm_pageout_end_scan(&ss); 1549 vm_pagequeue_unlock(pq); 1550 1551 VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage); 1552 1553 /* 1554 * Wake up the laundry thread so that it can perform any needed 1555 * laundering. If we didn't meet our target, we're in shortfall and 1556 * need to launder more aggressively. If PQ_LAUNDRY is empty and no 1557 * swap devices are configured, the laundry thread has no work to do, so 1558 * don't bother waking it up. 1559 * 1560 * The laundry thread uses the number of inactive queue scans elapsed 1561 * since the last laundering to determine whether to launder again, so 1562 * keep count. 
1563 */ 1564 if (starting_page_shortage > 0) { 1565 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; 1566 vm_pagequeue_lock(pq); 1567 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE && 1568 (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) { 1569 if (page_shortage > 0) { 1570 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL; 1571 VM_CNT_INC(v_pdshortfalls); 1572 } else if (vmd->vmd_laundry_request != 1573 VM_LAUNDRY_SHORTFALL) 1574 vmd->vmd_laundry_request = 1575 VM_LAUNDRY_BACKGROUND; 1576 wakeup(&vmd->vmd_laundry_request); 1577 } 1578 vmd->vmd_clean_pages_freed += 1579 starting_page_shortage - page_shortage; 1580 vm_pagequeue_unlock(pq); 1581 } 1582 1583 /* 1584 * Wakeup the swapout daemon if we didn't free the targeted number of 1585 * pages. 1586 */ 1587 if (page_shortage > 0) 1588 vm_swapout_run(); 1589 1590 /* 1591 * If the inactive queue scan fails repeatedly to meet its 1592 * target, kill the largest process. 1593 */ 1594 vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage); 1595 1596 /* 1597 * Reclaim pages by swapping out idle processes, if configured to do so. 1598 */ 1599 vm_swapout_run_idle(); 1600 1601 /* 1602 * See the description of addl_page_shortage above. 1603 */ 1604 *addl_shortage = addl_page_shortage + deficit; 1605 1606 return (page_shortage <= 0); 1607 } 1608 1609 static int vm_pageout_oom_vote; 1610 1611 /* 1612 * The pagedaemon threads randlomly select one to perform the 1613 * OOM. Trying to kill processes before all pagedaemons 1614 * failed to reach free target is premature. 1615 */ 1616 static void 1617 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 1618 int starting_page_shortage) 1619 { 1620 int old_vote; 1621 1622 if (starting_page_shortage <= 0 || starting_page_shortage != 1623 page_shortage) 1624 vmd->vmd_oom_seq = 0; 1625 else 1626 vmd->vmd_oom_seq++; 1627 if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { 1628 if (vmd->vmd_oom) { 1629 vmd->vmd_oom = FALSE; 1630 atomic_subtract_int(&vm_pageout_oom_vote, 1); 1631 } 1632 return; 1633 } 1634 1635 /* 1636 * Do not follow the call sequence until OOM condition is 1637 * cleared. 1638 */ 1639 vmd->vmd_oom_seq = 0; 1640 1641 if (vmd->vmd_oom) 1642 return; 1643 1644 vmd->vmd_oom = TRUE; 1645 old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); 1646 if (old_vote != vm_ndomains - 1) 1647 return; 1648 1649 /* 1650 * The current pagedaemon thread is the last in the quorum to 1651 * start OOM. Initiate the selection and signaling of the 1652 * victim. 1653 */ 1654 vm_pageout_oom(VM_OOM_MEM); 1655 1656 /* 1657 * After one round of OOM terror, recall our vote. On the 1658 * next pass, current pagedaemon would vote again if the low 1659 * memory condition is still there, due to vmd_oom being 1660 * false. 1661 */ 1662 vmd->vmd_oom = FALSE; 1663 atomic_subtract_int(&vm_pageout_oom_vote, 1); 1664 } 1665 1666 /* 1667 * The OOM killer is the page daemon's action of last resort when 1668 * memory allocation requests have been stalled for a prolonged period 1669 * of time because it cannot reclaim memory. This function computes 1670 * the approximate number of physical pages that could be reclaimed if 1671 * the specified address space is destroyed. 1672 * 1673 * Private, anonymous memory owned by the address space is the 1674 * principal resource that we expect to recover after an OOM kill. 1675 * Since the physical pages mapped by the address space's COW entries 1676 * are typically shared pages, they are unlikely to be released and so 1677 * they are not counted. 
1678 * 1679 * To get to the point where the page daemon runs the OOM killer, its 1680 * efforts to write-back vnode-backed pages may have stalled. This 1681 * could be caused by a memory allocation deadlock in the write path 1682 * that might be resolved by an OOM kill. Therefore, physical pages 1683 * belonging to vnode-backed objects are counted, because they might 1684 * be freed without being written out first if the address space holds 1685 * the last reference to an unlinked vnode. 1686 * 1687 * Similarly, physical pages belonging to OBJT_PHYS objects are 1688 * counted because the address space might hold the last reference to 1689 * the object. 1690 */ 1691 static long 1692 vm_pageout_oom_pagecount(struct vmspace *vmspace) 1693 { 1694 vm_map_t map; 1695 vm_map_entry_t entry; 1696 vm_object_t obj; 1697 long res; 1698 1699 map = &vmspace->vm_map; 1700 KASSERT(!map->system_map, ("system map")); 1701 sx_assert(&map->lock, SA_LOCKED); 1702 res = 0; 1703 for (entry = map->header.next; entry != &map->header; 1704 entry = entry->next) { 1705 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 1706 continue; 1707 obj = entry->object.vm_object; 1708 if (obj == NULL) 1709 continue; 1710 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 1711 obj->ref_count != 1) 1712 continue; 1713 switch (obj->type) { 1714 case OBJT_DEFAULT: 1715 case OBJT_SWAP: 1716 case OBJT_PHYS: 1717 case OBJT_VNODE: 1718 res += obj->resident_page_count; 1719 break; 1720 } 1721 } 1722 return (res); 1723 } 1724 1725 static int vm_oom_ratelim_last; 1726 static int vm_oom_pf_secs = 10; 1727 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0, 1728 ""); 1729 static struct mtx vm_oom_ratelim_mtx; 1730 1731 void 1732 vm_pageout_oom(int shortage) 1733 { 1734 struct proc *p, *bigproc; 1735 vm_offset_t size, bigsize; 1736 struct thread *td; 1737 struct vmspace *vm; 1738 int now; 1739 bool breakout; 1740 1741 /* 1742 * For OOM requests originating from vm_fault(), there is a high 1743 * chance that a single large process faults simultaneously in 1744 * several threads. Also, on an active system running many 1745 * processes of middle-size, like buildworld, all of them 1746 * could fault almost simultaneously as well. 1747 * 1748 * To avoid killing too many processes, rate-limit OOMs 1749 * initiated by vm_fault() time-outs on the waits for free 1750 * pages. 1751 */ 1752 mtx_lock(&vm_oom_ratelim_mtx); 1753 now = ticks; 1754 if (shortage == VM_OOM_MEM_PF && 1755 (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) { 1756 mtx_unlock(&vm_oom_ratelim_mtx); 1757 return; 1758 } 1759 vm_oom_ratelim_last = now; 1760 mtx_unlock(&vm_oom_ratelim_mtx); 1761 1762 /* 1763 * We keep the process bigproc locked once we find it to keep anyone 1764 * from messing with it; however, there is a possibility of 1765 * deadlock if process B is bigproc and one of its child processes 1766 * attempts to propagate a signal to B while we are waiting for A's 1767 * lock while walking this list. To avoid this, we don't block on 1768 * the process lock but just skip a process if it is already locked. 1769 */ 1770 bigproc = NULL; 1771 bigsize = 0; 1772 sx_slock(&allproc_lock); 1773 FOREACH_PROC_IN_SYSTEM(p) { 1774 PROC_LOCK(p); 1775 1776 /* 1777 * If this is a system, protected or killed process, skip it. 
1778 */ 1779 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 1780 P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 1781 p->p_pid == 1 || P_KILLED(p) || 1782 (p->p_pid < 48 && swap_pager_avail != 0)) { 1783 PROC_UNLOCK(p); 1784 continue; 1785 } 1786 /* 1787 * If the process is in a non-running type state, 1788 * don't touch it. Check all the threads individually. 1789 */ 1790 breakout = false; 1791 FOREACH_THREAD_IN_PROC(p, td) { 1792 thread_lock(td); 1793 if (!TD_ON_RUNQ(td) && 1794 !TD_IS_RUNNING(td) && 1795 !TD_IS_SLEEPING(td) && 1796 !TD_IS_SUSPENDED(td) && 1797 !TD_IS_SWAPPED(td)) { 1798 thread_unlock(td); 1799 breakout = true; 1800 break; 1801 } 1802 thread_unlock(td); 1803 } 1804 if (breakout) { 1805 PROC_UNLOCK(p); 1806 continue; 1807 } 1808 /* 1809 * get the process size 1810 */ 1811 vm = vmspace_acquire_ref(p); 1812 if (vm == NULL) { 1813 PROC_UNLOCK(p); 1814 continue; 1815 } 1816 _PHOLD_LITE(p); 1817 PROC_UNLOCK(p); 1818 sx_sunlock(&allproc_lock); 1819 if (!vm_map_trylock_read(&vm->vm_map)) { 1820 vmspace_free(vm); 1821 sx_slock(&allproc_lock); 1822 PRELE(p); 1823 continue; 1824 } 1825 size = vmspace_swap_count(vm); 1826 if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF) 1827 size += vm_pageout_oom_pagecount(vm); 1828 vm_map_unlock_read(&vm->vm_map); 1829 vmspace_free(vm); 1830 sx_slock(&allproc_lock); 1831 1832 /* 1833 * If this process is bigger than the biggest one, 1834 * remember it. 1835 */ 1836 if (size > bigsize) { 1837 if (bigproc != NULL) 1838 PRELE(bigproc); 1839 bigproc = p; 1840 bigsize = size; 1841 } else { 1842 PRELE(p); 1843 } 1844 } 1845 sx_sunlock(&allproc_lock); 1846 if (bigproc != NULL) { 1847 if (vm_panic_on_oom != 0) 1848 panic("out of swap space"); 1849 PROC_LOCK(bigproc); 1850 killproc(bigproc, "out of swap space"); 1851 sched_nice(bigproc, PRIO_MIN); 1852 _PRELE(bigproc); 1853 PROC_UNLOCK(bigproc); 1854 } 1855 } 1856 1857 static bool 1858 vm_pageout_lowmem(void) 1859 { 1860 static int lowmem_ticks = 0; 1861 int last; 1862 1863 last = atomic_load_int(&lowmem_ticks); 1864 while ((u_int)(ticks - last) / hz >= lowmem_period) { 1865 if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0) 1866 continue; 1867 1868 /* 1869 * Decrease registered cache sizes. 1870 */ 1871 SDT_PROBE0(vm, , , vm__lowmem_scan); 1872 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES); 1873 1874 /* 1875 * We do this explicitly after the caches have been 1876 * drained above. If we have a severe page shortage on 1877 * our hands, completely drain all UMA zones. Otherwise, 1878 * just prune the caches. 1879 */ 1880 uma_reclaim(vm_page_count_min() ? UMA_RECLAIM_DRAIN_CPU : 1881 UMA_RECLAIM_TRIM); 1882 return (true); 1883 } 1884 return (false); 1885 } 1886 1887 static void 1888 vm_pageout_worker(void *arg) 1889 { 1890 struct vm_domain *vmd; 1891 u_int ofree; 1892 int addl_shortage, domain, shortage; 1893 bool target_met; 1894 1895 domain = (uintptr_t)arg; 1896 vmd = VM_DOMAIN(domain); 1897 shortage = 0; 1898 target_met = true; 1899 1900 /* 1901 * XXXKIB It could be useful to bind pageout daemon threads to 1902 * the cores belonging to the domain, from which vm_page_array 1903 * is allocated. 1904 */ 1905 1906 KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 1907 vmd->vmd_last_active_scan = ticks; 1908 1909 /* 1910 * The pageout daemon worker is never done, so loop forever. 1911 */ 1912 while (TRUE) { 1913 vm_domain_pageout_lock(vmd); 1914 1915 /* 1916 * We need to clear wanted before we check the limits. 
static bool
vm_pageout_lowmem(void)
{
	static int lowmem_ticks = 0;
	int last;

	last = atomic_load_int(&lowmem_ticks);
	while ((u_int)(ticks - last) / hz >= lowmem_period) {
		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
			continue;

		/*
		 * Decrease registered cache sizes.
		 */
		SDT_PROBE0(vm, , , vm__lowmem_scan);
		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);

		/*
		 * We do this explicitly after the caches have been
		 * drained above.  If we have a severe page shortage on
		 * our hands, completely drain all UMA zones.  Otherwise,
		 * just prune the caches.
		 */
		uma_reclaim(vm_page_count_min() ? UMA_RECLAIM_DRAIN_CPU :
		    UMA_RECLAIM_TRIM);
		return (true);
	}
	return (false);
}

static void
vm_pageout_worker(void *arg)
{
	struct vm_domain *vmd;
	u_int ofree;
	int addl_shortage, domain, shortage;
	bool target_met;

	domain = (uintptr_t)arg;
	vmd = VM_DOMAIN(domain);
	shortage = 0;
	target_met = true;

	/*
	 * XXXKIB It could be useful to bind pageout daemon threads to
	 * the cores belonging to the domain, from which vm_page_array
	 * is allocated.
	 */

	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
	vmd->vmd_last_active_scan = ticks;

	/*
	 * The pageout daemon worker is never done, so loop forever.
	 */
	while (TRUE) {
		vm_domain_pageout_lock(vmd);

		/*
		 * We need to clear wanted before we check the limits.  This
		 * prevents races with wakers who will check wanted after they
		 * reach the limit.
		 */
		atomic_store_int(&vmd->vmd_pageout_wanted, 0);

		/*
		 * Might the page daemon need to run again?
		 */
		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
			/*
			 * Yes.  If the scan failed to produce enough free
			 * pages, sleep uninterruptibly for some time in the
			 * hope that the laundry thread will clean some pages.
			 */
			vm_domain_pageout_unlock(vmd);
			if (!target_met)
				pause("pwait", hz / VM_INACT_SCAN_RATE);
		} else {
			/*
			 * No, sleep until the next wakeup or until pages
			 * need to have their reference stats updated.
			 */
			if (mtx_sleep(&vmd->vmd_pageout_wanted,
			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
				VM_CNT_INC(v_pdwakeups);
		}

		/* Prevent spurious wakeups by ensuring that wanted is set. */
		atomic_store_int(&vmd->vmd_pageout_wanted, 1);

		/*
		 * Use the controller to calculate how many pages to free in
		 * this interval, and scan the inactive queue.  If the lowmem
		 * handlers appear to have freed up some pages, subtract the
		 * difference from the inactive queue scan target.
		 */
		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
		if (shortage > 0) {
			ofree = vmd->vmd_free_count;
			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
				shortage -= min(vmd->vmd_free_count - ofree,
				    (u_int)shortage);
			target_met = vm_pageout_scan_inactive(vmd, shortage,
			    &addl_shortage);
		} else
			addl_shortage = 0;

		/*
		 * Scan the active queue.  A positive value for shortage
		 * indicates that we must aggressively deactivate pages to
		 * avoid a shortfall.
		 */
		shortage = vm_pageout_active_target(vmd) + addl_shortage;
		vm_pageout_scan_active(vmd, shortage);
	}
}

/*
 * vm_pageout_init initialises basic pageout daemon settings.
 */
static void
vm_pageout_init_domain(int domain)
{
	struct vm_domain *vmd;
	struct sysctl_oid *oid;

	vmd = VM_DOMAIN(domain);
	vmd->vmd_interrupt_free_min = 2;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (vmd->vmd_page_count > 1024)
		vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200;
	else
		vmd->vmd_free_min = 4;
	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
	    vmd->vmd_interrupt_free_min;
	vmd->vmd_free_reserved = vm_pageout_page_count +
	    vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768);
	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
	vmd->vmd_free_min += vmd->vmd_free_reserved;
	vmd->vmd_free_severe += vmd->vmd_free_reserved;
	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;

	/*
	 * Set the default wakeup threshold to be 10% below the paging
	 * target.  This keeps the steady state out of shortfall.
	 */
	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;

	/*
	 * Target amount of memory to move out of the laundry queue during a
	 * background laundering.  This is proportional to the amount of
	 * system memory.
	 */
	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
	    vmd->vmd_free_min) / 10;

	/* Initialize the pageout daemon pid controller. */
	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
	    vmd->vmd_free_target, PIDCTRL_BOUND,
	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
	    "pidctrl", CTLFLAG_RD, NULL, "");
	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
}

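/*
 * Rough illustration of the thresholds computed above, assuming a
 * single 4GB domain (about 1048576 4KB pages), MAXBSIZE of 64KB and
 * the default vm_pageout_page_count of 32; actual boot-time values
 * depend on the platform constants:
 *
 *	vmd_free_reserved		~1431 pages  (~6MB)
 *	vmd_free_severe			~4051 pages  (~16MB)
 *	vmd_free_min			~6672 pages  (~26MB)
 *	vmd_pageout_wakeup_thresh	~20151 pages (~79MB)
 *	vmd_free_target			~22395 pages (~87MB)
 *	vmd_inactive_target		~33592 pages (~131MB)
 *	vmd_background_launder_target	~1572 pages  (~6MB)
 */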
static void
vm_pageout_init(void)
{
	u_int freecount;
	int i;

	/*
	 * Initialize some paging parameters.
	 */
	if (vm_cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	freecount = 0;
	for (i = 0; i < vm_ndomains; i++) {
		struct vm_domain *vmd;

		vm_pageout_init_domain(i);
		vmd = VM_DOMAIN(i);
		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
		vm_cnt.v_free_target += vmd->vmd_free_target;
		vm_cnt.v_free_min += vmd->vmd_free_min;
		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
		vm_cnt.v_free_severe += vmd->vmd_free_severe;
		freecount += vmd->vmd_free_count;
	}

	/*
	 * Set interval in seconds for active scan.  We want to visit each
	 * page at least once every ten minutes.  This is to prevent worst
	 * case paging behaviors with stale active LRU.
	 */
	if (vm_pageout_update_period == 0)
		vm_pageout_update_period = 600;

	if (vm_page_max_user_wired == 0)
		vm_page_max_user_wired = freecount / 3;
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
	struct proc *p;
	struct thread *td;
	int error, first, i;

	p = curproc;
	td = curthread;

	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
	swap_pager_swap_init();
	for (first = -1, i = 0; i < vm_ndomains; i++) {
		if (VM_DOMAIN_EMPTY(i)) {
			if (bootverbose)
				printf("domain %d empty; skipping pageout\n",
				    i);
			continue;
		}
		if (first == -1)
			first = i;
		else {
			error = kthread_add(vm_pageout_worker,
			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
			if (error != 0)
				panic("starting pageout for domain %d: %d\n",
				    i, error);
		}
		error = kthread_add(vm_pageout_laundry_worker,
		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
		if (error != 0)
			panic("starting laundry for domain %d: %d", i, error);
	}
	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
	if (error != 0)
		panic("starting uma_reclaim helper, error %d\n", error);

	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
	vm_pageout_worker((void *)(uintptr_t)first);
}

/*
 * Perform an advisory wakeup of the page daemon.
 */
void
pagedaemon_wakeup(int domain)
{
	struct vm_domain *vmd;

	vmd = VM_DOMAIN(domain);
	vm_domain_pageout_assert_unlocked(vmd);
	if (curproc == pageproc)
		return;

	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
		vm_domain_pageout_lock(vmd);
		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
		wakeup(&vmd->vmd_pageout_wanted);
		vm_domain_pageout_unlock(vmd);
	}
}
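
/*
 * Note on the handshake with vm_pageout_worker(): a waker atomically
 * increments vmd_pageout_wanted and only the transition from zero takes
 * the pageout lock, re-asserts the flag and calls wakeup(), so a burst
 * of concurrent wakers issues a single wakeup.  The worker, in turn,
 * clears the flag under the lock before evaluating the paging limits,
 * so a waker that hits the limit after that evaluation still finds the
 * flag clear and posts a wakeup instead of assuming one is pending.
 */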