/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/blockcount.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

/* Pagedaemon activity rates, in subdivisions of one second. */
#define	VM_LAUNDER_RATE		10
#define	VM_INACT_SCAN_RATE	10

static int swapdev_enabled;
int vm_pageout_page_count = 32;

static int vm_panic_on_oom = 0;
SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
    CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
    "Panic on the given number of out-of-memory errors instead of "
    "killing the largest process");

static int vm_pageout_update_period;
SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
    CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
    "Maximum active LRU update period");

static int pageout_cpus_per_thread = 16;
SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
    &pageout_cpus_per_thread, 0,
    "Number of CPUs per pagedaemon worker thread");

static int lowmem_period = 10;
SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
    "Low memory callback period");

static int disable_swap_pageouts;
SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
    CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
    "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
    CTLFLAG_RD, &pageout_lock_miss, 0,
    "vget() lock misses during pageout");

static int vm_pageout_oom_seq = 12;
SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
    CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
    "back-to-back calls to oom detector to start OOM");

static int act_scan_laundry_weight = 3;
SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
    &act_scan_laundry_weight, 0,
    "weight given to clean vs. dirty pages in active queue scans");

static u_int vm_background_launder_rate = 4096;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
    &vm_background_launder_rate, 0,
    "background laundering rate, in kilobytes per second");

static u_int vm_background_launder_max = 20 * 1024;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
    &vm_background_launder_max, 0,
    "background laundering cap, in kilobytes");

u_long vm_page_max_user_wired;
SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
    &vm_page_max_user_wired, 0,
    "system-wide limit to user-wired page count");

static u_int isqrt(u_int num);
static int vm_pageout_launder(struct vm_domain *vmd, int launder,
    bool in_shortfall);
static void vm_pageout_laundry_worker(void *arg);

struct scan_state {
	struct vm_batchqueue bq;
	struct vm_pagequeue *pq;
	vm_page_t	marker;
	int		maxscan;
	int		scanned;
};

static void
vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
    vm_page_t marker, vm_page_t after, int maxscan)
{

	vm_pagequeue_assert_locked(pq);
	KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
	    ("marker %p already enqueued", marker));

	if (after == NULL)
		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
	else
		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
	vm_page_aflag_set(marker, PGA_ENQUEUED);

	vm_batchqueue_init(&ss->bq);
	ss->pq = pq;
	ss->marker = marker;
	ss->maxscan = maxscan;
	ss->scanned = 0;
	vm_pagequeue_unlock(pq);
}

static void
vm_pageout_end_scan(struct scan_state *ss)
{
	struct vm_pagequeue *pq;

	pq = ss->pq;
	vm_pagequeue_assert_locked(pq);
	KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
	pq->pq_pdpages += ss->scanned;
}

/*
 * Add a small number of queued pages to a batch queue for later processing
 * without the corresponding queue lock held.  The caller must have enqueued a
 * marker page at the desired start point for the scan.  Pages will be
 * physically dequeued if the caller so requests.  Otherwise, the returned
 * batch may contain marker pages, and it is up to the caller to handle them.
 *
 * When processing the batch queue, vm_pageout_defer() must be used to
 * determine whether the page has been logically dequeued since the batch was
 * collected.
 */
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
{
	struct vm_pagequeue *pq;
	vm_page_t m, marker, n;

	marker = ss->marker;
	pq = ss->pq;

	KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	vm_pagequeue_lock(pq);
	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
	    m = n, ss->scanned++) {
		n = TAILQ_NEXT(m, plinks.q);
		if ((m->flags & PG_MARKER) == 0) {
			KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
			    ("page %p not enqueued", m));
			KASSERT((m->flags & PG_FICTITIOUS) == 0,
			    ("Fictitious page %p cannot be in page queue", m));
			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
			    ("Unmanaged page %p cannot be in page queue", m));
		} else if (dequeue)
			continue;

		(void)vm_batchqueue_insert(&ss->bq, m);
		if (dequeue) {
			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
			vm_page_aflag_clear(m, PGA_ENQUEUED);
		}
	}
	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
	if (__predict_true(m != NULL))
		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
	else
		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
	if (dequeue)
		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
	vm_pagequeue_unlock(pq);
}

/*
 * Return the next page to be scanned, or NULL if the scan is complete.
 */
static __always_inline vm_page_t
vm_pageout_next(struct scan_state *ss, const bool dequeue)
{

	if (ss->bq.bq_cnt == 0)
		vm_pageout_collect_batch(ss, dequeue);
	return (vm_batchqueue_pop(&ss->bq));
}

/*
 * Determine whether processing of a page should be deferred and ensure that any
 * outstanding queue operations are processed.
 */
static __always_inline bool
vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
{
	vm_page_astate_t as;

	as = vm_page_astate_load(m);
	if (__predict_false(as.queue != queue ||
	    ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
		return (true);
	if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
		vm_page_pqbatch_submit(m, queue);
		return (true);
	}
	return (false);
}

/*
 * We can cluster only if the page is not clean, busy, or held, and the page is
 * in the laundry queue.
 */
static bool
vm_pageout_flushable(vm_page_t m)
{
	if (vm_page_tryxbusy(m) == 0)
		return (false);
	if (!vm_page_wired(m)) {
		vm_page_test_dirty(m);
		if (m->dirty != 0 && vm_page_in_laundry(m) &&
		    vm_page_try_remove_write(m))
			return (true);
	}
	vm_page_xunbusy(m);
	return (false);
}

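/*
 * Illustrative note (added for clarity, not part of the original source):
 * in vm_pageout_cluster() below, a dirty page at pindex 70 with the default
 * vm_pageout_page_count of 32 has alignment = 70 % 32 = 6, so the reverse
 * scan first tries to collect pindexes 64..70 and only then scans forward.
 * Clusters therefore tend to start on a vm_pageout_page_count-aligned
 * boundary, which keeps the pager's writes contiguous.
 */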
/*
 * Scan for pages at adjacent offsets within the given page's object that are
 * eligible for laundering, form a cluster of these pages and the given page,
 * and launder that cluster.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
	vm_page_t mc[2 * vm_pageout_page_count - 1];
	int alignment, num_ends, page_base, pageout_count;

	VM_OBJECT_ASSERT_WLOCKED(m->object);

	vm_page_assert_xbusied(m);

	alignment = m->pindex % vm_pageout_page_count;
	num_ends = 0;
	page_base = nitems(mc) / 2;
	pageout_count = 1;
	mc[page_base] = m;

	/*
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	m = mc[page_base];
	while (pageout_count < vm_pageout_page_count) {
		/*
		 * If we are at an alignment boundary, and haven't reached the
		 * last flushable page forward, stop here, and switch
		 * directions.
		 */
		if (alignment == pageout_count - 1 && num_ends == 0)
			break;

		m = vm_page_prev(m);
		if (m == NULL || !vm_pageout_flushable(m)) {
			num_ends++;
			break;
		}
		mc[--page_base] = m;
		++pageout_count;
	}
	m = mc[page_base + pageout_count - 1];
	while (num_ends != 2 && pageout_count < vm_pageout_page_count) {
		m = vm_page_next(m);
		if (m == NULL || !vm_pageout_flushable(m)) {
			if (num_ends++ == 0)
				/* Resume the reverse scan. */
				goto more;
			break;
		}
		mc[page_base + pageout_count] = m;
		++pageout_count;
	}

	return (vm_pageout_flush(&mc[page_base], pageout_count,
	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e., busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 *
 * Returned runlen is the count of pages between mreq and the first
 * page after mreq with status VM_PAGER_AGAIN.
 * *eio is set to TRUE if the pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 * for any page in that run.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Mark the pages shared busy and verify that they're
	 * valid and read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(vm_page_all_valid(mc[i]),
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
		    mc[i], i, count));
		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
		    ("vm_pageout_flush: writeable page %p", mc[i]));
		vm_page_busy_downgrade(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			/*
			 * The page may have moved since laundering started, in
			 * which case it should be left alone.
			 */
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			/* FALLTHROUGH */
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * The page is outside the object's range.  We pretend
			 * that the page out worked and clean the page, so the
			 * changes will be lost if the page is reclaimed by
			 * the page daemon.
			 */
			vm_page_undirty(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out to swap because the
			 * pager wasn't able to find space, place the page in
			 * the PQ_UNSWAPPABLE holding queue.  This is an
			 * optimization that prevents the page daemon from
			 * wasting CPU cycles on pages that cannot be reclaimed
			 * because no swap device is configured.
			 *
			 * Otherwise, reactivate the page so that it doesn't
			 * clog the laundry and inactive queues.  (We will try
			 * paging it out again later.)
			 */
			if ((object->flags & OBJ_SWAP) != 0 &&
			    pageout_status[i] == VM_PAGER_FAIL) {
				vm_page_unswappable(mt);
				numpagedout++;
			} else
				vm_page_activate(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

	atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

	if (swap_pager_nswapdev() == 1)
		atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
static int
vm_pageout_clean(vm_page_t m, int *numpagedout)
{
	struct vnode *vp;
	struct mount *mp;
	vm_object_t object;
	vm_pindex_t pindex;
	int error;

	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;
	vp = NULL;
	mp = NULL;

	/*
	 * The object is already known NOT to be dead.  It
	 * is possible for the vget() to block the whole
	 * pageout daemon, but the new low-memory handling
	 * code should prevent it.
	 *
	 * We can't wait forever for the vnode lock, we might
	 * deadlock due to a vn_read() getting stuck in
	 * vm_wait while holding this vnode.  We skip the
	 * vnode if we can't get it in a reasonable amount
	 * of time.
	 */
	if (object->type == OBJT_VNODE) {
		vm_page_xunbusy(m);
		vp = object->handle;
		if (vp->v_type == VREG &&
		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			mp = NULL;
			error = EDEADLK;
			goto unlock_all;
		}
		KASSERT(mp != NULL,
		    ("vp %p with NULL v_mount", vp));
		vm_object_reference_locked(object);
		pindex = m->pindex;
		VM_OBJECT_WUNLOCK(object);
		if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
			vp = NULL;
			error = EDEADLK;
			goto unlock_mp;
		}
		VM_OBJECT_WLOCK(object);

		/*
		 * Ensure that the object and vnode were not disassociated
		 * while locks were dropped.
		 */
		if (vp->v_object != object) {
			error = ENOENT;
			goto unlock_all;
		}

		/*
		 * While the object was unlocked, the page may have been:
		 * (1) moved to a different queue,
		 * (2) reallocated to a different object,
		 * (3) reallocated to a different offset, or
		 * (4) cleaned.
		 */
		if (!vm_page_in_laundry(m) || m->object != object ||
		    m->pindex != pindex || m->dirty == 0) {
			error = ENXIO;
			goto unlock_all;
		}

		/*
		 * The page may have been busied while the object lock was
		 * released.
		 */
		if (vm_page_tryxbusy(m) == 0) {
			error = EBUSY;
			goto unlock_all;
		}
	}

	/*
	 * Remove all writeable mappings, failing if the page is wired.
	 */
	if (!vm_page_try_remove_write(m)) {
		vm_page_xunbusy(m);
		error = EBUSY;
		goto unlock_all;
	}

	/*
	 * If a page is dirty, then it is either being washed
	 * (but not yet cleaned) or it is still in the
	 * laundry.  If it is still in the laundry, then we
	 * start the cleaning operation.
	 */
	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
		error = EIO;

unlock_all:
	VM_OBJECT_WUNLOCK(object);

unlock_mp:
	if (mp != NULL) {
		if (vp != NULL)
			vput(vp);
		vm_object_deallocate(object);
		vn_finished_write(mp);
	}

	return (error);
}

/*
 * Attempt to launder the specified number of pages.
 *
 * Returns the number of pages successfully laundered.
 */
static int
vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
	struct scan_state ss;
	struct vm_pagequeue *pq;
	vm_object_t object;
	vm_page_t m, marker;
	vm_page_astate_t new, old;
	int act_delta, error, numpagedout, queue, refs, starting_target;
	int vnodes_skipped;
	bool pageout_ok;

	object = NULL;
	starting_target = launder;
	vnodes_skipped = 0;

	/*
	 * Scan the laundry queues for pages eligible to be laundered.  We stop
	 * once the target number of dirty pages has been laundered, or once
	 * we've reached the end of the queue.  A single iteration of this loop
	 * may cause more than one page to be laundered because of clustering.
	 *
	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
	 * swap devices are configured.
	 */
	if (atomic_load_acq_int(&swapdev_enabled))
		queue = PQ_UNSWAPPABLE;
	else
		queue = PQ_LAUNDRY;

scan:
	marker = &vmd->vmd_markers[queue];
	pq = &vmd->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
		if (__predict_false((m->flags & PG_MARKER) != 0))
			continue;

		/*
		 * Don't touch a page that was removed from the queue after the
		 * page queue lock was released.  Otherwise, ensure that any
		 * pending queue operations, such as dequeues for wired pages,
		 * are handled.
		 */
		if (vm_pageout_defer(m, queue, true))
			continue;

		/*
		 * Lock the page's object.
		 */
		if (object == NULL || object != m->object) {
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);
			object = atomic_load_ptr(&m->object);
			if (__predict_false(object == NULL))
				/* The page is being freed by another thread. */
				continue;

			/* Depends on type-stability. */
			VM_OBJECT_WLOCK(object);
			if (__predict_false(m->object != object)) {
				VM_OBJECT_WUNLOCK(object);
				object = NULL;
				continue;
			}
		}

		if (vm_page_tryxbusy(m) == 0)
			continue;

		/*
		 * Check for wirings now that we hold the object lock and have
		 * exclusively busied the page.  If the page is mapped, it may
		 * still be wired by pmap lookups.  The call to
		 * vm_page_try_remove_all() below atomically checks for such
		 * wirings and removes mappings.  If the page is unmapped, the
		 * wire count is guaranteed not to increase after this check.
		 */
		if (__predict_false(vm_page_wired(m)))
			goto skip_page;

		/*
		 * Invalid pages can be easily freed.  They cannot be
		 * mapped; vm_page_free() asserts this.
		 */
		if (vm_page_none_valid(m))
			goto free_page;

		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;

		for (old = vm_page_astate_load(m);;) {
			/*
			 * Check to see if the page has been removed from the
			 * queue since the first such check.  Leave it alone if
			 * so, discarding any references collected by
			 * pmap_ts_referenced().
			 */
			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
				goto skip_page;

			new = old;
			act_delta = refs;
			if ((old.flags & PGA_REFERENCED) != 0) {
				new.flags &= ~PGA_REFERENCED;
				act_delta++;
			}
			if (act_delta == 0) {
				;
			} else if (object->ref_count != 0) {
				/*
				 * Increase the activation count if the page was
				 * referenced while in the laundry queue.  This
				 * makes it less likely that the page will be
				 * returned prematurely to the laundry queue.
				 */
				new.act_count += ACT_ADVANCE +
				    act_delta;
				if (new.act_count > ACT_MAX)
					new.act_count = ACT_MAX;

				new.flags &= ~PGA_QUEUE_OP_MASK;
				new.flags |= PGA_REQUEUE;
				new.queue = PQ_ACTIVE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;

				/*
				 * If this was a background laundering, count
				 * activated pages towards our target.  The
				 * purpose of background laundering is to ensure
				 * that pages are eventually cycled through the
				 * laundry queue, and an activation is a valid
				 * way out.
				 */
				if (!in_shortfall)
					launder--;
				VM_CNT_INC(v_reactivated);
				goto skip_page;
			} else if ((object->flags & OBJ_DEAD) == 0) {
				new.flags |= PGA_REQUEUE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;
				goto skip_page;
			}
			break;
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of freeing it.  If, however, any of the page's
		 * mappings allow write access, then the page may still be
		 * modified until the last of those mappings are removed.
		 */
		if (object->ref_count != 0) {
			vm_page_test_dirty(m);
			if (m->dirty == 0 && !vm_page_try_remove_all(m))
				goto skip_page;
		}

		/*
		 * Clean pages are freed, and dirty pages are paged out unless
		 * they belong to a dead object.  Requeueing dirty pages from
		 * dead objects is pointless, as they are being paged out and
		 * freed by the thread that destroyed the object.
		 */
		if (m->dirty == 0) {
free_page:
			/*
			 * Now we are guaranteed that no other threads are
			 * manipulating the page, check for a last-second
			 * reference.
			 */
			if (vm_pageout_defer(m, queue, true))
				goto skip_page;
			vm_page_free(m);
			VM_CNT_INC(v_dfree);
		} else if ((object->flags & OBJ_DEAD) == 0) {
			if ((object->flags & OBJ_SWAP) != 0)
				pageout_ok = disable_swap_pageouts == 0;
			else
				pageout_ok = true;
			if (!pageout_ok) {
				vm_page_launder(m);
				goto skip_page;
			}

			/*
			 * Form a cluster with adjacent, dirty pages from the
			 * same object, and page out that entire cluster.
			 *
			 * The adjacent, dirty pages must also be in the
			 * laundry.  However, their mappings are not checked
			 * for new references.  Consequently, a recently
			 * referenced page may be paged out.  However, that
			 * page will not be prematurely reclaimed.  After page
			 * out, the page will be placed in the inactive queue,
			 * where any new references will be detected and the
			 * page reactivated.
			 */
			error = vm_pageout_clean(m, &numpagedout);
			if (error == 0) {
				launder -= numpagedout;
				ss.scanned += numpagedout;
			} else if (error == EDEADLK) {
				pageout_lock_miss++;
				vnodes_skipped++;
			}
			object = NULL;
		} else {
skip_page:
			vm_page_xunbusy(m);
		}
	}
	if (object != NULL) {
		VM_OBJECT_WUNLOCK(object);
		object = NULL;
	}
	vm_pagequeue_lock(pq);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);

	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
		queue = PQ_LAUNDRY;
		goto scan;
	}

	/*
	 * Wake up the sync daemon if we skipped a vnode in a writeable object
	 * and we didn't launder enough pages.
	 */
	if (vnodes_skipped > 0 && launder > 0)
		(void)speedup_syncer();

	return (starting_target - launder);
}

/*
 * Compute the integer square root.
 */
static u_int
isqrt(u_int num)
{
	u_int bit, root, tmp;

	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
	root = 0;
	while (bit != 0) {
		tmp = root + bit;
		root >>= 1;
		if (num >= tmp) {
			num -= tmp;
			root += bit;
		}
		bit >>= 2;
	}
	return (root);
}

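/*
 * Example (added for clarity, not in the original source): isqrt() is the
 * classic bit-by-bit integer square root, so isqrt(10) == 3 and
 * isqrt(1000000) == 1000.  It is used below to grow the background
 * laundering threshold sub-linearly in the number of clean pages freed
 * since the last laundering run.
 */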
/*
 * Perform the work of the laundry thread: periodically wake up and determine
 * whether any pages need to be laundered.  If so, determine the number of
 * pages that need to be laundered, and launder them.
 */
static void
vm_pageout_laundry_worker(void *arg)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	uint64_t nclean, ndirty, nfreed;
	int domain, last_target, launder, shortfall, shortfall_cycle, target;
	bool in_shortfall;

	domain = (uintptr_t)arg;
	vmd = VM_DOMAIN(domain);
	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));

	shortfall = 0;
	in_shortfall = false;
	shortfall_cycle = 0;
	last_target = target = 0;
	nfreed = 0;

	/*
	 * Calls to these handlers are serialized by the swap syscall lock.
	 */
	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
	    EVENTHANDLER_PRI_ANY);
	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
	    EVENTHANDLER_PRI_ANY);

	/*
	 * The pageout laundry worker is never done, so loop forever.
	 */
	for (;;) {
		KASSERT(target >= 0, ("negative target %d", target));
		KASSERT(shortfall_cycle >= 0,
		    ("negative cycle %d", shortfall_cycle));
		launder = 0;

		/*
		 * First determine whether we need to launder pages to meet a
		 * shortage of free pages.
		 */
		if (shortfall > 0) {
			in_shortfall = true;
			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
			target = shortfall;
		} else if (!in_shortfall)
			goto trybackground;
		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
			/*
			 * We recently entered shortfall and began laundering
			 * pages.  If we have completed that laundering run
			 * (and we are no longer in shortfall) or we have met
			 * our laundry target through other activity, then we
			 * can stop laundering pages.
			 */
			in_shortfall = false;
			target = 0;
			goto trybackground;
		}
		launder = target / shortfall_cycle--;
		goto dolaundry;

		/*
		 * There's no immediate need to launder any pages; see if we
		 * meet the conditions to perform background laundering:
		 *
		 * 1. The ratio of dirty to clean inactive pages exceeds the
		 *    background laundering threshold, or
		 * 2. we haven't yet reached the target of the current
		 *    background laundering run.
		 *
		 * The background laundering threshold is not a constant.
		 * Instead, it is a slowly growing function of the number of
		 * clean pages freed by the page daemon since the last
		 * background laundering.  Thus, as the ratio of dirty to
		 * clean inactive pages grows, the amount of memory pressure
		 * required to trigger laundering decreases.  We ensure
		 * that the threshold is non-zero after an inactive queue
		 * scan, even if that scan failed to free a single clean page.
		 */
trybackground:
		nclean = vmd->vmd_free_count +
		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
			target = vmd->vmd_background_launder_target;
		}

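		/*
		 * Worked example with hypothetical numbers (added for
		 * clarity, not from the original source): if
		 * vmd_free_target - vmd_free_min is 20000 pages and nfreed
		 * is 0, then howmany(1, 20000) == 1 and isqrt(1) == 1, so a
		 * background run starts only once ndirty >= nclean.  If
		 * instead nfreed is 80000, then howmany(80001, 20000) == 5
		 * and isqrt(5) == 2, so a run starts once
		 * ndirty >= nclean / 2.
		 */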
		/*
		 * We have a non-zero background laundering target.  If we've
		 * laundered up to our maximum without observing a page daemon
		 * request, just stop.  This is a safety belt that ensures we
		 * don't launder an excessive amount if memory pressure is low
		 * and the ratio of dirty to clean pages is large.  Otherwise,
		 * proceed at the background laundering rate.
		 */
		if (target > 0) {
			if (nfreed > 0) {
				nfreed = 0;
				last_target = target;
			} else if (last_target - target >=
			    vm_background_launder_max * PAGE_SIZE / 1024) {
				target = 0;
			}
			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
			launder /= VM_LAUNDER_RATE;
			if (launder > target)
				launder = target;
		}

dolaundry:
		if (launder > 0) {
			/*
			 * Because of I/O clustering, the number of laundered
			 * pages could exceed "target" by the maximum size of
			 * a cluster minus one.
			 */
			target -= min(vm_pageout_launder(vmd, launder,
			    in_shortfall), target);
			pause("laundp", hz / VM_LAUNDER_RATE);
		}

		/*
		 * If we're not currently laundering pages and the page daemon
		 * hasn't posted a new request, sleep until the page daemon
		 * kicks us.
		 */
		vm_pagequeue_lock(pq);
		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
			(void)mtx_sleep(&vmd->vmd_laundry_request,
			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);

		/*
		 * If the pagedaemon has indicated that it's in shortfall, start
		 * a shortfall laundering unless we're already in the middle of
		 * one.  This may preempt a background laundering.
		 */
		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
		    (!in_shortfall || shortfall_cycle == 0)) {
			shortfall = vm_laundry_target(vmd) +
			    vmd->vmd_pageout_deficit;
			target = 0;
		} else
			shortfall = 0;

		if (target == 0)
			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
		nfreed += vmd->vmd_clean_pages_freed;
		vmd->vmd_clean_pages_freed = 0;
		vm_pagequeue_unlock(pq);
	}
}

/*
 * Compute the number of pages we want to try to move from the
 * active queue to either the inactive or laundry queue.
 *
 * When scanning active pages during a shortage, we make clean pages
 * count more heavily towards the page shortage than dirty pages.
 * This is because dirty pages must be laundered before they can be
 * reused and thus have less utility when attempting to quickly
 * alleviate a free page shortage.  However, this weighting also
 * causes the scan to deactivate dirty pages more aggressively,
 * improving the effectiveness of clustering.
 */
static int
vm_pageout_active_target(struct vm_domain *vmd)
{
	int shortage;

	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
	shortage *= act_scan_laundry_weight;
	return (shortage);
}

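/*
 * Worked example with hypothetical numbers (added for clarity, not from the
 * original source): with act_scan_laundry_weight == 3, an inactive plus
 * paging target of 1000 pages, 400 pages in PQ_INACTIVE and 300 in
 * PQ_LAUNDRY, the active scan target is (1000 - (400 + 300 / 3)) * 3 ==
 * 1500.  In vm_pageout_scan_active() below, deactivating a clean page then
 * counts act_scan_laundry_weight (3) against this target, while moving a
 * dirty page to the laundry counts 1.
 */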
/*
 * Scan the active queue.  If there is no shortage of inactive pages, scan a
 * small portion of the queue in order to maintain quasi-LRU.
 */
static void
vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
{
	struct scan_state ss;
	vm_object_t object;
	vm_page_t m, marker;
	struct vm_pagequeue *pq;
	vm_page_astate_t old, new;
	long min_scan;
	int act_delta, max_scan, ps_delta, refs, scan_tick;
	uint8_t nqueue;

	marker = &vmd->vmd_markers[PQ_ACTIVE];
	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
	vm_pagequeue_lock(pq);

	/*
	 * If we're just idle polling, attempt to visit every
	 * active page within 'update_period' seconds.
	 */
	scan_tick = ticks;
	if (vm_pageout_update_period != 0) {
		min_scan = pq->pq_cnt;
		min_scan *= scan_tick - vmd->vmd_last_active_scan;
		min_scan /= hz * vm_pageout_update_period;
	} else
		min_scan = 0;
	if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
		vmd->vmd_last_active_scan = scan_tick;

	/*
	 * Scan the active queue for pages that can be deactivated.  Update
	 * the per-page activity counter and use it to identify deactivation
	 * candidates.  Held pages may be deactivated.
	 *
	 * To avoid requeuing each page that remains in the active queue, we
	 * implement the CLOCK algorithm.  To keep the implementation of the
	 * enqueue operation consistent for all page queues, we use two hands,
	 * represented by marker pages.  Scans begin at the first hand, which
	 * precedes the second hand in the queue.  When the two hands meet,
	 * they are moved back to the head and tail of the queue, respectively,
	 * and scanning resumes.
	 */
	max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
act_scan:
	vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
	while ((m = vm_pageout_next(&ss, false)) != NULL) {
		if (__predict_false(m == &vmd->vmd_clock[1])) {
			vm_pagequeue_lock(pq);
			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
			TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
			    plinks.q);
			TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
			    plinks.q);
			max_scan -= ss.scanned;
			vm_pageout_end_scan(&ss);
			goto act_scan;
		}
		if (__predict_false((m->flags & PG_MARKER) != 0))
			continue;

		/*
		 * Don't touch a page that was removed from the queue after the
		 * page queue lock was released.  Otherwise, ensure that any
		 * pending queue operations, such as dequeues for wired pages,
		 * are handled.
		 */
		if (vm_pageout_defer(m, PQ_ACTIVE, true))
			continue;

		/*
		 * A page's object pointer may be set to NULL before
		 * the object lock is acquired.
		 */
		object = atomic_load_ptr(&m->object);
		if (__predict_false(object == NULL))
			/*
			 * The page has been removed from its object.
			 */
			continue;

		/* Deferred free of swap space. */
		if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
		    VM_OBJECT_TRYWLOCK(object)) {
			if (m->object == object)
				vm_pager_page_unswapped(m);
			VM_OBJECT_WUNLOCK(object);
		}

		/*
		 * Check to see "how much" the page has been used.
		 *
		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
		 * that a reference from a concurrently destroyed mapping is
		 * observed here and now.
		 *
		 * Perform an unsynchronized object ref count check.  While
		 * the page lock ensures that the page is not reallocated to
		 * another object, in particular, one with unmanaged mappings
		 * that cannot support pmap_ts_referenced(), two races are,
		 * nonetheless, possible:
		 * 1) The count was transitioning to zero, but we saw a non-
		 *    zero value.  pmap_ts_referenced() will return zero
		 *    because the page is not mapped.
		 * 2) The count was transitioning to one, but we saw zero.
		 *    This race delays the detection of a new reference.  At
		 *    worst, we will deactivate and reactivate the page.
		 */
		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;

		old = vm_page_astate_load(m);
		do {
			/*
			 * Check to see if the page has been removed from the
			 * queue since the first such check.  Leave it alone if
			 * so, discarding any references collected by
			 * pmap_ts_referenced().
			 */
			if (__predict_false(_vm_page_queue(old) == PQ_NONE)) {
				ps_delta = 0;
				break;
			}

			/*
			 * Advance or decay the act_count based on recent usage.
			 */
			new = old;
			act_delta = refs;
			if ((old.flags & PGA_REFERENCED) != 0) {
				new.flags &= ~PGA_REFERENCED;
				act_delta++;
			}
			if (act_delta != 0) {
				new.act_count += ACT_ADVANCE + act_delta;
				if (new.act_count > ACT_MAX)
					new.act_count = ACT_MAX;
			} else {
				new.act_count -= min(new.act_count,
				    ACT_DECLINE);
			}

			if (new.act_count > 0) {
				/*
				 * Adjust the activation count and keep the page
				 * in the active queue.  The count might be left
				 * unchanged if it is saturated.  The page may
				 * have been moved to a different queue since we
				 * started the scan, in which case we move it
				 * back.
				 */
				ps_delta = 0;
				if (old.queue != PQ_ACTIVE) {
					new.flags &= ~PGA_QUEUE_OP_MASK;
					new.flags |= PGA_REQUEUE;
					new.queue = PQ_ACTIVE;
				}
			} else {
				/*
				 * When not short for inactive pages, let dirty
				 * pages go through the inactive queue before
				 * moving to the laundry queue.  This gives them
				 * some extra time to be reactivated,
				 * potentially avoiding an expensive pageout.
				 * However, during a page shortage, the inactive
				 * queue is necessarily small, and so dirty
				 * pages would only spend a trivial amount of
				 * time in the inactive queue.  Therefore, we
				 * might as well place them directly in the
				 * laundry queue to reduce queuing overhead.
				 *
				 * Calling vm_page_test_dirty() here would
				 * require acquisition of the object's write
				 * lock.  However, during a page shortage,
				 * directing dirty pages into the laundry queue
				 * is only an optimization and not a
				 * requirement.  Therefore, we simply rely on
				 * the opportunistic updates to the page's dirty
				 * field by the pmap.
				 */
				if (page_shortage <= 0) {
					nqueue = PQ_INACTIVE;
					ps_delta = 0;
				} else if (m->dirty == 0) {
					nqueue = PQ_INACTIVE;
					ps_delta = act_scan_laundry_weight;
				} else {
					nqueue = PQ_LAUNDRY;
					ps_delta = 1;
				}

				new.flags &= ~PGA_QUEUE_OP_MASK;
				new.flags |= PGA_REQUEUE;
				new.queue = nqueue;
			}
		} while (!vm_page_pqstate_commit(m, &old, new));

		page_shortage -= ps_delta;
	}
	vm_pagequeue_lock(pq);
	TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
	TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);
}

static int
vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
    vm_page_t m)
{
	vm_page_astate_t as;

	vm_pagequeue_assert_locked(pq);

	as = vm_page_astate_load(m);
	if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
		return (0);
	vm_page_aflag_set(m, PGA_ENQUEUED);
	TAILQ_INSERT_BEFORE(marker, m, plinks.q);
	return (1);
}

/*
 * Re-add stuck pages to the inactive queue.  We will examine them again
 * during the next scan.
 * If the queue state of a page has changed since it was physically removed
 * from the page queue in vm_pageout_collect_batch(), don't do anything with
 * that page.
 */
static void
vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
    vm_page_t m)
{
	struct vm_pagequeue *pq;
	vm_page_t marker;
	int delta;

	delta = 0;
	marker = ss->marker;
	pq = ss->pq;

	if (m != NULL) {
		if (vm_batchqueue_insert(bq, m) != 0)
			return;
		vm_pagequeue_lock(pq);
		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
	} else
		vm_pagequeue_lock(pq);
	while ((m = vm_batchqueue_pop(bq)) != NULL)
		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
	vm_pagequeue_cnt_add(pq, delta);
	vm_pagequeue_unlock(pq);
	vm_batchqueue_init(bq);
}

static void
vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
{
	struct timeval start, end;
	struct scan_state ss;
	struct vm_batchqueue rq;
	struct vm_page marker_page;
	vm_page_t m, marker;
	struct vm_pagequeue *pq;
	vm_object_t object;
	vm_page_astate_t old, new;
	int act_delta, addl_page_shortage, starting_page_shortage, refs;

	object = NULL;
	vm_batchqueue_init(&rq);
	getmicrouptime(&start);

	/*
	 * The addl_page_shortage is an estimate of the number of temporarily
	 * stuck pages in the inactive queue.  In other words, the
	 * number of pages from the inactive count that should be
	 * discounted in setting the target for the active queue scan.
	 */
	addl_page_shortage = 0;

	/*
	 * Start scanning the inactive queue for pages that we can free.  The
	 * scan will stop when we reach the target or we have scanned the
	 * entire queue.  (Note that m->a.act_count is not used to make
	 * decisions for the inactive queue, only for the active queue.)
	 */
	starting_page_shortage = page_shortage;
	marker = &marker_page;
	vm_page_init_marker(marker, PQ_INACTIVE, 0);
	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
	vm_pagequeue_lock(pq);
	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
	while (page_shortage > 0) {
		/*
		 * If we need to refill the scan batch queue, release any
		 * optimistically held object lock.  This gives someone else a
		 * chance to grab the lock, and also avoids holding it while we
		 * do unrelated work.
		 */
		if (object != NULL && vm_batchqueue_empty(&ss.bq)) {
			VM_OBJECT_WUNLOCK(object);
			object = NULL;
		}

		m = vm_pageout_next(&ss, true);
		if (m == NULL)
			break;
		KASSERT((m->flags & PG_MARKER) == 0,
		    ("marker page %p was dequeued", m));

		/*
		 * Don't touch a page that was removed from the queue after the
		 * page queue lock was released.  Otherwise, ensure that any
		 * pending queue operations, such as dequeues for wired pages,
		 * are handled.
		 */
		if (vm_pageout_defer(m, PQ_INACTIVE, false))
			continue;

		/*
		 * Lock the page's object.
		 */
		if (object == NULL || object != m->object) {
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);
			object = atomic_load_ptr(&m->object);
			if (__predict_false(object == NULL))
				/* The page is being freed by another thread. */
				continue;

			/* Depends on type-stability. */
			VM_OBJECT_WLOCK(object);
			if (__predict_false(m->object != object)) {
				VM_OBJECT_WUNLOCK(object);
				object = NULL;
				goto reinsert;
			}
		}

		if (vm_page_tryxbusy(m) == 0) {
			/*
			 * Don't mess with busy pages.  Leave them at
			 * the front of the queue.  Most likely, they
			 * are being paged out and will leave the
			 * queue shortly after the scan finishes.  So,
			 * they ought to be discounted from the
			 * inactive count.
			 */
			addl_page_shortage++;
			goto reinsert;
		}

		/* Deferred free of swap space. */
		if ((m->a.flags & PGA_SWAP_FREE) != 0)
			vm_pager_page_unswapped(m);

		/*
		 * Check for wirings now that we hold the object lock and have
		 * exclusively busied the page.  If the page is mapped, it may
		 * still be wired by pmap lookups.  The call to
		 * vm_page_try_remove_all() below atomically checks for such
		 * wirings and removes mappings.  If the page is unmapped, the
		 * wire count is guaranteed not to increase after this check.
		 */
		if (__predict_false(vm_page_wired(m)))
			goto skip_page;

		/*
		 * Invalid pages can be easily freed.  They cannot be
		 * mapped, vm_page_free() asserts this.
		 */
		if (vm_page_none_valid(m))
			goto free_page;

		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;

		for (old = vm_page_astate_load(m);;) {
			/*
			 * Check to see if the page has been removed from the
			 * queue since the first such check.  Leave it alone if
			 * so, discarding any references collected by
			 * pmap_ts_referenced().
			 */
			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
				goto skip_page;

			new = old;
			act_delta = refs;
			if ((old.flags & PGA_REFERENCED) != 0) {
				new.flags &= ~PGA_REFERENCED;
				act_delta++;
			}
			if (act_delta == 0) {
				;
			} else if (object->ref_count != 0) {
				/*
				 * Increase the activation count if the
				 * page was referenced while in the
				 * inactive queue.  This makes it less
				 * likely that the page will be returned
				 * prematurely to the inactive queue.
				 */
				new.act_count += ACT_ADVANCE +
				    act_delta;
				if (new.act_count > ACT_MAX)
					new.act_count = ACT_MAX;

				new.flags &= ~PGA_QUEUE_OP_MASK;
				new.flags |= PGA_REQUEUE;
				new.queue = PQ_ACTIVE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;

				VM_CNT_INC(v_reactivated);
				goto skip_page;
			} else if ((object->flags & OBJ_DEAD) == 0) {
				new.queue = PQ_INACTIVE;
				new.flags |= PGA_REQUEUE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;
				goto skip_page;
			}
			break;
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of freeing it.  If, however, any of the page's
		 * mappings allow write access, then the page may still be
		 * modified until the last of those mappings are removed.
		 */
		if (object->ref_count != 0) {
			vm_page_test_dirty(m);
			if (m->dirty == 0 && !vm_page_try_remove_all(m))
				goto skip_page;
		}

		/*
		 * Clean pages can be freed, but dirty pages must be sent back
		 * to the laundry, unless they belong to a dead object.
		 * Requeueing dirty pages from dead objects is pointless, as
		 * they are being paged out and freed by the thread that
		 * destroyed the object.
		 */
		if (m->dirty == 0) {
free_page:
			/*
			 * Now we are guaranteed that no other threads are
			 * manipulating the page, check for a last-second
			 * reference that would save it from doom.
			 */
			if (vm_pageout_defer(m, PQ_INACTIVE, false))
				goto skip_page;

			/*
			 * Because we dequeued the page and have already checked
			 * for pending dequeue and enqueue requests, we can
			 * safely disassociate the page from the inactive queue
			 * without holding the queue lock.
			 */
			m->a.queue = PQ_NONE;
			vm_page_free(m);
			page_shortage--;
			continue;
		}
		if ((object->flags & OBJ_DEAD) == 0)
			vm_page_launder(m);
skip_page:
		vm_page_xunbusy(m);
		continue;
reinsert:
		vm_pageout_reinsert_inactive(&ss, &rq, m);
	}
	if (object != NULL)
		VM_OBJECT_WUNLOCK(object);
	vm_pageout_reinsert_inactive(&ss, &rq, NULL);
	vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
	vm_pagequeue_lock(pq);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);

	/*
	 * Record the remaining shortage, the progress made, and the rate at
	 * which it was made.
	 */
	atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
	getmicrouptime(&end);
	timevalsub(&end, &start);
	atomic_add_int(&vmd->vmd_inactive_us,
	    end.tv_sec * 1000000 + end.tv_usec);
	atomic_add_int(&vmd->vmd_inactive_freed,
	    starting_page_shortage - page_shortage);
}

/*
 * Dispatch a number of inactive threads according to load and collect the
 * results to present a coherent view of paging activity on this domain.
 */
static int
vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
{
	u_int freed, pps, slop, threads, us;

	vmd->vmd_inactive_shortage = shortage;
	slop = 0;

	/*
	 * If we have more work than we can do in a quarter of our interval, we
	 * fire off multiple threads to process it.
	 */
	threads = vmd->vmd_inactive_threads;
	if (threads > 1 && vmd->vmd_inactive_pps != 0 &&
	    shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
		vmd->vmd_inactive_shortage /= threads;
		slop = shortage % threads;
		vm_domain_pageout_lock(vmd);
		blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
		blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
		wakeup(&vmd->vmd_inactive_shortage);
		vm_domain_pageout_unlock(vmd);
	}

	/* Run the local thread scan. */
	vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop);

	/*
	 * Block until helper threads report results and then accumulate
	 * totals.
	 */
	blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
	freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
	VM_CNT_ADD(v_dfree, freed);

	/*
	 * Calculate the per-thread paging rate with an exponential decay of
	 * prior results.  Careful to avoid integer rounding errors with large
	 * us values.
	 */
	us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
	if (us > 1000000)
		/* Keep rounding to tenths */
		pps = (freed * 10) / ((us * 10) / 1000000);
	else
		pps = (1000000 / us) * freed;
	vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);

	return (shortage - freed);
}

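/*
 * Example of the rate computation above (added for clarity, with made-up
 * numbers): if one pass freed 5000 pages in 2500000us, then
 * (us * 10) / 1000000 == 25 and pps == 50000 / 25 == 2000 pages/sec.
 * vmd_inactive_pps then moves halfway toward that value, giving a simple
 * exponentially decaying average that is used above to decide when to fan
 * the scan out across multiple pagedaemon worker threads.
 */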
/*
 * Attempt to reclaim the requested number of pages from the inactive queue.
 * Returns true if the shortage was addressed.
 */
static int
vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
{
	struct vm_pagequeue *pq;
	u_int addl_page_shortage, deficit, page_shortage;
	u_int starting_page_shortage;

	/*
	 * vmd_pageout_deficit counts the number of pages requested in
	 * allocations that failed because of a free page shortage.  We assume
	 * that the allocations will be reattempted and thus include the deficit
	 * in our scan target.
	 */
	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
	starting_page_shortage = shortage + deficit;

	/*
	 * Run the inactive scan on as many threads as is necessary.
	 */
	page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
	addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);

	/*
	 * Wake up the laundry thread so that it can perform any needed
	 * laundering.  If we didn't meet our target, we're in shortfall and
	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
	 * swap devices are configured, the laundry thread has no work to do, so
	 * don't bother waking it up.
	 *
	 * The laundry thread uses the number of inactive queue scans elapsed
	 * since the last laundering to determine whether to launder again, so
	 * keep count.
	 */
	if (starting_page_shortage > 0) {
		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
		vm_pagequeue_lock(pq);
		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
			if (page_shortage > 0) {
				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
				VM_CNT_INC(v_pdshortfalls);
			} else if (vmd->vmd_laundry_request !=
			    VM_LAUNDRY_SHORTFALL)
				vmd->vmd_laundry_request =
				    VM_LAUNDRY_BACKGROUND;
			wakeup(&vmd->vmd_laundry_request);
		}
		vmd->vmd_clean_pages_freed +=
		    starting_page_shortage - page_shortage;
		vm_pagequeue_unlock(pq);
	}

	/*
	 * If the inactive queue scan fails repeatedly to meet its
	 * target, kill the largest process.
	 */
	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);

	/*
	 * See the description of addl_page_shortage above.
	 */
	*addl_shortage = addl_page_shortage + deficit;

	return (page_shortage <= 0);
}

static int vm_pageout_oom_vote;

/*
 * The pagedaemon threads randomly select one among themselves to perform
 * the OOM kill.  Trying to kill processes before all pagedaemons have
 * failed to reach the free page target is premature.
 */
static void
vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage)
{
	int old_vote;

	if (starting_page_shortage <= 0 || starting_page_shortage !=
	    page_shortage)
		vmd->vmd_oom_seq = 0;
	else
		vmd->vmd_oom_seq++;
	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
		if (vmd->vmd_oom) {
			vmd->vmd_oom = FALSE;
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
		}
		return;
	}

	/*
	 * Do not follow the call sequence until OOM condition is
	 * cleared.
	 */
	vmd->vmd_oom_seq = 0;

	if (vmd->vmd_oom)
		return;

	vmd->vmd_oom = TRUE;
	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
	if (old_vote != vm_ndomains - 1)
		return;

	/*
	 * The current pagedaemon thread is the last in the quorum to
	 * start OOM.
Initiate the selection and signaling of the 1799 * victim. 1800 */ 1801 vm_pageout_oom(VM_OOM_MEM); 1802 1803 /* 1804 * After one round of OOM terror, recall our vote. On the 1805 * next pass, current pagedaemon would vote again if the low 1806 * memory condition is still there, due to vmd_oom being 1807 * false. 1808 */ 1809 vmd->vmd_oom = FALSE; 1810 atomic_subtract_int(&vm_pageout_oom_vote, 1); 1811 } 1812 1813 /* 1814 * The OOM killer is the page daemon's action of last resort when 1815 * memory allocation requests have been stalled for a prolonged period 1816 * of time because it cannot reclaim memory. This function computes 1817 * the approximate number of physical pages that could be reclaimed if 1818 * the specified address space is destroyed. 1819 * 1820 * Private, anonymous memory owned by the address space is the 1821 * principal resource that we expect to recover after an OOM kill. 1822 * Since the physical pages mapped by the address space's COW entries 1823 * are typically shared pages, they are unlikely to be released and so 1824 * they are not counted. 1825 * 1826 * To get to the point where the page daemon runs the OOM killer, its 1827 * efforts to write-back vnode-backed pages may have stalled. This 1828 * could be caused by a memory allocation deadlock in the write path 1829 * that might be resolved by an OOM kill. Therefore, physical pages 1830 * belonging to vnode-backed objects are counted, because they might 1831 * be freed without being written out first if the address space holds 1832 * the last reference to an unlinked vnode. 1833 * 1834 * Similarly, physical pages belonging to OBJT_PHYS objects are 1835 * counted because the address space might hold the last reference to 1836 * the object. 1837 */ 1838 static long 1839 vm_pageout_oom_pagecount(struct vmspace *vmspace) 1840 { 1841 vm_map_t map; 1842 vm_map_entry_t entry; 1843 vm_object_t obj; 1844 long res; 1845 1846 map = &vmspace->vm_map; 1847 KASSERT(!map->system_map, ("system map")); 1848 sx_assert(&map->lock, SA_LOCKED); 1849 res = 0; 1850 VM_MAP_ENTRY_FOREACH(entry, map) { 1851 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 1852 continue; 1853 obj = entry->object.vm_object; 1854 if (obj == NULL) 1855 continue; 1856 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 1857 obj->ref_count != 1) 1858 continue; 1859 if (obj->type == OBJT_PHYS || obj->type == OBJT_VNODE || 1860 (obj->flags & OBJ_SWAP) != 0) 1861 res += obj->resident_page_count; 1862 } 1863 return (res); 1864 } 1865 1866 static int vm_oom_ratelim_last; 1867 static int vm_oom_pf_secs = 10; 1868 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0, 1869 ""); 1870 static struct mtx vm_oom_ratelim_mtx; 1871 1872 void 1873 vm_pageout_oom(int shortage) 1874 { 1875 const char *reason; 1876 struct proc *p, *bigproc; 1877 vm_offset_t size, bigsize; 1878 struct thread *td; 1879 struct vmspace *vm; 1880 int now; 1881 bool breakout; 1882 1883 /* 1884 * For OOM requests originating from vm_fault(), there is a high 1885 * chance that a single large process faults simultaneously in 1886 * several threads. Also, on an active system running many 1887 * processes of middle-size, like buildworld, all of them 1888 * could fault almost simultaneously as well. 1889 * 1890 * To avoid killing too many processes, rate-limit OOMs 1891 * initiated by vm_fault() time-outs on the waits for free 1892 * pages. 
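 *
 * With the default vm_oom_pf_secs of 10, a VM_OOM_MEM_PF request is
 * simply dropped if fewer than ten seconds have elapsed since the
 * previous OOM pass; VM_OOM_MEM and VM_OOM_SWAPZ requests are never
 * rate-limited here.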
1893 */ 1894 mtx_lock(&vm_oom_ratelim_mtx); 1895 now = ticks; 1896 if (shortage == VM_OOM_MEM_PF && 1897 (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) { 1898 mtx_unlock(&vm_oom_ratelim_mtx); 1899 return; 1900 } 1901 vm_oom_ratelim_last = now; 1902 mtx_unlock(&vm_oom_ratelim_mtx); 1903 1904 /* 1905 * We keep the process bigproc locked once we find it to keep anyone 1906 * from messing with it; however, there is a possibility of 1907 * deadlock if process B is bigproc and one of its child processes 1908 * attempts to propagate a signal to B while we are waiting for A's 1909 * lock while walking this list. To avoid this, we don't block on 1910 * the process lock but just skip a process if it is already locked. 1911 */ 1912 bigproc = NULL; 1913 bigsize = 0; 1914 sx_slock(&allproc_lock); 1915 FOREACH_PROC_IN_SYSTEM(p) { 1916 PROC_LOCK(p); 1917 1918 /* 1919 * If this is a system, protected or killed process, skip it. 1920 */ 1921 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 1922 P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 1923 p->p_pid == 1 || P_KILLED(p) || 1924 (p->p_pid < 48 && swap_pager_avail != 0)) { 1925 PROC_UNLOCK(p); 1926 continue; 1927 } 1928 /* 1929 * If the process is in a non-running type state, 1930 * don't touch it. Check all the threads individually. 1931 */ 1932 breakout = false; 1933 FOREACH_THREAD_IN_PROC(p, td) { 1934 thread_lock(td); 1935 if (!TD_ON_RUNQ(td) && 1936 !TD_IS_RUNNING(td) && 1937 !TD_IS_SLEEPING(td) && 1938 !TD_IS_SUSPENDED(td)) { 1939 thread_unlock(td); 1940 breakout = true; 1941 break; 1942 } 1943 thread_unlock(td); 1944 } 1945 if (breakout) { 1946 PROC_UNLOCK(p); 1947 continue; 1948 } 1949 /* 1950 * get the process size 1951 */ 1952 vm = vmspace_acquire_ref(p); 1953 if (vm == NULL) { 1954 PROC_UNLOCK(p); 1955 continue; 1956 } 1957 _PHOLD(p); 1958 PROC_UNLOCK(p); 1959 sx_sunlock(&allproc_lock); 1960 if (!vm_map_trylock_read(&vm->vm_map)) { 1961 vmspace_free(vm); 1962 sx_slock(&allproc_lock); 1963 PRELE(p); 1964 continue; 1965 } 1966 size = vmspace_swap_count(vm); 1967 if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF) 1968 size += vm_pageout_oom_pagecount(vm); 1969 vm_map_unlock_read(&vm->vm_map); 1970 vmspace_free(vm); 1971 sx_slock(&allproc_lock); 1972 1973 /* 1974 * If this process is bigger than the biggest one, 1975 * remember it. 1976 */ 1977 if (size > bigsize) { 1978 if (bigproc != NULL) 1979 PRELE(bigproc); 1980 bigproc = p; 1981 bigsize = size; 1982 } else { 1983 PRELE(p); 1984 } 1985 } 1986 sx_sunlock(&allproc_lock); 1987 1988 if (bigproc != NULL) { 1989 switch (shortage) { 1990 case VM_OOM_MEM: 1991 reason = "failed to reclaim memory"; 1992 break; 1993 case VM_OOM_MEM_PF: 1994 reason = "a thread waited too long to allocate a page"; 1995 break; 1996 case VM_OOM_SWAPZ: 1997 reason = "out of swap space"; 1998 break; 1999 default: 2000 panic("unknown OOM reason %d", shortage); 2001 } 2002 if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0) 2003 panic("%s", reason); 2004 PROC_LOCK(bigproc); 2005 killproc(bigproc, reason); 2006 sched_nice(bigproc, PRIO_MIN); 2007 _PRELE(bigproc); 2008 PROC_UNLOCK(bigproc); 2009 } 2010 } 2011 2012 /* 2013 * Signal a free page shortage to subsystems that have registered an event 2014 * handler. Reclaim memory from UMA in the event of a severe shortage. 2015 * Return true if the free page count should be re-evaluated. 
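 *
 * The vm_lowmem eventhandlers and the UMA trim run at most once every
 * lowmem_period seconds (10 by default); the atomic compare-and-set on
 * lowmem_ticks ensures that only one pagedaemon thread fires them per
 * period, no matter how many domains are short at the same time.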
2016 */ 2017 static bool 2018 vm_pageout_lowmem(void) 2019 { 2020 static int lowmem_ticks = 0; 2021 int last; 2022 bool ret; 2023 2024 ret = false; 2025 2026 last = atomic_load_int(&lowmem_ticks); 2027 while ((u_int)(ticks - last) / hz >= lowmem_period) { 2028 if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0) 2029 continue; 2030 2031 /* 2032 * Decrease registered cache sizes. 2033 */ 2034 SDT_PROBE0(vm, , , vm__lowmem_scan); 2035 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES); 2036 2037 /* 2038 * We do this explicitly after the caches have been 2039 * drained above. 2040 */ 2041 uma_reclaim(UMA_RECLAIM_TRIM); 2042 ret = true; 2043 break; 2044 } 2045 2046 /* 2047 * Kick off an asynchronous reclaim of cached memory if one of the 2048 * page daemons is failing to keep up with demand. Use the "severe" 2049 * threshold instead of "min" to ensure that we do not blow away the 2050 * caches if a subset of the NUMA domains are depleted by kernel memory 2051 * allocations; the domainset iterators automatically skip domains 2052 * below the "min" threshold on the first pass. 2053 * 2054 * UMA reclaim worker has its own rate-limiting mechanism, so don't 2055 * worry about kicking it too often. 2056 */ 2057 if (vm_page_count_severe()) 2058 uma_reclaim_wakeup(); 2059 2060 return (ret); 2061 } 2062 2063 static void 2064 vm_pageout_worker(void *arg) 2065 { 2066 struct vm_domain *vmd; 2067 u_int ofree; 2068 int addl_shortage, domain, shortage; 2069 bool target_met; 2070 2071 domain = (uintptr_t)arg; 2072 vmd = VM_DOMAIN(domain); 2073 shortage = 0; 2074 target_met = true; 2075 2076 /* 2077 * XXXKIB It could be useful to bind pageout daemon threads to 2078 * the cores belonging to the domain, from which vm_page_array 2079 * is allocated. 2080 */ 2081 2082 KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 2083 vmd->vmd_last_active_scan = ticks; 2084 2085 /* 2086 * The pageout daemon worker is never done, so loop forever. 2087 */ 2088 while (TRUE) { 2089 vm_domain_pageout_lock(vmd); 2090 2091 /* 2092 * We need to clear wanted before we check the limits. This 2093 * prevents races with wakers who will check wanted after they 2094 * reach the limit. 2095 */ 2096 atomic_store_int(&vmd->vmd_pageout_wanted, 0); 2097 2098 /* 2099 * Might the page daemon need to run again? 2100 */ 2101 if (vm_paging_needed(vmd, vmd->vmd_free_count)) { 2102 /* 2103 * Yes. If the scan failed to produce enough free 2104 * pages, sleep uninterruptibly for some time in the 2105 * hope that the laundry thread will clean some pages. 2106 */ 2107 vm_domain_pageout_unlock(vmd); 2108 if (!target_met) 2109 pause("pwait", hz / VM_INACT_SCAN_RATE); 2110 } else { 2111 /* 2112 * No, sleep until the next wakeup or until pages 2113 * need to have their reference stats updated. 2114 */ 2115 if (mtx_sleep(&vmd->vmd_pageout_wanted, 2116 vm_domain_pageout_lockptr(vmd), PDROP | PVM, 2117 "psleep", hz / VM_INACT_SCAN_RATE) == 0) 2118 VM_CNT_INC(v_pdwakeups); 2119 } 2120 2121 /* Prevent spurious wakeups by ensuring that wanted is set. */ 2122 atomic_store_int(&vmd->vmd_pageout_wanted, 1); 2123 2124 /* 2125 * Use the controller to calculate how many pages to free in 2126 * this interval, and scan the inactive queue. If the lowmem 2127 * handlers appear to have freed up some pages, subtract the 2128 * difference from the inactive queue scan target. 
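 *
 * pidctrl_daemon() compares vmd_free_count against the vmd_free_target
 * setpoint configured in vm_pageout_init_domain(); a positive return
 * value is the number of pages this interval's inactive scan should try
 * to free, while zero or less means the domain is at or above target
 * and only the active queue needs attention.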
2129 */ 2130 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count); 2131 if (shortage > 0) { 2132 ofree = vmd->vmd_free_count; 2133 if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree) 2134 shortage -= min(vmd->vmd_free_count - ofree, 2135 (u_int)shortage); 2136 target_met = vm_pageout_inactive(vmd, shortage, 2137 &addl_shortage); 2138 } else 2139 addl_shortage = 0; 2140 2141 /* 2142 * Scan the active queue. A positive value for shortage 2143 * indicates that we must aggressively deactivate pages to avoid 2144 * a shortfall. 2145 */ 2146 shortage = vm_pageout_active_target(vmd) + addl_shortage; 2147 vm_pageout_scan_active(vmd, shortage); 2148 } 2149 } 2150 2151 /* 2152 * vm_pageout_helper runs additional pageout daemons in times of high paging 2153 * activity. 2154 */ 2155 static void 2156 vm_pageout_helper(void *arg) 2157 { 2158 struct vm_domain *vmd; 2159 int domain; 2160 2161 domain = (uintptr_t)arg; 2162 vmd = VM_DOMAIN(domain); 2163 2164 vm_domain_pageout_lock(vmd); 2165 for (;;) { 2166 msleep(&vmd->vmd_inactive_shortage, 2167 vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0); 2168 blockcount_release(&vmd->vmd_inactive_starting, 1); 2169 2170 vm_domain_pageout_unlock(vmd); 2171 vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage); 2172 vm_domain_pageout_lock(vmd); 2173 2174 /* 2175 * Release the running count while the pageout lock is held to 2176 * prevent wakeup races. 2177 */ 2178 blockcount_release(&vmd->vmd_inactive_running, 1); 2179 } 2180 } 2181 2182 static int 2183 get_pageout_threads_per_domain(const struct vm_domain *vmd) 2184 { 2185 unsigned total_pageout_threads, eligible_cpus, domain_cpus; 2186 2187 if (VM_DOMAIN_EMPTY(vmd->vmd_domain)) 2188 return (0); 2189 2190 /* 2191 * Semi-arbitrarily constrain pagedaemon threads to less than half the 2192 * total number of CPUs in the system as an upper limit. 2193 */ 2194 if (pageout_cpus_per_thread < 2) 2195 pageout_cpus_per_thread = 2; 2196 else if (pageout_cpus_per_thread > mp_ncpus) 2197 pageout_cpus_per_thread = mp_ncpus; 2198 2199 total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread); 2200 domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]); 2201 2202 /* Pagedaemons are not run in empty domains. */ 2203 eligible_cpus = mp_ncpus; 2204 for (unsigned i = 0; i < vm_ndomains; i++) 2205 if (VM_DOMAIN_EMPTY(i)) 2206 eligible_cpus -= CPU_COUNT(&cpuset_domain[i]); 2207 2208 /* 2209 * Assign a portion of the total pageout threads to this domain 2210 * corresponding to the fraction of pagedaemon-eligible CPUs in the 2211 * domain. In asymmetric NUMA systems, domains with more CPUs may be 2212 * allocated more threads than domains with fewer CPUs. 2213 */ 2214 return (howmany(total_pageout_threads * domain_cpus, eligible_cpus)); 2215 } 2216 2217 /* 2218 * Initialize basic pageout daemon settings. See the comment above the 2219 * definition of vm_domain for some explanation of how these thresholds are 2220 * used. 2221 */ 2222 static void 2223 vm_pageout_init_domain(int domain) 2224 { 2225 struct vm_domain *vmd; 2226 struct sysctl_oid *oid; 2227 2228 vmd = VM_DOMAIN(domain); 2229 vmd->vmd_interrupt_free_min = 2; 2230 2231 /* 2232 * v_free_reserved needs to include enough for the largest 2233 * swap pager structures plus enough for any pv_entry structs 2234 * when paging. 
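 *
 * As an illustrative example (the domain size is hypothetical): with
 * 4KB pages, a 64KB MAXBSIZE and a 1,000,000-page domain, the settings
 * below work out to pageout_free_min = 34, free_reserved ~= 1368,
 * free_min ~= 6368, free_severe ~= 3868 and free_target ~= 21368 pages,
 * i.e. a paging target of roughly 2% of the domain's memory.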
2235 */ 2236 vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE + 2237 vmd->vmd_interrupt_free_min; 2238 vmd->vmd_free_reserved = vm_pageout_page_count + 2239 vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768; 2240 vmd->vmd_free_min = vmd->vmd_page_count / 200; 2241 vmd->vmd_free_severe = vmd->vmd_free_min / 2; 2242 vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved; 2243 vmd->vmd_free_min += vmd->vmd_free_reserved; 2244 vmd->vmd_free_severe += vmd->vmd_free_reserved; 2245 vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2; 2246 if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3) 2247 vmd->vmd_inactive_target = vmd->vmd_free_count / 3; 2248 2249 /* 2250 * Set the default wakeup threshold to be 10% below the paging 2251 * target. This keeps the steady state out of shortfall. 2252 */ 2253 vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9; 2254 2255 /* 2256 * Target amount of memory to move out of the laundry queue during a 2257 * background laundering. This is proportional to the amount of system 2258 * memory. 2259 */ 2260 vmd->vmd_background_launder_target = (vmd->vmd_free_target - 2261 vmd->vmd_free_min) / 10; 2262 2263 /* Initialize the pageout daemon pid controller. */ 2264 pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE, 2265 vmd->vmd_free_target, PIDCTRL_BOUND, 2266 PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD); 2267 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO, 2268 "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2269 pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid)); 2270 2271 vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd); 2272 } 2273 2274 static void 2275 vm_pageout_init(void) 2276 { 2277 u_long freecount; 2278 int i; 2279 2280 /* 2281 * Initialize some paging parameters. 2282 */ 2283 freecount = 0; 2284 for (i = 0; i < vm_ndomains; i++) { 2285 struct vm_domain *vmd; 2286 2287 vm_pageout_init_domain(i); 2288 vmd = VM_DOMAIN(i); 2289 vm_cnt.v_free_reserved += vmd->vmd_free_reserved; 2290 vm_cnt.v_free_target += vmd->vmd_free_target; 2291 vm_cnt.v_free_min += vmd->vmd_free_min; 2292 vm_cnt.v_inactive_target += vmd->vmd_inactive_target; 2293 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min; 2294 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min; 2295 vm_cnt.v_free_severe += vmd->vmd_free_severe; 2296 freecount += vmd->vmd_free_count; 2297 } 2298 2299 /* 2300 * Set interval in seconds for active scan. We want to visit each 2301 * page at least once every ten minutes. This is to prevent worst 2302 * case paging behaviors with stale active LRU. 2303 */ 2304 if (vm_pageout_update_period == 0) 2305 vm_pageout_update_period = 600; 2306 2307 /* 2308 * Set the maximum number of user-wired virtual pages. Historically the 2309 * main source of such pages was mlock(2) and mlockall(2). Hypervisors 2310 * may also request user-wired memory. 2311 */ 2312 if (vm_page_max_user_wired == 0) 2313 vm_page_max_user_wired = 4 * freecount / 5; 2314 } 2315 2316 /* 2317 * vm_pageout is the high level pageout daemon. 
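 *
 * It runs as the first thread of the "pagedaemon" kernel process and,
 * for each non-empty domain, creates a scan worker ("dom%d"), any
 * additional inactive-scan helpers ("dom%d helper%d") and a laundry
 * thread ("laundry: dom%d"), plus a single "uma" thread for UMA
 * reclamation; the initial thread then becomes the worker for the first
 * non-empty domain.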
2318 */ 2319 static void 2320 vm_pageout(void) 2321 { 2322 struct proc *p; 2323 struct thread *td; 2324 int error, first, i, j, pageout_threads; 2325 2326 p = curproc; 2327 td = curthread; 2328 2329 mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF); 2330 swap_pager_swap_init(); 2331 for (first = -1, i = 0; i < vm_ndomains; i++) { 2332 if (VM_DOMAIN_EMPTY(i)) { 2333 if (bootverbose) 2334 printf("domain %d empty; skipping pageout\n", 2335 i); 2336 continue; 2337 } 2338 if (first == -1) 2339 first = i; 2340 else { 2341 error = kthread_add(vm_pageout_worker, 2342 (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i); 2343 if (error != 0) 2344 panic("starting pageout for domain %d: %d\n", 2345 i, error); 2346 } 2347 pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads; 2348 for (j = 0; j < pageout_threads - 1; j++) { 2349 error = kthread_add(vm_pageout_helper, 2350 (void *)(uintptr_t)i, p, NULL, 0, 0, 2351 "dom%d helper%d", i, j); 2352 if (error != 0) 2353 panic("starting pageout helper %d for domain " 2354 "%d: %d\n", j, i, error); 2355 } 2356 error = kthread_add(vm_pageout_laundry_worker, 2357 (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i); 2358 if (error != 0) 2359 panic("starting laundry for domain %d: %d", i, error); 2360 } 2361 error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma"); 2362 if (error != 0) 2363 panic("starting uma_reclaim helper, error %d\n", error); 2364 2365 snprintf(td->td_name, sizeof(td->td_name), "dom%d", first); 2366 vm_pageout_worker((void *)(uintptr_t)first); 2367 } 2368 2369 /* 2370 * Perform an advisory wakeup of the page daemon. 2371 */ 2372 void 2373 pagedaemon_wakeup(int domain) 2374 { 2375 struct vm_domain *vmd; 2376 2377 vmd = VM_DOMAIN(domain); 2378 vm_domain_pageout_assert_unlocked(vmd); 2379 if (curproc == pageproc) 2380 return; 2381 2382 if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) { 2383 vm_domain_pageout_lock(vmd); 2384 atomic_store_int(&vmd->vmd_pageout_wanted, 1); 2385 wakeup(&vmd->vmd_pageout_wanted); 2386 vm_domain_pageout_unlock(vmd); 2387 } 2388 } 2389