/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/blockcount.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
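
/*
 * Example (illustrative, not part of the original code): the static DTrace
 * probe defined above can be observed from userspace.  Assuming the usual
 * SDT name translation, in which double underscores become dashes, a
 * one-liner such as
 *
 *	dtrace -n 'vm:::vm-lowmem-scan { trace(timestamp); }'
 *
 * should fire whenever the low-memory scan probe is hit.
 */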

/* Pagedaemon activity rates, in subdivisions of one second. */
#define	VM_LAUNDER_RATE		10
#define	VM_INACT_SCAN_RATE	10

static int swapdev_enabled;
int vm_pageout_page_count = 32;

static int vm_panic_on_oom = 0;
SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
    CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
    "Panic on the given number of out-of-memory errors instead of "
    "killing the largest process");

static int vm_pageout_update_period;
SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
    CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
    "Maximum active LRU update period");

static int pageout_cpus_per_thread = 16;
SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
    &pageout_cpus_per_thread, 0,
    "Number of CPUs per pagedaemon worker thread");

static int lowmem_period = 10;
SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
    "Low memory callback period");

static int disable_swap_pageouts;
SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
    CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
    "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
    CTLFLAG_RD, &pageout_lock_miss, 0,
    "vget() lock misses during pageout");

static int vm_pageout_oom_seq = 12;
SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
    CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
    "back-to-back calls to oom detector to start OOM");

static int act_scan_laundry_weight = 3;
SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
    &act_scan_laundry_weight, 0,
    "weight given to clean vs. dirty pages in active queue scans");

static u_int vm_background_launder_rate = 4096;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
    &vm_background_launder_rate, 0,
    "background laundering rate, in kilobytes per second");

static u_int vm_background_launder_max = 20 * 1024;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
    &vm_background_launder_max, 0,
    "background laundering cap, in kilobytes");
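
/*
 * Example (illustrative, not part of the original code): the knobs above are
 * exposed under the "vm" sysctl tree, so they can be read, and for the
 * CTLFLAG_RW/CTLFLAG_RWTUN entries written, at runtime, e.g.:
 *
 *	sysctl vm.pageout_update_period
 *	sysctl vm.background_launder_rate=8192
 *
 * The *TUN variants may also be preset as loader tunables.
 */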

u_long vm_page_max_user_wired;
SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
    &vm_page_max_user_wired, 0,
    "system-wide limit to user-wired page count");

static u_int isqrt(u_int num);
static int vm_pageout_launder(struct vm_domain *vmd, int launder,
    bool in_shortfall);
static void vm_pageout_laundry_worker(void *arg);

struct scan_state {
	struct vm_batchqueue bq;
	struct vm_pagequeue *pq;
	vm_page_t	marker;
	int		maxscan;
	int		scanned;
};

static void
vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
    vm_page_t marker, vm_page_t after, int maxscan)
{

	vm_pagequeue_assert_locked(pq);
	KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
	    ("marker %p already enqueued", marker));

	if (after == NULL)
		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
	else
		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
	vm_page_aflag_set(marker, PGA_ENQUEUED);

	vm_batchqueue_init(&ss->bq);
	ss->pq = pq;
	ss->marker = marker;
	ss->maxscan = maxscan;
	ss->scanned = 0;
	vm_pagequeue_unlock(pq);
}

static void
vm_pageout_end_scan(struct scan_state *ss)
{
	struct vm_pagequeue *pq;

	pq = ss->pq;
	vm_pagequeue_assert_locked(pq);
	KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
	pq->pq_pdpages += ss->scanned;
}
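
/*
 * Sketch (illustrative, not part of the original code) of how the scan_state
 * helpers above are used by the queue scans later in this file: a scan
 * brackets its work with vm_pageout_init_scan() and vm_pageout_end_scan(),
 * and pulls pages in batches with vm_pageout_next() so that the page queue
 * lock is held only while a batch is collected:
 *
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *	while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *		(process m without the queue lock held)
 *	}
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_end_scan(&ss);
 *	vm_pagequeue_unlock(pq);
 */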

/*
 * Add a small number of queued pages to a batch queue for later processing
 * without the corresponding queue lock held.  The caller must have enqueued a
 * marker page at the desired start point for the scan.  Pages will be
 * physically dequeued if the caller so requests.  Otherwise, the returned
 * batch may contain marker pages, and it is up to the caller to handle them.
 *
 * When processing the batch queue, vm_pageout_defer() must be used to
 * determine whether the page has been logically dequeued since the batch was
 * collected.
 */
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
{
	struct vm_pagequeue *pq;
	vm_page_t m, marker, n;

	marker = ss->marker;
	pq = ss->pq;

	KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	vm_pagequeue_lock(pq);
	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
	    m = n, ss->scanned++) {
		n = TAILQ_NEXT(m, plinks.q);
		if ((m->flags & PG_MARKER) == 0) {
			KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
			    ("page %p not enqueued", m));
			KASSERT((m->flags & PG_FICTITIOUS) == 0,
			    ("Fictitious page %p cannot be in page queue", m));
			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
			    ("Unmanaged page %p cannot be in page queue", m));
		} else if (dequeue)
			continue;

		(void)vm_batchqueue_insert(&ss->bq, m);
		if (dequeue) {
			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
			vm_page_aflag_clear(m, PGA_ENQUEUED);
		}
	}
	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
	if (__predict_true(m != NULL))
		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
	else
		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
	if (dequeue)
		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
	vm_pagequeue_unlock(pq);
}

/*
 * Return the next page to be scanned, or NULL if the scan is complete.
 */
static __always_inline vm_page_t
vm_pageout_next(struct scan_state *ss, const bool dequeue)
{

	if (ss->bq.bq_cnt == 0)
		vm_pageout_collect_batch(ss, dequeue);
	return (vm_batchqueue_pop(&ss->bq));
}

/*
 * Determine whether processing of a page should be deferred and ensure that
 * any outstanding queue operations are processed.
 */
static __always_inline bool
vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
{
	vm_page_astate_t as;

	as = vm_page_astate_load(m);
	if (__predict_false(as.queue != queue ||
	    ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
		return (true);
	if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
		vm_page_pqbatch_submit(m, queue);
		return (true);
	}
	return (false);
}

/*
 * We can cluster only if the page is not clean, busy, or held, and the page is
 * in the laundry queue.
 */
static bool
vm_pageout_flushable(vm_page_t m)
{
	if (vm_page_tryxbusy(m) == 0)
		return (false);
	if (!vm_page_wired(m)) {
		vm_page_test_dirty(m);
		if (m->dirty != 0 && vm_page_in_laundry(m) &&
		    vm_page_try_remove_write(m))
			return (true);
	}
	vm_page_xunbusy(m);
	return (false);
}
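
/*
 * Illustrative note (not part of the original code): vm_pageout_cluster()
 * below collects its cluster in a window of 2 * vm_pageout_page_count - 1
 * page pointers centered on the target page.  With the default
 * vm_pageout_page_count of 32, mc[] has 63 slots and the target page starts
 * at index 31; the backward scan grows the cluster toward lower pindexes by
 * decrementing page_base, and the forward scan grows it toward higher
 * pindexes, so at most vm_pageout_page_count pages are flushed in one call.
 */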

/*
 * Scan for pages at adjacent offsets within the given page's object that are
 * eligible for laundering, form a cluster of these pages and the given page,
 * and launder that cluster.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
	vm_page_t mc[2 * vm_pageout_page_count - 1];
	int alignment, num_ends, page_base, pageout_count;

	VM_OBJECT_ASSERT_WLOCKED(m->object);

	vm_page_assert_xbusied(m);

	alignment = m->pindex % vm_pageout_page_count;
	num_ends = 0;
	page_base = nitems(mc) / 2;
	pageout_count = 1;
	mc[page_base] = m;

	/*
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	m = mc[page_base];
	while (pageout_count < vm_pageout_page_count) {
		/*
		 * If we are at an alignment boundary, and haven't reached the
		 * last flushable page forward, stop here, and switch
		 * directions.
		 */
		if (alignment == pageout_count - 1 && num_ends == 0)
			break;

		m = vm_page_prev(m);
		if (m == NULL || !vm_pageout_flushable(m)) {
			num_ends++;
			break;
		}
		mc[--page_base] = m;
		++pageout_count;
	}
	m = mc[page_base + pageout_count - 1];
	while (num_ends != 2 && pageout_count < vm_pageout_page_count) {
		m = vm_page_next(m);
		if (m == NULL || !vm_pageout_flushable(m)) {
			if (num_ends++ == 0)
				/* Resume the reverse scan. */
				goto more;
			break;
		}
		mc[page_base + pageout_count] = m;
		++pageout_count;
	}

	return (vm_pageout_flush(&mc[page_base], pageout_count,
	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 *	for any page in runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Mark the pages shared busy and verify that they're
	 * valid and read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(vm_page_all_valid(mc[i]),
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
		    ("vm_pageout_flush: writeable page %p", mc[i]));
		vm_page_busy_downgrade(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			/*
			 * The page may have moved since laundering started, in
			 * which case it should be left alone.
			 */
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			/* FALLTHROUGH */
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * The page is outside the object's range.  We pretend
			 * that the page out worked and clean the page, so the
			 * changes will be lost if the page is reclaimed by
			 * the page daemon.
			 */
			vm_page_undirty(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out to swap because the
			 * pager wasn't able to find space, place the page in
			 * the PQ_UNSWAPPABLE holding queue.  This is an
			 * optimization that prevents the page daemon from
			 * wasting CPU cycles on pages that cannot be reclaimed
			 * because no swap device is configured.
			 *
			 * Otherwise, reactivate the page so that it doesn't
			 * clog the laundry and inactive queues.  (We will try
			 * paging it out again later.)
			 */
			if ((object->flags & OBJ_SWAP) != 0 &&
			    pageout_status[i] == VM_PAGER_FAIL) {
				vm_page_unswappable(mt);
				numpagedout++;
			} else
				vm_page_activate(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

	atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

	if (swap_pager_nswapdev() == 1)
		atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
static int
vm_pageout_clean(vm_page_t m, int *numpagedout)
{
	struct vnode *vp;
	struct mount *mp;
	vm_object_t object;
	vm_pindex_t pindex;
	int error;

	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;
	vp = NULL;
	mp = NULL;

	/*
	 * The object is already known NOT to be dead.  It
	 * is possible for the vget() to block the whole
	 * pageout daemon, but the new low-memory handling
	 * code should prevent it.
	 *
	 * We can't wait forever for the vnode lock, we might
	 * deadlock due to a vn_read() getting stuck in
	 * vm_wait while holding this vnode.  We skip the
	 * vnode if we can't get it in a reasonable amount
	 * of time.
	 */
	if (object->type == OBJT_VNODE) {
		vm_page_xunbusy(m);
		vp = object->handle;
		if (vp->v_type == VREG &&
		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			mp = NULL;
			error = EDEADLK;
			goto unlock_all;
		}
		KASSERT(mp != NULL,
		    ("vp %p with NULL v_mount", vp));
		vm_object_reference_locked(object);
		pindex = m->pindex;
		VM_OBJECT_WUNLOCK(object);
		if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
			vp = NULL;
			error = EDEADLK;
			goto unlock_mp;
		}
		VM_OBJECT_WLOCK(object);

		/*
		 * Ensure that the object and vnode were not disassociated
		 * while locks were dropped.
		 */
		if (vp->v_object != object) {
			error = ENOENT;
			goto unlock_all;
		}

		/*
		 * While the object was unlocked, the page may have been:
		 * (1) moved to a different queue,
		 * (2) reallocated to a different object,
		 * (3) reallocated to a different offset, or
		 * (4) cleaned.
		 */
		if (!vm_page_in_laundry(m) || m->object != object ||
		    m->pindex != pindex || m->dirty == 0) {
			error = ENXIO;
			goto unlock_all;
		}

		/*
		 * The page may have been busied while the object lock was
		 * released.
		 */
		if (vm_page_tryxbusy(m) == 0) {
			error = EBUSY;
			goto unlock_all;
		}
	}

	/*
	 * Remove all writeable mappings, failing if the page is wired.
	 */
	if (!vm_page_try_remove_write(m)) {
		vm_page_xunbusy(m);
		error = EBUSY;
		goto unlock_all;
	}

	/*
	 * If a page is dirty, then it is either being washed
	 * (but not yet cleaned) or it is still in the
	 * laundry.  If it is still in the laundry, then we
	 * start the cleaning operation.
	 */
	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
		error = EIO;

unlock_all:
	VM_OBJECT_WUNLOCK(object);

unlock_mp:
	if (mp != NULL) {
		if (vp != NULL)
			vput(vp);
		vm_object_deallocate(object);
		vn_finished_write(mp);
	}

	return (error);
}

/*
 * Attempt to launder the specified number of pages.
 *
 * Returns the number of pages successfully laundered.
 */
static int
vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
	struct scan_state ss;
	struct vm_pagequeue *pq;
	vm_object_t object;
	vm_page_t m, marker;
	vm_page_astate_t new, old;
	int act_delta, error, numpagedout, queue, refs, starting_target;
	int vnodes_skipped;
	bool pageout_ok;

	object = NULL;
	starting_target = launder;
	vnodes_skipped = 0;

	/*
	 * Scan the laundry queues for pages eligible to be laundered.  We stop
	 * once the target number of dirty pages have been laundered, or once
	 * we've reached the end of the queue.  A single iteration of this loop
	 * may cause more than one page to be laundered because of clustering.
	 *
	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
	 * swap devices are configured.
	 */
	if (atomic_load_acq_int(&swapdev_enabled))
		queue = PQ_UNSWAPPABLE;
	else
		queue = PQ_LAUNDRY;

scan:
	marker = &vmd->vmd_markers[queue];
	pq = &vmd->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
		if (__predict_false((m->flags & PG_MARKER) != 0))
			continue;

		/*
		 * Don't touch a page that was removed from the queue after the
		 * page queue lock was released.  Otherwise, ensure that any
		 * pending queue operations, such as dequeues for wired pages,
		 * are handled.
		 */
		if (vm_pageout_defer(m, queue, true))
			continue;

		/*
		 * Lock the page's object.
		 */
		if (object == NULL || object != m->object) {
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);
			object = atomic_load_ptr(&m->object);
			if (__predict_false(object == NULL))
				/* The page is being freed by another thread. */
				continue;

			/* Depends on type-stability. */
			VM_OBJECT_WLOCK(object);
			if (__predict_false(m->object != object)) {
				VM_OBJECT_WUNLOCK(object);
				object = NULL;
				continue;
			}
		}

		if (vm_page_tryxbusy(m) == 0)
			continue;

		/*
		 * Check for wirings now that we hold the object lock and have
		 * exclusively busied the page.  If the page is mapped, it may
		 * still be wired by pmap lookups.  The call to
		 * vm_page_try_remove_all() below atomically checks for such
		 * wirings and removes mappings.  If the page is unmapped, the
		 * wire count is guaranteed not to increase after this check.
		 */
		if (__predict_false(vm_page_wired(m)))
			goto skip_page;

		/*
		 * Invalid pages can be easily freed.  They cannot be
		 * mapped; vm_page_free() asserts this.
		 */
		if (vm_page_none_valid(m))
			goto free_page;

		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
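
		/*
		 * Illustrative note (not part of the original code): the loop
		 * below follows the lockless page-queue state update pattern
		 * used throughout the page daemon.  The atomically loaded
		 * state ("old") is examined, a desired state ("new") is
		 * derived from it, and vm_page_pqstate_commit() tries to
		 * install it; if the commit fails because the state changed
		 * concurrently, "old" is refreshed and the computation is
		 * retried.
		 */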

		for (old = vm_page_astate_load(m);;) {
			/*
			 * Check to see if the page has been removed from the
			 * queue since the first such check.  Leave it alone if
			 * so, discarding any references collected by
			 * pmap_ts_referenced().
			 */
			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
				goto skip_page;

			new = old;
			act_delta = refs;
			if ((old.flags & PGA_REFERENCED) != 0) {
				new.flags &= ~PGA_REFERENCED;
				act_delta++;
			}
			if (act_delta == 0) {
				;
			} else if (object->ref_count != 0) {
				/*
				 * Increase the activation count if the page was
				 * referenced while in the laundry queue.  This
				 * makes it less likely that the page will be
				 * returned prematurely to the laundry queue.
				 */
				new.act_count += ACT_ADVANCE +
				    act_delta;
				if (new.act_count > ACT_MAX)
					new.act_count = ACT_MAX;

				new.flags &= ~PGA_QUEUE_OP_MASK;
				new.flags |= PGA_REQUEUE;
				new.queue = PQ_ACTIVE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;

				/*
				 * If this was a background laundering, count
				 * activated pages towards our target.  The
				 * purpose of background laundering is to ensure
				 * that pages are eventually cycled through the
				 * laundry queue, and an activation is a valid
				 * way out.
				 */
				if (!in_shortfall)
					launder--;
				VM_CNT_INC(v_reactivated);
				goto skip_page;
			} else if ((object->flags & OBJ_DEAD) == 0) {
				new.flags |= PGA_REQUEUE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;
				goto skip_page;
			}
			break;
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of freeing it.  If, however, any of the page's
		 * mappings allow write access, then the page may still be
		 * modified until the last of those mappings are removed.
		 */
		if (object->ref_count != 0) {
			vm_page_test_dirty(m);
			if (m->dirty == 0 && !vm_page_try_remove_all(m))
				goto skip_page;
		}

		/*
		 * Clean pages are freed, and dirty pages are paged out unless
		 * they belong to a dead object.  Requeueing dirty pages from
		 * dead objects is pointless, as they are being paged out and
		 * freed by the thread that destroyed the object.
		 */
		if (m->dirty == 0) {
free_page:
			/*
			 * Now we are guaranteed that no other threads are
			 * manipulating the page, check for a last-second
			 * reference.
			 */
			if (vm_pageout_defer(m, queue, true))
				goto skip_page;
			vm_page_free(m);
			VM_CNT_INC(v_dfree);
		} else if ((object->flags & OBJ_DEAD) == 0) {
			if ((object->flags & OBJ_SWAP) != 0)
				pageout_ok = disable_swap_pageouts == 0;
			else
				pageout_ok = true;
			if (!pageout_ok) {
				vm_page_launder(m);
				goto skip_page;
			}

			/*
			 * Form a cluster with adjacent, dirty pages from the
			 * same object, and page out that entire cluster.
			 *
			 * The adjacent, dirty pages must also be in the
			 * laundry.  However, their mappings are not checked
			 * for new references.  Consequently, a recently
			 * referenced page may be paged out.  However, that
			 * page will not be prematurely reclaimed.  After page
			 * out, the page will be placed in the inactive queue,
			 * where any new references will be detected and the
			 * page reactivated.
			 */
			error = vm_pageout_clean(m, &numpagedout);
			if (error == 0) {
				launder -= numpagedout;
				ss.scanned += numpagedout;
			} else if (error == EDEADLK) {
				pageout_lock_miss++;
				vnodes_skipped++;
			}
			object = NULL;
		} else {
skip_page:
			vm_page_xunbusy(m);
		}
	}
	if (object != NULL) {
		VM_OBJECT_WUNLOCK(object);
		object = NULL;
	}
	vm_pagequeue_lock(pq);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);

	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
		queue = PQ_LAUNDRY;
		goto scan;
	}

	/*
	 * Wakeup the sync daemon if we skipped a vnode in a writeable object
	 * and we didn't launder enough pages.
	 */
	if (vnodes_skipped > 0 && launder > 0)
		(void)speedup_syncer();

	return (starting_target - launder);
}

/*
 * Compute the integer square root.
 */
static u_int
isqrt(u_int num)
{
	u_int bit, root, tmp;

	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
	root = 0;
	while (bit != 0) {
		tmp = root + bit;
		root >>= 1;
		if (num >= tmp) {
			num -= tmp;
			root += bit;
		}
		bit >>= 2;
	}
	return (root);
}
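
/*
 * Illustrative note (not part of the original code): isqrt() above is the
 * classic digit-by-digit (base-4) integer square root.  "bit" starts at the
 * largest power of four that does not exceed the argument and is shifted
 * right by two bits per iteration, contributing one bit of the result each
 * time; the return value is the floor of the true square root, e.g.
 * isqrt(5) == 2 and isqrt(16) == 4.
 */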

/*
 * Perform the work of the laundry thread: periodically wake up and determine
 * whether any pages need to be laundered.  If so, determine the number of pages
 * that need to be laundered, and launder them.
 */
static void
vm_pageout_laundry_worker(void *arg)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	uint64_t nclean, ndirty, nfreed;
	int domain, last_target, launder, shortfall, shortfall_cycle, target;
	bool in_shortfall;

	domain = (uintptr_t)arg;
	vmd = VM_DOMAIN(domain);
	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));

	shortfall = 0;
	in_shortfall = false;
	shortfall_cycle = 0;
	last_target = target = 0;
	nfreed = 0;

	/*
	 * Calls to these handlers are serialized by the swap syscall lock.
	 */
	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
	    EVENTHANDLER_PRI_ANY);
	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
	    EVENTHANDLER_PRI_ANY);

	/*
	 * The pageout laundry worker is never done, so loop forever.
	 */
	for (;;) {
		KASSERT(target >= 0, ("negative target %d", target));
		KASSERT(shortfall_cycle >= 0,
		    ("negative cycle %d", shortfall_cycle));
		launder = 0;

		/*
		 * First determine whether we need to launder pages to meet a
		 * shortage of free pages.
		 */
		if (shortfall > 0) {
			in_shortfall = true;
			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
			target = shortfall;
		} else if (!in_shortfall)
			goto trybackground;
		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
			/*
			 * We recently entered shortfall and began laundering
			 * pages.  If we have completed that laundering run
			 * (and we are no longer in shortfall) or we have met
			 * our laundry target through other activity, then we
			 * can stop laundering pages.
			 */
			in_shortfall = false;
			target = 0;
			goto trybackground;
		}
		launder = target / shortfall_cycle--;
		goto dolaundry;

		/*
		 * There's no immediate need to launder any pages; see if we
		 * meet the conditions to perform background laundering:
		 *
		 * 1. The ratio of dirty to clean inactive pages exceeds the
		 *    background laundering threshold, or
		 * 2. we haven't yet reached the target of the current
		 *    background laundering run.
		 *
		 * The background laundering threshold is not a constant.
		 * Instead, it is a slowly growing function of the number of
		 * clean pages freed by the page daemon since the last
		 * background laundering.  Thus, as the ratio of dirty to
		 * clean inactive pages grows, the amount of memory pressure
		 * required to trigger laundering decreases.  We ensure
		 * that the threshold is non-zero after an inactive queue
		 * scan, even if that scan failed to free a single clean page.
		 */
trybackground:
		nclean = vmd->vmd_free_count +
		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
			target = vmd->vmd_background_launder_target;
		}
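
		/*
		 * Worked example (illustrative, not part of the original code;
		 * numbers are hypothetical): right after a background run
		 * (nfreed == 0) the multiplier above is
		 * isqrt(howmany(1, free_target - free_min)) == 1, so a new run
		 * starts only once ndirty >= nclean.  Once the page daemon has
		 * freed roughly four times (free_target - free_min) clean
		 * pages, the multiplier becomes isqrt(5) == 2 and a run starts
		 * already at ndirty >= nclean / 2.
		 */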

		/*
		 * We have a non-zero background laundering target.  If we've
		 * laundered up to our maximum without observing a page daemon
		 * request, just stop.  This is a safety belt that ensures we
		 * don't launder an excessive amount if memory pressure is low
		 * and the ratio of dirty to clean pages is large.  Otherwise,
		 * proceed at the background laundering rate.
		 */
		if (target > 0) {
			if (nfreed > 0) {
				nfreed = 0;
				last_target = target;
			} else if (last_target - target >=
			    vm_background_launder_max * PAGE_SIZE / 1024) {
				target = 0;
			}
			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
			launder /= VM_LAUNDER_RATE;
			if (launder > target)
				launder = target;
		}

dolaundry:
		if (launder > 0) {
			/*
			 * Because of I/O clustering, the number of laundered
			 * pages could exceed "target" by the maximum size of
			 * a cluster minus one.
			 */
			target -= min(vm_pageout_launder(vmd, launder,
			    in_shortfall), target);
			pause("laundp", hz / VM_LAUNDER_RATE);
		}

		/*
		 * If we're not currently laundering pages and the page daemon
		 * hasn't posted a new request, sleep until the page daemon
		 * kicks us.
		 */
		vm_pagequeue_lock(pq);
		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
			(void)mtx_sleep(&vmd->vmd_laundry_request,
			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);

		/*
		 * If the pagedaemon has indicated that it's in shortfall, start
		 * a shortfall laundering unless we're already in the middle of
		 * one.  This may preempt a background laundering.
		 */
		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
		    (!in_shortfall || shortfall_cycle == 0)) {
			shortfall = vm_laundry_target(vmd) +
			    vmd->vmd_pageout_deficit;
			target = 0;
		} else
			shortfall = 0;

		if (target == 0)
			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
		nfreed += vmd->vmd_clean_pages_freed;
		vmd->vmd_clean_pages_freed = 0;
		vm_pagequeue_unlock(pq);
	}
}

/*
 * Compute the number of pages we want to try to move from the
 * active queue to either the inactive or laundry queue.
 *
 * When scanning active pages during a shortage, we make clean pages
 * count more heavily towards the page shortage than dirty pages.
 * This is because dirty pages must be laundered before they can be
 * reused and thus have less utility when attempting to quickly
 * alleviate a free page shortage.  However, this weighting also
 * causes the scan to deactivate dirty pages more aggressively,
 * improving the effectiveness of clustering.
 */
static int
vm_pageout_active_target(struct vm_domain *vmd)
{
	int shortage;

	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
	shortage *= act_scan_laundry_weight;
	return (shortage);
}

/*
 * Scan the active queue.  If there is no shortage of inactive pages, scan a
 * small portion of the queue in order to maintain quasi-LRU.
 */
1137be37ee79SMark Johnston */
1138be37ee79SMark Johnston static void
1139be37ee79SMark Johnston vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1140be37ee79SMark Johnston {
1141be37ee79SMark Johnston struct scan_state ss;
1142fee2a2faSMark Johnston vm_object_t object;
1143be37ee79SMark Johnston vm_page_t m, marker;
1144be37ee79SMark Johnston struct vm_pagequeue *pq;
1145f3f38e25SMark Johnston vm_page_astate_t old, new;
1146be37ee79SMark Johnston long min_scan;
1147f3f38e25SMark Johnston int act_delta, max_scan, ps_delta, refs, scan_tick;
1148f3f38e25SMark Johnston uint8_t nqueue;
1149be37ee79SMark Johnston
1150be37ee79SMark Johnston marker = &vmd->vmd_markers[PQ_ACTIVE];
1151be37ee79SMark Johnston pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1152be37ee79SMark Johnston vm_pagequeue_lock(pq);
1153be37ee79SMark Johnston
1154be37ee79SMark Johnston /*
1155be37ee79SMark Johnston * If we're just idle polling, attempt to visit every
1156be37ee79SMark Johnston * active page within 'update_period' seconds.
1157be37ee79SMark Johnston */
1158be37ee79SMark Johnston scan_tick = ticks;
1159be37ee79SMark Johnston if (vm_pageout_update_period != 0) {
1160be37ee79SMark Johnston min_scan = pq->pq_cnt;
1161be37ee79SMark Johnston min_scan *= scan_tick - vmd->vmd_last_active_scan;
1162be37ee79SMark Johnston min_scan /= hz * vm_pageout_update_period;
1163be37ee79SMark Johnston } else
1164be37ee79SMark Johnston min_scan = 0;
1165be37ee79SMark Johnston if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1166be37ee79SMark Johnston vmd->vmd_last_active_scan = scan_tick;
1167be37ee79SMark Johnston
1168be37ee79SMark Johnston /*
1169be37ee79SMark Johnston * Scan the active queue for pages that can be deactivated. Update
1170be37ee79SMark Johnston * the per-page activity counter and use it to identify deactivation
1171be37ee79SMark Johnston * candidates. Held pages may be deactivated.
1172be37ee79SMark Johnston *
1173be37ee79SMark Johnston * To avoid requeuing each page that remains in the active queue, we
11747bb4634eSMark Johnston * implement the CLOCK algorithm. To keep the implementation of the
11757bb4634eSMark Johnston * enqueue operation consistent for all page queues, we use two hands,
11767bb4634eSMark Johnston * represented by marker pages. Scans begin at the first hand, which
11777bb4634eSMark Johnston * precedes the second hand in the queue. When the two hands meet,
11787bb4634eSMark Johnston * they are moved back to the head and tail of the queue, respectively,
11797bb4634eSMark Johnston * and scanning resumes.
1180be37ee79SMark Johnston */
1181be37ee79SMark Johnston max_scan = page_shortage > 0 ?
pq->pq_cnt : min_scan; 1182be37ee79SMark Johnston act_scan: 1183be37ee79SMark Johnston vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan); 1184be37ee79SMark Johnston while ((m = vm_pageout_next(&ss, false)) != NULL) { 1185be37ee79SMark Johnston if (__predict_false(m == &vmd->vmd_clock[1])) { 1186be37ee79SMark Johnston vm_pagequeue_lock(pq); 1187be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1188be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q); 1189be37ee79SMark Johnston TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0], 1190be37ee79SMark Johnston plinks.q); 1191be37ee79SMark Johnston TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1], 1192be37ee79SMark Johnston plinks.q); 1193be37ee79SMark Johnston max_scan -= ss.scanned; 1194be37ee79SMark Johnston vm_pageout_end_scan(&ss); 1195be37ee79SMark Johnston goto act_scan; 1196be37ee79SMark Johnston } 1197be37ee79SMark Johnston if (__predict_false((m->flags & PG_MARKER) != 0)) 1198be37ee79SMark Johnston continue; 1199be37ee79SMark Johnston 1200e8bcf696SMark Johnston /* 1201b7f30bffSMark Johnston * Don't touch a page that was removed from the queue after the 1202b7f30bffSMark Johnston * page queue lock was released. Otherwise, ensure that any 1203b7f30bffSMark Johnston * pending queue operations, such as dequeues for wired pages, 1204b7f30bffSMark Johnston * are handled. 1205e8bcf696SMark Johnston */ 1206b7f30bffSMark Johnston if (vm_pageout_defer(m, PQ_ACTIVE, true)) 1207e8bcf696SMark Johnston continue; 1208e8bcf696SMark Johnston 1209e8bcf696SMark Johnston /* 1210e8bcf696SMark Johnston * A page's object pointer may be set to NULL before 1211e8bcf696SMark Johnston * the object lock is acquired. 1212e8bcf696SMark Johnston */ 121323ed568cSMateusz Guzik object = atomic_load_ptr(&m->object); 1214fee2a2faSMark Johnston if (__predict_false(object == NULL)) 1215fee2a2faSMark Johnston /* 1216fee2a2faSMark Johnston * The page has been removed from its object. 1217fee2a2faSMark Johnston */ 1218fee2a2faSMark Johnston continue; 1219fee2a2faSMark Johnston 1220f3f38e25SMark Johnston /* Deferred free of swap space. */ 1221f3f38e25SMark Johnston if ((m->a.flags & PGA_SWAP_FREE) != 0 && 1222f3f38e25SMark Johnston VM_OBJECT_TRYWLOCK(object)) { 1223f3f38e25SMark Johnston if (m->object == object) 1224f3f38e25SMark Johnston vm_pager_page_unswapped(m); 1225f3f38e25SMark Johnston VM_OBJECT_WUNLOCK(object); 1226f3f38e25SMark Johnston } 1227f3f38e25SMark Johnston 1228fee2a2faSMark Johnston /* 1229be37ee79SMark Johnston * Check to see "how much" the page has been used. 1230d7aeb429SAlan Cox * 1231d7aeb429SAlan Cox * Test PGA_REFERENCED after calling pmap_ts_referenced() so 1232d7aeb429SAlan Cox * that a reference from a concurrently destroyed mapping is 1233d7aeb429SAlan Cox * observed here and now. 1234b51927b7SKonstantin Belousov * 1235b51927b7SKonstantin Belousov * Perform an unsynchronized object ref count check. While 1236b51927b7SKonstantin Belousov * the page lock ensures that the page is not reallocated to 1237b51927b7SKonstantin Belousov * another object, in particular, one with unmanaged mappings 1238b51927b7SKonstantin Belousov * that cannot support pmap_ts_referenced(), two races are, 1239b51927b7SKonstantin Belousov * nonetheless, possible: 1240b51927b7SKonstantin Belousov * 1) The count was transitioning to zero, but we saw a non- 1241b51927b7SKonstantin Belousov * zero value. pmap_ts_referenced() will return zero 1242b51927b7SKonstantin Belousov * because the page is not mapped. 
1243b51927b7SKonstantin Belousov * 2) The count was transitioning to one, but we saw zero. 1244b51927b7SKonstantin Belousov * This race delays the detection of a new reference. At 1245b51927b7SKonstantin Belousov * worst, we will deactivate and reactivate the page. 1246be37ee79SMark Johnston */ 1247b51927b7SKonstantin Belousov refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; 1248be37ee79SMark Johnston 1249f3f38e25SMark Johnston old = vm_page_astate_load(m); 1250f3f38e25SMark Johnston do { 1251f3f38e25SMark Johnston /* 1252f3f38e25SMark Johnston * Check to see if the page has been removed from the 1253f3f38e25SMark Johnston * queue since the first such check. Leave it alone if 1254f3f38e25SMark Johnston * so, discarding any references collected by 1255f3f38e25SMark Johnston * pmap_ts_referenced(). 1256f3f38e25SMark Johnston */ 1257609de97eSEric van Gyzen if (__predict_false(_vm_page_queue(old) == PQ_NONE)) { 1258609de97eSEric van Gyzen ps_delta = 0; 1259f3f38e25SMark Johnston break; 1260609de97eSEric van Gyzen } 1261a8081778SJeff Roberson 1262be37ee79SMark Johnston /* 1263be37ee79SMark Johnston * Advance or decay the act_count based on recent usage. 1264be37ee79SMark Johnston */ 1265f3f38e25SMark Johnston new = old; 1266f3f38e25SMark Johnston act_delta = refs; 1267f3f38e25SMark Johnston if ((old.flags & PGA_REFERENCED) != 0) { 1268f3f38e25SMark Johnston new.flags &= ~PGA_REFERENCED; 1269f3f38e25SMark Johnston act_delta++; 1270f3f38e25SMark Johnston } 1271be37ee79SMark Johnston if (act_delta != 0) { 1272f3f38e25SMark Johnston new.act_count += ACT_ADVANCE + act_delta; 1273f3f38e25SMark Johnston if (new.act_count > ACT_MAX) 1274f3f38e25SMark Johnston new.act_count = ACT_MAX; 1275f3f38e25SMark Johnston } else { 1276f3f38e25SMark Johnston new.act_count -= min(new.act_count, 1277f3f38e25SMark Johnston ACT_DECLINE); 1278f3f38e25SMark Johnston } 1279be37ee79SMark Johnston 1280f3f38e25SMark Johnston if (new.act_count > 0) { 1281be37ee79SMark Johnston /* 1282f3f38e25SMark Johnston * Adjust the activation count and keep the page 1283f3f38e25SMark Johnston * in the active queue. The count might be left 1284f3f38e25SMark Johnston * unchanged if it is saturated. The page may 1285f3f38e25SMark Johnston * have been moved to a different queue since we 1286f3f38e25SMark Johnston * started the scan, in which case we move it 1287f3f38e25SMark Johnston * back. 1288be37ee79SMark Johnston */ 1289f3f38e25SMark Johnston ps_delta = 0; 1290f3f38e25SMark Johnston if (old.queue != PQ_ACTIVE) { 1291f7607c30SMark Johnston new.flags &= ~PGA_QUEUE_OP_MASK; 1292f7607c30SMark Johnston new.flags |= PGA_REQUEUE; 1293f7607c30SMark Johnston new.queue = PQ_ACTIVE; 1294f3f38e25SMark Johnston } 12957cdeaf33SMark Johnston } else { 1296be37ee79SMark Johnston /* 1297f3f38e25SMark Johnston * When not short for inactive pages, let dirty 1298f3f38e25SMark Johnston * pages go through the inactive queue before 1299f3f38e25SMark Johnston * moving to the laundry queue. This gives them 1300f3f38e25SMark Johnston * some extra time to be reactivated, 1301f3f38e25SMark Johnston * potentially avoiding an expensive pageout. 1302f3f38e25SMark Johnston * However, during a page shortage, the inactive 1303f3f38e25SMark Johnston * queue is necessarily small, and so dirty 1304f3f38e25SMark Johnston * pages would only spend a trivial amount of 1305f3f38e25SMark Johnston * time in the inactive queue. 
Therefore, we 1306f3f38e25SMark Johnston * might as well place them directly in the 1307f3f38e25SMark Johnston * laundry queue to reduce queuing overhead. 1308f3f38e25SMark Johnston * 1309be37ee79SMark Johnston * Calling vm_page_test_dirty() here would 1310be37ee79SMark Johnston * require acquisition of the object's write 1311be37ee79SMark Johnston * lock. However, during a page shortage, 1312f3f38e25SMark Johnston * directing dirty pages into the laundry queue 1313f3f38e25SMark Johnston * is only an optimization and not a 1314be37ee79SMark Johnston * requirement. Therefore, we simply rely on 1315f3f38e25SMark Johnston * the opportunistic updates to the page's dirty 1316f3f38e25SMark Johnston * field by the pmap. 1317be37ee79SMark Johnston */ 1318f3f38e25SMark Johnston if (page_shortage <= 0) { 1319f3f38e25SMark Johnston nqueue = PQ_INACTIVE; 1320f3f38e25SMark Johnston ps_delta = 0; 1321f3f38e25SMark Johnston } else if (m->dirty == 0) { 1322f3f38e25SMark Johnston nqueue = PQ_INACTIVE; 1323f3f38e25SMark Johnston ps_delta = act_scan_laundry_weight; 1324be37ee79SMark Johnston } else { 1325f3f38e25SMark Johnston nqueue = PQ_LAUNDRY; 1326f3f38e25SMark Johnston ps_delta = 1; 1327be37ee79SMark Johnston } 1328f3f38e25SMark Johnston 1329f7607c30SMark Johnston new.flags &= ~PGA_QUEUE_OP_MASK; 1330f3f38e25SMark Johnston new.flags |= PGA_REQUEUE; 1331f3f38e25SMark Johnston new.queue = nqueue; 1332be37ee79SMark Johnston } 1333f3f38e25SMark Johnston } while (!vm_page_pqstate_commit(m, &old, new)); 1334f3f38e25SMark Johnston 1335f3f38e25SMark Johnston page_shortage -= ps_delta; 1336be37ee79SMark Johnston } 1337be37ee79SMark Johnston vm_pagequeue_lock(pq); 1338be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1339be37ee79SMark Johnston TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q); 1340be37ee79SMark Johnston vm_pageout_end_scan(&ss); 1341be37ee79SMark Johnston vm_pagequeue_unlock(pq); 1342be37ee79SMark Johnston } 1343be37ee79SMark Johnston 13445cd29d0fSMark Johnston static int 1345f3f38e25SMark Johnston vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker, 1346f3f38e25SMark Johnston vm_page_t m) 13475cd29d0fSMark Johnston { 1348f3f38e25SMark Johnston vm_page_astate_t as; 13495cd29d0fSMark Johnston 1350f3f38e25SMark Johnston vm_pagequeue_assert_locked(pq); 1351f3f38e25SMark Johnston 1352f3f38e25SMark Johnston as = vm_page_astate_load(m); 1353f3f38e25SMark Johnston if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0) 1354e8bcf696SMark Johnston return (0); 1355e8bcf696SMark Johnston vm_page_aflag_set(m, PGA_ENQUEUED); 1356f3f38e25SMark Johnston TAILQ_INSERT_BEFORE(marker, m, plinks.q); 13575cd29d0fSMark Johnston return (1); 13585cd29d0fSMark Johnston } 13595cd29d0fSMark Johnston 13605cd29d0fSMark Johnston /* 13615cd29d0fSMark Johnston * Re-add stuck pages to the inactive queue. We will examine them again 13625cd29d0fSMark Johnston * during the next scan. If the queue state of a page has changed since 13635cd29d0fSMark Johnston * it was physically removed from the page queue in 13645cd29d0fSMark Johnston * vm_pageout_collect_batch(), don't do anything with that page. 
13655cd29d0fSMark Johnston */ 13665cd29d0fSMark Johnston static void 13675cd29d0fSMark Johnston vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq, 13685cd29d0fSMark Johnston vm_page_t m) 13695cd29d0fSMark Johnston { 13705cd29d0fSMark Johnston struct vm_pagequeue *pq; 1371f3f38e25SMark Johnston vm_page_t marker; 13725cd29d0fSMark Johnston int delta; 13735cd29d0fSMark Johnston 13745cd29d0fSMark Johnston delta = 0; 1375f3f38e25SMark Johnston marker = ss->marker; 13765cd29d0fSMark Johnston pq = ss->pq; 13775cd29d0fSMark Johnston 13785cd29d0fSMark Johnston if (m != NULL) { 13791cac76c9SAndrew Gallatin if (vm_batchqueue_insert(bq, m) != 0) 13805cd29d0fSMark Johnston return; 13815cd29d0fSMark Johnston vm_pagequeue_lock(pq); 1382f3f38e25SMark Johnston delta += vm_pageout_reinsert_inactive_page(pq, marker, m); 13835cd29d0fSMark Johnston } else 13845cd29d0fSMark Johnston vm_pagequeue_lock(pq); 13855cd29d0fSMark Johnston while ((m = vm_batchqueue_pop(bq)) != NULL) 1386f3f38e25SMark Johnston delta += vm_pageout_reinsert_inactive_page(pq, marker, m); 13875cd29d0fSMark Johnston vm_pagequeue_cnt_add(pq, delta); 13885cd29d0fSMark Johnston vm_pagequeue_unlock(pq); 13895cd29d0fSMark Johnston vm_batchqueue_init(bq); 13905cd29d0fSMark Johnston } 13915cd29d0fSMark Johnston 13920292c54bSConrad Meyer static void 13930292c54bSConrad Meyer vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage) 1394df8bae1dSRodney W. Grimes { 13950292c54bSConrad Meyer struct timeval start, end; 13965cd29d0fSMark Johnston struct scan_state ss; 13975cd29d0fSMark Johnston struct vm_batchqueue rq; 13980292c54bSConrad Meyer struct vm_page marker_page; 13995cd29d0fSMark Johnston vm_page_t m, marker; 14008d220203SAlan Cox struct vm_pagequeue *pq; 1401df8bae1dSRodney W. Grimes vm_object_t object; 1402f3f38e25SMark Johnston vm_page_astate_t old, new; 14030292c54bSConrad Meyer int act_delta, addl_page_shortage, starting_page_shortage, refs; 14040292c54bSConrad Meyer 14050292c54bSConrad Meyer object = NULL; 14060292c54bSConrad Meyer vm_batchqueue_init(&rq); 14070292c54bSConrad Meyer getmicrouptime(&start); 14080d94caffSDavid Greenman 1409df8bae1dSRodney W. Grimes /* 141001f04471SMark Johnston * The addl_page_shortage is an estimate of the number of temporarily 1411311e34e2SKonstantin Belousov * stuck pages in the inactive queue. In other words, the 1412449c2e92SKonstantin Belousov * number of pages from the inactive count that should be 1413311e34e2SKonstantin Belousov * discounted in setting the target for the active queue scan. 1414311e34e2SKonstantin Belousov */ 14159099545aSAlan Cox addl_page_shortage = 0; 14169099545aSAlan Cox 14171c7c3c6aSMatthew Dillon /* 1418f095d1bbSAlan Cox * Start scanning the inactive queue for pages that we can free. The 1419f095d1bbSAlan Cox * scan will stop when we reach the target or we have scanned the 14205cff1f4dSMark Johnston * entire queue. (Note that m->a.act_count is not used to make 1421f095d1bbSAlan Cox * decisions for the inactive queue, only for the active queue.) 
14228d220203SAlan Cox */ 14230292c54bSConrad Meyer starting_page_shortage = page_shortage; 14240292c54bSConrad Meyer marker = &marker_page; 14250292c54bSConrad Meyer vm_page_init_marker(marker, PQ_INACTIVE, 0); 14265cd29d0fSMark Johnston pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; 14278d220203SAlan Cox vm_pagequeue_lock(pq); 14285cd29d0fSMark Johnston vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 1429a216e311SRyan Libby while (page_shortage > 0) { 1430a216e311SRyan Libby /* 1431a216e311SRyan Libby * If we need to refill the scan batch queue, release any 1432a216e311SRyan Libby * optimistically held object lock. This gives someone else a 1433a216e311SRyan Libby * chance to grab the lock, and also avoids holding it while we 1434a216e311SRyan Libby * do unrelated work. 1435a216e311SRyan Libby */ 1436a216e311SRyan Libby if (object != NULL && vm_batchqueue_empty(&ss.bq)) { 1437a216e311SRyan Libby VM_OBJECT_WUNLOCK(object); 1438a216e311SRyan Libby object = NULL; 1439a216e311SRyan Libby } 1440a216e311SRyan Libby 1441a216e311SRyan Libby m = vm_pageout_next(&ss, true); 1442a216e311SRyan Libby if (m == NULL) 1443a216e311SRyan Libby break; 14445cd29d0fSMark Johnston KASSERT((m->flags & PG_MARKER) == 0, 14455cd29d0fSMark Johnston ("marker page %p was dequeued", m)); 1446df8bae1dSRodney W. Grimes 1447936524aaSMatthew Dillon /* 1448b7f30bffSMark Johnston * Don't touch a page that was removed from the queue after the 1449b7f30bffSMark Johnston * page queue lock was released. Otherwise, ensure that any 1450b7f30bffSMark Johnston * pending queue operations, such as dequeues for wired pages, 1451b7f30bffSMark Johnston * are handled. 1452936524aaSMatthew Dillon */ 1453b7f30bffSMark Johnston if (vm_pageout_defer(m, PQ_INACTIVE, false)) 1454936524aaSMatthew Dillon continue; 1455e8bcf696SMark Johnston 14569f5632e6SMark Johnston /* 14579f5632e6SMark Johnston * Lock the page's object. 14589f5632e6SMark Johnston */ 14599f5632e6SMark Johnston if (object == NULL || object != m->object) { 146060256604SMark Johnston if (object != NULL) 14615cd29d0fSMark Johnston VM_OBJECT_WUNLOCK(object); 146223ed568cSMateusz Guzik object = atomic_load_ptr(&m->object); 14639f5632e6SMark Johnston if (__predict_false(object == NULL)) 14649f5632e6SMark Johnston /* The page is being freed by another thread. */ 14659f5632e6SMark Johnston continue; 14669f5632e6SMark Johnston 1467e8bcf696SMark Johnston /* Depends on type-stability. */ 146841fd4b94SMark Johnston VM_OBJECT_WLOCK(object); 14699f5632e6SMark Johnston if (__predict_false(m->object != object)) { 14709f5632e6SMark Johnston VM_OBJECT_WUNLOCK(object); 14719f5632e6SMark Johnston object = NULL; 14729f5632e6SMark Johnston goto reinsert; 147341fd4b94SMark Johnston } 147441fd4b94SMark Johnston } 14755cd29d0fSMark Johnston 147663e97555SJeff Roberson if (vm_page_tryxbusy(m) == 0) { 1477a3aeedabSAlan Cox /* 1478a3aeedabSAlan Cox * Don't mess with busy pages. Leave them at 1479a3aeedabSAlan Cox * the front of the queue. Most likely, they 1480a3aeedabSAlan Cox * are being paged out and will leave the 1481a3aeedabSAlan Cox * queue shortly after the scan finishes. So, 1482a3aeedabSAlan Cox * they ought to be discounted from the 1483a3aeedabSAlan Cox * inactive count. 1484a3aeedabSAlan Cox */ 1485a3aeedabSAlan Cox addl_page_shortage++; 14865cd29d0fSMark Johnston goto reinsert; 148726f9a767SRodney W. Grimes } 148848cc2fc7SKonstantin Belousov 1489a8081778SJeff Roberson /* Deferred free of swap space. 
*/ 1490a8081778SJeff Roberson if ((m->a.flags & PGA_SWAP_FREE) != 0) 1491a8081778SJeff Roberson vm_pager_page_unswapped(m); 1492a8081778SJeff Roberson 149348cc2fc7SKonstantin Belousov /* 14949f5632e6SMark Johnston * Check for wirings now that we hold the object lock and have 14959f5632e6SMark Johnston * exclusively busied the page. If the page is mapped, it may 14969f5632e6SMark Johnston * still be wired by pmap lookups. The call to 1497fee2a2faSMark Johnston * vm_page_try_remove_all() below atomically checks for such 1498fee2a2faSMark Johnston * wirings and removes mappings. If the page is unmapped, the 14999f5632e6SMark Johnston * wire count is guaranteed not to increase after this check. 1500fee2a2faSMark Johnston */ 15019f5632e6SMark Johnston if (__predict_false(vm_page_wired(m))) 1502f3f38e25SMark Johnston goto skip_page; 1503fee2a2faSMark Johnston 1504fee2a2faSMark Johnston /* 15058748f58cSKonstantin Belousov * Invalid pages can be easily freed. They cannot be 15068748f58cSKonstantin Belousov * mapped, vm_page_free() asserts this. 1507776f729cSKonstantin Belousov */ 15080012f373SJeff Roberson if (vm_page_none_valid(m)) 15098748f58cSKonstantin Belousov goto free_page; 1510776f729cSKonstantin Belousov 1511b51927b7SKonstantin Belousov refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; 1512f3f38e25SMark Johnston 1513f3f38e25SMark Johnston for (old = vm_page_astate_load(m);;) { 1514776f729cSKonstantin Belousov /* 1515f3f38e25SMark Johnston * Check to see if the page has been removed from the 1516f3f38e25SMark Johnston * queue since the first such check. Leave it alone if 1517f3f38e25SMark Johnston * so, discarding any references collected by 1518f3f38e25SMark Johnston * pmap_ts_referenced(). 15197e006499SJohn Dyson */ 1520f3f38e25SMark Johnston if (__predict_false(_vm_page_queue(old) == PQ_NONE)) 1521f3f38e25SMark Johnston goto skip_page; 1522f3f38e25SMark Johnston 1523f3f38e25SMark Johnston new = old; 1524f3f38e25SMark Johnston act_delta = refs; 1525f3f38e25SMark Johnston if ((old.flags & PGA_REFERENCED) != 0) { 1526f3f38e25SMark Johnston new.flags &= ~PGA_REFERENCED; 1527d7aeb429SAlan Cox act_delta++; 15282fe6e4d7SDavid Greenman } 1529f3f38e25SMark Johnston if (act_delta == 0) { 1530f3f38e25SMark Johnston ; 1531b51927b7SKonstantin Belousov } else if (object->ref_count != 0) { 1532e8bcf696SMark Johnston /* 1533f3f38e25SMark Johnston * Increase the activation count if the 1534f3f38e25SMark Johnston * page was referenced while in the 1535f3f38e25SMark Johnston * inactive queue. This makes it less 1536f3f38e25SMark Johnston * likely that the page will be returned 1537f3f38e25SMark Johnston * prematurely to the inactive queue. 
1538e8bcf696SMark Johnston */ 1539f3f38e25SMark Johnston new.act_count += ACT_ADVANCE + 1540f3f38e25SMark Johnston act_delta; 1541f3f38e25SMark Johnston if (new.act_count > ACT_MAX) 1542f3f38e25SMark Johnston new.act_count = ACT_MAX; 1543f3f38e25SMark Johnston 1544f7607c30SMark Johnston new.flags &= ~PGA_QUEUE_OP_MASK; 1545f3f38e25SMark Johnston new.flags |= PGA_REQUEUE; 1546f3f38e25SMark Johnston new.queue = PQ_ACTIVE; 1547f3f38e25SMark Johnston if (!vm_page_pqstate_commit(m, &old, new)) 1548e8bcf696SMark Johnston continue; 1549f3f38e25SMark Johnston 1550f3f38e25SMark Johnston VM_CNT_INC(v_reactivated); 1551f3f38e25SMark Johnston goto skip_page; 1552ebcddc72SAlan Cox } else if ((object->flags & OBJ_DEAD) == 0) { 1553f3f38e25SMark Johnston new.queue = PQ_INACTIVE; 1554f3f38e25SMark Johnston new.flags |= PGA_REQUEUE; 1555f3f38e25SMark Johnston if (!vm_page_pqstate_commit(m, &old, new)) 1556f3f38e25SMark Johnston continue; 1557f3f38e25SMark Johnston goto skip_page; 1558ebcddc72SAlan Cox } 1559f3f38e25SMark Johnston break; 1560960810ccSAlan Cox } 156167bf6868SJohn Dyson 15627e006499SJohn Dyson /* 15639fc4739dSAlan Cox * If the page appears to be clean at the machine-independent 15649fc4739dSAlan Cox * layer, then remove all of its mappings from the pmap in 1565a766ffd0SAlan Cox * anticipation of freeing it. If, however, any of the page's 1566a766ffd0SAlan Cox * mappings allow write access, then the page may still be 1567a766ffd0SAlan Cox * modified until the last of those mappings are removed. 15687e006499SJohn Dyson */ 1569b51927b7SKonstantin Belousov if (object->ref_count != 0) { 15709fc4739dSAlan Cox vm_page_test_dirty(m); 15719f5632e6SMark Johnston if (m->dirty == 0 && !vm_page_try_remove_all(m)) 1572f3f38e25SMark Johnston goto skip_page; 1573fee2a2faSMark Johnston } 1574dcbcd518SBruce Evans 15756989c456SAlan Cox /* 1576ebcddc72SAlan Cox * Clean pages can be freed, but dirty pages must be sent back 1577ebcddc72SAlan Cox * to the laundry, unless they belong to a dead object. 1578ebcddc72SAlan Cox * Requeueing dirty pages from dead objects is pointless, as 1579ebcddc72SAlan Cox * they are being paged out and freed by the thread that 1580ebcddc72SAlan Cox * destroyed the object. 15816989c456SAlan Cox */ 1582ebcddc72SAlan Cox if (m->dirty == 0) { 15838748f58cSKonstantin Belousov free_page: 15845cd29d0fSMark Johnston /* 15859f5632e6SMark Johnston * Now we are guaranteed that no other threads are 15869f5632e6SMark Johnston * manipulating the page, check for a last-second 15879f5632e6SMark Johnston * reference that would save it from doom. 15885cd29d0fSMark Johnston */ 15899f5632e6SMark Johnston if (vm_pageout_defer(m, PQ_INACTIVE, false)) 15909f5632e6SMark Johnston goto skip_page; 15919f5632e6SMark Johnston 15929f5632e6SMark Johnston /* 15939f5632e6SMark Johnston * Because we dequeued the page and have already checked 15949f5632e6SMark Johnston * for pending dequeue and enqueue requests, we can 15959f5632e6SMark Johnston * safely disassociate the page from the inactive queue 15969f5632e6SMark Johnston * without holding the queue lock. 
15979f5632e6SMark Johnston */ 15985cff1f4dSMark Johnston m->a.queue = PQ_NONE; 159978afdce6SAlan Cox vm_page_free(m); 16005cd29d0fSMark Johnston page_shortage--; 160163e97555SJeff Roberson continue; 160263e97555SJeff Roberson } 160363e97555SJeff Roberson if ((object->flags & OBJ_DEAD) == 0) 1604ebcddc72SAlan Cox vm_page_launder(m); 1605f3f38e25SMark Johnston skip_page: 1606f3f38e25SMark Johnston vm_page_xunbusy(m); 16075cd29d0fSMark Johnston continue; 16085cd29d0fSMark Johnston reinsert: 16095cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, m); 16105cd29d0fSMark Johnston } 161160256604SMark Johnston if (object != NULL) 161289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 16135cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, NULL); 16145cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL); 16158d220203SAlan Cox vm_pagequeue_lock(pq); 16165cd29d0fSMark Johnston vm_pageout_end_scan(&ss); 16178d220203SAlan Cox vm_pagequeue_unlock(pq); 161826f9a767SRodney W. Grimes 16190292c54bSConrad Meyer /* 16200292c54bSConrad Meyer * Record the remaining shortage and the progress and rate it was made. 16210292c54bSConrad Meyer */ 16220292c54bSConrad Meyer atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage); 16230292c54bSConrad Meyer getmicrouptime(&end); 16240292c54bSConrad Meyer timevalsub(&end, &start); 16250292c54bSConrad Meyer atomic_add_int(&vmd->vmd_inactive_us, 16260292c54bSConrad Meyer end.tv_sec * 1000000 + end.tv_usec); 16270292c54bSConrad Meyer atomic_add_int(&vmd->vmd_inactive_freed, 16280292c54bSConrad Meyer starting_page_shortage - page_shortage); 16290292c54bSConrad Meyer } 16300292c54bSConrad Meyer 16310292c54bSConrad Meyer /* 16320292c54bSConrad Meyer * Dispatch a number of inactive threads according to load and collect the 16332913cc46SMark Johnston * results to present a coherent view of paging activity on this domain. 16340292c54bSConrad Meyer */ 16350292c54bSConrad Meyer static int 16360292c54bSConrad Meyer vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage) 16370292c54bSConrad Meyer { 16382913cc46SMark Johnston u_int freed, pps, slop, threads, us; 16390292c54bSConrad Meyer 16400292c54bSConrad Meyer vmd->vmd_inactive_shortage = shortage; 16412913cc46SMark Johnston slop = 0; 16420292c54bSConrad Meyer 16430292c54bSConrad Meyer /* 16440292c54bSConrad Meyer * If we have more work than we can do in a quarter of our interval, we 16450292c54bSConrad Meyer * fire off multiple threads to process it. 16460292c54bSConrad Meyer */ 16470292c54bSConrad Meyer threads = vmd->vmd_inactive_threads; 16482913cc46SMark Johnston if (threads > 1 && vmd->vmd_inactive_pps != 0 && 16492913cc46SMark Johnston shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) { 16500292c54bSConrad Meyer vmd->vmd_inactive_shortage /= threads; 16512913cc46SMark Johnston slop = shortage % threads; 16522913cc46SMark Johnston vm_domain_pageout_lock(vmd); 16530292c54bSConrad Meyer blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1); 16540292c54bSConrad Meyer blockcount_acquire(&vmd->vmd_inactive_running, threads - 1); 16550292c54bSConrad Meyer wakeup(&vmd->vmd_inactive_shortage); 16560292c54bSConrad Meyer vm_domain_pageout_unlock(vmd); 16570292c54bSConrad Meyer } 16580292c54bSConrad Meyer 16590292c54bSConrad Meyer /* Run the local thread scan. 
*/ 16602913cc46SMark Johnston vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop); 16610292c54bSConrad Meyer 16620292c54bSConrad Meyer /* 16630292c54bSConrad Meyer * Block until helper threads report results and then accumulate 16640292c54bSConrad Meyer * totals. 16650292c54bSConrad Meyer */ 16660292c54bSConrad Meyer blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM); 16670292c54bSConrad Meyer freed = atomic_readandclear_int(&vmd->vmd_inactive_freed); 16680292c54bSConrad Meyer VM_CNT_ADD(v_dfree, freed); 16690292c54bSConrad Meyer 16700292c54bSConrad Meyer /* 16710292c54bSConrad Meyer * Calculate the per-thread paging rate with an exponential decay of 16720292c54bSConrad Meyer * prior results. Careful to avoid integer rounding errors with large 16730292c54bSConrad Meyer * us values. 16740292c54bSConrad Meyer */ 16750292c54bSConrad Meyer us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1); 16760292c54bSConrad Meyer if (us > 1000000) 16770292c54bSConrad Meyer /* Keep rounding to tenths */ 16780292c54bSConrad Meyer pps = (freed * 10) / ((us * 10) / 1000000); 16790292c54bSConrad Meyer else 16800292c54bSConrad Meyer pps = (1000000 / us) * freed; 16810292c54bSConrad Meyer vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2); 16820292c54bSConrad Meyer 16830292c54bSConrad Meyer return (shortage - freed); 16840292c54bSConrad Meyer } 16850292c54bSConrad Meyer 16860292c54bSConrad Meyer /* 16870292c54bSConrad Meyer * Attempt to reclaim the requested number of pages from the inactive queue. 16880292c54bSConrad Meyer * Returns true if the shortage was addressed. 16890292c54bSConrad Meyer */ 16900292c54bSConrad Meyer static int 16910292c54bSConrad Meyer vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage) 16920292c54bSConrad Meyer { 16930292c54bSConrad Meyer struct vm_pagequeue *pq; 16940292c54bSConrad Meyer u_int addl_page_shortage, deficit, page_shortage; 16950292c54bSConrad Meyer u_int starting_page_shortage; 16960292c54bSConrad Meyer 16970292c54bSConrad Meyer /* 16980292c54bSConrad Meyer * vmd_pageout_deficit counts the number of pages requested in 16990292c54bSConrad Meyer * allocations that failed because of a free page shortage. We assume 17000292c54bSConrad Meyer * that the allocations will be reattempted and thus include the deficit 17010292c54bSConrad Meyer * in our scan target. 17020292c54bSConrad Meyer */ 17030292c54bSConrad Meyer deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit); 17040292c54bSConrad Meyer starting_page_shortage = shortage + deficit; 17050292c54bSConrad Meyer 17060292c54bSConrad Meyer /* 17070292c54bSConrad Meyer * Run the inactive scan on as many threads as is necessary. 17080292c54bSConrad Meyer */ 17090292c54bSConrad Meyer page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage); 17100292c54bSConrad Meyer addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage); 17115cd29d0fSMark Johnston 1712ebcddc72SAlan Cox /* 1713ebcddc72SAlan Cox * Wake up the laundry thread so that it can perform any needed 1714ebcddc72SAlan Cox * laundering. If we didn't meet our target, we're in shortfall and 1715b1fd102eSMark Johnston * need to launder more aggressively. If PQ_LAUNDRY is empty and no 1716b1fd102eSMark Johnston * swap devices are configured, the laundry thread has no work to do, so 1717b1fd102eSMark Johnston * don't bother waking it up. 
1718cb35676eSMark Johnston *
1719cb35676eSMark Johnston * The laundry thread uses the number of inactive queue scans elapsed
1720cb35676eSMark Johnston * since the last laundering to determine whether to launder again, so
1721cb35676eSMark Johnston * keep count.
1722ebcddc72SAlan Cox */
1723cb35676eSMark Johnston if (starting_page_shortage > 0) {
1724e2068d0bSJeff Roberson pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1725ebcddc72SAlan Cox vm_pagequeue_lock(pq);
1726e2068d0bSJeff Roberson if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1727cb35676eSMark Johnston (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1728ebcddc72SAlan Cox if (page_shortage > 0) {
1729e2068d0bSJeff Roberson vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
173083c9dea1SGleb Smirnoff VM_CNT_INC(v_pdshortfalls);
1731e2068d0bSJeff Roberson } else if (vmd->vmd_laundry_request !=
1732e2068d0bSJeff Roberson VM_LAUNDRY_SHORTFALL)
1733e2068d0bSJeff Roberson vmd->vmd_laundry_request =
1734e2068d0bSJeff Roberson VM_LAUNDRY_BACKGROUND;
1735e2068d0bSJeff Roberson wakeup(&vmd->vmd_laundry_request);
1736b1fd102eSMark Johnston }
173760684862SMark Johnston vmd->vmd_clean_pages_freed +=
173860684862SMark Johnston starting_page_shortage - page_shortage;
1739ebcddc72SAlan Cox vm_pagequeue_unlock(pq);
1740ebcddc72SAlan Cox }
1741ebcddc72SAlan Cox
17429452b5edSAlan Cox /*
174376386c7eSKonstantin Belousov * If the inactive queue scan fails repeatedly to meet its
174476386c7eSKonstantin Belousov * target, kill the largest process.
174576386c7eSKonstantin Belousov */
174676386c7eSKonstantin Belousov vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
174776386c7eSKonstantin Belousov
174876386c7eSKonstantin Belousov /*
1749be37ee79SMark Johnston * See the description of addl_page_shortage above.
1750be37ee79SMark Johnston */
1751be37ee79SMark Johnston *addl_shortage = addl_page_shortage + deficit;
1752be37ee79SMark Johnston
1753e57dd910SAlan Cox return (page_shortage <= 0);
17542025d69bSKonstantin Belousov }
17552025d69bSKonstantin Belousov
1756449c2e92SKonstantin Belousov static int vm_pageout_oom_vote;
1757449c2e92SKonstantin Belousov
1758449c2e92SKonstantin Belousov /*
1759449c2e92SKonstantin Belousov * The pagedaemon threads randomly select one to perform the
1760449c2e92SKonstantin Belousov * OOM. Trying to kill processes before all pagedaemons
1761449c2e92SKonstantin Belousov * have failed to reach the free target is premature.
1762449c2e92SKonstantin Belousov */ 1763449c2e92SKonstantin Belousov static void 176476386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 176576386c7eSKonstantin Belousov int starting_page_shortage) 1766449c2e92SKonstantin Belousov { 1767449c2e92SKonstantin Belousov int old_vote; 1768449c2e92SKonstantin Belousov 176976386c7eSKonstantin Belousov if (starting_page_shortage <= 0 || starting_page_shortage != 177076386c7eSKonstantin Belousov page_shortage) 177176386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 177276386c7eSKonstantin Belousov else 177376386c7eSKonstantin Belousov vmd->vmd_oom_seq++; 177476386c7eSKonstantin Belousov if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { 1775449c2e92SKonstantin Belousov if (vmd->vmd_oom) { 1776449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1777449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1778449c2e92SKonstantin Belousov } 1779449c2e92SKonstantin Belousov return; 1780449c2e92SKonstantin Belousov } 1781449c2e92SKonstantin Belousov 178276386c7eSKonstantin Belousov /* 178376386c7eSKonstantin Belousov * Do not follow the call sequence until OOM condition is 178476386c7eSKonstantin Belousov * cleared. 178576386c7eSKonstantin Belousov */ 178676386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 178776386c7eSKonstantin Belousov 1788449c2e92SKonstantin Belousov if (vmd->vmd_oom) 1789449c2e92SKonstantin Belousov return; 1790449c2e92SKonstantin Belousov 1791449c2e92SKonstantin Belousov vmd->vmd_oom = TRUE; 1792449c2e92SKonstantin Belousov old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); 1793449c2e92SKonstantin Belousov if (old_vote != vm_ndomains - 1) 1794449c2e92SKonstantin Belousov return; 1795449c2e92SKonstantin Belousov 1796449c2e92SKonstantin Belousov /* 1797449c2e92SKonstantin Belousov * The current pagedaemon thread is the last in the quorum to 1798449c2e92SKonstantin Belousov * start OOM. Initiate the selection and signaling of the 1799449c2e92SKonstantin Belousov * victim. 1800449c2e92SKonstantin Belousov */ 1801449c2e92SKonstantin Belousov vm_pageout_oom(VM_OOM_MEM); 1802449c2e92SKonstantin Belousov 1803449c2e92SKonstantin Belousov /* 1804449c2e92SKonstantin Belousov * After one round of OOM terror, recall our vote. On the 1805449c2e92SKonstantin Belousov * next pass, current pagedaemon would vote again if the low 1806449c2e92SKonstantin Belousov * memory condition is still there, due to vmd_oom being 1807449c2e92SKonstantin Belousov * false. 1808449c2e92SKonstantin Belousov */ 1809449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1810449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1811449c2e92SKonstantin Belousov } 18122025d69bSKonstantin Belousov 18133949873fSKonstantin Belousov /* 18143949873fSKonstantin Belousov * The OOM killer is the page daemon's action of last resort when 18153949873fSKonstantin Belousov * memory allocation requests have been stalled for a prolonged period 18163949873fSKonstantin Belousov * of time because it cannot reclaim memory. This function computes 18173949873fSKonstantin Belousov * the approximate number of physical pages that could be reclaimed if 18183949873fSKonstantin Belousov * the specified address space is destroyed. 18193949873fSKonstantin Belousov * 18203949873fSKonstantin Belousov * Private, anonymous memory owned by the address space is the 18213949873fSKonstantin Belousov * principal resource that we expect to recover after an OOM kill. 
18223949873fSKonstantin Belousov * Since the physical pages mapped by the address space's COW entries 18233949873fSKonstantin Belousov * are typically shared pages, they are unlikely to be released and so 18243949873fSKonstantin Belousov * they are not counted. 18253949873fSKonstantin Belousov * 18263949873fSKonstantin Belousov * To get to the point where the page daemon runs the OOM killer, its 18273949873fSKonstantin Belousov * efforts to write-back vnode-backed pages may have stalled. This 18283949873fSKonstantin Belousov * could be caused by a memory allocation deadlock in the write path 18293949873fSKonstantin Belousov * that might be resolved by an OOM kill. Therefore, physical pages 18303949873fSKonstantin Belousov * belonging to vnode-backed objects are counted, because they might 18313949873fSKonstantin Belousov * be freed without being written out first if the address space holds 18323949873fSKonstantin Belousov * the last reference to an unlinked vnode. 18333949873fSKonstantin Belousov * 18343949873fSKonstantin Belousov * Similarly, physical pages belonging to OBJT_PHYS objects are 18353949873fSKonstantin Belousov * counted because the address space might hold the last reference to 18363949873fSKonstantin Belousov * the object. 18373949873fSKonstantin Belousov */ 18383949873fSKonstantin Belousov static long 18393949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace) 18403949873fSKonstantin Belousov { 18413949873fSKonstantin Belousov vm_map_t map; 18423949873fSKonstantin Belousov vm_map_entry_t entry; 18433949873fSKonstantin Belousov vm_object_t obj; 18443949873fSKonstantin Belousov long res; 18453949873fSKonstantin Belousov 18463949873fSKonstantin Belousov map = &vmspace->vm_map; 1847*c5b19cefSKonstantin Belousov KASSERT(!vm_map_is_system(map), ("system map")); 18483949873fSKonstantin Belousov sx_assert(&map->lock, SA_LOCKED); 18493949873fSKonstantin Belousov res = 0; 18502288078cSDoug Moore VM_MAP_ENTRY_FOREACH(entry, map) { 18513949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 18523949873fSKonstantin Belousov continue; 18533949873fSKonstantin Belousov obj = entry->object.vm_object; 18543949873fSKonstantin Belousov if (obj == NULL) 18553949873fSKonstantin Belousov continue; 18563949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 18573949873fSKonstantin Belousov obj->ref_count != 1) 18583949873fSKonstantin Belousov continue; 18590cb2610eSMark Johnston if (obj->type == OBJT_PHYS || obj->type == OBJT_VNODE || 1860e123264eSMark Johnston (obj->flags & OBJ_SWAP) != 0) 18613949873fSKonstantin Belousov res += obj->resident_page_count; 18623949873fSKonstantin Belousov } 18633949873fSKonstantin Belousov return (res); 18643949873fSKonstantin Belousov } 18653949873fSKonstantin Belousov 1866245139c6SKonstantin Belousov static int vm_oom_ratelim_last; 1867245139c6SKonstantin Belousov static int vm_oom_pf_secs = 10; 1868245139c6SKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0, 1869245139c6SKonstantin Belousov ""); 1870245139c6SKonstantin Belousov static struct mtx vm_oom_ratelim_mtx; 1871245139c6SKonstantin Belousov 18722025d69bSKonstantin Belousov void 18732025d69bSKonstantin Belousov vm_pageout_oom(int shortage) 18742025d69bSKonstantin Belousov { 18754a864f62SMark Johnston const char *reason; 18762025d69bSKonstantin Belousov struct proc *p, *bigproc; 18772025d69bSKonstantin Belousov vm_offset_t size, bigsize; 18782025d69bSKonstantin Belousov struct thread *td; 
18796bed074cSKonstantin Belousov struct vmspace *vm; 1880245139c6SKonstantin Belousov int now; 18813e78e983SAlan Cox bool breakout; 18822025d69bSKonstantin Belousov 18832025d69bSKonstantin Belousov /* 1884245139c6SKonstantin Belousov * For OOM requests originating from vm_fault(), there is a high 1885245139c6SKonstantin Belousov * chance that a single large process faults simultaneously in 1886245139c6SKonstantin Belousov * several threads. Also, on an active system running many 1887245139c6SKonstantin Belousov * processes of middle-size, like buildworld, all of them 1888245139c6SKonstantin Belousov * could fault almost simultaneously as well. 1889245139c6SKonstantin Belousov * 1890245139c6SKonstantin Belousov * To avoid killing too many processes, rate-limit OOMs 1891245139c6SKonstantin Belousov * initiated by vm_fault() time-outs on the waits for free 1892245139c6SKonstantin Belousov * pages. 1893245139c6SKonstantin Belousov */ 1894245139c6SKonstantin Belousov mtx_lock(&vm_oom_ratelim_mtx); 1895245139c6SKonstantin Belousov now = ticks; 1896245139c6SKonstantin Belousov if (shortage == VM_OOM_MEM_PF && 1897245139c6SKonstantin Belousov (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) { 1898245139c6SKonstantin Belousov mtx_unlock(&vm_oom_ratelim_mtx); 1899245139c6SKonstantin Belousov return; 1900245139c6SKonstantin Belousov } 1901245139c6SKonstantin Belousov vm_oom_ratelim_last = now; 1902245139c6SKonstantin Belousov mtx_unlock(&vm_oom_ratelim_mtx); 1903245139c6SKonstantin Belousov 1904245139c6SKonstantin Belousov /* 19051c58e4e5SJohn Baldwin * We keep the process bigproc locked once we find it to keep anyone 19061c58e4e5SJohn Baldwin * from messing with it; however, there is a possibility of 190728323addSBryan Drewery * deadlock if process B is bigproc and one of its child processes 19081c58e4e5SJohn Baldwin * attempts to propagate a signal to B while we are waiting for A's 19091c58e4e5SJohn Baldwin * lock while walking this list. To avoid this, we don't block on 19101c58e4e5SJohn Baldwin * the process lock but just skip a process if it is already locked. 19115663e6deSDavid Greenman */ 19125663e6deSDavid Greenman bigproc = NULL; 19135663e6deSDavid Greenman bigsize = 0; 19141005a129SJohn Baldwin sx_slock(&allproc_lock); 1915e602ba25SJulian Elischer FOREACH_PROC_IN_SYSTEM(p) { 191671943c3dSKonstantin Belousov PROC_LOCK(p); 191771943c3dSKonstantin Belousov 19181c58e4e5SJohn Baldwin /* 19193f1c4c4fSKonstantin Belousov * If this is a system, protected or killed process, skip it. 19205663e6deSDavid Greenman */ 192171943c3dSKonstantin Belousov if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 192271943c3dSKonstantin Belousov P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 192371943c3dSKonstantin Belousov p->p_pid == 1 || P_KILLED(p) || 192471943c3dSKonstantin Belousov (p->p_pid < 48 && swap_pager_avail != 0)) { 19258606d880SJohn Baldwin PROC_UNLOCK(p); 19265663e6deSDavid Greenman continue; 19275663e6deSDavid Greenman } 19285663e6deSDavid Greenman /* 1929dcbcd518SBruce Evans * If the process is in a non-running type state, 1930e602ba25SJulian Elischer * don't touch it. Check all the threads individually. 
19315663e6deSDavid Greenman */ 19323e78e983SAlan Cox breakout = false; 1933e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1934982d11f8SJeff Roberson thread_lock(td); 193571fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) && 193671fad9fdSJulian Elischer !TD_IS_RUNNING(td) && 1937f497cda2SEdward Tomasz Napierala !TD_IS_SLEEPING(td) && 1938e24a6552SMark Johnston !TD_IS_SUSPENDED(td)) { 1939982d11f8SJeff Roberson thread_unlock(td); 19403e78e983SAlan Cox breakout = true; 1941e602ba25SJulian Elischer break; 1942e602ba25SJulian Elischer } 1943982d11f8SJeff Roberson thread_unlock(td); 1944e602ba25SJulian Elischer } 1945e602ba25SJulian Elischer if (breakout) { 19461c58e4e5SJohn Baldwin PROC_UNLOCK(p); 19475663e6deSDavid Greenman continue; 19485663e6deSDavid Greenman } 19495663e6deSDavid Greenman /* 19505663e6deSDavid Greenman * get the process size 19515663e6deSDavid Greenman */ 19526bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p); 19536bed074cSKonstantin Belousov if (vm == NULL) { 19546bed074cSKonstantin Belousov PROC_UNLOCK(p); 19556bed074cSKonstantin Belousov continue; 19566bed074cSKonstantin Belousov } 19578370e9dfSMark Johnston _PHOLD(p); 195872d97679SDavid Schultz PROC_UNLOCK(p); 195995e2409aSKonstantin Belousov sx_sunlock(&allproc_lock); 196095e2409aSKonstantin Belousov if (!vm_map_trylock_read(&vm->vm_map)) { 196171943c3dSKonstantin Belousov vmspace_free(vm); 196295e2409aSKonstantin Belousov sx_slock(&allproc_lock); 196395e2409aSKonstantin Belousov PRELE(p); 196472d97679SDavid Schultz continue; 196572d97679SDavid Schultz } 19667981aa24SKonstantin Belousov size = vmspace_swap_count(vm); 1967245139c6SKonstantin Belousov if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF) 19683949873fSKonstantin Belousov size += vm_pageout_oom_pagecount(vm); 19693949873fSKonstantin Belousov vm_map_unlock_read(&vm->vm_map); 19706bed074cSKonstantin Belousov vmspace_free(vm); 197195e2409aSKonstantin Belousov sx_slock(&allproc_lock); 19723949873fSKonstantin Belousov 19735663e6deSDavid Greenman /* 19743949873fSKonstantin Belousov * If this process is bigger than the biggest one, 19755663e6deSDavid Greenman * remember it. 19765663e6deSDavid Greenman */ 19775663e6deSDavid Greenman if (size > bigsize) { 19781c58e4e5SJohn Baldwin if (bigproc != NULL) 197971943c3dSKonstantin Belousov PRELE(bigproc); 19805663e6deSDavid Greenman bigproc = p; 19815663e6deSDavid Greenman bigsize = size; 198271943c3dSKonstantin Belousov } else { 198371943c3dSKonstantin Belousov PRELE(p); 198471943c3dSKonstantin Belousov } 19855663e6deSDavid Greenman } 19861005a129SJohn Baldwin sx_sunlock(&allproc_lock); 19874a864f62SMark Johnston 19885663e6deSDavid Greenman if (bigproc != NULL) { 19894a864f62SMark Johnston switch (shortage) { 19904a864f62SMark Johnston case VM_OOM_MEM: 19914a864f62SMark Johnston reason = "failed to reclaim memory"; 19924a864f62SMark Johnston break; 19934a864f62SMark Johnston case VM_OOM_MEM_PF: 19944a864f62SMark Johnston reason = "a thread waited too long to allocate a page"; 19954a864f62SMark Johnston break; 19964a864f62SMark Johnston case VM_OOM_SWAPZ: 19974a864f62SMark Johnston reason = "out of swap space"; 19984a864f62SMark Johnston break; 19994a864f62SMark Johnston default: 20004a864f62SMark Johnston panic("unknown OOM reason %d", shortage); 20014a864f62SMark Johnston } 20023c200db9SJonathan T. 
Looney if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0) 20034a864f62SMark Johnston panic("%s", reason); 200471943c3dSKonstantin Belousov PROC_LOCK(bigproc); 20054a864f62SMark Johnston killproc(bigproc, reason); 2006fa885116SJulian Elischer sched_nice(bigproc, PRIO_MIN); 200771943c3dSKonstantin Belousov _PRELE(bigproc); 20081c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc); 20095663e6deSDavid Greenman } 20105663e6deSDavid Greenman } 201126f9a767SRodney W. Grimes 20128fc25508SMark Johnston /* 20138fc25508SMark Johnston * Signal a free page shortage to subsystems that have registered an event 20148fc25508SMark Johnston * handler. Reclaim memory from UMA in the event of a severe shortage. 20158fc25508SMark Johnston * Return true if the free page count should be re-evaluated. 20168fc25508SMark Johnston */ 2017b50a4ea6SMark Johnston static bool 2018b50a4ea6SMark Johnston vm_pageout_lowmem(void) 201949a3710cSMark Johnston { 2020b50a4ea6SMark Johnston static int lowmem_ticks = 0; 2021b50a4ea6SMark Johnston int last; 20228fc25508SMark Johnston bool ret; 20238fc25508SMark Johnston 20248fc25508SMark Johnston ret = false; 202549a3710cSMark Johnston 2026b50a4ea6SMark Johnston last = atomic_load_int(&lowmem_ticks); 2027b50a4ea6SMark Johnston while ((u_int)(ticks - last) / hz >= lowmem_period) { 2028b50a4ea6SMark Johnston if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0) 2029b50a4ea6SMark Johnston continue; 2030b50a4ea6SMark Johnston 203149a3710cSMark Johnston /* 203249a3710cSMark Johnston * Decrease registered cache sizes. 203349a3710cSMark Johnston */ 203449a3710cSMark Johnston SDT_PROBE0(vm, , , vm__lowmem_scan); 203549a3710cSMark Johnston EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES); 203649a3710cSMark Johnston 203749a3710cSMark Johnston /* 203849a3710cSMark Johnston * We do this explicitly after the caches have been 20398fc25508SMark Johnston * drained above. 204049a3710cSMark Johnston */ 20418fc25508SMark Johnston uma_reclaim(UMA_RECLAIM_TRIM); 20428fc25508SMark Johnston ret = true; 2043ace409ceSAlexander Motin break; 204449a3710cSMark Johnston } 20458fc25508SMark Johnston 20468fc25508SMark Johnston /* 20478fc25508SMark Johnston * Kick off an asynchronous reclaim of cached memory if one of the 20488fc25508SMark Johnston * page daemons is failing to keep up with demand. Use the "severe" 20498fc25508SMark Johnston * threshold instead of "min" to ensure that we do not blow away the 20508fc25508SMark Johnston * caches if a subset of the NUMA domains are depleted by kernel memory 20518fc25508SMark Johnston * allocations; the domainset iterators automatically skip domains 20528fc25508SMark Johnston * below the "min" threshold on the first pass. 20538fc25508SMark Johnston * 20548fc25508SMark Johnston * UMA reclaim worker has its own rate-limiting mechanism, so don't 20558fc25508SMark Johnston * worry about kicking it too often. 
20568fc25508SMark Johnston */ 20578fc25508SMark Johnston if (vm_page_count_severe()) 20588fc25508SMark Johnston uma_reclaim_wakeup(); 20598fc25508SMark Johnston 20608fc25508SMark Johnston return (ret); 206149a3710cSMark Johnston } 206249a3710cSMark Johnston 206349a3710cSMark Johnston static void 2064449c2e92SKonstantin Belousov vm_pageout_worker(void *arg) 2065449c2e92SKonstantin Belousov { 2066e2068d0bSJeff Roberson struct vm_domain *vmd; 2067b50a4ea6SMark Johnston u_int ofree; 206849a3710cSMark Johnston int addl_shortage, domain, shortage; 2069e57dd910SAlan Cox bool target_met; 2070449c2e92SKonstantin Belousov 2071e2068d0bSJeff Roberson domain = (uintptr_t)arg; 2072e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 20735f8cd1c0SJeff Roberson shortage = 0; 2074e57dd910SAlan Cox target_met = true; 2075449c2e92SKonstantin Belousov 2076449c2e92SKonstantin Belousov /* 2077949c9186SKonstantin Belousov * XXXKIB It could be useful to bind pageout daemon threads to 2078949c9186SKonstantin Belousov * the cores belonging to the domain, from which vm_page_array 2079949c9186SKonstantin Belousov * is allocated. 2080449c2e92SKonstantin Belousov */ 2081449c2e92SKonstantin Belousov 2082e2068d0bSJeff Roberson KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 2083e2068d0bSJeff Roberson vmd->vmd_last_active_scan = ticks; 2084449c2e92SKonstantin Belousov 2085449c2e92SKonstantin Belousov /* 2086449c2e92SKonstantin Belousov * The pageout daemon worker is never done, so loop forever. 2087449c2e92SKonstantin Belousov */ 2088449c2e92SKonstantin Belousov while (TRUE) { 208930fbfddaSJeff Roberson vm_domain_pageout_lock(vmd); 209049a3710cSMark Johnston 209130fbfddaSJeff Roberson /* 209230fbfddaSJeff Roberson * We need to clear wanted before we check the limits. This 209330fbfddaSJeff Roberson * prevents races with wakers who will check wanted after they 209430fbfddaSJeff Roberson * reach the limit. 209530fbfddaSJeff Roberson */ 209630fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 0); 209756ce0690SAlan Cox 209856ce0690SAlan Cox /* 20995f8cd1c0SJeff Roberson * Might the page daemon need to run again? 2100449c2e92SKonstantin Belousov */ 21015f8cd1c0SJeff Roberson if (vm_paging_needed(vmd, vmd->vmd_free_count)) { 210256ce0690SAlan Cox /* 210349a3710cSMark Johnston * Yes. If the scan failed to produce enough free 210449a3710cSMark Johnston * pages, sleep uninterruptibly for some time in the 210549a3710cSMark Johnston * hope that the laundry thread will clean some pages. 210656ce0690SAlan Cox */ 210730fbfddaSJeff Roberson vm_domain_pageout_unlock(vmd); 210849a3710cSMark Johnston if (!target_met) 21096eebec83SMark Johnston pause("pwait", hz / VM_INACT_SCAN_RATE); 2110449c2e92SKonstantin Belousov } else { 2111449c2e92SKonstantin Belousov /* 21125f8cd1c0SJeff Roberson * No, sleep until the next wakeup or until pages 21135f8cd1c0SJeff Roberson * need to have their reference stats updated. 2114449c2e92SKonstantin Belousov */ 21152c0f13aaSKonstantin Belousov if (mtx_sleep(&vmd->vmd_pageout_wanted, 211630fbfddaSJeff Roberson vm_domain_pageout_lockptr(vmd), PDROP | PVM, 21175f8cd1c0SJeff Roberson "psleep", hz / VM_INACT_SCAN_RATE) == 0) 211883c9dea1SGleb Smirnoff VM_CNT_INC(v_pdwakeups); 211956ce0690SAlan Cox } 2120be37ee79SMark Johnston 212130fbfddaSJeff Roberson /* Prevent spurious wakeups by ensuring that wanted is set. 
*/ 212230fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 1); 212330fbfddaSJeff Roberson 212430fbfddaSJeff Roberson /* 212530fbfddaSJeff Roberson * Use the controller to calculate how many pages to free in 2126b50a4ea6SMark Johnston * this interval, and scan the inactive queue. If the lowmem 2127b50a4ea6SMark Johnston * handlers appear to have freed up some pages, subtract the 2128b50a4ea6SMark Johnston * difference from the inactive queue scan target. 212930fbfddaSJeff Roberson */ 21305f8cd1c0SJeff Roberson shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count); 213149a3710cSMark Johnston if (shortage > 0) { 2132b50a4ea6SMark Johnston ofree = vmd->vmd_free_count; 2133b50a4ea6SMark Johnston if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree) 2134b50a4ea6SMark Johnston shortage -= min(vmd->vmd_free_count - ofree, 2135b50a4ea6SMark Johnston (u_int)shortage); 21360292c54bSConrad Meyer target_met = vm_pageout_inactive(vmd, shortage, 2137be37ee79SMark Johnston &addl_shortage); 213849a3710cSMark Johnston } else 213949a3710cSMark Johnston addl_shortage = 0; 214056ce0690SAlan Cox 2141be37ee79SMark Johnston /* 2142be37ee79SMark Johnston * Scan the active queue. A positive value for shortage 2143be37ee79SMark Johnston * indicates that we must aggressively deactivate pages to avoid 2144be37ee79SMark Johnston * a shortfall. 2145be37ee79SMark Johnston */ 21467bb4634eSMark Johnston shortage = vm_pageout_active_target(vmd) + addl_shortage; 2147be37ee79SMark Johnston vm_pageout_scan_active(vmd, shortage); 2148449c2e92SKonstantin Belousov } 2149449c2e92SKonstantin Belousov } 2150449c2e92SKonstantin Belousov 2151df8bae1dSRodney W. Grimes /* 21520292c54bSConrad Meyer * vm_pageout_helper runs additional pageout daemons in times of high paging 21530292c54bSConrad Meyer * activity. 21540292c54bSConrad Meyer */ 21550292c54bSConrad Meyer static void 21560292c54bSConrad Meyer vm_pageout_helper(void *arg) 21570292c54bSConrad Meyer { 21580292c54bSConrad Meyer struct vm_domain *vmd; 21590292c54bSConrad Meyer int domain; 21600292c54bSConrad Meyer 21610292c54bSConrad Meyer domain = (uintptr_t)arg; 21620292c54bSConrad Meyer vmd = VM_DOMAIN(domain); 21630292c54bSConrad Meyer 21640292c54bSConrad Meyer vm_domain_pageout_lock(vmd); 21650292c54bSConrad Meyer for (;;) { 21660292c54bSConrad Meyer msleep(&vmd->vmd_inactive_shortage, 21670292c54bSConrad Meyer vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0); 21680292c54bSConrad Meyer blockcount_release(&vmd->vmd_inactive_starting, 1); 21690292c54bSConrad Meyer 21700292c54bSConrad Meyer vm_domain_pageout_unlock(vmd); 21710292c54bSConrad Meyer vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage); 21720292c54bSConrad Meyer vm_domain_pageout_lock(vmd); 21730292c54bSConrad Meyer 21740292c54bSConrad Meyer /* 21750292c54bSConrad Meyer * Release the running count while the pageout lock is held to 21760292c54bSConrad Meyer * prevent wakeup races. 
		blockcount_release(&vmd->vmd_inactive_running, 1);
	}
}

static int
get_pageout_threads_per_domain(const struct vm_domain *vmd)
{
	unsigned total_pageout_threads, eligible_cpus, domain_cpus;

	if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
		return (0);

	/*
	 * Semi-arbitrarily constrain pagedaemon threads to less than half the
	 * total number of CPUs in the system as an upper limit.
	 */
	if (pageout_cpus_per_thread < 2)
		pageout_cpus_per_thread = 2;
	else if (pageout_cpus_per_thread > mp_ncpus)
		pageout_cpus_per_thread = mp_ncpus;

	total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
	domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);

	/* Pagedaemons are not run in empty domains. */
	eligible_cpus = mp_ncpus;
	for (unsigned i = 0; i < vm_ndomains; i++)
		if (VM_DOMAIN_EMPTY(i))
			eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);

	/*
	 * Assign a portion of the total pageout threads to this domain
	 * corresponding to the fraction of pagedaemon-eligible CPUs in the
	 * domain. In asymmetric NUMA systems, domains with more CPUs may be
	 * allocated more threads than domains with fewer CPUs.
	 */
	return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
}

/*
 * Initialize basic pageout daemon settings. See the comment above the
 * definition of vm_domain for some explanation of how these thresholds are
 * used.
 */
static void
vm_pageout_init_domain(int domain)
{
	struct vm_domain *vmd;
	struct sysctl_oid *oid;

	vmd = VM_DOMAIN(domain);
	vmd->vmd_interrupt_free_min = 2;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
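	/*
	 * As a rough worked example of the expressions below (assuming 4 KB
	 * pages, a 64 KB MAXBSIZE and the default vm_pageout_page_count of
	 * 32), a domain with 1M pages (4 GB) ends up with approximately:
	 *
	 *	vmd_pageout_free_min	 ~ 34 pages
	 *	vmd_free_reserved	 ~ 1,430 pages
	 *	vmd_free_min		 ~ 6,670 pages	(~26 MB)
	 *	vmd_free_target		 ~ 22,400 pages	(~88 MB)
	 *	vmd_pageout_wakeup_thresh ~ 20,150 pages
	 *
	 * The exact values follow from the computations below and differ with
	 * the page size and the size of the domain.
	 */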
	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
	    vmd->vmd_interrupt_free_min;
	vmd->vmd_free_reserved = vm_pageout_page_count +
	    vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
	vmd->vmd_free_min = vmd->vmd_page_count / 200;
	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
	vmd->vmd_free_min += vmd->vmd_free_reserved;
	vmd->vmd_free_severe += vmd->vmd_free_reserved;
	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;

	/*
	 * Set the default wakeup threshold to be 10% below the paging
	 * target. This keeps the steady state out of shortfall.
	 */
	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;

	/*
	 * Target amount of memory to move out of the laundry queue during a
	 * background laundering. This is proportional to the amount of system
	 * memory.
	 */
	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
	    vmd->vmd_free_min) / 10;

	/* Initialize the pageout daemon pid controller. */
	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
	    vmd->vmd_free_target, PIDCTRL_BOUND,
	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
	    "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));

	vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
}

static void
vm_pageout_init(void)
{
	u_long freecount;
	int i;

	/*
	 * Initialize some paging parameters.
	 */
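	/*
	 * The global thresholds exported through vm_cnt are simply the sums
	 * of the per-domain values computed by vm_pageout_init_domain(),
	 * while freecount accumulates the total number of free pages and is
	 * used below to derive the default vm_page_max_user_wired limit.
	 */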
	freecount = 0;
	for (i = 0; i < vm_ndomains; i++) {
		struct vm_domain *vmd;

		vm_pageout_init_domain(i);
		vmd = VM_DOMAIN(i);
		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
		vm_cnt.v_free_target += vmd->vmd_free_target;
		vm_cnt.v_free_min += vmd->vmd_free_min;
		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
		vm_cnt.v_free_severe += vmd->vmd_free_severe;
		freecount += vmd->vmd_free_count;
	}

	/*
	 * Set interval in seconds for active scan. We want to visit each
	 * page at least once every ten minutes. This is to prevent worst
	 * case paging behaviors with stale active LRU.
	 */
	if (vm_pageout_update_period == 0)
		vm_pageout_update_period = 600;

	/*
	 * Set the maximum number of user-wired virtual pages. Historically the
	 * main source of such pages was mlock(2) and mlockall(2). Hypervisors
	 * may also request user-wired memory.
	 */
	if (vm_page_max_user_wired == 0)
		vm_page_max_user_wired = 4 * freecount / 5;
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
	struct proc *p;
	struct thread *td;
	int error, first, i, j, pageout_threads;

	p = curproc;
	td = curthread;

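	/*
	 * Start-up summary: the loop below creates one vm_pageout_worker
	 * thread per non-empty domain (the first such domain is served by
	 * this bootstrap thread itself, which is renamed and enters
	 * vm_pageout_worker() at the end), vmd_inactive_threads - 1 helper
	 * threads and one laundry thread per domain, plus a single
	 * uma_reclaim worker shared by all domains.
	 */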
	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
	swap_pager_swap_init();
	for (first = -1, i = 0; i < vm_ndomains; i++) {
		if (VM_DOMAIN_EMPTY(i)) {
			if (bootverbose)
				printf("domain %d empty; skipping pageout\n",
				    i);
			continue;
		}
		if (first == -1)
			first = i;
		else {
			error = kthread_add(vm_pageout_worker,
			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
			if (error != 0)
				panic("starting pageout for domain %d: %d\n",
				    i, error);
		}
		pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
		for (j = 0; j < pageout_threads - 1; j++) {
			error = kthread_add(vm_pageout_helper,
			    (void *)(uintptr_t)i, p, NULL, 0, 0,
			    "dom%d helper%d", i, j);
			if (error != 0)
				panic("starting pageout helper %d for domain "
				    "%d: %d\n", j, i, error);
		}
		error = kthread_add(vm_pageout_laundry_worker,
		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
		if (error != 0)
			panic("starting laundry for domain %d: %d", i, error);
	}
	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
	if (error != 0)
		panic("starting uma_reclaim helper, error %d\n", error);

	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
	vm_pageout_worker((void *)(uintptr_t)first);
}

/*
 * Perform an advisory wakeup of the page daemon.
 */
void
pagedaemon_wakeup(int domain)
{
	struct vm_domain *vmd;

	vmd = VM_DOMAIN(domain);
	vm_domain_pageout_assert_unlocked(vmd);
	if (curproc == pageproc)
		return;

	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
		vm_domain_pageout_lock(vmd);
		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
		wakeup(&vmd->vmd_pageout_wanted);
		vm_domain_pageout_unlock(vmd);
	}
}
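/*
 * Illustrative caller-side sketch (hypothetical, not taken from this file):
 * an allocator that notices a domain running low on free pages might nudge
 * the page daemon with something like
 *
 *	if (vm_paging_needed(vmd, vmd->vmd_free_count))
 *		pagedaemon_wakeup(vmd->vmd_domain);
 *
 * The wakeup is advisory: if the daemon is already awake, the extra store to
 * vmd_pageout_wanted is harmless, and the daemon itself never calls here
 * (note the curproc == pageproc check above).
 */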