/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

/* Pagedaemon activity rates, in subdivisions of one second. */
#define	VM_LAUNDER_RATE		10
#define	VM_INACT_SCAN_RATE	10

static int vm_pageout_oom_seq = 12;

static int vm_pageout_update_period;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static int swapdev_enabled;

static int vm_panic_on_oom = 0;

SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
    CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
    "Panic on the given number of out-of-memory errors instead of killing the largest process");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
    CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
    "Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
    "Low memory callback period");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
    CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
    CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
    CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
    "back-to-back calls to oom detector to start OOM");

static int act_scan_laundry_weight = 3;
SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
    &act_scan_laundry_weight, 0,
    "weight given to clean vs. dirty pages in active queue scans");

static u_int vm_background_launder_rate = 4096;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
    &vm_background_launder_rate, 0,
    "background laundering rate, in kilobytes per second");

static u_int vm_background_launder_max = 20 * 1024;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
    &vm_background_launder_max, 0, "background laundering cap, in kilobytes");

int vm_pageout_page_count = 32;

u_long vm_page_max_user_wired;
SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
    &vm_page_max_user_wired, 0,
    "system-wide limit to user-wired page count");
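
/*
 * Note: with the default vm_pageout_page_count of 32 and 4 KB pages (an
 * illustrative, machine-dependent figure), a single cluster formed by
 * vm_pageout_cluster() below covers at most 128 KB of an object.
 */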

static u_int isqrt(u_int num);
static int vm_pageout_launder(struct vm_domain *vmd, int launder,
    bool in_shortfall);
static void vm_pageout_laundry_worker(void *arg);

struct scan_state {
	struct vm_batchqueue bq;
	struct vm_pagequeue *pq;
	vm_page_t	marker;
	int		maxscan;
	int		scanned;
};

static void
vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
    vm_page_t marker, vm_page_t after, int maxscan)
{

	vm_pagequeue_assert_locked(pq);
	KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
	    ("marker %p already enqueued", marker));

	if (after == NULL)
		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
	else
		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
	vm_page_aflag_set(marker, PGA_ENQUEUED);

	vm_batchqueue_init(&ss->bq);
	ss->pq = pq;
	ss->marker = marker;
	ss->maxscan = maxscan;
	ss->scanned = 0;
	vm_pagequeue_unlock(pq);
}

static void
vm_pageout_end_scan(struct scan_state *ss)
{
	struct vm_pagequeue *pq;

	pq = ss->pq;
	vm_pagequeue_assert_locked(pq);
	KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
	pq->pq_pdpages += ss->scanned;
}

/*
 * Add a small number of queued pages to a batch queue for later processing
 * without the corresponding queue lock held.  The caller must have enqueued a
 * marker page at the desired start point for the scan.  Pages will be
 * physically dequeued if the caller so requests.  Otherwise, the returned
 * batch may contain marker pages, and it is up to the caller to handle them.
 *
 * When processing the batch queue, vm_page_queue() must be used to
 * determine whether the page has been logically dequeued by another thread.
 * Once this check is performed, the page lock guarantees that the page will
 * not be disassociated from the queue.
 */
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
{
	struct vm_pagequeue *pq;
	vm_page_t m, marker, n;

	marker = ss->marker;
	pq = ss->pq;

	KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	vm_pagequeue_lock(pq);
	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
	    m = n, ss->scanned++) {
		n = TAILQ_NEXT(m, plinks.q);
		if ((m->flags & PG_MARKER) == 0) {
			KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
			    ("page %p not enqueued", m));
			KASSERT((m->flags & PG_FICTITIOUS) == 0,
			    ("Fictitious page %p cannot be in page queue", m));
			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
			    ("Unmanaged page %p cannot be in page queue", m));
		} else if (dequeue)
			continue;

		(void)vm_batchqueue_insert(&ss->bq, m);
		if (dequeue) {
			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
			vm_page_aflag_clear(m, PGA_ENQUEUED);
		}
	}
	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
	if (__predict_true(m != NULL))
		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
	else
		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
	if (dequeue)
		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
	vm_pagequeue_unlock(pq);
}

/*
 * Return the next page to be scanned, or NULL if the scan is complete.
 */
static __always_inline vm_page_t
vm_pageout_next(struct scan_state *ss, const bool dequeue)
{

	if (ss->bq.bq_cnt == 0)
		vm_pageout_collect_batch(ss, dequeue);
	return (vm_batchqueue_pop(&ss->bq));
}
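
/*
 * A typical queue scan using the helpers above looks roughly as follows
 * (this is the pattern used by vm_pageout_launder() and the other queue
 * scans below; sketch only):
 *
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *	while ((m = vm_pageout_next(&ss, dequeue)) != NULL) {
 *		... examine m without the queue lock held ...
 *	}
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_end_scan(&ss);
 *	vm_pagequeue_unlock(pq);
 *
 * vm_pageout_init_scan() drops the queue lock before returning, and
 * vm_pageout_next() reacquires it only long enough to collect another
 * batch of pages.
 */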

/*
 * Determine whether processing of a page should be deferred and ensure that
 * any outstanding queue operations are processed.
 */
static __always_inline bool
vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
{
	vm_page_astate_t as;

	as = vm_page_astate_load(m);
	if (__predict_false(as.queue != queue ||
	    ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
		return (true);
	if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
		vm_page_pqbatch_submit(m, queue);
		return (true);
	}
	return (false);
}

/*
 * Scan for pages at adjacent offsets within the given page's object that are
 * eligible for laundering, form a cluster of these pages and the given page,
 * and launder that cluster.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
	vm_pindex_t pindex;
	int ib, is, page_base, pageout_count;

	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	pindex = m->pindex;

	vm_page_assert_xbusied(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * We can cluster only if the page is not clean, busy, or held, and
	 * the page is in the laundry queue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
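
	/*
	 * For example, with vm_pageout_page_count == 32, laundering the page
	 * at pindex 70 scans backward only as far as pindex 64 (a
	 * cluster-aligned boundary) and then scans forward, so that a fully
	 * dirty run yields a single aligned cluster covering pindexes 64-95.
	 */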
more:
	while (ib != 0 && pageout_count < vm_pageout_page_count) {
		if (ib > pindex) {
			ib = 0;
			break;
		}
		if ((p = vm_page_prev(pb)) == NULL ||
		    vm_page_tryxbusy(p) == 0) {
			ib = 0;
			break;
		}
		if (vm_page_wired(p)) {
			ib = 0;
			vm_page_xunbusy(p);
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			ib = 0;
			vm_page_xunbusy(p);
			break;
		}
		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
			vm_page_xunbusy(p);
			ib = 0;
			break;
		}
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;

		/*
		 * We are at an alignment boundary.  Stop here, and switch
		 * directions.  Do not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}
	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		if ((p = vm_page_next(ps)) == NULL ||
		    vm_page_tryxbusy(p) == 0)
			break;
		if (vm_page_wired(p)) {
			vm_page_xunbusy(p);
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			vm_page_xunbusy(p);
			break;
		}
		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
			vm_page_xunbusy(p);
			break;
		}
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
	if (ib != 0 && pageout_count < vm_pageout_page_count)
		goto more;

	return (vm_pageout_flush(&mc[page_base], pageout_count,
	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e., busy the page), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and the first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
 *	VM_PAGER_FAIL for any page in the runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Mark the pages shared busy and verify that they're
	 * valid and read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(vm_page_all_valid(mc[i]),
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
		    ("vm_pageout_flush: writeable page %p", mc[i]));
		vm_page_busy_downgrade(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			/*
			 * The page may have moved since laundering started, in
			 * which case it should be left alone.
			 */
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			/* FALLTHROUGH */
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * The page is outside the object's range.  We pretend
			 * that the page out worked and clean the page, so the
			 * changes will be lost if the page is reclaimed by
			 * the page daemon.
			 */
			vm_page_undirty(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out to swap because the
			 * pager wasn't able to find space, place the page in
			 * the PQ_UNSWAPPABLE holding queue.  This is an
			 * optimization that prevents the page daemon from
			 * wasting CPU cycles on pages that cannot be reclaimed
			 * because no swap device is configured.
			 *
			 * Otherwise, reactivate the page so that it doesn't
			 * clog the laundry and inactive queues.  (We will try
			 * paging it out again later.)
			 */
			if (object->type == OBJT_SWAP &&
			    pageout_status[i] == VM_PAGER_FAIL) {
				vm_page_unswappable(mt);
				numpagedout++;
			} else
				vm_page_activate(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}
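
/*
 * swapdev_enabled tracks whether at least one swap device is configured.
 * The event handlers below are registered by the laundry thread and keep
 * the flag up to date so that scans can skip PQ_UNSWAPPABLE when paging
 * out to swap is impossible; see vm_pageout_launder() below and the
 * VM_PAGER_FAIL handling above.
 */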

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

	atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

	if (swap_pager_nswapdev() == 1)
		atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
static int
vm_pageout_clean(vm_page_t m, int *numpagedout)
{
	struct vnode *vp;
	struct mount *mp;
	vm_object_t object;
	vm_pindex_t pindex;
	int error, lockmode;

	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;
	vp = NULL;
	mp = NULL;

	/*
	 * The object is already known NOT to be dead.  It
	 * is possible for the vget() to block the whole
	 * pageout daemon, but the new low-memory handling
	 * code should prevent it.
	 *
	 * We can't wait forever for the vnode lock; we might
	 * deadlock due to a vn_read() getting stuck in
	 * vm_wait while holding this vnode.  We skip the
	 * vnode if we can't get it in a reasonable amount
	 * of time.
	 */
	if (object->type == OBJT_VNODE) {
		vm_page_xunbusy(m);
		vp = object->handle;
		if (vp->v_type == VREG &&
		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			mp = NULL;
			error = EDEADLK;
			goto unlock_all;
		}
		KASSERT(mp != NULL,
		    ("vp %p with NULL v_mount", vp));
		vm_object_reference_locked(object);
		pindex = m->pindex;
		VM_OBJECT_WUNLOCK(object);
		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
		    LK_SHARED : LK_EXCLUSIVE;
		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
			vp = NULL;
			error = EDEADLK;
			goto unlock_mp;
		}
		VM_OBJECT_WLOCK(object);

		/*
		 * Ensure that the object and vnode were not disassociated
		 * while locks were dropped.
		 */
		if (vp->v_object != object) {
			error = ENOENT;
			goto unlock_all;
		}

		/*
		 * While the object was unlocked, the page may have been:
		 * (1) moved to a different queue,
		 * (2) reallocated to a different object,
		 * (3) reallocated to a different offset, or
		 * (4) cleaned.
		 */
		if (!vm_page_in_laundry(m) || m->object != object ||
		    m->pindex != pindex || m->dirty == 0) {
			error = ENXIO;
			goto unlock_all;
		}

		/*
		 * The page may have been busied while the object lock was
		 * released.
		 */
		if (vm_page_tryxbusy(m) == 0) {
			error = EBUSY;
			goto unlock_all;
		}
	}

	/*
	 * Remove all writeable mappings, failing if the page is wired.
	 */
	if (!vm_page_try_remove_write(m)) {
		vm_page_xunbusy(m);
		error = EBUSY;
		goto unlock_all;
	}

	/*
	 * If a page is dirty, then it is either being washed
	 * (but not yet cleaned) or it is still in the
	 * laundry.  If it is still in the laundry, then we
	 * start the cleaning operation.
	 */
	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
		error = EIO;

unlock_all:
	VM_OBJECT_WUNLOCK(object);

unlock_mp:
	if (mp != NULL) {
		if (vp != NULL)
			vput(vp);
		vm_object_deallocate(object);
		vn_finished_write(mp);
	}

	return (error);
}

/*
 * Attempt to launder the specified number of pages.
 *
 * Returns the number of pages successfully laundered.
 */
static int
vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
	struct scan_state ss;
	struct vm_pagequeue *pq;
	vm_object_t object;
	vm_page_t m, marker;
	vm_page_astate_t new, old;
	int act_delta, error, numpagedout, queue, refs, starting_target;
	int vnodes_skipped;
	bool pageout_ok;

	object = NULL;
	starting_target = launder;
	vnodes_skipped = 0;

	/*
	 * Scan the laundry queues for pages eligible to be laundered.  We stop
	 * once the target number of dirty pages have been laundered, or once
	 * we've reached the end of the queue.  A single iteration of this loop
	 * may cause more than one page to be laundered because of clustering.
	 *
	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
	 * swap devices are configured.
	 */
	if (atomic_load_acq_int(&swapdev_enabled))
		queue = PQ_UNSWAPPABLE;
	else
		queue = PQ_LAUNDRY;

scan:
	marker = &vmd->vmd_markers[queue];
	pq = &vmd->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
		if (__predict_false((m->flags & PG_MARKER) != 0))
			continue;

		/*
		 * Don't touch a page that was removed from the queue after the
		 * page queue lock was released.  Otherwise, ensure that any
		 * pending queue operations, such as dequeues for wired pages,
		 * are handled.
		 */
		if (vm_pageout_defer(m, queue, true))
			continue;

		/*
		 * Lock the page's object.
		 */
		if (object == NULL || object != m->object) {
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);
			object = atomic_load_ptr(&m->object);
			if (__predict_false(object == NULL))
				/* The page is being freed by another thread. */
				continue;

			/* Depends on type-stability. */
			VM_OBJECT_WLOCK(object);
			if (__predict_false(m->object != object)) {
				VM_OBJECT_WUNLOCK(object);
				object = NULL;
				continue;
			}
		}

		if (vm_page_tryxbusy(m) == 0)
			continue;

		/*
		 * Check for wirings now that we hold the object lock and have
		 * exclusively busied the page.  If the page is mapped, it may
		 * still be wired by pmap lookups.  The call to
		 * vm_page_try_remove_all() below atomically checks for such
		 * wirings and removes mappings.  If the page is unmapped, the
		 * wire count is guaranteed not to increase after this check.
		 */
		if (__predict_false(vm_page_wired(m)))
			goto skip_page;

		/*
		 * Invalid pages can be easily freed.  They cannot be
		 * mapped; vm_page_free() asserts this.
		 */
		if (vm_page_none_valid(m))
			goto free_page;

		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;

		for (old = vm_page_astate_load(m);;) {
			/*
			 * Check to see if the page has been removed from the
			 * queue since the first such check.  Leave it alone if
			 * so, discarding any references collected by
			 * pmap_ts_referenced().
			 */
			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
				goto skip_page;

			new = old;
			act_delta = refs;
			if ((old.flags & PGA_REFERENCED) != 0) {
				new.flags &= ~PGA_REFERENCED;
				act_delta++;
			}
			if (act_delta == 0) {
				;
			} else if (object->ref_count != 0) {
				/*
				 * Increase the activation count if the page was
				 * referenced while in the laundry queue.  This
				 * makes it less likely that the page will be
				 * returned prematurely to the laundry queue.
				 */
				new.act_count += ACT_ADVANCE +
				    act_delta;
				if (new.act_count > ACT_MAX)
					new.act_count = ACT_MAX;

				new.flags &= ~PGA_QUEUE_OP_MASK;
				new.flags |= PGA_REQUEUE;
				new.queue = PQ_ACTIVE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;

				/*
				 * If this was a background laundering, count
				 * activated pages towards our target.  The
				 * purpose of background laundering is to ensure
				 * that pages are eventually cycled through the
				 * laundry queue, and an activation is a valid
				 * way out.
				 */
				if (!in_shortfall)
					launder--;
				VM_CNT_INC(v_reactivated);
				goto skip_page;
			} else if ((object->flags & OBJ_DEAD) == 0) {
				new.flags |= PGA_REQUEUE;
				if (!vm_page_pqstate_commit(m, &old, new))
					continue;
				goto skip_page;
			}
			break;
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of freeing it.  If, however, any of the page's
		 * mappings allow write access, then the page may still be
		 * modified until the last of those mappings are removed.
		 */
		if (object->ref_count != 0) {
			vm_page_test_dirty(m);
			if (m->dirty == 0 && !vm_page_try_remove_all(m))
				goto skip_page;
		}

		/*
		 * Clean pages are freed, and dirty pages are paged out unless
		 * they belong to a dead object.  Requeueing dirty pages from
		 * dead objects is pointless, as they are being paged out and
		 * freed by the thread that destroyed the object.
		 */
		if (m->dirty == 0) {
free_page:
			/*
			 * Now we are guaranteed that no other threads are
			 * manipulating the page; check for a last-second
			 * reference.
			 */
			if (vm_pageout_defer(m, queue, true))
				goto skip_page;
			vm_page_free(m);
			VM_CNT_INC(v_dfree);
		} else if ((object->flags & OBJ_DEAD) == 0) {
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_DEFAULT)
				pageout_ok = true;
			else if (disable_swap_pageouts)
				pageout_ok = false;
			else
				pageout_ok = true;
			if (!pageout_ok) {
				vm_page_launder(m);
				goto skip_page;
			}

			/*
			 * Form a cluster with adjacent, dirty pages from the
			 * same object, and page out that entire cluster.
			 *
			 * The adjacent, dirty pages must also be in the
			 * laundry.  However, their mappings are not checked
			 * for new references.  Consequently, a recently
			 * referenced page may be paged out.  However, that
			 * page will not be prematurely reclaimed.  After page
			 * out, the page will be placed in the inactive queue,
			 * where any new references will be detected and the
			 * page reactivated.
			 */
			error = vm_pageout_clean(m, &numpagedout);
			if (error == 0) {
				launder -= numpagedout;
				ss.scanned += numpagedout;
			} else if (error == EDEADLK) {
				pageout_lock_miss++;
				vnodes_skipped++;
			}
			object = NULL;
		} else {
skip_page:
			vm_page_xunbusy(m);
		}
	}
	if (object != NULL) {
		VM_OBJECT_WUNLOCK(object);
		object = NULL;
	}
	vm_pagequeue_lock(pq);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);

	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
		queue = PQ_LAUNDRY;
		goto scan;
	}

	/*
	 * Wake up the sync daemon if we skipped a vnode in a writeable object
	 * and we didn't launder enough pages.
	 */
	if (vnodes_skipped > 0 && launder > 0)
		(void)speedup_syncer();

	return (starting_target - launder);
}

/*
 * Compute the integer square root.
 */
static u_int
isqrt(u_int num)
{
	u_int bit, root, tmp;

	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
	root = 0;
	while (bit != 0) {
		tmp = root + bit;
		root >>= 1;
		if (num >= tmp) {
			num -= tmp;
			root += bit;
		}
		bit >>= 2;
	}
	return (root);
}

/*
 * Perform the work of the laundry thread: periodically wake up and determine
 * whether any pages need to be laundered.  If so, determine the number of
 * pages that need to be laundered, and launder them.
 */
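
/*
 * The worker operates in two modes.  When the page daemon posts a
 * VM_LAUNDRY_SHORTFALL request, the computed shortfall is laundered over
 * VM_LAUNDER_RATE / VM_INACT_SCAN_RATE passes.  Otherwise the worker
 * performs background laundering, paced by vm_background_launder_rate and
 * pausing hz / VM_LAUNDER_RATE ticks between passes; when there is nothing
 * to do it sleeps on vmd_laundry_request until the page daemon wakes it.
 */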
static void
vm_pageout_laundry_worker(void *arg)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	uint64_t nclean, ndirty, nfreed;
	int domain, last_target, launder, shortfall, shortfall_cycle, target;
	bool in_shortfall;

	domain = (uintptr_t)arg;
	vmd = VM_DOMAIN(domain);
	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));

	shortfall = 0;
	in_shortfall = false;
	shortfall_cycle = 0;
	last_target = target = 0;
	nfreed = 0;

	/*
	 * Calls to these handlers are serialized by the swap syscall lock.
	 */
	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
	    EVENTHANDLER_PRI_ANY);
	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
	    EVENTHANDLER_PRI_ANY);

	/*
	 * The pageout laundry worker is never done, so loop forever.
	 */
	for (;;) {
		KASSERT(target >= 0, ("negative target %d", target));
		KASSERT(shortfall_cycle >= 0,
		    ("negative cycle %d", shortfall_cycle));
		launder = 0;

		/*
		 * First determine whether we need to launder pages to meet a
		 * shortage of free pages.
		 */
		if (shortfall > 0) {
			in_shortfall = true;
			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
			target = shortfall;
		} else if (!in_shortfall)
			goto trybackground;
		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
			/*
			 * We recently entered shortfall and began laundering
			 * pages.  If we have completed that laundering run
			 * (and we are no longer in shortfall) or we have met
			 * our laundry target through other activity, then we
			 * can stop laundering pages.
			 */
			in_shortfall = false;
			target = 0;
			goto trybackground;
		}
		launder = target / shortfall_cycle--;
		goto dolaundry;

		/*
		 * There's no immediate need to launder any pages; see if we
		 * meet the conditions to perform background laundering:
		 *
		 * 1. The ratio of dirty to clean inactive pages exceeds the
		 *    background laundering threshold, or
		 * 2. we haven't yet reached the target of the current
		 *    background laundering run.
		 *
		 * The background laundering threshold is not a constant.
		 * Instead, it is a slowly growing function of the number of
		 * clean pages freed by the page daemon since the last
		 * background laundering.  Thus, as the ratio of dirty to
		 * clean inactive pages grows, the amount of memory pressure
		 * required to trigger laundering decreases.  We ensure
		 * that the threshold is non-zero after an inactive queue
		 * scan, even if that scan failed to free a single clean page.
		 */
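		/*
		 * As a worked example (illustrative figures): if
		 * vmd_free_target - vmd_free_min is 10000 pages and the page
		 * daemon has freed nfreed = 40000 clean pages since the last
		 * background run, the multiplier below is
		 * isqrt(howmany(40001, 10000)) = isqrt(5) = 2, so background
		 * laundering starts once the laundry queue holds at least
		 * half as many pages as there are free and inactive pages.
		 * With nfreed near zero the multiplier is 1, and laundering
		 * starts only once dirty pages outnumber clean ones.
		 */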
trybackground:
		nclean = vmd->vmd_free_count +
		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
			target = vmd->vmd_background_launder_target;
		}

		/*
		 * We have a non-zero background laundering target.  If we've
		 * laundered up to our maximum without observing a page daemon
		 * request, just stop.  This is a safety belt that ensures we
		 * don't launder an excessive amount if memory pressure is low
		 * and the ratio of dirty to clean pages is large.  Otherwise,
		 * proceed at the background laundering rate.
		 */
		if (target > 0) {
			if (nfreed > 0) {
				nfreed = 0;
				last_target = target;
			} else if (last_target - target >=
			    vm_background_launder_max * PAGE_SIZE / 1024) {
				target = 0;
			}
			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
			launder /= VM_LAUNDER_RATE;
			if (launder > target)
				launder = target;
		}

dolaundry:
		if (launder > 0) {
			/*
			 * Because of I/O clustering, the number of laundered
			 * pages could exceed "target" by the maximum size of
			 * a cluster minus one.
			 */
			target -= min(vm_pageout_launder(vmd, launder,
			    in_shortfall), target);
			pause("laundp", hz / VM_LAUNDER_RATE);
		}

		/*
		 * If we're not currently laundering pages and the page daemon
		 * hasn't posted a new request, sleep until the page daemon
		 * kicks us.
		 */
1111ebcddc72SAlan Cox */ 1112ebcddc72SAlan Cox vm_pagequeue_lock(pq); 1113e2068d0bSJeff Roberson if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE) 1114e2068d0bSJeff Roberson (void)mtx_sleep(&vmd->vmd_laundry_request, 1115ebcddc72SAlan Cox vm_pagequeue_lockptr(pq), PVM, "launds", 0); 1116ebcddc72SAlan Cox 1117ebcddc72SAlan Cox /* 1118ebcddc72SAlan Cox * If the pagedaemon has indicated that it's in shortfall, start 1119ebcddc72SAlan Cox * a shortfall laundering unless we're already in the middle of 1120ebcddc72SAlan Cox * one. This may preempt a background laundering. 1121ebcddc72SAlan Cox */ 1122e2068d0bSJeff Roberson if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL && 1123ebcddc72SAlan Cox (!in_shortfall || shortfall_cycle == 0)) { 1124e2068d0bSJeff Roberson shortfall = vm_laundry_target(vmd) + 1125e2068d0bSJeff Roberson vmd->vmd_pageout_deficit; 1126ebcddc72SAlan Cox target = 0; 1127ebcddc72SAlan Cox } else 1128ebcddc72SAlan Cox shortfall = 0; 1129ebcddc72SAlan Cox 1130ebcddc72SAlan Cox if (target == 0) 1131e2068d0bSJeff Roberson vmd->vmd_laundry_request = VM_LAUNDRY_IDLE; 113260684862SMark Johnston nfreed += vmd->vmd_clean_pages_freed; 113360684862SMark Johnston vmd->vmd_clean_pages_freed = 0; 1134ebcddc72SAlan Cox vm_pagequeue_unlock(pq); 1135ebcddc72SAlan Cox } 1136ebcddc72SAlan Cox } 1137ebcddc72SAlan Cox 1138be37ee79SMark Johnston /* 1139be37ee79SMark Johnston * Compute the number of pages we want to try to move from the 1140be37ee79SMark Johnston * active queue to either the inactive or laundry queue. 1141be37ee79SMark Johnston * 11427bb4634eSMark Johnston * When scanning active pages during a shortage, we make clean pages 11437bb4634eSMark Johnston * count more heavily towards the page shortage than dirty pages. 11447bb4634eSMark Johnston * This is because dirty pages must be laundered before they can be 11457bb4634eSMark Johnston * reused and thus have less utility when attempting to quickly 11467bb4634eSMark Johnston * alleviate a free page shortage. However, this weighting also 11477bb4634eSMark Johnston * causes the scan to deactivate dirty pages more aggressively, 11487bb4634eSMark Johnston * improving the effectiveness of clustering. 1149be37ee79SMark Johnston */ 1150be37ee79SMark Johnston static int 11517bb4634eSMark Johnston vm_pageout_active_target(struct vm_domain *vmd) 1152be37ee79SMark Johnston { 1153be37ee79SMark Johnston int shortage; 1154be37ee79SMark Johnston 1155be37ee79SMark Johnston shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) - 1156be37ee79SMark Johnston (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt + 1157be37ee79SMark Johnston vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight); 1158be37ee79SMark Johnston shortage *= act_scan_laundry_weight; 1159be37ee79SMark Johnston return (shortage); 1160be37ee79SMark Johnston } 1161be37ee79SMark Johnston 1162be37ee79SMark Johnston /* 1163be37ee79SMark Johnston * Scan the active queue. If there is no shortage of inactive pages, scan a 1164be37ee79SMark Johnston * small portion of the queue in order to maintain quasi-LRU. 
1165be37ee79SMark Johnston */ 1166be37ee79SMark Johnston static void 1167be37ee79SMark Johnston vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage) 1168be37ee79SMark Johnston { 1169be37ee79SMark Johnston struct scan_state ss; 1170fee2a2faSMark Johnston vm_object_t object; 1171be37ee79SMark Johnston vm_page_t m, marker; 1172be37ee79SMark Johnston struct vm_pagequeue *pq; 1173f3f38e25SMark Johnston vm_page_astate_t old, new; 1174be37ee79SMark Johnston long min_scan; 1175f3f38e25SMark Johnston int act_delta, max_scan, ps_delta, refs, scan_tick; 1176f3f38e25SMark Johnston uint8_t nqueue; 1177be37ee79SMark Johnston 1178be37ee79SMark Johnston marker = &vmd->vmd_markers[PQ_ACTIVE]; 1179be37ee79SMark Johnston pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; 1180be37ee79SMark Johnston vm_pagequeue_lock(pq); 1181be37ee79SMark Johnston 1182be37ee79SMark Johnston /* 1183be37ee79SMark Johnston * If we're just idle polling, attempt to visit every 1184be37ee79SMark Johnston * active page within 'update_period' seconds. 1185be37ee79SMark Johnston */ 1186be37ee79SMark Johnston scan_tick = ticks; 1187be37ee79SMark Johnston if (vm_pageout_update_period != 0) { 1188be37ee79SMark Johnston min_scan = pq->pq_cnt; 1189be37ee79SMark Johnston min_scan *= scan_tick - vmd->vmd_last_active_scan; 1190be37ee79SMark Johnston min_scan /= hz * vm_pageout_update_period; 1191be37ee79SMark Johnston } else 1192be37ee79SMark Johnston min_scan = 0; 1193be37ee79SMark Johnston if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0)) 1194be37ee79SMark Johnston vmd->vmd_last_active_scan = scan_tick; 1195be37ee79SMark Johnston 1196be37ee79SMark Johnston /* 1197be37ee79SMark Johnston * Scan the active queue for pages that can be deactivated. Update 1198be37ee79SMark Johnston * the per-page activity counter and use it to identify deactivation 1199be37ee79SMark Johnston * candidates. Held pages may be deactivated. 1200be37ee79SMark Johnston * 1201be37ee79SMark Johnston * To avoid requeuing each page that remains in the active queue, we 12027bb4634eSMark Johnston * implement the CLOCK algorithm. To keep the implementation of the 12037bb4634eSMark Johnston * enqueue operation consistent for all page queues, we use two hands, 12047bb4634eSMark Johnston * represented by marker pages. Scans begin at the first hand, which 12057bb4634eSMark Johnston * precedes the second hand in the queue. When the two hands meet, 12067bb4634eSMark Johnston * they are moved back to the head and tail of the queue, respectively, 12077bb4634eSMark Johnston * and scanning resumes. 1208be37ee79SMark Johnston */ 1209be37ee79SMark Johnston max_scan = page_shortage > 0 ? 
pq->pq_cnt : min_scan; 1210be37ee79SMark Johnston act_scan: 1211be37ee79SMark Johnston vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan); 1212be37ee79SMark Johnston while ((m = vm_pageout_next(&ss, false)) != NULL) { 1213be37ee79SMark Johnston if (__predict_false(m == &vmd->vmd_clock[1])) { 1214be37ee79SMark Johnston vm_pagequeue_lock(pq); 1215be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1216be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q); 1217be37ee79SMark Johnston TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0], 1218be37ee79SMark Johnston plinks.q); 1219be37ee79SMark Johnston TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1], 1220be37ee79SMark Johnston plinks.q); 1221be37ee79SMark Johnston max_scan -= ss.scanned; 1222be37ee79SMark Johnston vm_pageout_end_scan(&ss); 1223be37ee79SMark Johnston goto act_scan; 1224be37ee79SMark Johnston } 1225be37ee79SMark Johnston if (__predict_false((m->flags & PG_MARKER) != 0)) 1226be37ee79SMark Johnston continue; 1227be37ee79SMark Johnston 1228e8bcf696SMark Johnston /* 1229b7f30bffSMark Johnston * Don't touch a page that was removed from the queue after the 1230b7f30bffSMark Johnston * page queue lock was released. Otherwise, ensure that any 1231b7f30bffSMark Johnston * pending queue operations, such as dequeues for wired pages, 1232b7f30bffSMark Johnston * are handled. 1233e8bcf696SMark Johnston */ 1234b7f30bffSMark Johnston if (vm_pageout_defer(m, PQ_ACTIVE, true)) 1235e8bcf696SMark Johnston continue; 1236e8bcf696SMark Johnston 1237e8bcf696SMark Johnston /* 1238e8bcf696SMark Johnston * A page's object pointer may be set to NULL before 1239e8bcf696SMark Johnston * the object lock is acquired. 1240e8bcf696SMark Johnston */ 124123ed568cSMateusz Guzik object = atomic_load_ptr(&m->object); 1242fee2a2faSMark Johnston if (__predict_false(object == NULL)) 1243fee2a2faSMark Johnston /* 1244fee2a2faSMark Johnston * The page has been removed from its object. 1245fee2a2faSMark Johnston */ 1246fee2a2faSMark Johnston continue; 1247fee2a2faSMark Johnston 1248f3f38e25SMark Johnston /* Deferred free of swap space. */ 1249f3f38e25SMark Johnston if ((m->a.flags & PGA_SWAP_FREE) != 0 && 1250f3f38e25SMark Johnston VM_OBJECT_TRYWLOCK(object)) { 1251f3f38e25SMark Johnston if (m->object == object) 1252f3f38e25SMark Johnston vm_pager_page_unswapped(m); 1253f3f38e25SMark Johnston VM_OBJECT_WUNLOCK(object); 1254f3f38e25SMark Johnston } 1255f3f38e25SMark Johnston 1256fee2a2faSMark Johnston /* 1257be37ee79SMark Johnston * Check to see "how much" the page has been used. 1258d7aeb429SAlan Cox * 1259d7aeb429SAlan Cox * Test PGA_REFERENCED after calling pmap_ts_referenced() so 1260d7aeb429SAlan Cox * that a reference from a concurrently destroyed mapping is 1261d7aeb429SAlan Cox * observed here and now. 1262d7aeb429SAlan Cox * 1263e8bcf696SMark Johnston * Perform an unsynchronized object ref count check. While 1264e8bcf696SMark Johnston * the page lock ensures that the page is not reallocated to 1265e8bcf696SMark Johnston * another object, in particular, one with unmanaged mappings 1266e8bcf696SMark Johnston * that cannot support pmap_ts_referenced(), two races are, 1267be37ee79SMark Johnston * nonetheless, possible: 1268be37ee79SMark Johnston * 1) The count was transitioning to zero, but we saw a non- 1269e8bcf696SMark Johnston * zero value. pmap_ts_referenced() will return zero 1270e8bcf696SMark Johnston * because the page is not mapped. 
1271e8bcf696SMark Johnston * 2) The count was transitioning to one, but we saw zero. 1272e8bcf696SMark Johnston * This race delays the detection of a new reference. At 1273e8bcf696SMark Johnston * worst, we will deactivate and reactivate the page. 1274be37ee79SMark Johnston */ 1275f3f38e25SMark Johnston refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; 1276be37ee79SMark Johnston 1277f3f38e25SMark Johnston old = vm_page_astate_load(m); 1278f3f38e25SMark Johnston do { 1279f3f38e25SMark Johnston /* 1280f3f38e25SMark Johnston * Check to see if the page has been removed from the 1281f3f38e25SMark Johnston * queue since the first such check. Leave it alone if 1282f3f38e25SMark Johnston * so, discarding any references collected by 1283f3f38e25SMark Johnston * pmap_ts_referenced(). 1284f3f38e25SMark Johnston */ 1285f3f38e25SMark Johnston if (__predict_false(_vm_page_queue(old) == PQ_NONE)) 1286f3f38e25SMark Johnston break; 1287a8081778SJeff Roberson 1288be37ee79SMark Johnston /* 1289be37ee79SMark Johnston * Advance or decay the act_count based on recent usage. 1290be37ee79SMark Johnston */ 1291f3f38e25SMark Johnston new = old; 1292f3f38e25SMark Johnston act_delta = refs; 1293f3f38e25SMark Johnston if ((old.flags & PGA_REFERENCED) != 0) { 1294f3f38e25SMark Johnston new.flags &= ~PGA_REFERENCED; 1295f3f38e25SMark Johnston act_delta++; 1296f3f38e25SMark Johnston } 1297be37ee79SMark Johnston if (act_delta != 0) { 1298f3f38e25SMark Johnston new.act_count += ACT_ADVANCE + act_delta; 1299f3f38e25SMark Johnston if (new.act_count > ACT_MAX) 1300f3f38e25SMark Johnston new.act_count = ACT_MAX; 1301f3f38e25SMark Johnston } else { 1302f3f38e25SMark Johnston new.act_count -= min(new.act_count, 1303f3f38e25SMark Johnston ACT_DECLINE); 1304f3f38e25SMark Johnston } 1305be37ee79SMark Johnston 1306f3f38e25SMark Johnston if (new.act_count > 0) { 1307be37ee79SMark Johnston /* 1308f3f38e25SMark Johnston * Adjust the activation count and keep the page 1309f3f38e25SMark Johnston * in the active queue. The count might be left 1310f3f38e25SMark Johnston * unchanged if it is saturated. The page may 1311f3f38e25SMark Johnston * have been moved to a different queue since we 1312f3f38e25SMark Johnston * started the scan, in which case we move it 1313f3f38e25SMark Johnston * back. 1314be37ee79SMark Johnston */ 1315f3f38e25SMark Johnston ps_delta = 0; 1316f3f38e25SMark Johnston if (old.queue != PQ_ACTIVE) { 1317f7607c30SMark Johnston new.flags &= ~PGA_QUEUE_OP_MASK; 1318f7607c30SMark Johnston new.flags |= PGA_REQUEUE; 1319f7607c30SMark Johnston new.queue = PQ_ACTIVE; 1320f3f38e25SMark Johnston } 13217cdeaf33SMark Johnston } else { 1322be37ee79SMark Johnston /* 1323f3f38e25SMark Johnston * When not short for inactive pages, let dirty 1324f3f38e25SMark Johnston * pages go through the inactive queue before 1325f3f38e25SMark Johnston * moving to the laundry queue. This gives them 1326f3f38e25SMark Johnston * some extra time to be reactivated, 1327f3f38e25SMark Johnston * potentially avoiding an expensive pageout. 1328f3f38e25SMark Johnston * However, during a page shortage, the inactive 1329f3f38e25SMark Johnston * queue is necessarily small, and so dirty 1330f3f38e25SMark Johnston * pages would only spend a trivial amount of 1331f3f38e25SMark Johnston * time in the inactive queue. Therefore, we 1332f3f38e25SMark Johnston * might as well place them directly in the 1333f3f38e25SMark Johnston * laundry queue to reduce queuing overhead. 
1334f3f38e25SMark Johnston * 1335be37ee79SMark Johnston * Calling vm_page_test_dirty() here would 1336be37ee79SMark Johnston * require acquisition of the object's write 1337be37ee79SMark Johnston * lock. However, during a page shortage, 1338f3f38e25SMark Johnston * directing dirty pages into the laundry queue 1339f3f38e25SMark Johnston * is only an optimization and not a 1340be37ee79SMark Johnston * requirement. Therefore, we simply rely on 1341f3f38e25SMark Johnston * the opportunistic updates to the page's dirty 1342f3f38e25SMark Johnston * field by the pmap. 1343be37ee79SMark Johnston */ 1344f3f38e25SMark Johnston if (page_shortage <= 0) { 1345f3f38e25SMark Johnston nqueue = PQ_INACTIVE; 1346f3f38e25SMark Johnston ps_delta = 0; 1347f3f38e25SMark Johnston } else if (m->dirty == 0) { 1348f3f38e25SMark Johnston nqueue = PQ_INACTIVE; 1349f3f38e25SMark Johnston ps_delta = act_scan_laundry_weight; 1350be37ee79SMark Johnston } else { 1351f3f38e25SMark Johnston nqueue = PQ_LAUNDRY; 1352f3f38e25SMark Johnston ps_delta = 1; 1353be37ee79SMark Johnston } 1354f3f38e25SMark Johnston 1355f7607c30SMark Johnston new.flags &= ~PGA_QUEUE_OP_MASK; 1356f3f38e25SMark Johnston new.flags |= PGA_REQUEUE; 1357f3f38e25SMark Johnston new.queue = nqueue; 1358be37ee79SMark Johnston } 1359f3f38e25SMark Johnston } while (!vm_page_pqstate_commit(m, &old, new)); 1360f3f38e25SMark Johnston 1361f3f38e25SMark Johnston page_shortage -= ps_delta; 1362be37ee79SMark Johnston } 1363be37ee79SMark Johnston vm_pagequeue_lock(pq); 1364be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1365be37ee79SMark Johnston TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q); 1366be37ee79SMark Johnston vm_pageout_end_scan(&ss); 1367be37ee79SMark Johnston vm_pagequeue_unlock(pq); 1368be37ee79SMark Johnston } 1369be37ee79SMark Johnston 13705cd29d0fSMark Johnston static int 1371f3f38e25SMark Johnston vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker, 1372f3f38e25SMark Johnston vm_page_t m) 13735cd29d0fSMark Johnston { 1374f3f38e25SMark Johnston vm_page_astate_t as; 13755cd29d0fSMark Johnston 1376f3f38e25SMark Johnston vm_pagequeue_assert_locked(pq); 1377f3f38e25SMark Johnston 1378f3f38e25SMark Johnston as = vm_page_astate_load(m); 1379f3f38e25SMark Johnston if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0) 1380e8bcf696SMark Johnston return (0); 1381e8bcf696SMark Johnston vm_page_aflag_set(m, PGA_ENQUEUED); 1382f3f38e25SMark Johnston TAILQ_INSERT_BEFORE(marker, m, plinks.q); 13835cd29d0fSMark Johnston return (1); 13845cd29d0fSMark Johnston } 13855cd29d0fSMark Johnston 13865cd29d0fSMark Johnston /* 13875cd29d0fSMark Johnston * Re-add stuck pages to the inactive queue. We will examine them again 13885cd29d0fSMark Johnston * during the next scan. If the queue state of a page has changed since 13895cd29d0fSMark Johnston * it was physically removed from the page queue in 13905cd29d0fSMark Johnston * vm_pageout_collect_batch(), don't do anything with that page. 
13915cd29d0fSMark Johnston */ 13925cd29d0fSMark Johnston static void 13935cd29d0fSMark Johnston vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq, 13945cd29d0fSMark Johnston vm_page_t m) 13955cd29d0fSMark Johnston { 13965cd29d0fSMark Johnston struct vm_pagequeue *pq; 1397f3f38e25SMark Johnston vm_page_t marker; 13985cd29d0fSMark Johnston int delta; 13995cd29d0fSMark Johnston 14005cd29d0fSMark Johnston delta = 0; 1401f3f38e25SMark Johnston marker = ss->marker; 14025cd29d0fSMark Johnston pq = ss->pq; 14035cd29d0fSMark Johnston 14045cd29d0fSMark Johnston if (m != NULL) { 14055cd29d0fSMark Johnston if (vm_batchqueue_insert(bq, m)) 14065cd29d0fSMark Johnston return; 14075cd29d0fSMark Johnston vm_pagequeue_lock(pq); 1408f3f38e25SMark Johnston delta += vm_pageout_reinsert_inactive_page(pq, marker, m); 14095cd29d0fSMark Johnston } else 14105cd29d0fSMark Johnston vm_pagequeue_lock(pq); 14115cd29d0fSMark Johnston while ((m = vm_batchqueue_pop(bq)) != NULL) 1412f3f38e25SMark Johnston delta += vm_pageout_reinsert_inactive_page(pq, marker, m); 14135cd29d0fSMark Johnston vm_pagequeue_cnt_add(pq, delta); 14145cd29d0fSMark Johnston vm_pagequeue_unlock(pq); 14155cd29d0fSMark Johnston vm_batchqueue_init(bq); 14165cd29d0fSMark Johnston } 14175cd29d0fSMark Johnston 1418ebcddc72SAlan Cox /* 141927e29d10SMark Johnston * Attempt to reclaim the requested number of pages from the inactive queue. 142027e29d10SMark Johnston * Returns true if the shortage was addressed. 1421df8bae1dSRodney W. Grimes */ 1422be37ee79SMark Johnston static int 142349a3710cSMark Johnston vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage, 1424be37ee79SMark Johnston int *addl_shortage) 1425df8bae1dSRodney W. Grimes { 14265cd29d0fSMark Johnston struct scan_state ss; 14275cd29d0fSMark Johnston struct vm_batchqueue rq; 14285cd29d0fSMark Johnston vm_page_t m, marker; 14298d220203SAlan Cox struct vm_pagequeue *pq; 1430df8bae1dSRodney W. Grimes vm_object_t object; 1431f3f38e25SMark Johnston vm_page_astate_t old, new; 1432f3f38e25SMark Johnston int act_delta, addl_page_shortage, deficit, page_shortage, refs; 1433be37ee79SMark Johnston int starting_page_shortage; 14340d94caffSDavid Greenman 1435df8bae1dSRodney W. Grimes /* 143601f04471SMark Johnston * The addl_page_shortage is an estimate of the number of temporarily 1437311e34e2SKonstantin Belousov * stuck pages in the inactive queue. In other words, the 1438449c2e92SKonstantin Belousov * number of pages from the inactive count that should be 1439311e34e2SKonstantin Belousov * discounted in setting the target for the active queue scan. 1440311e34e2SKonstantin Belousov */ 14419099545aSAlan Cox addl_page_shortage = 0; 14429099545aSAlan Cox 14431c7c3c6aSMatthew Dillon /* 144449a3710cSMark Johnston * vmd_pageout_deficit counts the number of pages requested in 144549a3710cSMark Johnston * allocations that failed because of a free page shortage. We assume 144649a3710cSMark Johnston * that the allocations will be reattempted and thus include the deficit 144749a3710cSMark Johnston * in our scan target. 14481c7c3c6aSMatthew Dillon */ 1449e2068d0bSJeff Roberson deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit); 145049a3710cSMark Johnston starting_page_shortage = page_shortage = shortage + deficit; 14511c7c3c6aSMatthew Dillon 14525cd29d0fSMark Johnston object = NULL; 14535cd29d0fSMark Johnston vm_batchqueue_init(&rq); 14545cd29d0fSMark Johnston 1455936524aaSMatthew Dillon /* 1456f095d1bbSAlan Cox * Start scanning the inactive queue for pages that we can free. 
The 1457f095d1bbSAlan Cox * scan will stop when we reach the target or we have scanned the 14585cff1f4dSMark Johnston * entire queue. (Note that m->a.act_count is not used to make 1459f095d1bbSAlan Cox * decisions for the inactive queue, only for the active queue.) 14608d220203SAlan Cox */ 146164b38930SMark Johnston marker = &vmd->vmd_markers[PQ_INACTIVE]; 14625cd29d0fSMark Johnston pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; 14638d220203SAlan Cox vm_pagequeue_lock(pq); 14645cd29d0fSMark Johnston vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 14655cd29d0fSMark Johnston while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) { 14665cd29d0fSMark Johnston KASSERT((m->flags & PG_MARKER) == 0, 14675cd29d0fSMark Johnston ("marker page %p was dequeued", m)); 1468df8bae1dSRodney W. Grimes 1469936524aaSMatthew Dillon /* 1470b7f30bffSMark Johnston * Don't touch a page that was removed from the queue after the 1471b7f30bffSMark Johnston * page queue lock was released. Otherwise, ensure that any 1472b7f30bffSMark Johnston * pending queue operations, such as dequeues for wired pages, 1473b7f30bffSMark Johnston * are handled. 1474936524aaSMatthew Dillon */ 1475b7f30bffSMark Johnston if (vm_pageout_defer(m, PQ_INACTIVE, false)) 1476936524aaSMatthew Dillon continue; 1477e8bcf696SMark Johnston 14789f5632e6SMark Johnston /* 14799f5632e6SMark Johnston * Lock the page's object. 14809f5632e6SMark Johnston */ 14819f5632e6SMark Johnston if (object == NULL || object != m->object) { 148260256604SMark Johnston if (object != NULL) 14835cd29d0fSMark Johnston VM_OBJECT_WUNLOCK(object); 148423ed568cSMateusz Guzik object = atomic_load_ptr(&m->object); 14859f5632e6SMark Johnston if (__predict_false(object == NULL)) 14869f5632e6SMark Johnston /* The page is being freed by another thread. */ 14879f5632e6SMark Johnston continue; 14889f5632e6SMark Johnston 1489e8bcf696SMark Johnston /* Depends on type-stability. */ 149041fd4b94SMark Johnston VM_OBJECT_WLOCK(object); 14919f5632e6SMark Johnston if (__predict_false(m->object != object)) { 14929f5632e6SMark Johnston VM_OBJECT_WUNLOCK(object); 14939f5632e6SMark Johnston object = NULL; 14949f5632e6SMark Johnston goto reinsert; 149541fd4b94SMark Johnston } 149641fd4b94SMark Johnston } 14975cd29d0fSMark Johnston 149863e97555SJeff Roberson if (vm_page_tryxbusy(m) == 0) { 1499a3aeedabSAlan Cox /* 1500a3aeedabSAlan Cox * Don't mess with busy pages. Leave them at 1501a3aeedabSAlan Cox * the front of the queue. Most likely, they 1502a3aeedabSAlan Cox * are being paged out and will leave the 1503a3aeedabSAlan Cox * queue shortly after the scan finishes. So, 1504a3aeedabSAlan Cox * they ought to be discounted from the 1505a3aeedabSAlan Cox * inactive count. 1506a3aeedabSAlan Cox */ 1507a3aeedabSAlan Cox addl_page_shortage++; 15085cd29d0fSMark Johnston goto reinsert; 150926f9a767SRodney W. Grimes } 151048cc2fc7SKonstantin Belousov 1511a8081778SJeff Roberson /* Deferred free of swap space. */ 1512a8081778SJeff Roberson if ((m->a.flags & PGA_SWAP_FREE) != 0) 1513a8081778SJeff Roberson vm_pager_page_unswapped(m); 1514a8081778SJeff Roberson 151548cc2fc7SKonstantin Belousov /* 15169f5632e6SMark Johnston * Check for wirings now that we hold the object lock and have 15179f5632e6SMark Johnston * exclusively busied the page. If the page is mapped, it may 15189f5632e6SMark Johnston * still be wired by pmap lookups. The call to 1519fee2a2faSMark Johnston * vm_page_try_remove_all() below atomically checks for such 1520fee2a2faSMark Johnston * wirings and removes mappings. 
If the page is unmapped, the 15219f5632e6SMark Johnston * wire count is guaranteed not to increase after this check. 1522fee2a2faSMark Johnston */ 15239f5632e6SMark Johnston if (__predict_false(vm_page_wired(m))) 1524f3f38e25SMark Johnston goto skip_page; 1525fee2a2faSMark Johnston 1526fee2a2faSMark Johnston /* 15278748f58cSKonstantin Belousov * Invalid pages can be easily freed. They cannot be 15288748f58cSKonstantin Belousov * mapped, vm_page_free() asserts this. 1529776f729cSKonstantin Belousov */ 15300012f373SJeff Roberson if (vm_page_none_valid(m)) 15318748f58cSKonstantin Belousov goto free_page; 1532776f729cSKonstantin Belousov 1533f3f38e25SMark Johnston refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; 1534f3f38e25SMark Johnston 1535f3f38e25SMark Johnston for (old = vm_page_astate_load(m);;) { 1536776f729cSKonstantin Belousov /* 1537f3f38e25SMark Johnston * Check to see if the page has been removed from the 1538f3f38e25SMark Johnston * queue since the first such check. Leave it alone if 1539f3f38e25SMark Johnston * so, discarding any references collected by 1540f3f38e25SMark Johnston * pmap_ts_referenced(). 15417e006499SJohn Dyson */ 1542f3f38e25SMark Johnston if (__predict_false(_vm_page_queue(old) == PQ_NONE)) 1543f3f38e25SMark Johnston goto skip_page; 1544f3f38e25SMark Johnston 1545f3f38e25SMark Johnston new = old; 1546f3f38e25SMark Johnston act_delta = refs; 1547f3f38e25SMark Johnston if ((old.flags & PGA_REFERENCED) != 0) { 1548f3f38e25SMark Johnston new.flags &= ~PGA_REFERENCED; 1549d7aeb429SAlan Cox act_delta++; 15502fe6e4d7SDavid Greenman } 1551f3f38e25SMark Johnston if (act_delta == 0) { 1552f3f38e25SMark Johnston ; 1553f3f38e25SMark Johnston } else if (object->ref_count != 0) { 1554e8bcf696SMark Johnston /* 1555f3f38e25SMark Johnston * Increase the activation count if the 1556f3f38e25SMark Johnston * page was referenced while in the 1557f3f38e25SMark Johnston * inactive queue. This makes it less 1558f3f38e25SMark Johnston * likely that the page will be returned 1559f3f38e25SMark Johnston * prematurely to the inactive queue. 1560e8bcf696SMark Johnston */ 1561f3f38e25SMark Johnston new.act_count += ACT_ADVANCE + 1562f3f38e25SMark Johnston act_delta; 1563f3f38e25SMark Johnston if (new.act_count > ACT_MAX) 1564f3f38e25SMark Johnston new.act_count = ACT_MAX; 1565f3f38e25SMark Johnston 1566f7607c30SMark Johnston new.flags &= ~PGA_QUEUE_OP_MASK; 1567f3f38e25SMark Johnston new.flags |= PGA_REQUEUE; 1568f3f38e25SMark Johnston new.queue = PQ_ACTIVE; 1569f3f38e25SMark Johnston if (!vm_page_pqstate_commit(m, &old, new)) 1570e8bcf696SMark Johnston continue; 1571f3f38e25SMark Johnston 1572f3f38e25SMark Johnston VM_CNT_INC(v_reactivated); 1573f3f38e25SMark Johnston goto skip_page; 1574ebcddc72SAlan Cox } else if ((object->flags & OBJ_DEAD) == 0) { 1575f3f38e25SMark Johnston new.queue = PQ_INACTIVE; 1576f3f38e25SMark Johnston new.flags |= PGA_REQUEUE; 1577f3f38e25SMark Johnston if (!vm_page_pqstate_commit(m, &old, new)) 1578f3f38e25SMark Johnston continue; 1579f3f38e25SMark Johnston goto skip_page; 1580ebcddc72SAlan Cox } 1581f3f38e25SMark Johnston break; 1582960810ccSAlan Cox } 158367bf6868SJohn Dyson 15847e006499SJohn Dyson /* 15859fc4739dSAlan Cox * If the page appears to be clean at the machine-independent 15869fc4739dSAlan Cox * layer, then remove all of its mappings from the pmap in 1587a766ffd0SAlan Cox * anticipation of freeing it. 
If, however, any of the page's 1588a766ffd0SAlan Cox * mappings allow write access, then the page may still be 1589a766ffd0SAlan Cox * modified until the last of those mappings are removed. 15907e006499SJohn Dyson */ 1591aa044135SAlan Cox if (object->ref_count != 0) { 15929fc4739dSAlan Cox vm_page_test_dirty(m); 15939f5632e6SMark Johnston if (m->dirty == 0 && !vm_page_try_remove_all(m)) 1594f3f38e25SMark Johnston goto skip_page; 1595fee2a2faSMark Johnston } 1596dcbcd518SBruce Evans 15976989c456SAlan Cox /* 1598ebcddc72SAlan Cox * Clean pages can be freed, but dirty pages must be sent back 1599ebcddc72SAlan Cox * to the laundry, unless they belong to a dead object. 1600ebcddc72SAlan Cox * Requeueing dirty pages from dead objects is pointless, as 1601ebcddc72SAlan Cox * they are being paged out and freed by the thread that 1602ebcddc72SAlan Cox * destroyed the object. 16036989c456SAlan Cox */ 1604ebcddc72SAlan Cox if (m->dirty == 0) { 16058748f58cSKonstantin Belousov free_page: 16065cd29d0fSMark Johnston /* 16079f5632e6SMark Johnston * Now we are guaranteed that no other threads are 16089f5632e6SMark Johnston * manipulating the page, check for a last-second 16099f5632e6SMark Johnston * reference that would save it from doom. 16105cd29d0fSMark Johnston */ 16119f5632e6SMark Johnston if (vm_pageout_defer(m, PQ_INACTIVE, false)) 16129f5632e6SMark Johnston goto skip_page; 16139f5632e6SMark Johnston 16149f5632e6SMark Johnston /* 16159f5632e6SMark Johnston * Because we dequeued the page and have already checked 16169f5632e6SMark Johnston * for pending dequeue and enqueue requests, we can 16179f5632e6SMark Johnston * safely disassociate the page from the inactive queue 16189f5632e6SMark Johnston * without holding the queue lock. 16199f5632e6SMark Johnston */ 16205cff1f4dSMark Johnston m->a.queue = PQ_NONE; 162178afdce6SAlan Cox vm_page_free(m); 16225cd29d0fSMark Johnston page_shortage--; 162363e97555SJeff Roberson continue; 162463e97555SJeff Roberson } 162563e97555SJeff Roberson if ((object->flags & OBJ_DEAD) == 0) 1626ebcddc72SAlan Cox vm_page_launder(m); 1627f3f38e25SMark Johnston skip_page: 1628f3f38e25SMark Johnston vm_page_xunbusy(m); 16295cd29d0fSMark Johnston continue; 16305cd29d0fSMark Johnston reinsert: 16315cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, m); 16325cd29d0fSMark Johnston } 163360256604SMark Johnston if (object != NULL) 163489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 16355cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, NULL); 16365cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL); 16378d220203SAlan Cox vm_pagequeue_lock(pq); 16385cd29d0fSMark Johnston vm_pageout_end_scan(&ss); 16398d220203SAlan Cox vm_pagequeue_unlock(pq); 164026f9a767SRodney W. Grimes 16415cd29d0fSMark Johnston VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage); 16425cd29d0fSMark Johnston 1643ebcddc72SAlan Cox /* 1644ebcddc72SAlan Cox * Wake up the laundry thread so that it can perform any needed 1645ebcddc72SAlan Cox * laundering. If we didn't meet our target, we're in shortfall and 1646b1fd102eSMark Johnston * need to launder more aggressively. If PQ_LAUNDRY is empty and no 1647b1fd102eSMark Johnston * swap devices are configured, the laundry thread has no work to do, so 1648b1fd102eSMark Johnston * don't bother waking it up. 
1649cb35676eSMark Johnston * 1650cb35676eSMark Johnston * The laundry thread uses the number of inactive queue scans elapsed 1651cb35676eSMark Johnston * since the last laundering to determine whether to launder again, so 1652cb35676eSMark Johnston * keep count. 1653ebcddc72SAlan Cox */ 1654cb35676eSMark Johnston if (starting_page_shortage > 0) { 1655e2068d0bSJeff Roberson pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; 1656ebcddc72SAlan Cox vm_pagequeue_lock(pq); 1657e2068d0bSJeff Roberson if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE && 1658cb35676eSMark Johnston (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) { 1659ebcddc72SAlan Cox if (page_shortage > 0) { 1660e2068d0bSJeff Roberson vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL; 166183c9dea1SGleb Smirnoff VM_CNT_INC(v_pdshortfalls); 1662e2068d0bSJeff Roberson } else if (vmd->vmd_laundry_request != 1663e2068d0bSJeff Roberson VM_LAUNDRY_SHORTFALL) 1664e2068d0bSJeff Roberson vmd->vmd_laundry_request = 1665e2068d0bSJeff Roberson VM_LAUNDRY_BACKGROUND; 1666e2068d0bSJeff Roberson wakeup(&vmd->vmd_laundry_request); 1667b1fd102eSMark Johnston } 166860684862SMark Johnston vmd->vmd_clean_pages_freed += 166960684862SMark Johnston starting_page_shortage - page_shortage; 1670ebcddc72SAlan Cox vm_pagequeue_unlock(pq); 1671ebcddc72SAlan Cox } 1672ebcddc72SAlan Cox 16739452b5edSAlan Cox /* 1674f095d1bbSAlan Cox * Wake up the swapout daemon if we didn't free the targeted number of 1675f095d1bbSAlan Cox * pages. 16769452b5edSAlan Cox */ 1677ac04195bSKonstantin Belousov if (page_shortage > 0) 1678ac04195bSKonstantin Belousov vm_swapout_run(); 16799452b5edSAlan Cox 16809452b5edSAlan Cox /* 168176386c7eSKonstantin Belousov * If the inactive queue scan fails repeatedly to meet its 168276386c7eSKonstantin Belousov * target, kill the largest process. 168376386c7eSKonstantin Belousov */ 168476386c7eSKonstantin Belousov vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage); 168576386c7eSKonstantin Belousov 168676386c7eSKonstantin Belousov /* 1687be37ee79SMark Johnston * Reclaim pages by swapping out idle processes, if configured to do so. 16881c7c3c6aSMatthew Dillon */ 1689ac04195bSKonstantin Belousov vm_swapout_run_idle(); 1690be37ee79SMark Johnston 1691be37ee79SMark Johnston /* 1692be37ee79SMark Johnston * See the description of addl_page_shortage above. 1693be37ee79SMark Johnston */ 1694be37ee79SMark Johnston *addl_shortage = addl_page_shortage + deficit; 1695be37ee79SMark Johnston 1696e57dd910SAlan Cox return (page_shortage <= 0); 16972025d69bSKonstantin Belousov } 16982025d69bSKonstantin Belousov 1699449c2e92SKonstantin Belousov static int vm_pageout_oom_vote; 1700449c2e92SKonstantin Belousov 1701449c2e92SKonstantin Belousov /* 1702449c2e92SKonstantin Belousov * The pagedaemon threads randomly select one to perform the 1703449c2e92SKonstantin Belousov * OOM. Trying to kill processes before all pagedaemons 1704449c2e92SKonstantin Belousov * have failed to reach the free page target is premature. 
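 *
 * For example, with a hypothetical vm_ndomains of 4, the kill is
 * initiated by whichever domain's pagedaemon casts the final vote,
 * i.e. the one whose atomic_fetchadd_int(&vm_pageout_oom_vote, 1)
 * call in vm_pageout_mightbe_oom() below returns vm_ndomains - 1 == 3.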
1705449c2e92SKonstantin Belousov */ 1706449c2e92SKonstantin Belousov static void 170776386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 170876386c7eSKonstantin Belousov int starting_page_shortage) 1709449c2e92SKonstantin Belousov { 1710449c2e92SKonstantin Belousov int old_vote; 1711449c2e92SKonstantin Belousov 171276386c7eSKonstantin Belousov if (starting_page_shortage <= 0 || starting_page_shortage != 171376386c7eSKonstantin Belousov page_shortage) 171476386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 171576386c7eSKonstantin Belousov else 171676386c7eSKonstantin Belousov vmd->vmd_oom_seq++; 171776386c7eSKonstantin Belousov if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { 1718449c2e92SKonstantin Belousov if (vmd->vmd_oom) { 1719449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1720449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1721449c2e92SKonstantin Belousov } 1722449c2e92SKonstantin Belousov return; 1723449c2e92SKonstantin Belousov } 1724449c2e92SKonstantin Belousov 172576386c7eSKonstantin Belousov /* 172676386c7eSKonstantin Belousov * Do not follow the call sequence until OOM condition is 172776386c7eSKonstantin Belousov * cleared. 172876386c7eSKonstantin Belousov */ 172976386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 173076386c7eSKonstantin Belousov 1731449c2e92SKonstantin Belousov if (vmd->vmd_oom) 1732449c2e92SKonstantin Belousov return; 1733449c2e92SKonstantin Belousov 1734449c2e92SKonstantin Belousov vmd->vmd_oom = TRUE; 1735449c2e92SKonstantin Belousov old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); 1736449c2e92SKonstantin Belousov if (old_vote != vm_ndomains - 1) 1737449c2e92SKonstantin Belousov return; 1738449c2e92SKonstantin Belousov 1739449c2e92SKonstantin Belousov /* 1740449c2e92SKonstantin Belousov * The current pagedaemon thread is the last in the quorum to 1741449c2e92SKonstantin Belousov * start OOM. Initiate the selection and signaling of the 1742449c2e92SKonstantin Belousov * victim. 1743449c2e92SKonstantin Belousov */ 1744449c2e92SKonstantin Belousov vm_pageout_oom(VM_OOM_MEM); 1745449c2e92SKonstantin Belousov 1746449c2e92SKonstantin Belousov /* 1747449c2e92SKonstantin Belousov * After one round of OOM terror, recall our vote. On the 1748449c2e92SKonstantin Belousov * next pass, current pagedaemon would vote again if the low 1749449c2e92SKonstantin Belousov * memory condition is still there, due to vmd_oom being 1750449c2e92SKonstantin Belousov * false. 1751449c2e92SKonstantin Belousov */ 1752449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1753449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1754449c2e92SKonstantin Belousov } 17552025d69bSKonstantin Belousov 17563949873fSKonstantin Belousov /* 17573949873fSKonstantin Belousov * The OOM killer is the page daemon's action of last resort when 17583949873fSKonstantin Belousov * memory allocation requests have been stalled for a prolonged period 17593949873fSKonstantin Belousov * of time because it cannot reclaim memory. This function computes 17603949873fSKonstantin Belousov * the approximate number of physical pages that could be reclaimed if 17613949873fSKonstantin Belousov * the specified address space is destroyed. 17623949873fSKonstantin Belousov * 17633949873fSKonstantin Belousov * Private, anonymous memory owned by the address space is the 17643949873fSKonstantin Belousov * principal resource that we expect to recover after an OOM kill. 
17653949873fSKonstantin Belousov * Since the physical pages mapped by the address space's COW entries 17663949873fSKonstantin Belousov * are typically shared pages, they are unlikely to be released and so 17673949873fSKonstantin Belousov * they are not counted. 17683949873fSKonstantin Belousov * 17693949873fSKonstantin Belousov * To get to the point where the page daemon runs the OOM killer, its 17703949873fSKonstantin Belousov * efforts to write-back vnode-backed pages may have stalled. This 17713949873fSKonstantin Belousov * could be caused by a memory allocation deadlock in the write path 17723949873fSKonstantin Belousov * that might be resolved by an OOM kill. Therefore, physical pages 17733949873fSKonstantin Belousov * belonging to vnode-backed objects are counted, because they might 17743949873fSKonstantin Belousov * be freed without being written out first if the address space holds 17753949873fSKonstantin Belousov * the last reference to an unlinked vnode. 17763949873fSKonstantin Belousov * 17773949873fSKonstantin Belousov * Similarly, physical pages belonging to OBJT_PHYS objects are 17783949873fSKonstantin Belousov * counted because the address space might hold the last reference to 17793949873fSKonstantin Belousov * the object. 17803949873fSKonstantin Belousov */ 17813949873fSKonstantin Belousov static long 17823949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace) 17833949873fSKonstantin Belousov { 17843949873fSKonstantin Belousov vm_map_t map; 17853949873fSKonstantin Belousov vm_map_entry_t entry; 17863949873fSKonstantin Belousov vm_object_t obj; 17873949873fSKonstantin Belousov long res; 17883949873fSKonstantin Belousov 17893949873fSKonstantin Belousov map = &vmspace->vm_map; 17903949873fSKonstantin Belousov KASSERT(!map->system_map, ("system map")); 17913949873fSKonstantin Belousov sx_assert(&map->lock, SA_LOCKED); 17923949873fSKonstantin Belousov res = 0; 17932288078cSDoug Moore VM_MAP_ENTRY_FOREACH(entry, map) { 17943949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 17953949873fSKonstantin Belousov continue; 17963949873fSKonstantin Belousov obj = entry->object.vm_object; 17973949873fSKonstantin Belousov if (obj == NULL) 17983949873fSKonstantin Belousov continue; 17993949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 18003949873fSKonstantin Belousov obj->ref_count != 1) 18013949873fSKonstantin Belousov continue; 18023949873fSKonstantin Belousov switch (obj->type) { 18033949873fSKonstantin Belousov case OBJT_DEFAULT: 18043949873fSKonstantin Belousov case OBJT_SWAP: 18053949873fSKonstantin Belousov case OBJT_PHYS: 18063949873fSKonstantin Belousov case OBJT_VNODE: 18073949873fSKonstantin Belousov res += obj->resident_page_count; 18083949873fSKonstantin Belousov break; 18093949873fSKonstantin Belousov } 18103949873fSKonstantin Belousov } 18113949873fSKonstantin Belousov return (res); 18123949873fSKonstantin Belousov } 18133949873fSKonstantin Belousov 1814245139c6SKonstantin Belousov static int vm_oom_ratelim_last; 1815245139c6SKonstantin Belousov static int vm_oom_pf_secs = 10; 1816245139c6SKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0, 1817245139c6SKonstantin Belousov ""); 1818245139c6SKonstantin Belousov static struct mtx vm_oom_ratelim_mtx; 1819245139c6SKonstantin Belousov 18202025d69bSKonstantin Belousov void 18212025d69bSKonstantin Belousov vm_pageout_oom(int shortage) 18222025d69bSKonstantin Belousov { 18232025d69bSKonstantin Belousov struct proc 
*p, *bigproc; 18242025d69bSKonstantin Belousov vm_offset_t size, bigsize; 18252025d69bSKonstantin Belousov struct thread *td; 18266bed074cSKonstantin Belousov struct vmspace *vm; 1827245139c6SKonstantin Belousov int now; 18283e78e983SAlan Cox bool breakout; 18292025d69bSKonstantin Belousov 18302025d69bSKonstantin Belousov /* 1831245139c6SKonstantin Belousov * For OOM requests originating from vm_fault(), there is a high 1832245139c6SKonstantin Belousov * chance that a single large process faults simultaneously in 1833245139c6SKonstantin Belousov * several threads. Also, on an active system running many 1834245139c6SKonstantin Belousov * processes of middle-size, like buildworld, all of them 1835245139c6SKonstantin Belousov * could fault almost simultaneously as well. 1836245139c6SKonstantin Belousov * 1837245139c6SKonstantin Belousov * To avoid killing too many processes, rate-limit OOMs 1838245139c6SKonstantin Belousov * initiated by vm_fault() time-outs on the waits for free 1839245139c6SKonstantin Belousov * pages. 1840245139c6SKonstantin Belousov */ 1841245139c6SKonstantin Belousov mtx_lock(&vm_oom_ratelim_mtx); 1842245139c6SKonstantin Belousov now = ticks; 1843245139c6SKonstantin Belousov if (shortage == VM_OOM_MEM_PF && 1844245139c6SKonstantin Belousov (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) { 1845245139c6SKonstantin Belousov mtx_unlock(&vm_oom_ratelim_mtx); 1846245139c6SKonstantin Belousov return; 1847245139c6SKonstantin Belousov } 1848245139c6SKonstantin Belousov vm_oom_ratelim_last = now; 1849245139c6SKonstantin Belousov mtx_unlock(&vm_oom_ratelim_mtx); 1850245139c6SKonstantin Belousov 1851245139c6SKonstantin Belousov /* 18521c58e4e5SJohn Baldwin * We keep the process bigproc locked once we find it to keep anyone 18531c58e4e5SJohn Baldwin * from messing with it; however, there is a possibility of 185428323addSBryan Drewery * deadlock if process B is bigproc and one of its child processes 18551c58e4e5SJohn Baldwin * attempts to propagate a signal to B while we are waiting for A's 18561c58e4e5SJohn Baldwin * lock while walking this list. To avoid this, we don't block on 18571c58e4e5SJohn Baldwin * the process lock but just skip a process if it is already locked. 18585663e6deSDavid Greenman */ 18595663e6deSDavid Greenman bigproc = NULL; 18605663e6deSDavid Greenman bigsize = 0; 18611005a129SJohn Baldwin sx_slock(&allproc_lock); 1862e602ba25SJulian Elischer FOREACH_PROC_IN_SYSTEM(p) { 186371943c3dSKonstantin Belousov PROC_LOCK(p); 186471943c3dSKonstantin Belousov 18651c58e4e5SJohn Baldwin /* 18663f1c4c4fSKonstantin Belousov * If this is a system, protected or killed process, skip it. 18675663e6deSDavid Greenman */ 186871943c3dSKonstantin Belousov if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 186971943c3dSKonstantin Belousov P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 187071943c3dSKonstantin Belousov p->p_pid == 1 || P_KILLED(p) || 187171943c3dSKonstantin Belousov (p->p_pid < 48 && swap_pager_avail != 0)) { 18728606d880SJohn Baldwin PROC_UNLOCK(p); 18735663e6deSDavid Greenman continue; 18745663e6deSDavid Greenman } 18755663e6deSDavid Greenman /* 1876dcbcd518SBruce Evans * If the process is in a non-running type state, 1877e602ba25SJulian Elischer * don't touch it. Check all the threads individually. 
18785663e6deSDavid Greenman */ 18793e78e983SAlan Cox breakout = false; 1880e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1881982d11f8SJeff Roberson thread_lock(td); 188271fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) && 188371fad9fdSJulian Elischer !TD_IS_RUNNING(td) && 1884f497cda2SEdward Tomasz Napierala !TD_IS_SLEEPING(td) && 1885b98acc0aSKonstantin Belousov !TD_IS_SUSPENDED(td) && 1886b98acc0aSKonstantin Belousov !TD_IS_SWAPPED(td)) { 1887982d11f8SJeff Roberson thread_unlock(td); 18883e78e983SAlan Cox breakout = true; 1889e602ba25SJulian Elischer break; 1890e602ba25SJulian Elischer } 1891982d11f8SJeff Roberson thread_unlock(td); 1892e602ba25SJulian Elischer } 1893e602ba25SJulian Elischer if (breakout) { 18941c58e4e5SJohn Baldwin PROC_UNLOCK(p); 18955663e6deSDavid Greenman continue; 18965663e6deSDavid Greenman } 18975663e6deSDavid Greenman /* 18985663e6deSDavid Greenman * get the process size 18995663e6deSDavid Greenman */ 19006bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p); 19016bed074cSKonstantin Belousov if (vm == NULL) { 19026bed074cSKonstantin Belousov PROC_UNLOCK(p); 19036bed074cSKonstantin Belousov continue; 19046bed074cSKonstantin Belousov } 190595e2409aSKonstantin Belousov _PHOLD_LITE(p); 190672d97679SDavid Schultz PROC_UNLOCK(p); 190795e2409aSKonstantin Belousov sx_sunlock(&allproc_lock); 190895e2409aSKonstantin Belousov if (!vm_map_trylock_read(&vm->vm_map)) { 190971943c3dSKonstantin Belousov vmspace_free(vm); 191095e2409aSKonstantin Belousov sx_slock(&allproc_lock); 191195e2409aSKonstantin Belousov PRELE(p); 191272d97679SDavid Schultz continue; 191372d97679SDavid Schultz } 19147981aa24SKonstantin Belousov size = vmspace_swap_count(vm); 1915245139c6SKonstantin Belousov if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF) 19163949873fSKonstantin Belousov size += vm_pageout_oom_pagecount(vm); 19173949873fSKonstantin Belousov vm_map_unlock_read(&vm->vm_map); 19186bed074cSKonstantin Belousov vmspace_free(vm); 191995e2409aSKonstantin Belousov sx_slock(&allproc_lock); 19203949873fSKonstantin Belousov 19215663e6deSDavid Greenman /* 19223949873fSKonstantin Belousov * If this process is bigger than the biggest one, 19235663e6deSDavid Greenman * remember it. 19245663e6deSDavid Greenman */ 19255663e6deSDavid Greenman if (size > bigsize) { 19261c58e4e5SJohn Baldwin if (bigproc != NULL) 192771943c3dSKonstantin Belousov PRELE(bigproc); 19285663e6deSDavid Greenman bigproc = p; 19295663e6deSDavid Greenman bigsize = size; 193071943c3dSKonstantin Belousov } else { 193171943c3dSKonstantin Belousov PRELE(p); 193271943c3dSKonstantin Belousov } 19335663e6deSDavid Greenman } 19341005a129SJohn Baldwin sx_sunlock(&allproc_lock); 19355663e6deSDavid Greenman if (bigproc != NULL) { 19363c200db9SJonathan T. Looney if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0) 19378311a2b8SWill Andrews panic("out of swap space"); 193871943c3dSKonstantin Belousov PROC_LOCK(bigproc); 1939729b1e51SDavid Greenman killproc(bigproc, "out of swap space"); 1940fa885116SJulian Elischer sched_nice(bigproc, PRIO_MIN); 194171943c3dSKonstantin Belousov _PRELE(bigproc); 19421c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc); 19435663e6deSDavid Greenman } 19445663e6deSDavid Greenman } 194526f9a767SRodney W. Grimes 19468fc25508SMark Johnston /* 19478fc25508SMark Johnston * Signal a free page shortage to subsystems that have registered an event 19488fc25508SMark Johnston * handler. Reclaim memory from UMA in the event of a severe shortage. 
19498fc25508SMark Johnston * Return true if the free page count should be re-evaluated. 19508fc25508SMark Johnston */ 1951b50a4ea6SMark Johnston static bool 1952b50a4ea6SMark Johnston vm_pageout_lowmem(void) 195349a3710cSMark Johnston { 1954b50a4ea6SMark Johnston static int lowmem_ticks = 0; 1955b50a4ea6SMark Johnston int last; 19568fc25508SMark Johnston bool ret; 19578fc25508SMark Johnston 19588fc25508SMark Johnston ret = false; 195949a3710cSMark Johnston 1960b50a4ea6SMark Johnston last = atomic_load_int(&lowmem_ticks); 1961b50a4ea6SMark Johnston while ((u_int)(ticks - last) / hz >= lowmem_period) { 1962b50a4ea6SMark Johnston if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0) 1963b50a4ea6SMark Johnston continue; 1964b50a4ea6SMark Johnston 196549a3710cSMark Johnston /* 196649a3710cSMark Johnston * Decrease registered cache sizes. 196749a3710cSMark Johnston */ 196849a3710cSMark Johnston SDT_PROBE0(vm, , , vm__lowmem_scan); 196949a3710cSMark Johnston EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES); 197049a3710cSMark Johnston 197149a3710cSMark Johnston /* 197249a3710cSMark Johnston * We do this explicitly after the caches have been 19738fc25508SMark Johnston * drained above. 197449a3710cSMark Johnston */ 19758fc25508SMark Johnston uma_reclaim(UMA_RECLAIM_TRIM); 19768fc25508SMark Johnston ret = true; 1977ace409ceSAlexander Motin break; 197849a3710cSMark Johnston } 19798fc25508SMark Johnston 19808fc25508SMark Johnston /* 19818fc25508SMark Johnston * Kick off an asynchronous reclaim of cached memory if one of the 19828fc25508SMark Johnston * page daemons is failing to keep up with demand. Use the "severe" 19838fc25508SMark Johnston * threshold instead of "min" to ensure that we do not blow away the 19848fc25508SMark Johnston * caches if a subset of the NUMA domains are depleted by kernel memory 19858fc25508SMark Johnston * allocations; the domainset iterators automatically skip domains 19868fc25508SMark Johnston * below the "min" threshold on the first pass. 19878fc25508SMark Johnston * 19888fc25508SMark Johnston * UMA reclaim worker has its own rate-limiting mechanism, so don't 19898fc25508SMark Johnston * worry about kicking it too often. 19908fc25508SMark Johnston */ 19918fc25508SMark Johnston if (vm_page_count_severe()) 19928fc25508SMark Johnston uma_reclaim_wakeup(); 19938fc25508SMark Johnston 19948fc25508SMark Johnston return (ret); 199549a3710cSMark Johnston } 199649a3710cSMark Johnston 199749a3710cSMark Johnston static void 1998449c2e92SKonstantin Belousov vm_pageout_worker(void *arg) 1999449c2e92SKonstantin Belousov { 2000e2068d0bSJeff Roberson struct vm_domain *vmd; 2001b50a4ea6SMark Johnston u_int ofree; 200249a3710cSMark Johnston int addl_shortage, domain, shortage; 2003e57dd910SAlan Cox bool target_met; 2004449c2e92SKonstantin Belousov 2005e2068d0bSJeff Roberson domain = (uintptr_t)arg; 2006e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 20075f8cd1c0SJeff Roberson shortage = 0; 2008e57dd910SAlan Cox target_met = true; 2009449c2e92SKonstantin Belousov 2010449c2e92SKonstantin Belousov /* 2011949c9186SKonstantin Belousov * XXXKIB It could be useful to bind pageout daemon threads to 2012949c9186SKonstantin Belousov * the cores belonging to the domain, from which vm_page_array 2013949c9186SKonstantin Belousov * is allocated. 
2014449c2e92SKonstantin Belousov */ 2015449c2e92SKonstantin Belousov 2016e2068d0bSJeff Roberson KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 2017e2068d0bSJeff Roberson vmd->vmd_last_active_scan = ticks; 2018449c2e92SKonstantin Belousov 2019449c2e92SKonstantin Belousov /* 2020449c2e92SKonstantin Belousov * The pageout daemon worker is never done, so loop forever. 2021449c2e92SKonstantin Belousov */ 2022449c2e92SKonstantin Belousov while (TRUE) { 202330fbfddaSJeff Roberson vm_domain_pageout_lock(vmd); 202449a3710cSMark Johnston 202530fbfddaSJeff Roberson /* 202630fbfddaSJeff Roberson * We need to clear wanted before we check the limits. This 202730fbfddaSJeff Roberson * prevents races with wakers who will check wanted after they 202830fbfddaSJeff Roberson * reach the limit. 202930fbfddaSJeff Roberson */ 203030fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 0); 203156ce0690SAlan Cox 203256ce0690SAlan Cox /* 20335f8cd1c0SJeff Roberson * Might the page daemon need to run again? 2034449c2e92SKonstantin Belousov */ 20355f8cd1c0SJeff Roberson if (vm_paging_needed(vmd, vmd->vmd_free_count)) { 203656ce0690SAlan Cox /* 203749a3710cSMark Johnston * Yes. If the scan failed to produce enough free 203849a3710cSMark Johnston * pages, sleep uninterruptibly for some time in the 203949a3710cSMark Johnston * hope that the laundry thread will clean some pages. 204056ce0690SAlan Cox */ 204130fbfddaSJeff Roberson vm_domain_pageout_unlock(vmd); 204249a3710cSMark Johnston if (!target_met) 20436eebec83SMark Johnston pause("pwait", hz / VM_INACT_SCAN_RATE); 2044449c2e92SKonstantin Belousov } else { 2045449c2e92SKonstantin Belousov /* 20465f8cd1c0SJeff Roberson * No, sleep until the next wakeup or until pages 20475f8cd1c0SJeff Roberson * need to have their reference stats updated. 2048449c2e92SKonstantin Belousov */ 20492c0f13aaSKonstantin Belousov if (mtx_sleep(&vmd->vmd_pageout_wanted, 205030fbfddaSJeff Roberson vm_domain_pageout_lockptr(vmd), PDROP | PVM, 20515f8cd1c0SJeff Roberson "psleep", hz / VM_INACT_SCAN_RATE) == 0) 205283c9dea1SGleb Smirnoff VM_CNT_INC(v_pdwakeups); 205356ce0690SAlan Cox } 2054be37ee79SMark Johnston 205530fbfddaSJeff Roberson /* Prevent spurious wakeups by ensuring that wanted is set. */ 205630fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 1); 205730fbfddaSJeff Roberson 205830fbfddaSJeff Roberson /* 205930fbfddaSJeff Roberson * Use the controller to calculate how many pages to free in 2060b50a4ea6SMark Johnston * this interval, and scan the inactive queue. If the lowmem 2061b50a4ea6SMark Johnston * handlers appear to have freed up some pages, subtract the 2062b50a4ea6SMark Johnston * difference from the inactive queue scan target. 206330fbfddaSJeff Roberson */ 20645f8cd1c0SJeff Roberson shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count); 206549a3710cSMark Johnston if (shortage > 0) { 2066b50a4ea6SMark Johnston ofree = vmd->vmd_free_count; 2067b50a4ea6SMark Johnston if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree) 2068b50a4ea6SMark Johnston shortage -= min(vmd->vmd_free_count - ofree, 2069b50a4ea6SMark Johnston (u_int)shortage); 207049a3710cSMark Johnston target_met = vm_pageout_scan_inactive(vmd, shortage, 2071be37ee79SMark Johnston &addl_shortage); 207249a3710cSMark Johnston } else 207349a3710cSMark Johnston addl_shortage = 0; 207456ce0690SAlan Cox 2075be37ee79SMark Johnston /* 2076be37ee79SMark Johnston * Scan the active queue. 
A positive value for shortage 2077be37ee79SMark Johnston * indicates that we must aggressively deactivate pages to avoid 2078be37ee79SMark Johnston * a shortfall. 2079be37ee79SMark Johnston */ 20807bb4634eSMark Johnston shortage = vm_pageout_active_target(vmd) + addl_shortage; 2081be37ee79SMark Johnston vm_pageout_scan_active(vmd, shortage); 2082449c2e92SKonstantin Belousov } 2083449c2e92SKonstantin Belousov } 2084449c2e92SKonstantin Belousov 2085df8bae1dSRodney W. Grimes /* 20869c770a27SMark Johnston * Initialize basic pageout daemon settings. See the comment above the 20879c770a27SMark Johnston * definition of vm_domain for some explanation of how these thresholds are 20889c770a27SMark Johnston * used. 2089df8bae1dSRodney W. Grimes */ 20902b14f991SJulian Elischer static void 2091e2068d0bSJeff Roberson vm_pageout_init_domain(int domain) 2092df8bae1dSRodney W. Grimes { 2093e2068d0bSJeff Roberson struct vm_domain *vmd; 20945f8cd1c0SJeff Roberson struct sysctl_oid *oid; 2095e2068d0bSJeff Roberson 2096e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 2097e2068d0bSJeff Roberson vmd->vmd_interrupt_free_min = 2; 2098f6b04d2bSDavid Greenman 209945ae1d91SAlan Cox /* 210045ae1d91SAlan Cox * v_free_reserved needs to include enough for the largest 210145ae1d91SAlan Cox * swap pager structures plus enough for any pv_entry structs 210245ae1d91SAlan Cox * when paging. 210345ae1d91SAlan Cox */ 21040cab71bcSDoug Moore vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE + 2105e2068d0bSJeff Roberson vmd->vmd_interrupt_free_min; 2106e2068d0bSJeff Roberson vmd->vmd_free_reserved = vm_pageout_page_count + 21079c770a27SMark Johnston vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768; 21089c770a27SMark Johnston vmd->vmd_free_min = vmd->vmd_page_count / 200; 2109e2068d0bSJeff Roberson vmd->vmd_free_severe = vmd->vmd_free_min / 2; 2110e2068d0bSJeff Roberson vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved; 2111e2068d0bSJeff Roberson vmd->vmd_free_min += vmd->vmd_free_reserved; 2112e2068d0bSJeff Roberson vmd->vmd_free_severe += vmd->vmd_free_reserved; 2113e2068d0bSJeff Roberson vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2; 2114e2068d0bSJeff Roberson if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3) 2115e2068d0bSJeff Roberson vmd->vmd_inactive_target = vmd->vmd_free_count / 3; 2116df8bae1dSRodney W. Grimes 2117d9e23210SJeff Roberson /* 21185f8cd1c0SJeff Roberson * Set the default wakeup threshold to be 10% below the paging 21195f8cd1c0SJeff Roberson * target. This keeps the steady state out of shortfall. 2120d9e23210SJeff Roberson */ 21215f8cd1c0SJeff Roberson vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9; 2122e2068d0bSJeff Roberson 2123e2068d0bSJeff Roberson /* 2124e2068d0bSJeff Roberson * Target amount of memory to move out of the laundry queue during a 2125e2068d0bSJeff Roberson * background laundering. This is proportional to the amount of system 2126e2068d0bSJeff Roberson * memory. 2127e2068d0bSJeff Roberson */ 2128e2068d0bSJeff Roberson vmd->vmd_background_launder_target = (vmd->vmd_free_target - 2129e2068d0bSJeff Roberson vmd->vmd_free_min) / 10; 21305f8cd1c0SJeff Roberson 21315f8cd1c0SJeff Roberson /* Initialize the pageout daemon pid controller. 
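 * Its setpoint is vmd_free_target and its sampling interval is
 * hz / VM_INACT_SCAN_RATE ticks; each interval, pidctrl_daemon() in
 * vm_pageout_worker() above converts the deviation of vmd_free_count
 * from that setpoint into the page shortage handed to the inactive
 * queue scan.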
*/ 21325f8cd1c0SJeff Roberson pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE, 21335f8cd1c0SJeff Roberson vmd->vmd_free_target, PIDCTRL_BOUND, 21345f8cd1c0SJeff Roberson PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD); 21355f8cd1c0SJeff Roberson oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO, 2136*7029da5cSPawel Biernacki "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 21375f8cd1c0SJeff Roberson pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid)); 2138e2068d0bSJeff Roberson } 2139e2068d0bSJeff Roberson 2140e2068d0bSJeff Roberson static void 2141e2068d0bSJeff Roberson vm_pageout_init(void) 2142e2068d0bSJeff Roberson { 2143e2068d0bSJeff Roberson u_int freecount; 2144e2068d0bSJeff Roberson int i; 2145e2068d0bSJeff Roberson 2146e2068d0bSJeff Roberson /* 2147e2068d0bSJeff Roberson * Initialize some paging parameters. 2148e2068d0bSJeff Roberson */ 2149e2068d0bSJeff Roberson if (vm_cnt.v_page_count < 2000) 2150e2068d0bSJeff Roberson vm_pageout_page_count = 8; 2151e2068d0bSJeff Roberson 2152e2068d0bSJeff Roberson freecount = 0; 2153e2068d0bSJeff Roberson for (i = 0; i < vm_ndomains; i++) { 2154e2068d0bSJeff Roberson struct vm_domain *vmd; 2155e2068d0bSJeff Roberson 2156e2068d0bSJeff Roberson vm_pageout_init_domain(i); 2157e2068d0bSJeff Roberson vmd = VM_DOMAIN(i); 2158e2068d0bSJeff Roberson vm_cnt.v_free_reserved += vmd->vmd_free_reserved; 2159e2068d0bSJeff Roberson vm_cnt.v_free_target += vmd->vmd_free_target; 2160e2068d0bSJeff Roberson vm_cnt.v_free_min += vmd->vmd_free_min; 2161e2068d0bSJeff Roberson vm_cnt.v_inactive_target += vmd->vmd_inactive_target; 2162e2068d0bSJeff Roberson vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min; 2163e2068d0bSJeff Roberson vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min; 2164e2068d0bSJeff Roberson vm_cnt.v_free_severe += vmd->vmd_free_severe; 2165e2068d0bSJeff Roberson freecount += vmd->vmd_free_count; 2166e2068d0bSJeff Roberson } 2167d9e23210SJeff Roberson 2168d9e23210SJeff Roberson /* 2169d9e23210SJeff Roberson * Set interval in seconds for active scan. We want to visit each 2170c9612b2dSJeff Roberson * page at least once every ten minutes. This is to prevent worst 2171c9612b2dSJeff Roberson * case paging behaviors with stale active LRU. 2172d9e23210SJeff Roberson */ 2173d9e23210SJeff Roberson if (vm_pageout_update_period == 0) 2174c9612b2dSJeff Roberson vm_pageout_update_period = 600; 2175d9e23210SJeff Roberson 217654a3a114SMark Johnston if (vm_page_max_user_wired == 0) 217754a3a114SMark Johnston vm_page_max_user_wired = freecount / 3; 21784d19f4adSSteven Hartland } 21794d19f4adSSteven Hartland 21804d19f4adSSteven Hartland /* 21814d19f4adSSteven Hartland * vm_pageout is the high level pageout daemon. 21824d19f4adSSteven Hartland */ 21834d19f4adSSteven Hartland static void 21844d19f4adSSteven Hartland vm_pageout(void) 21854d19f4adSSteven Hartland { 2186920239efSMark Johnston struct proc *p; 2187920239efSMark Johnston struct thread *td; 2188920239efSMark Johnston int error, first, i; 2189920239efSMark Johnston 2190920239efSMark Johnston p = curproc; 2191920239efSMark Johnston td = curthread; 2192df8bae1dSRodney W. 
Grimes 2193245139c6SKonstantin Belousov mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF); 219424a1cce3SDavid Greenman swap_pager_swap_init(); 2195920239efSMark Johnston for (first = -1, i = 0; i < vm_ndomains; i++) { 219630c5525bSAndrew Gallatin if (VM_DOMAIN_EMPTY(i)) { 219730c5525bSAndrew Gallatin if (bootverbose) 219830c5525bSAndrew Gallatin printf("domain %d empty; skipping pageout\n", 219930c5525bSAndrew Gallatin i); 220030c5525bSAndrew Gallatin continue; 220130c5525bSAndrew Gallatin } 2202920239efSMark Johnston if (first == -1) 2203920239efSMark Johnston first = i; 2204920239efSMark Johnston else { 2205920239efSMark Johnston error = kthread_add(vm_pageout_worker, 2206920239efSMark Johnston (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i); 2207920239efSMark Johnston if (error != 0) 2208920239efSMark Johnston panic("starting pageout for domain %d: %d\n", 2209449c2e92SKonstantin Belousov i, error); 2210dc2efb27SJohn Dyson } 2211e2068d0bSJeff Roberson error = kthread_add(vm_pageout_laundry_worker, 2212920239efSMark Johnston (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i); 2213e2068d0bSJeff Roberson if (error != 0) 2214920239efSMark Johnston panic("starting laundry for domain %d: %d", i, error); 2215f919ebdeSDavid Greenman } 2216920239efSMark Johnston error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma"); 221744ec2b63SKonstantin Belousov if (error != 0) 221844ec2b63SKonstantin Belousov panic("starting uma_reclaim helper, error %d\n", error); 2219920239efSMark Johnston 2220920239efSMark Johnston snprintf(td->td_name, sizeof(td->td_name), "dom%d", first); 2221920239efSMark Johnston vm_pageout_worker((void *)(uintptr_t)first); 2222df8bae1dSRodney W. Grimes } 222326f9a767SRodney W. Grimes 22246b4b77adSAlan Cox /* 2225280d15cdSMark Johnston * Perform an advisory wakeup of the page daemon. 22266b4b77adSAlan Cox */ 2227e0c5a895SJohn Dyson void 2228e2068d0bSJeff Roberson pagedaemon_wakeup(int domain) 2229e0c5a895SJohn Dyson { 2230e2068d0bSJeff Roberson struct vm_domain *vmd; 2231a1c0a785SAlan Cox 2232e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 223330fbfddaSJeff Roberson vm_domain_pageout_assert_unlocked(vmd); 223430fbfddaSJeff Roberson if (curproc == pageproc) 223530fbfddaSJeff Roberson return; 2236280d15cdSMark Johnston 223730fbfddaSJeff Roberson if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) { 223830fbfddaSJeff Roberson vm_domain_pageout_lock(vmd); 223930fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 1); 2240e2068d0bSJeff Roberson wakeup(&vmd->vmd_pageout_wanted); 224130fbfddaSJeff Roberson vm_domain_pageout_unlock(vmd); 2242e0c5a895SJohn Dyson } 2243e0c5a895SJohn Dyson } 2244