/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

/* Pagedaemon activity rates, in subdivisions of one second. */
#define	VM_LAUNDER_RATE		10
#define	VM_INACT_SCAN_RATE	10

static int vm_pageout_oom_seq = 12;

static int vm_pageout_update_period;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static int swapdev_enabled;

static int vm_panic_on_oom = 0;

SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
    CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
    "panic on out of memory instead of killing the largest process");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
    CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
    "Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
    "Low memory callback period");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
    CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
    CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
    CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
    "back-to-back calls to oom detector to start OOM");

static int act_scan_laundry_weight = 3;
SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
    &act_scan_laundry_weight, 0,
    "weight given to clean vs. dirty pages in active queue scans");

static u_int vm_background_launder_rate = 4096;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
    &vm_background_launder_rate, 0,
    "background laundering rate, in kilobytes per second");

static u_int vm_background_launder_max = 20 * 1024;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
    &vm_background_launder_max, 0, "background laundering cap, in kilobytes");

int vm_pageout_page_count = 32;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
    CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

static u_int isqrt(u_int num);
static int vm_pageout_launder(struct vm_domain *vmd, int launder,
    bool in_shortfall);
static void vm_pageout_laundry_worker(void *arg);

struct scan_state {
	struct vm_batchqueue bq;
	struct vm_pagequeue *pq;
	vm_page_t	marker;
	int		maxscan;
	int		scanned;
};

static void
vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
    vm_page_t marker, vm_page_t after, int maxscan)
{

	vm_pagequeue_assert_locked(pq);
	KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
	    ("marker %p already enqueued", marker));

	if (after == NULL)
		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
	else
		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
	vm_page_aflag_set(marker, PGA_ENQUEUED);

	vm_batchqueue_init(&ss->bq);
	ss->pq = pq;
	ss->marker = marker;
	ss->maxscan = maxscan;
	ss->scanned = 0;
	vm_pagequeue_unlock(pq);
}

static void
vm_pageout_end_scan(struct scan_state *ss)
{
	struct vm_pagequeue *pq;

	pq = ss->pq;
	vm_pagequeue_assert_locked(pq);
	KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
	pq->pq_pdpages += ss->scanned;
}

/*
 * Add a small number of queued pages to a batch queue for later processing
 * without the corresponding queue lock held.  The caller must have enqueued a
 * marker page at the desired start point for the scan.  Pages will be
 * physically dequeued if the caller so requests.  Otherwise, the returned
 * batch may contain marker pages, and it is up to the caller to handle them.
 *
 * When processing the batch queue, vm_page_queue() must be used to
 * determine whether the page has been logically dequeued by another thread.
 * Once this check is performed, the page lock guarantees that the page will
 * not be disassociated from the queue.
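 *
 * A typical scan, modeled on the laundry and active queue scans below,
 * is driven as follows.  Note that vm_pageout_init_scan() itself drops
 * the queue lock, which is reacquired only to end the scan:
 *
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *	while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *		... revalidate m's queue state, then process it ...
 *	}
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_end_scan(&ss);
 *	vm_pagequeue_unlock(pq);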
 */
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
{
	struct vm_pagequeue *pq;
	vm_page_t m, marker;

	marker = ss->marker;
	pq = ss->pq;

	KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	vm_pagequeue_lock(pq);
	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
	    m = TAILQ_NEXT(m, plinks.q), ss->scanned++) {
		if ((m->flags & PG_MARKER) == 0) {
			KASSERT((m->aflags & PGA_ENQUEUED) != 0,
			    ("page %p not enqueued", m));
			KASSERT((m->flags & PG_FICTITIOUS) == 0,
			    ("Fictitious page %p cannot be in page queue", m));
			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
			    ("Unmanaged page %p cannot be in page queue", m));
		} else if (dequeue)
			continue;

		(void)vm_batchqueue_insert(&ss->bq, m);
		if (dequeue) {
			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
			vm_page_aflag_clear(m, PGA_ENQUEUED);
		}
	}
	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
	if (__predict_true(m != NULL))
		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
	else
		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
	if (dequeue)
		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
	vm_pagequeue_unlock(pq);
}

/* Return the next page to be scanned, or NULL if the scan is complete. */
static __always_inline vm_page_t
vm_pageout_next(struct scan_state *ss, const bool dequeue)
{

	if (ss->bq.bq_cnt == 0)
		vm_pageout_collect_batch(ss, dequeue);
	return (vm_batchqueue_pop(&ss->bq));
}

/*
 * Scan for pages at adjacent offsets within the given page's object that are
 * eligible for laundering, form a cluster of these pages and the given page,
 * and launder that cluster.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
	vm_pindex_t pindex;
	int ib, is, page_base, pageout_count;

	vm_page_assert_locked(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	pindex = m->pindex;

	vm_page_assert_unbusied(m);
	KASSERT(!vm_page_held(m), ("page %p is held", m));

	pmap_remove_write(m);
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * We can cluster only if the page is not clean, busy, or held, and
	 * the page is in the laundry queue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib != 0 && pageout_count < vm_pageout_page_count) {
		if (ib > pindex) {
			ib = 0;
			break;
		}
		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		pmap_remove_write(p);
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;

		/*
		 * We are at an alignment boundary.  Stop here, and switch
		 * directions.  Do not clear ib.
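		 *
		 * Here (pindex - (ib - 1)) is the pindex of the page most
		 * recently prepended to the cluster, so the test below stops
		 * the reverse scan exactly when the start of the cluster is
		 * aligned on a vm_pageout_page_count boundary.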
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}
	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
			break;
		vm_page_test_dirty(p);
		if (p->dirty == 0)
			break;
		vm_page_lock(p);
		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
			vm_page_unlock(p);
			break;
		}
		pmap_remove_write(p);
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
	if (ib != 0 && pageout_count < vm_pageout_page_count)
		goto more;

	return (vm_pageout_flush(&mc[page_base], pageout_count,
	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e., busy the page), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 *	for any page in runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Mark the pages busy and verify that they're valid
	 * and read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
		    ("vm_pageout_flush: writeable page %p", mc[i]));
		vm_page_sbusy(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			vm_page_lock(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			vm_page_unlock(mt);
			/* FALLTHROUGH */
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * The page is outside the object's range.  We pretend
			 * that the page out worked and clean the page, so the
			 * changes will be lost if the page is reclaimed by
			 * the page daemon.
			 */
			vm_page_undirty(mt);
			vm_page_lock(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			vm_page_unlock(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out to swap because the
			 * pager wasn't able to find space, place the page in
			 * the PQ_UNSWAPPABLE holding queue.  This is an
			 * optimization that prevents the page daemon from
			 * wasting CPU cycles on pages that cannot be reclaimed
			 * because no swap device is configured.
			 *
			 * Otherwise, reactivate the page so that it doesn't
			 * clog the laundry and inactive queues.  (We will try
			 * paging it out again later.)
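			 *
			 * (Pages placed in PQ_UNSWAPPABLE are not lost: once a
			 * swap device is configured, vm_pageout_launder()
			 * below scans that queue first and retries them.)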
			 */
			vm_page_lock(mt);
			if (object->type == OBJT_SWAP &&
			    pageout_status[i] == VM_PAGER_FAIL) {
				vm_page_unswappable(mt);
				numpagedout++;
			} else
				vm_page_activate(mt);
			vm_page_unlock(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

	atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

	if (swap_pager_nswapdev() == 1)
		atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
static int
vm_pageout_clean(vm_page_t m, int *numpagedout)
{
	struct vnode *vp;
	struct mount *mp;
	vm_object_t object;
	vm_pindex_t pindex;
	int error, lockmode;

	vm_page_assert_locked(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;
	vp = NULL;
	mp = NULL;

	/*
	 * The object is already known NOT to be dead.  It
	 * is possible for the vget() to block the whole
	 * pageout daemon, but the new low-memory handling
	 * code should prevent it.
	 *
	 * We can't wait forever for the vnode lock; we might
	 * deadlock due to a vn_read() getting stuck in
	 * vm_wait while holding this vnode.  We skip the
	 * vnode if we can't get it in a reasonable amount
	 * of time.
	 */
	if (object->type == OBJT_VNODE) {
		vm_page_unlock(m);
		vp = object->handle;
		if (vp->v_type == VREG &&
		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			mp = NULL;
			error = EDEADLK;
			goto unlock_all;
		}
		KASSERT(mp != NULL,
		    ("vp %p with NULL v_mount", vp));
		vm_object_reference_locked(object);
		pindex = m->pindex;
		VM_OBJECT_WUNLOCK(object);
		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
		    LK_SHARED : LK_EXCLUSIVE;
		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
			vp = NULL;
			error = EDEADLK;
			goto unlock_mp;
		}
		VM_OBJECT_WLOCK(object);

		/*
		 * Ensure that the object and vnode were not disassociated
		 * while locks were dropped.
		 */
		if (vp->v_object != object) {
			error = ENOENT;
			goto unlock_all;
		}
		vm_page_lock(m);

		/*
		 * While the object and page were unlocked, the page
		 * may have been:
		 * (1) moved to a different queue,
		 * (2) reallocated to a different object,
		 * (3) reallocated to a different offset, or
		 * (4) cleaned.
		 */
		if (!vm_page_in_laundry(m) || m->object != object ||
		    m->pindex != pindex || m->dirty == 0) {
			vm_page_unlock(m);
			error = ENXIO;
			goto unlock_all;
		}

		/*
		 * The page may have been busied or referenced while the object
		 * and page locks were released.
		 */
		if (vm_page_busied(m) || vm_page_held(m)) {
			vm_page_unlock(m);
			error = EBUSY;
			goto unlock_all;
		}
	}

	/*
	 * If a page is dirty, then it is either being washed
	 * (but not yet cleaned) or it is still in the
	 * laundry.  If it is still in the laundry, then we
	 * start the cleaning operation.
	 */
	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
		error = EIO;

unlock_all:
	VM_OBJECT_WUNLOCK(object);

unlock_mp:
	vm_page_lock_assert(m, MA_NOTOWNED);
	if (mp != NULL) {
		if (vp != NULL)
			vput(vp);
		vm_object_deallocate(object);
		vn_finished_write(mp);
	}

	return (error);
}

/*
 * Attempt to launder the specified number of pages.
 *
 * Returns the number of pages successfully laundered.
 */
static int
vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
	struct scan_state ss;
	struct vm_pagequeue *pq;
	struct mtx *mtx;
	vm_object_t object;
	vm_page_t m, marker;
	int act_delta, error, numpagedout, queue, starting_target;
	int vnodes_skipped;
	bool pageout_ok;

	mtx = NULL;
	object = NULL;
	starting_target = launder;
	vnodes_skipped = 0;

	/*
	 * Scan the laundry queues for pages eligible to be laundered.  We stop
	 * once the target number of dirty pages have been laundered, or once
	 * we've reached the end of the queue.  A single iteration of this loop
	 * may cause more than one page to be laundered because of clustering.
	 *
	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
	 * swap devices are configured.
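	 *
	 * The atomic_load_acq_int() below pairs with the
	 * atomic_store_rel_int() calls in the vm_pageout_swapon() and
	 * vm_pageout_swapoff() event handlers above.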
	 */
	if (atomic_load_acq_int(&swapdev_enabled))
		queue = PQ_UNSWAPPABLE;
	else
		queue = PQ_LAUNDRY;

scan:
	marker = &vmd->vmd_markers[queue];
	pq = &vmd->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
		if (__predict_false((m->flags & PG_MARKER) != 0))
			continue;

		vm_page_change_lock(m, &mtx);

recheck:
		/*
		 * The page may have been disassociated from the queue
		 * while locks were dropped.
		 */
		if (vm_page_queue(m) != queue)
			continue;

		/*
		 * A requeue was requested, so this page gets a second
		 * chance.
		 */
		if ((m->aflags & PGA_REQUEUE) != 0) {
			vm_page_requeue(m);
			continue;
		}

		/*
		 * Held pages are essentially stuck in the queue.
		 *
		 * Wired pages may not be freed.  Complete their removal
		 * from the queue now to avoid needless revisits during
		 * future scans.
		 */
		if (m->hold_count != 0)
			continue;
		if (m->wire_count != 0) {
			vm_page_dequeue_deferred(m);
			continue;
		}

		if (object != m->object) {
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);
			object = m->object;
			if (!VM_OBJECT_TRYWLOCK(object)) {
				mtx_unlock(mtx);
				/* Depends on type-stability. */
				VM_OBJECT_WLOCK(object);
				mtx_lock(mtx);
				goto recheck;
			}
		}

		if (vm_page_busied(m))
			continue;

		/*
		 * Invalid pages can be easily freed.  They cannot be
		 * mapped; vm_page_free() asserts this.
		 */
		if (m->valid == 0)
			goto free_page;

		/*
		 * If the page has been referenced and the object is not dead,
		 * reactivate or requeue the page depending on whether the
		 * object is mapped.
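		 *
		 * (Concretely, act_delta below tallies the observed
		 * references: the count returned by pmap_ts_referenced(), or
		 * zero for an unreferenced object, plus one if the page's
		 * PGA_REFERENCED flag was set.)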
		 *
		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
		 * that a reference from a concurrently destroyed mapping is
		 * observed here and now.
		 */
		if (object->ref_count != 0)
			act_delta = pmap_ts_referenced(m);
		else {
			KASSERT(!pmap_page_is_mapped(m),
			    ("page %p is mapped", m));
			act_delta = 0;
		}
		if ((m->aflags & PGA_REFERENCED) != 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta++;
		}
		if (act_delta != 0) {
			if (object->ref_count != 0) {
				VM_CNT_INC(v_reactivated);
				vm_page_activate(m);

				/*
				 * Increase the activation count if the page
				 * was referenced while in the laundry queue.
				 * This makes it less likely that the page will
				 * be returned prematurely to the inactive
				 * queue.
				 */
				m->act_count += act_delta + ACT_ADVANCE;

				/*
				 * If this was a background laundering, count
				 * activated pages towards our target.  The
				 * purpose of background laundering is to ensure
				 * that pages are eventually cycled through the
				 * laundry queue, and an activation is a valid
				 * way out.
				 */
				if (!in_shortfall)
					launder--;
				continue;
			} else if ((object->flags & OBJ_DEAD) == 0) {
				vm_page_requeue(m);
				continue;
			}
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of freeing it.  If, however, any of the page's
		 * mappings allow write access, then the page may still be
		 * modified until the last of those mappings are removed.
		 */
		if (object->ref_count != 0) {
			vm_page_test_dirty(m);
			if (m->dirty == 0)
				pmap_remove_all(m);
		}

		/*
		 * Clean pages are freed, and dirty pages are paged out unless
		 * they belong to a dead object.  Requeueing dirty pages from
		 * dead objects is pointless, as they are being paged out and
		 * freed by the thread that destroyed the object.
		 */
		if (m->dirty == 0) {
free_page:
			vm_page_free(m);
			VM_CNT_INC(v_dfree);
		} else if ((object->flags & OBJ_DEAD) == 0) {
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_DEFAULT)
				pageout_ok = true;
			else if (disable_swap_pageouts)
				pageout_ok = false;
			else
				pageout_ok = true;
			if (!pageout_ok) {
				vm_page_requeue(m);
				continue;
			}

			/*
			 * Form a cluster with adjacent, dirty pages from the
			 * same object, and page out that entire cluster.
			 *
			 * The adjacent, dirty pages must also be in the
			 * laundry.  However, their mappings are not checked
			 * for new references.  Consequently, a recently
			 * referenced page may be paged out.  However, that
			 * page will not be prematurely reclaimed.  After page
			 * out, the page will be placed in the inactive queue,
			 * where any new references will be detected and the
			 * page reactivated.
			 */
			error = vm_pageout_clean(m, &numpagedout);
			if (error == 0) {
				launder -= numpagedout;
				ss.scanned += numpagedout;
			} else if (error == EDEADLK) {
				pageout_lock_miss++;
				vnodes_skipped++;
			}
			mtx = NULL;
			object = NULL;
		}
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
	if (object != NULL)
		VM_OBJECT_WUNLOCK(object);
	vm_pagequeue_lock(pq);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);

	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
		queue = PQ_LAUNDRY;
		goto scan;
	}

	/*
	 * Wake up the sync daemon if we skipped a vnode in a writeable object
	 * and we didn't launder enough pages.
	 */
	if (vnodes_skipped > 0 && launder > 0)
		(void)speedup_syncer();

	return (starting_target - launder);
}

/*
 * Compute the integer square root.
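 *
 * This is the classical bit-by-bit method: "bit" starts at the largest
 * power of four representable in a u_int, and each iteration decides one
 * bit of the root.  For example, isqrt(27) yields 5, the floor of the
 * exact square root.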
 */
static u_int
isqrt(u_int num)
{
	u_int bit, root, tmp;

	bit = 1u << ((NBBY * sizeof(u_int)) - 2);
	while (bit > num)
		bit >>= 2;
	root = 0;
	while (bit != 0) {
		tmp = root + bit;
		root >>= 1;
		if (num >= tmp) {
			num -= tmp;
			root += bit;
		}
		bit >>= 2;
	}
	return (root);
}

/*
 * Perform the work of the laundry thread: periodically wake up and determine
 * whether any pages need to be laundered.  If so, determine the number of pages
 * that need to be laundered, and launder them.
 */
static void
vm_pageout_laundry_worker(void *arg)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	uint64_t nclean, ndirty, nfreed;
	int domain, last_target, launder, shortfall, shortfall_cycle, target;
	bool in_shortfall;

	domain = (uintptr_t)arg;
	vmd = VM_DOMAIN(domain);
	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));

	shortfall = 0;
	in_shortfall = false;
	shortfall_cycle = 0;
	last_target = target = 0;
	nfreed = 0;

	/*
	 * Calls to these handlers are serialized by the swap syscall lock.
	 */
	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
	    EVENTHANDLER_PRI_ANY);
	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
	    EVENTHANDLER_PRI_ANY);

	/*
	 * The pageout laundry worker is never done, so loop forever.
	 */
	for (;;) {
		KASSERT(target >= 0, ("negative target %d", target));
		KASSERT(shortfall_cycle >= 0,
		    ("negative cycle %d", shortfall_cycle));
		launder = 0;

		/*
		 * First determine whether we need to launder pages to meet a
		 * shortage of free pages.
		 */
		if (shortfall > 0) {
			in_shortfall = true;
			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
			target = shortfall;
		} else if (!in_shortfall)
			goto trybackground;
		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
			/*
			 * We recently entered shortfall and began laundering
			 * pages.  If we have completed that laundering run
			 * (and we are no longer in shortfall) or we have met
			 * our laundry target through other activity, then we
			 * can stop laundering pages.
			 */
			in_shortfall = false;
			target = 0;
			goto trybackground;
		}
		launder = target / shortfall_cycle--;
		goto dolaundry;

		/*
		 * There's no immediate need to launder any pages; see if we
		 * meet the conditions to perform background laundering:
		 *
		 * 1. The ratio of dirty to clean inactive pages exceeds the
		 *    background laundering threshold, or
		 * 2. we haven't yet reached the target of the current
		 *    background laundering run.
		 *
		 * The background laundering threshold is not a constant.
		 * Instead, it is a slowly growing function of the number of
		 * clean pages freed by the page daemon since the last
		 * background laundering.  Thus, as the ratio of dirty to
		 * clean inactive pages grows, the amount of memory pressure
		 * required to trigger laundering decreases.  We ensure
		 * that the threshold is non-zero after an inactive queue
		 * scan, even if that scan failed to free a single clean page.
		 */
trybackground:
		nclean = vmd->vmd_free_count +
		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
			target = vmd->vmd_background_launder_target;
		}

		/*
		 * We have a non-zero background laundering target.  If we've
		 * laundered up to our maximum without observing a page daemon
		 * request, just stop.  This is a safety belt that ensures we
		 * don't launder an excessive amount if memory pressure is low
		 * and the ratio of dirty to clean pages is large.  Otherwise,
		 * proceed at the background laundering rate.
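		 *
		 * To illustrate the trigger test at "trybackground" above:
		 * while nfreed is small, the isqrt() term is 1, so a run
		 * begins only once ndirty exceeds nclean; as nfreed grows,
		 * the multiplier grows as its square root, and progressively
		 * smaller dirty/clean ratios suffice to begin laundering.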
		 */
		if (target > 0) {
			if (nfreed > 0) {
				nfreed = 0;
				last_target = target;
			} else if (last_target - target >=
			    vm_background_launder_max * PAGE_SIZE / 1024) {
				target = 0;
			}
			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
			launder /= VM_LAUNDER_RATE;
			if (launder > target)
				launder = target;
		}

dolaundry:
		if (launder > 0) {
			/*
			 * Because of I/O clustering, the number of laundered
			 * pages could exceed "target" by the maximum size of
			 * a cluster minus one.
			 */
			target -= min(vm_pageout_launder(vmd, launder,
			    in_shortfall), target);
			pause("laundp", hz / VM_LAUNDER_RATE);
		}

		/*
		 * If we're not currently laundering pages and the page daemon
		 * hasn't posted a new request, sleep until the page daemon
		 * kicks us.
		 */
		vm_pagequeue_lock(pq);
		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
			(void)mtx_sleep(&vmd->vmd_laundry_request,
			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);

		/*
		 * If the pagedaemon has indicated that it's in shortfall, start
		 * a shortfall laundering unless we're already in the middle of
		 * one.  This may preempt a background laundering.
		 */
		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
		    (!in_shortfall || shortfall_cycle == 0)) {
			shortfall = vm_laundry_target(vmd) +
			    vmd->vmd_pageout_deficit;
			target = 0;
		} else
			shortfall = 0;

		if (target == 0)
			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
		nfreed += vmd->vmd_clean_pages_freed;
		vmd->vmd_clean_pages_freed = 0;
		vm_pagequeue_unlock(pq);
	}
}

/*
 * Compute the number of pages we want to try to move from the
 * active queue to either the inactive or laundry queue.
 *
 * When scanning active pages during a shortage, we make clean pages
 * count more heavily towards the page shortage than dirty pages.
 * This is because dirty pages must be laundered before they can be
 * reused and thus have less utility when attempting to quickly
 * alleviate a free page shortage.  However, this weighting also
However, this weighting also
11117bb4634eSMark Johnston * causes the scan to deactivate dirty pages more aggressively,
11127bb4634eSMark Johnston * improving the effectiveness of clustering.
1113be37ee79SMark Johnston */
1114be37ee79SMark Johnston static int
11157bb4634eSMark Johnston vm_pageout_active_target(struct vm_domain *vmd)
1116be37ee79SMark Johnston {
1117be37ee79SMark Johnston int shortage;
1118be37ee79SMark Johnston 
1119be37ee79SMark Johnston shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1120be37ee79SMark Johnston (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1121be37ee79SMark Johnston vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1122be37ee79SMark Johnston shortage *= act_scan_laundry_weight;
1123be37ee79SMark Johnston return (shortage);
1124be37ee79SMark Johnston }
1125be37ee79SMark Johnston 
1126be37ee79SMark Johnston /*
1127be37ee79SMark Johnston * Scan the active queue. If there is no shortage of inactive pages, scan a
1128be37ee79SMark Johnston * small portion of the queue in order to maintain quasi-LRU.
1129be37ee79SMark Johnston */
1130be37ee79SMark Johnston static void
1131be37ee79SMark Johnston vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1132be37ee79SMark Johnston {
1133be37ee79SMark Johnston struct scan_state ss;
1134be37ee79SMark Johnston struct mtx *mtx;
1135be37ee79SMark Johnston vm_page_t m, marker;
1136be37ee79SMark Johnston struct vm_pagequeue *pq;
1137be37ee79SMark Johnston long min_scan;
1138be37ee79SMark Johnston int act_delta, max_scan, scan_tick;
1139be37ee79SMark Johnston 
1140be37ee79SMark Johnston marker = &vmd->vmd_markers[PQ_ACTIVE];
1141be37ee79SMark Johnston pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1142be37ee79SMark Johnston vm_pagequeue_lock(pq);
1143be37ee79SMark Johnston 
1144be37ee79SMark Johnston /*
1145be37ee79SMark Johnston * If we're just idle polling, attempt to visit every
1146be37ee79SMark Johnston * active page within 'update_period' seconds.
1147be37ee79SMark Johnston */
1148be37ee79SMark Johnston scan_tick = ticks;
1149be37ee79SMark Johnston if (vm_pageout_update_period != 0) {
1150be37ee79SMark Johnston min_scan = pq->pq_cnt;
1151be37ee79SMark Johnston min_scan *= scan_tick - vmd->vmd_last_active_scan;
1152be37ee79SMark Johnston min_scan /= hz * vm_pageout_update_period;
1153be37ee79SMark Johnston } else
1154be37ee79SMark Johnston min_scan = 0;
1155be37ee79SMark Johnston if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1156be37ee79SMark Johnston vmd->vmd_last_active_scan = scan_tick;
1157be37ee79SMark Johnston 
1158be37ee79SMark Johnston /*
1159be37ee79SMark Johnston * Scan the active queue for pages that can be deactivated. Update
1160be37ee79SMark Johnston * the per-page activity counter and use it to identify deactivation
1161be37ee79SMark Johnston * candidates. Held pages may be deactivated.
1162be37ee79SMark Johnston *
1163be37ee79SMark Johnston * To avoid requeuing each page that remains in the active queue, we
11647bb4634eSMark Johnston * implement the CLOCK algorithm. To keep the implementation of the
11657bb4634eSMark Johnston * enqueue operation consistent for all page queues, we use two hands,
11667bb4634eSMark Johnston * represented by marker pages. Scans begin at the first hand, which
11677bb4634eSMark Johnston * precedes the second hand in the queue. When the two hands meet,
11687bb4634eSMark Johnston * they are moved back to the head and tail of the queue, respectively,
11697bb4634eSMark Johnston * and scanning resumes.
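 *
 * A sketch of the resulting queue layout (purely illustrative):
 *
 *	head -> [clock[0]] -> p0 -> p1 -> ... -> [clock[1]] -> tail
 *
 * The scan hand starts at clock[0]; when it reaches clock[1], both
 * markers are moved back to the head and tail of the queue and the
 * scan continues from the front.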
1170be37ee79SMark Johnston */ 1171be37ee79SMark Johnston max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan; 1172be37ee79SMark Johnston mtx = NULL; 1173be37ee79SMark Johnston act_scan: 1174be37ee79SMark Johnston vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan); 1175be37ee79SMark Johnston while ((m = vm_pageout_next(&ss, false)) != NULL) { 1176be37ee79SMark Johnston if (__predict_false(m == &vmd->vmd_clock[1])) { 1177be37ee79SMark Johnston vm_pagequeue_lock(pq); 1178be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1179be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q); 1180be37ee79SMark Johnston TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0], 1181be37ee79SMark Johnston plinks.q); 1182be37ee79SMark Johnston TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1], 1183be37ee79SMark Johnston plinks.q); 1184be37ee79SMark Johnston max_scan -= ss.scanned; 1185be37ee79SMark Johnston vm_pageout_end_scan(&ss); 1186be37ee79SMark Johnston goto act_scan; 1187be37ee79SMark Johnston } 1188be37ee79SMark Johnston if (__predict_false((m->flags & PG_MARKER) != 0)) 1189be37ee79SMark Johnston continue; 1190be37ee79SMark Johnston 1191be37ee79SMark Johnston vm_page_change_lock(m, &mtx); 1192be37ee79SMark Johnston 1193be37ee79SMark Johnston /* 1194be37ee79SMark Johnston * The page may have been disassociated from the queue 1195be37ee79SMark Johnston * while locks were dropped. 1196be37ee79SMark Johnston */ 1197be37ee79SMark Johnston if (vm_page_queue(m) != PQ_ACTIVE) 1198be37ee79SMark Johnston continue; 1199be37ee79SMark Johnston 1200be37ee79SMark Johnston /* 1201be37ee79SMark Johnston * Wired pages are dequeued lazily. 1202be37ee79SMark Johnston */ 1203be37ee79SMark Johnston if (m->wire_count != 0) { 1204be37ee79SMark Johnston vm_page_dequeue_deferred(m); 1205be37ee79SMark Johnston continue; 1206be37ee79SMark Johnston } 1207be37ee79SMark Johnston 1208be37ee79SMark Johnston /* 1209be37ee79SMark Johnston * Check to see "how much" the page has been used. 1210d7aeb429SAlan Cox * 1211d7aeb429SAlan Cox * Test PGA_REFERENCED after calling pmap_ts_referenced() so 1212d7aeb429SAlan Cox * that a reference from a concurrently destroyed mapping is 1213d7aeb429SAlan Cox * observed here and now. 1214d7aeb429SAlan Cox * 1215be37ee79SMark Johnston * Perform an unsynchronized object ref count check. While 1216be37ee79SMark Johnston * the page lock ensures that the page is not reallocated to 1217be37ee79SMark Johnston * another object, in particular, one with unmanaged mappings 1218be37ee79SMark Johnston * that cannot support pmap_ts_referenced(), two races are, 1219be37ee79SMark Johnston * nonetheless, possible: 1220be37ee79SMark Johnston * 1) The count was transitioning to zero, but we saw a non- 1221be37ee79SMark Johnston * zero value. pmap_ts_referenced() will return zero 1222be37ee79SMark Johnston * because the page is not mapped. 1223be37ee79SMark Johnston * 2) The count was transitioning to one, but we saw zero. 1224be37ee79SMark Johnston * This race delays the detection of a new reference. At 1225be37ee79SMark Johnston * worst, we will deactivate and reactivate the page. 
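 *
 * A hypothetical interleaving for race 2 (the schedule is invented
 * for illustration):
 *	scanner: reads ref_count == 0
 *	other:   creates a mapping and references the page
 *	scanner: skips pmap_ts_referenced(), so act_delta stays 0
 * The new reference is only observed by a later scan; at worst the
 * page is deactivated now and reactivated then.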
1226be37ee79SMark Johnston */ 1227be37ee79SMark Johnston if (m->object->ref_count != 0) 1228d7aeb429SAlan Cox act_delta = pmap_ts_referenced(m); 1229d7aeb429SAlan Cox else 1230d7aeb429SAlan Cox act_delta = 0; 1231d7aeb429SAlan Cox if ((m->aflags & PGA_REFERENCED) != 0) { 1232d7aeb429SAlan Cox vm_page_aflag_clear(m, PGA_REFERENCED); 1233d7aeb429SAlan Cox act_delta++; 1234d7aeb429SAlan Cox } 1235be37ee79SMark Johnston 1236be37ee79SMark Johnston /* 1237be37ee79SMark Johnston * Advance or decay the act_count based on recent usage. 1238be37ee79SMark Johnston */ 1239be37ee79SMark Johnston if (act_delta != 0) { 1240be37ee79SMark Johnston m->act_count += ACT_ADVANCE + act_delta; 1241be37ee79SMark Johnston if (m->act_count > ACT_MAX) 1242be37ee79SMark Johnston m->act_count = ACT_MAX; 1243be37ee79SMark Johnston } else 1244be37ee79SMark Johnston m->act_count -= min(m->act_count, ACT_DECLINE); 1245be37ee79SMark Johnston 1246be37ee79SMark Johnston if (m->act_count == 0) { 1247be37ee79SMark Johnston /* 1248be37ee79SMark Johnston * When not short for inactive pages, let dirty pages go 1249be37ee79SMark Johnston * through the inactive queue before moving to the 1250be37ee79SMark Johnston * laundry queues. This gives them some extra time to 1251be37ee79SMark Johnston * be reactivated, potentially avoiding an expensive 12527bb4634eSMark Johnston * pageout. However, during a page shortage, the 12537bb4634eSMark Johnston * inactive queue is necessarily small, and so dirty 12547bb4634eSMark Johnston * pages would only spend a trivial amount of time in 12557bb4634eSMark Johnston * the inactive queue. Therefore, we might as well 12567bb4634eSMark Johnston * place them directly in the laundry queue to reduce 12577bb4634eSMark Johnston * queuing overhead. 1258be37ee79SMark Johnston */ 1259be37ee79SMark Johnston if (page_shortage <= 0) 1260be37ee79SMark Johnston vm_page_deactivate(m); 1261be37ee79SMark Johnston else { 1262be37ee79SMark Johnston /* 1263be37ee79SMark Johnston * Calling vm_page_test_dirty() here would 1264be37ee79SMark Johnston * require acquisition of the object's write 1265be37ee79SMark Johnston * lock. However, during a page shortage, 1266be37ee79SMark Johnston * directing dirty pages into the laundry 1267be37ee79SMark Johnston * queue is only an optimization and not a 1268be37ee79SMark Johnston * requirement. Therefore, we simply rely on 1269be37ee79SMark Johnston * the opportunistic updates to the page's 1270be37ee79SMark Johnston * dirty field by the pmap. 
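 *
 * Accounting sketch for the branch below (the weight shown is only
 * an assumed example value for the act_scan_laundry_weight tunable):
 * with a weight of 3, each clean page deactivated reduces
 * page_shortage by 3, while each dirty page laundered reduces it by
 * only 1, so clean pages satisfy the shortage three times as fast.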
1271be37ee79SMark Johnston */ 1272be37ee79SMark Johnston if (m->dirty == 0) { 1273be37ee79SMark Johnston vm_page_deactivate(m); 1274be37ee79SMark Johnston page_shortage -= 1275be37ee79SMark Johnston act_scan_laundry_weight; 1276be37ee79SMark Johnston } else { 1277be37ee79SMark Johnston vm_page_launder(m); 1278be37ee79SMark Johnston page_shortage--; 1279be37ee79SMark Johnston } 1280be37ee79SMark Johnston } 1281be37ee79SMark Johnston } 1282be37ee79SMark Johnston } 1283be37ee79SMark Johnston if (mtx != NULL) { 1284be37ee79SMark Johnston mtx_unlock(mtx); 1285be37ee79SMark Johnston mtx = NULL; 1286be37ee79SMark Johnston } 1287be37ee79SMark Johnston vm_pagequeue_lock(pq); 1288be37ee79SMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 1289be37ee79SMark Johnston TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q); 1290be37ee79SMark Johnston vm_pageout_end_scan(&ss); 1291be37ee79SMark Johnston vm_pagequeue_unlock(pq); 1292be37ee79SMark Johnston } 1293be37ee79SMark Johnston 12945cd29d0fSMark Johnston static int 12955cd29d0fSMark Johnston vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m) 12965cd29d0fSMark Johnston { 12975cd29d0fSMark Johnston struct vm_domain *vmd; 12985cd29d0fSMark Johnston 12991b5c869dSMark Johnston if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0) 13005cd29d0fSMark Johnston return (0); 13015cd29d0fSMark Johnston vm_page_aflag_set(m, PGA_ENQUEUED); 13025cd29d0fSMark Johnston if ((m->aflags & PGA_REQUEUE_HEAD) != 0) { 13035cd29d0fSMark Johnston vmd = vm_pagequeue_domain(m); 13045cd29d0fSMark Johnston TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); 13055cd29d0fSMark Johnston vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD); 13065cd29d0fSMark Johnston } else if ((m->aflags & PGA_REQUEUE) != 0) { 13075cd29d0fSMark Johnston TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q); 13085cd29d0fSMark Johnston vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD); 13095cd29d0fSMark Johnston } else 13105cd29d0fSMark Johnston TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q); 13115cd29d0fSMark Johnston return (1); 13125cd29d0fSMark Johnston } 13135cd29d0fSMark Johnston 13145cd29d0fSMark Johnston /* 13155cd29d0fSMark Johnston * Re-add stuck pages to the inactive queue. We will examine them again 13165cd29d0fSMark Johnston * during the next scan. If the queue state of a page has changed since 13175cd29d0fSMark Johnston * it was physically removed from the page queue in 13185cd29d0fSMark Johnston * vm_pageout_collect_batch(), don't do anything with that page. 
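 *
 * Batching sketch (the arithmetic is illustrative; the batch size is
 * whatever VM_BATCHQUEUE_SIZE is in this tree): staging pages in
 * "bq" and flushing them under a single lock hold means requeuing N
 * stuck pages costs on the order of N / VM_BATCHQUEUE_SIZE page
 * queue lock acquisitions rather than N.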
13195cd29d0fSMark Johnston */ 13205cd29d0fSMark Johnston static void 13215cd29d0fSMark Johnston vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq, 13225cd29d0fSMark Johnston vm_page_t m) 13235cd29d0fSMark Johnston { 13245cd29d0fSMark Johnston struct vm_pagequeue *pq; 13255cd29d0fSMark Johnston int delta; 13265cd29d0fSMark Johnston 13275cd29d0fSMark Johnston delta = 0; 13285cd29d0fSMark Johnston pq = ss->pq; 13295cd29d0fSMark Johnston 13305cd29d0fSMark Johnston if (m != NULL) { 13315cd29d0fSMark Johnston if (vm_batchqueue_insert(bq, m)) 13325cd29d0fSMark Johnston return; 13335cd29d0fSMark Johnston vm_pagequeue_lock(pq); 13345cd29d0fSMark Johnston delta += vm_pageout_reinsert_inactive_page(ss, m); 13355cd29d0fSMark Johnston } else 13365cd29d0fSMark Johnston vm_pagequeue_lock(pq); 13375cd29d0fSMark Johnston while ((m = vm_batchqueue_pop(bq)) != NULL) 13385cd29d0fSMark Johnston delta += vm_pageout_reinsert_inactive_page(ss, m); 13395cd29d0fSMark Johnston vm_pagequeue_cnt_add(pq, delta); 13405cd29d0fSMark Johnston vm_pagequeue_unlock(pq); 13415cd29d0fSMark Johnston vm_batchqueue_init(bq); 13425cd29d0fSMark Johnston } 13435cd29d0fSMark Johnston 1344ebcddc72SAlan Cox /* 134527e29d10SMark Johnston * Attempt to reclaim the requested number of pages from the inactive queue. 134627e29d10SMark Johnston * Returns true if the shortage was addressed. 1347df8bae1dSRodney W. Grimes */ 1348be37ee79SMark Johnston static int 134949a3710cSMark Johnston vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage, 1350be37ee79SMark Johnston int *addl_shortage) 1351df8bae1dSRodney W. Grimes { 13525cd29d0fSMark Johnston struct scan_state ss; 13535cd29d0fSMark Johnston struct vm_batchqueue rq; 13545cd29d0fSMark Johnston struct mtx *mtx; 13555cd29d0fSMark Johnston vm_page_t m, marker; 13568d220203SAlan Cox struct vm_pagequeue *pq; 1357df8bae1dSRodney W. Grimes vm_object_t object; 1358be37ee79SMark Johnston int act_delta, addl_page_shortage, deficit, page_shortage; 1359be37ee79SMark Johnston int starting_page_shortage; 13600d94caffSDavid Greenman 1361df8bae1dSRodney W. Grimes /* 136201f04471SMark Johnston * The addl_page_shortage is an estimate of the number of temporarily 1363311e34e2SKonstantin Belousov * stuck pages in the inactive queue. In other words, the 1364449c2e92SKonstantin Belousov * number of pages from the inactive count that should be 1365311e34e2SKonstantin Belousov * discounted in setting the target for the active queue scan. 1366311e34e2SKonstantin Belousov */ 13679099545aSAlan Cox addl_page_shortage = 0; 13689099545aSAlan Cox 13691c7c3c6aSMatthew Dillon /* 137049a3710cSMark Johnston * vmd_pageout_deficit counts the number of pages requested in 137149a3710cSMark Johnston * allocations that failed because of a free page shortage. We assume 137249a3710cSMark Johnston * that the allocations will be reattempted and thus include the deficit 137349a3710cSMark Johnston * in our scan target. 13741c7c3c6aSMatthew Dillon */ 1375e2068d0bSJeff Roberson deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit); 137649a3710cSMark Johnston starting_page_shortage = page_shortage = shortage + deficit; 13771c7c3c6aSMatthew Dillon 13785cd29d0fSMark Johnston mtx = NULL; 13795cd29d0fSMark Johnston object = NULL; 13805cd29d0fSMark Johnston vm_batchqueue_init(&rq); 13815cd29d0fSMark Johnston 1382936524aaSMatthew Dillon /* 1383f095d1bbSAlan Cox * Start scanning the inactive queue for pages that we can free. 
The 1384f095d1bbSAlan Cox * scan will stop when we reach the target or we have scanned the 1385f095d1bbSAlan Cox * entire queue. (Note that m->act_count is not used to make 1386f095d1bbSAlan Cox * decisions for the inactive queue, only for the active queue.) 13878d220203SAlan Cox */ 138864b38930SMark Johnston marker = &vmd->vmd_markers[PQ_INACTIVE]; 13895cd29d0fSMark Johnston pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; 13908d220203SAlan Cox vm_pagequeue_lock(pq); 13915cd29d0fSMark Johnston vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 13925cd29d0fSMark Johnston while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) { 13935cd29d0fSMark Johnston KASSERT((m->flags & PG_MARKER) == 0, 13945cd29d0fSMark Johnston ("marker page %p was dequeued", m)); 1395df8bae1dSRodney W. Grimes 13965cd29d0fSMark Johnston vm_page_change_lock(m, &mtx); 1397df8bae1dSRodney W. Grimes 13985cd29d0fSMark Johnston recheck: 1399936524aaSMatthew Dillon /* 14005cd29d0fSMark Johnston * The page may have been disassociated from the queue 14015cd29d0fSMark Johnston * while locks were dropped. 1402936524aaSMatthew Dillon */ 140336f8fe9bSMark Johnston if (vm_page_queue(m) != PQ_INACTIVE) { 14045cd29d0fSMark Johnston addl_page_shortage++; 1405936524aaSMatthew Dillon continue; 14065cd29d0fSMark Johnston } 14077900f95dSKonstantin Belousov 14088c616246SKonstantin Belousov /* 14095cd29d0fSMark Johnston * The page was re-enqueued after the page queue lock was 14105cd29d0fSMark Johnston * dropped, or a requeue was requested. This page gets a second 14115cd29d0fSMark Johnston * chance. 14128c616246SKonstantin Belousov */ 14135cd29d0fSMark Johnston if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE | 14145cd29d0fSMark Johnston PGA_REQUEUE_HEAD)) != 0) 14155cd29d0fSMark Johnston goto reinsert; 14165cd29d0fSMark Johnston 14171d3a1bcfSMark Johnston /* 14185cd29d0fSMark Johnston * Held pages are essentially stuck in the queue. So, 14195cd29d0fSMark Johnston * they ought to be discounted from the inactive count. 1420be37ee79SMark Johnston * See the description of addl_page_shortage above. 14215cd29d0fSMark Johnston * 14225cd29d0fSMark Johnston * Wired pages may not be freed. Complete their removal 14235cd29d0fSMark Johnston * from the queue now to avoid needless revisits during 14245cd29d0fSMark Johnston * future scans. 1425a3aeedabSAlan Cox */ 14265cd29d0fSMark Johnston if (m->hold_count != 0) { 1427a3aeedabSAlan Cox addl_page_shortage++; 14285cd29d0fSMark Johnston goto reinsert; 14295cd29d0fSMark Johnston } 14305cd29d0fSMark Johnston if (m->wire_count != 0) { 14315cd29d0fSMark Johnston vm_page_dequeue_deferred(m); 14325cd29d0fSMark Johnston continue; 14335cd29d0fSMark Johnston } 14345cd29d0fSMark Johnston 14355cd29d0fSMark Johnston if (object != m->object) { 1436*60256604SMark Johnston if (object != NULL) 14375cd29d0fSMark Johnston VM_OBJECT_WUNLOCK(object); 14389ee2165fSAlan Cox object = m->object; 1439a3aeedabSAlan Cox if (!VM_OBJECT_TRYWLOCK(object)) { 14405cd29d0fSMark Johnston mtx_unlock(mtx); 14415cd29d0fSMark Johnston /* Depends on type-stability. */ 14425cd29d0fSMark Johnston VM_OBJECT_WLOCK(object); 14435cd29d0fSMark Johnston mtx_lock(mtx); 14445cd29d0fSMark Johnston goto recheck; 1445*60256604SMark Johnston } 1446a3aeedabSAlan Cox } 14475cd29d0fSMark Johnston 1448a3aeedabSAlan Cox if (vm_page_busied(m)) { 1449a3aeedabSAlan Cox /* 1450a3aeedabSAlan Cox * Don't mess with busy pages. Leave them at 1451a3aeedabSAlan Cox * the front of the queue. 
Most likely, they 1452a3aeedabSAlan Cox * are being paged out and will leave the 1453a3aeedabSAlan Cox * queue shortly after the scan finishes. So, 1454a3aeedabSAlan Cox * they ought to be discounted from the 1455a3aeedabSAlan Cox * inactive count. 1456a3aeedabSAlan Cox */ 1457a3aeedabSAlan Cox addl_page_shortage++; 14585cd29d0fSMark Johnston goto reinsert; 145926f9a767SRodney W. Grimes } 146048cc2fc7SKonstantin Belousov 146148cc2fc7SKonstantin Belousov /* 14628748f58cSKonstantin Belousov * Invalid pages can be easily freed. They cannot be 14638748f58cSKonstantin Belousov * mapped, vm_page_free() asserts this. 1464776f729cSKonstantin Belousov */ 14658748f58cSKonstantin Belousov if (m->valid == 0) 14668748f58cSKonstantin Belousov goto free_page; 1467776f729cSKonstantin Belousov 1468776f729cSKonstantin Belousov /* 1469960810ccSAlan Cox * If the page has been referenced and the object is not dead, 1470960810ccSAlan Cox * reactivate or requeue the page depending on whether the 1471960810ccSAlan Cox * object is mapped. 1472d7aeb429SAlan Cox * 1473d7aeb429SAlan Cox * Test PGA_REFERENCED after calling pmap_ts_referenced() so 1474d7aeb429SAlan Cox * that a reference from a concurrently destroyed mapping is 1475d7aeb429SAlan Cox * observed here and now. 14767e006499SJohn Dyson */ 1477d7aeb429SAlan Cox if (object->ref_count != 0) 1478d7aeb429SAlan Cox act_delta = pmap_ts_referenced(m); 1479d7aeb429SAlan Cox else { 1480bb7858eaSJeff Roberson KASSERT(!pmap_page_is_mapped(m), 1481be37ee79SMark Johnston ("page %p is mapped", m)); 1482d7aeb429SAlan Cox act_delta = 0; 1483d7aeb429SAlan Cox } 1484d7aeb429SAlan Cox if ((m->aflags & PGA_REFERENCED) != 0) { 1485d7aeb429SAlan Cox vm_page_aflag_clear(m, PGA_REFERENCED); 1486d7aeb429SAlan Cox act_delta++; 14872fe6e4d7SDavid Greenman } 1488bb7858eaSJeff Roberson if (act_delta != 0) { 148986fa2471SAlan Cox if (object->ref_count != 0) { 149083c9dea1SGleb Smirnoff VM_CNT_INC(v_reactivated); 149126f9a767SRodney W. Grimes vm_page_activate(m); 1492960810ccSAlan Cox 1493960810ccSAlan Cox /* 1494960810ccSAlan Cox * Increase the activation count if the page 1495960810ccSAlan Cox * was referenced while in the inactive queue. 1496960810ccSAlan Cox * This makes it less likely that the page will 1497960810ccSAlan Cox * be returned prematurely to the inactive 1498960810ccSAlan Cox * queue. 1499960810ccSAlan Cox */ 1500bb7858eaSJeff Roberson m->act_count += act_delta + ACT_ADVANCE; 15015cd29d0fSMark Johnston continue; 1502ebcddc72SAlan Cox } else if ((object->flags & OBJ_DEAD) == 0) { 15035cd29d0fSMark Johnston vm_page_aflag_set(m, PGA_REQUEUE); 15045cd29d0fSMark Johnston goto reinsert; 1505ebcddc72SAlan Cox } 1506960810ccSAlan Cox } 150767bf6868SJohn Dyson 15087e006499SJohn Dyson /* 15099fc4739dSAlan Cox * If the page appears to be clean at the machine-independent 15109fc4739dSAlan Cox * layer, then remove all of its mappings from the pmap in 1511a766ffd0SAlan Cox * anticipation of freeing it. If, however, any of the page's 1512a766ffd0SAlan Cox * mappings allow write access, then the page may still be 1513a766ffd0SAlan Cox * modified until the last of those mappings are removed. 
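 *
 * Illustrative ordering (invented, but consistent with the rule
 * above): even after vm_page_test_dirty() observes m->dirty == 0, a
 * CPU with a writable mapping may still store to the page, and the
 * modified bit only migrates into m->dirty when that mapping is
 * destroyed; the pmap_remove_all() call below is what makes the
 * cleanliness check final.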
15147e006499SJohn Dyson */ 1515aa044135SAlan Cox if (object->ref_count != 0) { 15169fc4739dSAlan Cox vm_page_test_dirty(m); 1517aa044135SAlan Cox if (m->dirty == 0) 1518b78ddb0bSAlan Cox pmap_remove_all(m); 1519aa044135SAlan Cox } 1520dcbcd518SBruce Evans 15216989c456SAlan Cox /* 1522ebcddc72SAlan Cox * Clean pages can be freed, but dirty pages must be sent back 1523ebcddc72SAlan Cox * to the laundry, unless they belong to a dead object. 1524ebcddc72SAlan Cox * Requeueing dirty pages from dead objects is pointless, as 1525ebcddc72SAlan Cox * they are being paged out and freed by the thread that 1526ebcddc72SAlan Cox * destroyed the object. 15276989c456SAlan Cox */ 1528ebcddc72SAlan Cox if (m->dirty == 0) { 15298748f58cSKonstantin Belousov free_page: 15305cd29d0fSMark Johnston /* 15315cd29d0fSMark Johnston * Because we dequeued the page and have already 15325cd29d0fSMark Johnston * checked for concurrent dequeue and enqueue 15335cd29d0fSMark Johnston * requests, we can safely disassociate the page 15345cd29d0fSMark Johnston * from the inactive queue. 15355cd29d0fSMark Johnston */ 15365cd29d0fSMark Johnston KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0, 15375cd29d0fSMark Johnston ("page %p has queue state", m)); 15385cd29d0fSMark Johnston m->queue = PQ_NONE; 153978afdce6SAlan Cox vm_page_free(m); 15405cd29d0fSMark Johnston page_shortage--; 1541ebcddc72SAlan Cox } else if ((object->flags & OBJ_DEAD) == 0) 1542ebcddc72SAlan Cox vm_page_launder(m); 15435cd29d0fSMark Johnston continue; 15445cd29d0fSMark Johnston reinsert: 15455cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, m); 15465cd29d0fSMark Johnston } 1547*60256604SMark Johnston if (mtx != NULL) 15485cd29d0fSMark Johnston mtx_unlock(mtx); 1549*60256604SMark Johnston if (object != NULL) 155089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 15515cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, NULL); 15525cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL); 15538d220203SAlan Cox vm_pagequeue_lock(pq); 15545cd29d0fSMark Johnston vm_pageout_end_scan(&ss); 15558d220203SAlan Cox vm_pagequeue_unlock(pq); 155626f9a767SRodney W. Grimes 15575cd29d0fSMark Johnston VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage); 15585cd29d0fSMark Johnston 1559ebcddc72SAlan Cox /* 1560ebcddc72SAlan Cox * Wake up the laundry thread so that it can perform any needed 1561ebcddc72SAlan Cox * laundering. If we didn't meet our target, we're in shortfall and 1562b1fd102eSMark Johnston * need to launder more aggressively. If PQ_LAUNDRY is empty and no 1563b1fd102eSMark Johnston * swap devices are configured, the laundry thread has no work to do, so 1564b1fd102eSMark Johnston * don't bother waking it up. 1565cb35676eSMark Johnston * 1566cb35676eSMark Johnston * The laundry thread uses the number of inactive queue scans elapsed 1567cb35676eSMark Johnston * since the last laundering to determine whether to launder again, so 1568cb35676eSMark Johnston * keep count. 
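 *
 * Request-state sketch (states as used below): an IDLE request is
 * upgraded to BACKGROUND when the scan left dirty pages behind, or
 * to SHORTFALL when page_shortage > 0; an existing SHORTFALL
 * request is never downgraded here.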
1569ebcddc72SAlan Cox */
1570cb35676eSMark Johnston if (starting_page_shortage > 0) {
1571e2068d0bSJeff Roberson pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1572ebcddc72SAlan Cox vm_pagequeue_lock(pq);
1573e2068d0bSJeff Roberson if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1574cb35676eSMark Johnston (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1575ebcddc72SAlan Cox if (page_shortage > 0) {
1576e2068d0bSJeff Roberson vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
157783c9dea1SGleb Smirnoff VM_CNT_INC(v_pdshortfalls);
1578e2068d0bSJeff Roberson } else if (vmd->vmd_laundry_request !=
1579e2068d0bSJeff Roberson VM_LAUNDRY_SHORTFALL)
1580e2068d0bSJeff Roberson vmd->vmd_laundry_request =
1581e2068d0bSJeff Roberson VM_LAUNDRY_BACKGROUND;
1582e2068d0bSJeff Roberson wakeup(&vmd->vmd_laundry_request);
1583b1fd102eSMark Johnston }
158460684862SMark Johnston vmd->vmd_clean_pages_freed +=
158560684862SMark Johnston starting_page_shortage - page_shortage;
1586ebcddc72SAlan Cox vm_pagequeue_unlock(pq);
1587ebcddc72SAlan Cox }
1588ebcddc72SAlan Cox 
15899452b5edSAlan Cox /*
1590f095d1bbSAlan Cox * Wake up the swapout daemon if we didn't free the targeted number of
1591f095d1bbSAlan Cox * pages.
15929452b5edSAlan Cox */
1593ac04195bSKonstantin Belousov if (page_shortage > 0)
1594ac04195bSKonstantin Belousov vm_swapout_run();
15959452b5edSAlan Cox 
15969452b5edSAlan Cox /*
159776386c7eSKonstantin Belousov * If the inactive queue scan fails repeatedly to meet its
159876386c7eSKonstantin Belousov * target, kill the largest process.
159976386c7eSKonstantin Belousov */
160076386c7eSKonstantin Belousov vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
160176386c7eSKonstantin Belousov 
160276386c7eSKonstantin Belousov /*
1603be37ee79SMark Johnston * Reclaim pages by swapping out idle processes, if configured to do so.
16041c7c3c6aSMatthew Dillon */
1605ac04195bSKonstantin Belousov vm_swapout_run_idle();
1606be37ee79SMark Johnston 
1607be37ee79SMark Johnston /*
1608be37ee79SMark Johnston * See the description of addl_page_shortage above.
1609be37ee79SMark Johnston */
1610be37ee79SMark Johnston *addl_shortage = addl_page_shortage + deficit;
1611be37ee79SMark Johnston 
1612e57dd910SAlan Cox return (page_shortage <= 0);
16132025d69bSKonstantin Belousov }
16142025d69bSKonstantin Belousov 
1615449c2e92SKonstantin Belousov static int vm_pageout_oom_vote;
1616449c2e92SKonstantin Belousov 
1617449c2e92SKonstantin Belousov /*
1618449c2e92SKonstantin Belousov * The pagedaemon threads randomly select one to perform the
1619449c2e92SKonstantin Belousov * OOM. Trying to kill processes before all pagedaemons have
1620449c2e92SKonstantin Belousov * failed to reach the free page target would be premature.
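 *
 * Voting example (the domain count is invented): with
 * vm_ndomains = 4, the first three daemons each add a vote and
 * return; only the fourth, whose atomic_fetchadd_int() returns 3,
 * initiates vm_pageout_oom(VM_OOM_MEM).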
1621449c2e92SKonstantin Belousov */ 1622449c2e92SKonstantin Belousov static void 162376386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 162476386c7eSKonstantin Belousov int starting_page_shortage) 1625449c2e92SKonstantin Belousov { 1626449c2e92SKonstantin Belousov int old_vote; 1627449c2e92SKonstantin Belousov 162876386c7eSKonstantin Belousov if (starting_page_shortage <= 0 || starting_page_shortage != 162976386c7eSKonstantin Belousov page_shortage) 163076386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 163176386c7eSKonstantin Belousov else 163276386c7eSKonstantin Belousov vmd->vmd_oom_seq++; 163376386c7eSKonstantin Belousov if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { 1634449c2e92SKonstantin Belousov if (vmd->vmd_oom) { 1635449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1636449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1637449c2e92SKonstantin Belousov } 1638449c2e92SKonstantin Belousov return; 1639449c2e92SKonstantin Belousov } 1640449c2e92SKonstantin Belousov 164176386c7eSKonstantin Belousov /* 164276386c7eSKonstantin Belousov * Do not follow the call sequence until OOM condition is 164376386c7eSKonstantin Belousov * cleared. 164476386c7eSKonstantin Belousov */ 164576386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 164676386c7eSKonstantin Belousov 1647449c2e92SKonstantin Belousov if (vmd->vmd_oom) 1648449c2e92SKonstantin Belousov return; 1649449c2e92SKonstantin Belousov 1650449c2e92SKonstantin Belousov vmd->vmd_oom = TRUE; 1651449c2e92SKonstantin Belousov old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); 1652449c2e92SKonstantin Belousov if (old_vote != vm_ndomains - 1) 1653449c2e92SKonstantin Belousov return; 1654449c2e92SKonstantin Belousov 1655449c2e92SKonstantin Belousov /* 1656449c2e92SKonstantin Belousov * The current pagedaemon thread is the last in the quorum to 1657449c2e92SKonstantin Belousov * start OOM. Initiate the selection and signaling of the 1658449c2e92SKonstantin Belousov * victim. 1659449c2e92SKonstantin Belousov */ 1660449c2e92SKonstantin Belousov vm_pageout_oom(VM_OOM_MEM); 1661449c2e92SKonstantin Belousov 1662449c2e92SKonstantin Belousov /* 1663449c2e92SKonstantin Belousov * After one round of OOM terror, recall our vote. On the 1664449c2e92SKonstantin Belousov * next pass, current pagedaemon would vote again if the low 1665449c2e92SKonstantin Belousov * memory condition is still there, due to vmd_oom being 1666449c2e92SKonstantin Belousov * false. 1667449c2e92SKonstantin Belousov */ 1668449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1669449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1670449c2e92SKonstantin Belousov } 16712025d69bSKonstantin Belousov 16723949873fSKonstantin Belousov /* 16733949873fSKonstantin Belousov * The OOM killer is the page daemon's action of last resort when 16743949873fSKonstantin Belousov * memory allocation requests have been stalled for a prolonged period 16753949873fSKonstantin Belousov * of time because it cannot reclaim memory. This function computes 16763949873fSKonstantin Belousov * the approximate number of physical pages that could be reclaimed if 16773949873fSKonstantin Belousov * the specified address space is destroyed. 16783949873fSKonstantin Belousov * 16793949873fSKonstantin Belousov * Private, anonymous memory owned by the address space is the 16803949873fSKonstantin Belousov * principal resource that we expect to recover after an OOM kill. 
16813949873fSKonstantin Belousov * Since the physical pages mapped by the address space's COW entries 16823949873fSKonstantin Belousov * are typically shared pages, they are unlikely to be released and so 16833949873fSKonstantin Belousov * they are not counted. 16843949873fSKonstantin Belousov * 16853949873fSKonstantin Belousov * To get to the point where the page daemon runs the OOM killer, its 16863949873fSKonstantin Belousov * efforts to write-back vnode-backed pages may have stalled. This 16873949873fSKonstantin Belousov * could be caused by a memory allocation deadlock in the write path 16883949873fSKonstantin Belousov * that might be resolved by an OOM kill. Therefore, physical pages 16893949873fSKonstantin Belousov * belonging to vnode-backed objects are counted, because they might 16903949873fSKonstantin Belousov * be freed without being written out first if the address space holds 16913949873fSKonstantin Belousov * the last reference to an unlinked vnode. 16923949873fSKonstantin Belousov * 16933949873fSKonstantin Belousov * Similarly, physical pages belonging to OBJT_PHYS objects are 16943949873fSKonstantin Belousov * counted because the address space might hold the last reference to 16953949873fSKonstantin Belousov * the object. 16963949873fSKonstantin Belousov */ 16973949873fSKonstantin Belousov static long 16983949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace) 16993949873fSKonstantin Belousov { 17003949873fSKonstantin Belousov vm_map_t map; 17013949873fSKonstantin Belousov vm_map_entry_t entry; 17023949873fSKonstantin Belousov vm_object_t obj; 17033949873fSKonstantin Belousov long res; 17043949873fSKonstantin Belousov 17053949873fSKonstantin Belousov map = &vmspace->vm_map; 17063949873fSKonstantin Belousov KASSERT(!map->system_map, ("system map")); 17073949873fSKonstantin Belousov sx_assert(&map->lock, SA_LOCKED); 17083949873fSKonstantin Belousov res = 0; 17093949873fSKonstantin Belousov for (entry = map->header.next; entry != &map->header; 17103949873fSKonstantin Belousov entry = entry->next) { 17113949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 17123949873fSKonstantin Belousov continue; 17133949873fSKonstantin Belousov obj = entry->object.vm_object; 17143949873fSKonstantin Belousov if (obj == NULL) 17153949873fSKonstantin Belousov continue; 17163949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 17173949873fSKonstantin Belousov obj->ref_count != 1) 17183949873fSKonstantin Belousov continue; 17193949873fSKonstantin Belousov switch (obj->type) { 17203949873fSKonstantin Belousov case OBJT_DEFAULT: 17213949873fSKonstantin Belousov case OBJT_SWAP: 17223949873fSKonstantin Belousov case OBJT_PHYS: 17233949873fSKonstantin Belousov case OBJT_VNODE: 17243949873fSKonstantin Belousov res += obj->resident_page_count; 17253949873fSKonstantin Belousov break; 17263949873fSKonstantin Belousov } 17273949873fSKonstantin Belousov } 17283949873fSKonstantin Belousov return (res); 17293949873fSKonstantin Belousov } 17303949873fSKonstantin Belousov 17312025d69bSKonstantin Belousov void 17322025d69bSKonstantin Belousov vm_pageout_oom(int shortage) 17332025d69bSKonstantin Belousov { 17342025d69bSKonstantin Belousov struct proc *p, *bigproc; 17352025d69bSKonstantin Belousov vm_offset_t size, bigsize; 17362025d69bSKonstantin Belousov struct thread *td; 17376bed074cSKonstantin Belousov struct vmspace *vm; 17383e78e983SAlan Cox bool breakout; 17392025d69bSKonstantin Belousov 17402025d69bSKonstantin Belousov /* 
17411c58e4e5SJohn Baldwin * We keep the process bigproc locked once we find it to keep anyone
17421c58e4e5SJohn Baldwin * from messing with it; however, there is a possibility of
174328323addSBryan Drewery * deadlock if process B is bigproc and one of its child processes
17441c58e4e5SJohn Baldwin * attempts to propagate a signal to B while we are waiting for that
17451c58e4e5SJohn Baldwin * child's lock while walking this list. To avoid this, we don't block on
17461c58e4e5SJohn Baldwin * the process lock but just skip a process if it is already locked.
17475663e6deSDavid Greenman */
17485663e6deSDavid Greenman bigproc = NULL;
17495663e6deSDavid Greenman bigsize = 0;
17501005a129SJohn Baldwin sx_slock(&allproc_lock);
1751e602ba25SJulian Elischer FOREACH_PROC_IN_SYSTEM(p) {
175271943c3dSKonstantin Belousov PROC_LOCK(p);
175371943c3dSKonstantin Belousov 
17541c58e4e5SJohn Baldwin /*
17553f1c4c4fSKonstantin Belousov * If this is a system, protected or killed process, skip it.
17565663e6deSDavid Greenman */
175771943c3dSKonstantin Belousov if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
175871943c3dSKonstantin Belousov P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
175971943c3dSKonstantin Belousov p->p_pid == 1 || P_KILLED(p) ||
176071943c3dSKonstantin Belousov (p->p_pid < 48 && swap_pager_avail != 0)) {
17618606d880SJohn Baldwin PROC_UNLOCK(p);
17625663e6deSDavid Greenman continue;
17635663e6deSDavid Greenman }
17645663e6deSDavid Greenman /*
1765dcbcd518SBruce Evans * If the process is in a non-running type state,
1766e602ba25SJulian Elischer * don't touch it. Check all the threads individually.
17675663e6deSDavid Greenman */
17683e78e983SAlan Cox breakout = false;
1769e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) {
1770982d11f8SJeff Roberson thread_lock(td);
177171fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) &&
177271fad9fdSJulian Elischer !TD_IS_RUNNING(td) &&
1773f497cda2SEdward Tomasz Napierala !TD_IS_SLEEPING(td) &&
1774b98acc0aSKonstantin Belousov !TD_IS_SUSPENDED(td) &&
1775b98acc0aSKonstantin Belousov !TD_IS_SWAPPED(td)) {
1776982d11f8SJeff Roberson thread_unlock(td);
17773e78e983SAlan Cox breakout = true;
1778e602ba25SJulian Elischer break;
1779e602ba25SJulian Elischer }
1780982d11f8SJeff Roberson thread_unlock(td);
1781e602ba25SJulian Elischer }
1782e602ba25SJulian Elischer if (breakout) {
17831c58e4e5SJohn Baldwin PROC_UNLOCK(p);
17845663e6deSDavid Greenman continue;
17855663e6deSDavid Greenman }
17865663e6deSDavid Greenman /*
17875663e6deSDavid Greenman * Get the process size.
17885663e6deSDavid Greenman */
17896bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p);
17906bed074cSKonstantin Belousov if (vm == NULL) {
17916bed074cSKonstantin Belousov PROC_UNLOCK(p);
17926bed074cSKonstantin Belousov continue;
17936bed074cSKonstantin Belousov }
179495e2409aSKonstantin Belousov _PHOLD_LITE(p);
179572d97679SDavid Schultz PROC_UNLOCK(p);
179695e2409aSKonstantin Belousov sx_sunlock(&allproc_lock);
179795e2409aSKonstantin Belousov if (!vm_map_trylock_read(&vm->vm_map)) {
179871943c3dSKonstantin Belousov vmspace_free(vm);
179995e2409aSKonstantin Belousov sx_slock(&allproc_lock);
180095e2409aSKonstantin Belousov PRELE(p);
180172d97679SDavid Schultz continue;
180272d97679SDavid Schultz }
18037981aa24SKonstantin Belousov size = vmspace_swap_count(vm);
18042025d69bSKonstantin Belousov if (shortage == VM_OOM_MEM)
18053949873fSKonstantin Belousov size += vm_pageout_oom_pagecount(vm);
18063949873fSKonstantin Belousov vm_map_unlock_read(&vm->vm_map);
18076bed074cSKonstantin Belousov
vmspace_free(vm); 180895e2409aSKonstantin Belousov sx_slock(&allproc_lock); 18093949873fSKonstantin Belousov 18105663e6deSDavid Greenman /* 18113949873fSKonstantin Belousov * If this process is bigger than the biggest one, 18125663e6deSDavid Greenman * remember it. 18135663e6deSDavid Greenman */ 18145663e6deSDavid Greenman if (size > bigsize) { 18151c58e4e5SJohn Baldwin if (bigproc != NULL) 181671943c3dSKonstantin Belousov PRELE(bigproc); 18175663e6deSDavid Greenman bigproc = p; 18185663e6deSDavid Greenman bigsize = size; 181971943c3dSKonstantin Belousov } else { 182071943c3dSKonstantin Belousov PRELE(p); 182171943c3dSKonstantin Belousov } 18225663e6deSDavid Greenman } 18231005a129SJohn Baldwin sx_sunlock(&allproc_lock); 18245663e6deSDavid Greenman if (bigproc != NULL) { 18258311a2b8SWill Andrews if (vm_panic_on_oom != 0) 18268311a2b8SWill Andrews panic("out of swap space"); 182771943c3dSKonstantin Belousov PROC_LOCK(bigproc); 1828729b1e51SDavid Greenman killproc(bigproc, "out of swap space"); 1829fa885116SJulian Elischer sched_nice(bigproc, PRIO_MIN); 183071943c3dSKonstantin Belousov _PRELE(bigproc); 18311c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc); 18325663e6deSDavid Greenman } 18335663e6deSDavid Greenman } 183426f9a767SRodney W. Grimes 1835b50a4ea6SMark Johnston static bool 1836b50a4ea6SMark Johnston vm_pageout_lowmem(void) 183749a3710cSMark Johnston { 1838b50a4ea6SMark Johnston static int lowmem_ticks = 0; 1839b50a4ea6SMark Johnston int last; 184049a3710cSMark Johnston 1841b50a4ea6SMark Johnston last = atomic_load_int(&lowmem_ticks); 1842b50a4ea6SMark Johnston while ((u_int)(ticks - last) / hz >= lowmem_period) { 1843b50a4ea6SMark Johnston if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0) 1844b50a4ea6SMark Johnston continue; 1845b50a4ea6SMark Johnston 184649a3710cSMark Johnston /* 184749a3710cSMark Johnston * Decrease registered cache sizes. 184849a3710cSMark Johnston */ 184949a3710cSMark Johnston SDT_PROBE0(vm, , , vm__lowmem_scan); 185049a3710cSMark Johnston EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES); 185149a3710cSMark Johnston 185249a3710cSMark Johnston /* 185349a3710cSMark Johnston * We do this explicitly after the caches have been 185449a3710cSMark Johnston * drained above. 185549a3710cSMark Johnston */ 185649a3710cSMark Johnston uma_reclaim(); 1857b50a4ea6SMark Johnston return (true); 185849a3710cSMark Johnston } 1859b50a4ea6SMark Johnston return (false); 186049a3710cSMark Johnston } 186149a3710cSMark Johnston 186249a3710cSMark Johnston static void 1863449c2e92SKonstantin Belousov vm_pageout_worker(void *arg) 1864449c2e92SKonstantin Belousov { 1865e2068d0bSJeff Roberson struct vm_domain *vmd; 1866b50a4ea6SMark Johnston u_int ofree; 186749a3710cSMark Johnston int addl_shortage, domain, shortage; 1868e57dd910SAlan Cox bool target_met; 1869449c2e92SKonstantin Belousov 1870e2068d0bSJeff Roberson domain = (uintptr_t)arg; 1871e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 18725f8cd1c0SJeff Roberson shortage = 0; 1873e57dd910SAlan Cox target_met = true; 1874449c2e92SKonstantin Belousov 1875449c2e92SKonstantin Belousov /* 1876949c9186SKonstantin Belousov * XXXKIB It could be useful to bind pageout daemon threads to 1877949c9186SKonstantin Belousov * the cores belonging to the domain, from which vm_page_array 1878949c9186SKonstantin Belousov * is allocated. 
1879449c2e92SKonstantin Belousov */ 1880449c2e92SKonstantin Belousov 1881e2068d0bSJeff Roberson KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 1882e2068d0bSJeff Roberson vmd->vmd_last_active_scan = ticks; 1883449c2e92SKonstantin Belousov 1884449c2e92SKonstantin Belousov /* 1885449c2e92SKonstantin Belousov * The pageout daemon worker is never done, so loop forever. 1886449c2e92SKonstantin Belousov */ 1887449c2e92SKonstantin Belousov while (TRUE) { 188830fbfddaSJeff Roberson vm_domain_pageout_lock(vmd); 188949a3710cSMark Johnston 189030fbfddaSJeff Roberson /* 189130fbfddaSJeff Roberson * We need to clear wanted before we check the limits. This 189230fbfddaSJeff Roberson * prevents races with wakers who will check wanted after they 189330fbfddaSJeff Roberson * reach the limit. 189430fbfddaSJeff Roberson */ 189530fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 0); 189656ce0690SAlan Cox 189756ce0690SAlan Cox /* 18985f8cd1c0SJeff Roberson * Might the page daemon need to run again? 1899449c2e92SKonstantin Belousov */ 19005f8cd1c0SJeff Roberson if (vm_paging_needed(vmd, vmd->vmd_free_count)) { 190156ce0690SAlan Cox /* 190249a3710cSMark Johnston * Yes. If the scan failed to produce enough free 190349a3710cSMark Johnston * pages, sleep uninterruptibly for some time in the 190449a3710cSMark Johnston * hope that the laundry thread will clean some pages. 190556ce0690SAlan Cox */ 190630fbfddaSJeff Roberson vm_domain_pageout_unlock(vmd); 190749a3710cSMark Johnston if (!target_met) 19086eebec83SMark Johnston pause("pwait", hz / VM_INACT_SCAN_RATE); 1909449c2e92SKonstantin Belousov } else { 1910449c2e92SKonstantin Belousov /* 19115f8cd1c0SJeff Roberson * No, sleep until the next wakeup or until pages 19125f8cd1c0SJeff Roberson * need to have their reference stats updated. 1913449c2e92SKonstantin Belousov */ 19142c0f13aaSKonstantin Belousov if (mtx_sleep(&vmd->vmd_pageout_wanted, 191530fbfddaSJeff Roberson vm_domain_pageout_lockptr(vmd), PDROP | PVM, 19165f8cd1c0SJeff Roberson "psleep", hz / VM_INACT_SCAN_RATE) == 0) 191783c9dea1SGleb Smirnoff VM_CNT_INC(v_pdwakeups); 191856ce0690SAlan Cox } 1919be37ee79SMark Johnston 192030fbfddaSJeff Roberson /* Prevent spurious wakeups by ensuring that wanted is set. */ 192130fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 1); 192230fbfddaSJeff Roberson 192330fbfddaSJeff Roberson /* 192430fbfddaSJeff Roberson * Use the controller to calculate how many pages to free in 1925b50a4ea6SMark Johnston * this interval, and scan the inactive queue. If the lowmem 1926b50a4ea6SMark Johnston * handlers appear to have freed up some pages, subtract the 1927b50a4ea6SMark Johnston * difference from the inactive queue scan target. 192830fbfddaSJeff Roberson */ 19295f8cd1c0SJeff Roberson shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count); 193049a3710cSMark Johnston if (shortage > 0) { 1931b50a4ea6SMark Johnston ofree = vmd->vmd_free_count; 1932b50a4ea6SMark Johnston if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree) 1933b50a4ea6SMark Johnston shortage -= min(vmd->vmd_free_count - ofree, 1934b50a4ea6SMark Johnston (u_int)shortage); 193549a3710cSMark Johnston target_met = vm_pageout_scan_inactive(vmd, shortage, 1936be37ee79SMark Johnston &addl_shortage); 193749a3710cSMark Johnston } else 193849a3710cSMark Johnston addl_shortage = 0; 193956ce0690SAlan Cox 1940be37ee79SMark Johnston /* 1941be37ee79SMark Johnston * Scan the active queue. 
A positive value for shortage 1942be37ee79SMark Johnston * indicates that we must aggressively deactivate pages to avoid 1943be37ee79SMark Johnston * a shortfall. 1944be37ee79SMark Johnston */ 19457bb4634eSMark Johnston shortage = vm_pageout_active_target(vmd) + addl_shortage; 1946be37ee79SMark Johnston vm_pageout_scan_active(vmd, shortage); 1947449c2e92SKonstantin Belousov } 1948449c2e92SKonstantin Belousov } 1949449c2e92SKonstantin Belousov 1950df8bae1dSRodney W. Grimes /* 19514d19f4adSSteven Hartland * vm_pageout_init initialises basic pageout daemon settings. 1952df8bae1dSRodney W. Grimes */ 19532b14f991SJulian Elischer static void 1954e2068d0bSJeff Roberson vm_pageout_init_domain(int domain) 1955df8bae1dSRodney W. Grimes { 1956e2068d0bSJeff Roberson struct vm_domain *vmd; 19575f8cd1c0SJeff Roberson struct sysctl_oid *oid; 1958e2068d0bSJeff Roberson 1959e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 1960e2068d0bSJeff Roberson vmd->vmd_interrupt_free_min = 2; 1961f6b04d2bSDavid Greenman 196245ae1d91SAlan Cox /* 196345ae1d91SAlan Cox * v_free_reserved needs to include enough for the largest 196445ae1d91SAlan Cox * swap pager structures plus enough for any pv_entry structs 196545ae1d91SAlan Cox * when paging. 196645ae1d91SAlan Cox */ 1967e2068d0bSJeff Roberson if (vmd->vmd_page_count > 1024) 1968e2068d0bSJeff Roberson vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200; 19692feb50bfSAttilio Rao else 1970e2068d0bSJeff Roberson vmd->vmd_free_min = 4; 1971e2068d0bSJeff Roberson vmd->vmd_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 1972e2068d0bSJeff Roberson vmd->vmd_interrupt_free_min; 1973e2068d0bSJeff Roberson vmd->vmd_free_reserved = vm_pageout_page_count + 1974e2068d0bSJeff Roberson vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768); 1975e2068d0bSJeff Roberson vmd->vmd_free_severe = vmd->vmd_free_min / 2; 1976e2068d0bSJeff Roberson vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved; 1977e2068d0bSJeff Roberson vmd->vmd_free_min += vmd->vmd_free_reserved; 1978e2068d0bSJeff Roberson vmd->vmd_free_severe += vmd->vmd_free_reserved; 1979e2068d0bSJeff Roberson vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2; 1980e2068d0bSJeff Roberson if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3) 1981e2068d0bSJeff Roberson vmd->vmd_inactive_target = vmd->vmd_free_count / 3; 1982df8bae1dSRodney W. Grimes 1983d9e23210SJeff Roberson /* 19845f8cd1c0SJeff Roberson * Set the default wakeup threshold to be 10% below the paging 19855f8cd1c0SJeff Roberson * target. This keeps the steady state out of shortfall. 1986d9e23210SJeff Roberson */ 19875f8cd1c0SJeff Roberson vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9; 1988e2068d0bSJeff Roberson 1989e2068d0bSJeff Roberson /* 1990e2068d0bSJeff Roberson * Target amount of memory to move out of the laundry queue during a 1991e2068d0bSJeff Roberson * background laundering. This is proportional to the amount of system 1992e2068d0bSJeff Roberson * memory. 1993e2068d0bSJeff Roberson */ 1994e2068d0bSJeff Roberson vmd->vmd_background_launder_target = (vmd->vmd_free_target - 1995e2068d0bSJeff Roberson vmd->vmd_free_min) / 10; 19965f8cd1c0SJeff Roberson 19975f8cd1c0SJeff Roberson /* Initialize the pageout daemon pid controller. 
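 *
 * Sketch of the wiring (roles inferred from the call below): the
 * controller samples vmd_free_count VM_INACT_SCAN_RATE times per
 * second against the vmd_free_target setpoint, and its bounded
 * output becomes the "shortage" passed to the inactive queue scan
 * in vm_pageout_worker().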
*/ 19985f8cd1c0SJeff Roberson pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE, 19995f8cd1c0SJeff Roberson vmd->vmd_free_target, PIDCTRL_BOUND, 20005f8cd1c0SJeff Roberson PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD); 20015f8cd1c0SJeff Roberson oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO, 20025f8cd1c0SJeff Roberson "pidctrl", CTLFLAG_RD, NULL, ""); 20035f8cd1c0SJeff Roberson pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid)); 2004e2068d0bSJeff Roberson } 2005e2068d0bSJeff Roberson 2006e2068d0bSJeff Roberson static void 2007e2068d0bSJeff Roberson vm_pageout_init(void) 2008e2068d0bSJeff Roberson { 2009e2068d0bSJeff Roberson u_int freecount; 2010e2068d0bSJeff Roberson int i; 2011e2068d0bSJeff Roberson 2012e2068d0bSJeff Roberson /* 2013e2068d0bSJeff Roberson * Initialize some paging parameters. 2014e2068d0bSJeff Roberson */ 2015e2068d0bSJeff Roberson if (vm_cnt.v_page_count < 2000) 2016e2068d0bSJeff Roberson vm_pageout_page_count = 8; 2017e2068d0bSJeff Roberson 2018e2068d0bSJeff Roberson freecount = 0; 2019e2068d0bSJeff Roberson for (i = 0; i < vm_ndomains; i++) { 2020e2068d0bSJeff Roberson struct vm_domain *vmd; 2021e2068d0bSJeff Roberson 2022e2068d0bSJeff Roberson vm_pageout_init_domain(i); 2023e2068d0bSJeff Roberson vmd = VM_DOMAIN(i); 2024e2068d0bSJeff Roberson vm_cnt.v_free_reserved += vmd->vmd_free_reserved; 2025e2068d0bSJeff Roberson vm_cnt.v_free_target += vmd->vmd_free_target; 2026e2068d0bSJeff Roberson vm_cnt.v_free_min += vmd->vmd_free_min; 2027e2068d0bSJeff Roberson vm_cnt.v_inactive_target += vmd->vmd_inactive_target; 2028e2068d0bSJeff Roberson vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min; 2029e2068d0bSJeff Roberson vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min; 2030e2068d0bSJeff Roberson vm_cnt.v_free_severe += vmd->vmd_free_severe; 2031e2068d0bSJeff Roberson freecount += vmd->vmd_free_count; 2032e2068d0bSJeff Roberson } 2033d9e23210SJeff Roberson 2034d9e23210SJeff Roberson /* 2035d9e23210SJeff Roberson * Set interval in seconds for active scan. We want to visit each 2036c9612b2dSJeff Roberson * page at least once every ten minutes. This is to prevent worst 2037c9612b2dSJeff Roberson * case paging behaviors with stale active LRU. 2038d9e23210SJeff Roberson */ 2039d9e23210SJeff Roberson if (vm_pageout_update_period == 0) 2040c9612b2dSJeff Roberson vm_pageout_update_period = 600; 2041d9e23210SJeff Roberson 2042df8bae1dSRodney W. Grimes if (vm_page_max_wired == 0) 2043e2068d0bSJeff Roberson vm_page_max_wired = freecount / 3; 20444d19f4adSSteven Hartland } 20454d19f4adSSteven Hartland 20464d19f4adSSteven Hartland /* 20474d19f4adSSteven Hartland * vm_pageout is the high level pageout daemon. 20484d19f4adSSteven Hartland */ 20494d19f4adSSteven Hartland static void 20504d19f4adSSteven Hartland vm_pageout(void) 20514d19f4adSSteven Hartland { 2052920239efSMark Johnston struct proc *p; 2053920239efSMark Johnston struct thread *td; 2054920239efSMark Johnston int error, first, i; 2055920239efSMark Johnston 2056920239efSMark Johnston p = curproc; 2057920239efSMark Johnston td = curthread; 2058df8bae1dSRodney W. 
Grimes 205924a1cce3SDavid Greenman swap_pager_swap_init(); 2060920239efSMark Johnston for (first = -1, i = 0; i < vm_ndomains; i++) { 206130c5525bSAndrew Gallatin if (VM_DOMAIN_EMPTY(i)) { 206230c5525bSAndrew Gallatin if (bootverbose) 206330c5525bSAndrew Gallatin printf("domain %d empty; skipping pageout\n", 206430c5525bSAndrew Gallatin i); 206530c5525bSAndrew Gallatin continue; 206630c5525bSAndrew Gallatin } 2067920239efSMark Johnston if (first == -1) 2068920239efSMark Johnston first = i; 2069920239efSMark Johnston else { 2070920239efSMark Johnston error = kthread_add(vm_pageout_worker, 2071920239efSMark Johnston (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i); 2072920239efSMark Johnston if (error != 0) 2073920239efSMark Johnston panic("starting pageout for domain %d: %d\n", 2074449c2e92SKonstantin Belousov i, error); 2075dc2efb27SJohn Dyson } 2076e2068d0bSJeff Roberson error = kthread_add(vm_pageout_laundry_worker, 2077920239efSMark Johnston (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i); 2078e2068d0bSJeff Roberson if (error != 0) 2079920239efSMark Johnston panic("starting laundry for domain %d: %d", i, error); 2080f919ebdeSDavid Greenman } 2081920239efSMark Johnston error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma"); 208244ec2b63SKonstantin Belousov if (error != 0) 208344ec2b63SKonstantin Belousov panic("starting uma_reclaim helper, error %d\n", error); 2084920239efSMark Johnston 2085920239efSMark Johnston snprintf(td->td_name, sizeof(td->td_name), "dom%d", first); 2086920239efSMark Johnston vm_pageout_worker((void *)(uintptr_t)first); 2087df8bae1dSRodney W. Grimes } 208826f9a767SRodney W. Grimes 20896b4b77adSAlan Cox /* 2090280d15cdSMark Johnston * Perform an advisory wakeup of the page daemon. 20916b4b77adSAlan Cox */ 2092e0c5a895SJohn Dyson void 2093e2068d0bSJeff Roberson pagedaemon_wakeup(int domain) 2094e0c5a895SJohn Dyson { 2095e2068d0bSJeff Roberson struct vm_domain *vmd; 2096a1c0a785SAlan Cox 2097e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 209830fbfddaSJeff Roberson vm_domain_pageout_assert_unlocked(vmd); 209930fbfddaSJeff Roberson if (curproc == pageproc) 210030fbfddaSJeff Roberson return; 2101280d15cdSMark Johnston 210230fbfddaSJeff Roberson if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) { 210330fbfddaSJeff Roberson vm_domain_pageout_lock(vmd); 210430fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 1); 2105e2068d0bSJeff Roberson wakeup(&vmd->vmd_pageout_wanted); 210630fbfddaSJeff Roberson vm_domain_pageout_unlock(vmd); 2107e0c5a895SJohn Dyson } 2108e0c5a895SJohn Dyson } 2109
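/*
 * Illustrative sketch of the wakeup handshake above (not part of the
 * original file; a condensed reading of pagedaemon_wakeup() and
 * vm_pageout_worker()): a waker does, in effect,
 *
 *	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
 *		vm_domain_pageout_lock(vmd);
 *		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
 *		wakeup(&vmd->vmd_pageout_wanted);
 *		vm_domain_pageout_unlock(vmd);
 *	}
 *
 * while the daemon clears vmd_pageout_wanted under the pageout lock
 * before rechecking the paging targets, so a request posted after the
 * clear cannot be lost.
 */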