/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static bool vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

/* Pagedaemon activity rates, in subdivisions of one second. */
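/*
 * For example, a VM_LAUNDER_RATE of 10 means the laundry thread performs up
 * to ten laundering installments per second, sleeping hz / VM_LAUNDER_RATE
 * ticks between installments (see vm_pageout_laundry_worker() below).
 */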
#define	VM_LAUNDER_RATE		10
#define	VM_INACT_SCAN_RATE	10

static int vm_pageout_oom_seq = 12;

static int vm_pageout_update_period;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static time_t lowmem_uptime;
static int swapdev_enabled;

static int vm_panic_on_oom = 0;

SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
	"panic on out of memory instead of killing the largest process");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
	CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
	"Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
	"Low memory callback period");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
	CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
	"back-to-back calls to oom detector to start OOM");

static int act_scan_laundry_weight = 3;
SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
    &act_scan_laundry_weight, 0,
    "weight given to clean vs. dirty pages in active queue scans");

static u_int vm_background_launder_rate = 4096;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
    &vm_background_launder_rate, 0,
    "background laundering rate, in kilobytes per second");

static u_int vm_background_launder_max = 20 * 1024;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
    &vm_background_launder_max, 0, "background laundering cap, in kilobytes");

int vm_pageout_page_count = 32;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

static u_int isqrt(u_int num);
static int vm_pageout_launder(struct vm_domain *vmd, int launder,
    bool in_shortfall);
static void vm_pageout_laundry_worker(void *arg);

struct scan_state {
	struct vm_batchqueue bq;
	struct vm_pagequeue *pq;
	vm_page_t	marker;
	int		maxscan;
	int		scanned;
};

static void
vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
    vm_page_t marker, vm_page_t after, int maxscan)
{

	vm_pagequeue_assert_locked(pq);
	KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
	    ("marker %p already enqueued", marker));

	if (after == NULL)
		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
	else
		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
	vm_page_aflag_set(marker, PGA_ENQUEUED);

	vm_batchqueue_init(&ss->bq);
	ss->pq = pq;
	ss->marker = marker;
	ss->maxscan = maxscan;
	ss->scanned = 0;
	vm_pagequeue_unlock(pq);
}

static void
vm_pageout_end_scan(struct scan_state *ss)
{
	struct vm_pagequeue *pq;

	pq = ss->pq;
	vm_pagequeue_assert_locked(pq);
	KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
	VM_CNT_ADD(v_pdpages, ss->scanned);
}

/*
 * Add a small number of queued pages to a batch queue for later processing
 * without the corresponding queue lock held.  The caller must have enqueued a
 * marker page at the desired start point for the scan.  Pages will be
 * physically dequeued if the caller so requests.  Otherwise, the returned
 * batch may contain marker pages, and it is up to the caller to handle them.
 *
 * When processing the batch queue, vm_page_queue() must be used to
 * determine whether the page has been logically dequeued by another thread.
 * Once this check is performed, the page lock guarantees that the page will
 * not be disassociated from the queue.
 */
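/*
 * A rough sketch of how these scan helpers are used (see
 * vm_pageout_launder() and vm_pageout_scan() below):
 *
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *	while ((m = vm_pageout_next(&ss, dequeue)) != NULL) {
 *		... examine and reclaim or requeue m ...
 *	}
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_end_scan(&ss);
 *	vm_pagequeue_unlock(pq);
 *
 * vm_pageout_init_scan() drops the queue lock before returning;
 * vm_pageout_end_scan() must be called with it held again.
 */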
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
{
	struct vm_pagequeue *pq;
	vm_page_t m, marker;

	marker = ss->marker;
	pq = ss->pq;

	KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
	    ("marker %p not enqueued", ss->marker));

	vm_pagequeue_lock(pq);
	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
	    m = TAILQ_NEXT(m, plinks.q), ss->scanned++) {
		if ((m->flags & PG_MARKER) == 0) {
			KASSERT((m->aflags & PGA_ENQUEUED) != 0,
			    ("page %p not enqueued", m));
			KASSERT((m->flags & PG_FICTITIOUS) == 0,
			    ("Fictitious page %p cannot be in page queue", m));
			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
			    ("Unmanaged page %p cannot be in page queue", m));
		} else if (dequeue)
			continue;

		(void)vm_batchqueue_insert(&ss->bq, m);
		if (dequeue) {
			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
			vm_page_aflag_clear(m, PGA_ENQUEUED);
		}
	}
	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
	if (__predict_true(m != NULL))
		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
	else
		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
	if (dequeue)
		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
	vm_pagequeue_unlock(pq);
}

/* Return the next page to be scanned, or NULL if the scan is complete. */
static __always_inline vm_page_t
vm_pageout_next(struct scan_state *ss, const bool dequeue)
{

	if (ss->bq.bq_cnt == 0)
		vm_pageout_collect_batch(ss, dequeue);
	return (vm_batchqueue_pop(&ss->bq));
}

/*
 * Scan for pages at adjacent offsets within the given page's object that are
 * eligible for laundering, form a cluster of these pages and the given page,
 * and launder that cluster.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
	vm_pindex_t pindex;
	int ib, is, page_base, pageout_count;

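	/*
	 * Note: mc[] is filled outward from its midpoint: the given page is
	 * placed at mc[vm_pageout_page_count], the backward scan below fills
	 * slots downward from there, and the forward scan fills slots upward,
	 * so the cluster ends up in mc[page_base .. page_base + pageout_count - 1].
	 */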
	vm_page_assert_locked(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	pindex = m->pindex;

	vm_page_assert_unbusied(m);
	KASSERT(!vm_page_held(m), ("page %p is held", m));

	pmap_remove_write(m);
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * We can cluster only if the page is not clean, busy, or held, and
	 * the page is in the laundry queue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib != 0 && pageout_count < vm_pageout_page_count) {
		if (ib > pindex) {
			ib = 0;
			break;
		}
		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		pmap_remove_write(p);
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;

		/*
		 * We are at an alignment boundary.  Stop here, and switch
		 * directions.  Do not clear ib.
		 */
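		/*
		 * (pindex - (ib - 1)) is the index of the lowest page
		 * collected so far; stopping when it is a multiple of
		 * vm_pageout_page_count keeps the cluster aligned to
		 * vm_pageout_page_count-page boundaries in the object.
		 */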
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}
	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
			break;
		vm_page_test_dirty(p);
		if (p->dirty == 0)
			break;
		vm_page_lock(p);
		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
			vm_page_unlock(p);
			break;
		}
		pmap_remove_write(p);
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
	if (ib != 0 && pageout_count < vm_pageout_page_count)
		goto more;

	return (vm_pageout_flush(&mc[page_base], pageout_count,
	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 *	for any page in runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Mark the pages busy and verify that they're valid
	 * and read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
		    ("vm_pageout_flush: writeable page %p", mc[i]));
		vm_page_sbusy(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			vm_page_lock(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			vm_page_unlock(mt);
			/* FALLTHROUGH */
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * The page is outside the object's range.  We pretend
			 * that the page out worked and clean the page, so the
			 * changes will be lost if the page is reclaimed by
			 * the page daemon.
			 */
			vm_page_undirty(mt);
			vm_page_lock(mt);
			if (vm_page_in_laundry(mt))
				vm_page_deactivate_noreuse(mt);
			vm_page_unlock(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out to swap because the
			 * pager wasn't able to find space, place the page in
			 * the PQ_UNSWAPPABLE holding queue.  This is an
			 * optimization that prevents the page daemon from
			 * wasting CPU cycles on pages that cannot be reclaimed
			 * because no swap device is configured.
			 *
			 * Otherwise, reactivate the page so that it doesn't
			 * clog the laundry and inactive queues.  (We will try
			 * paging it out again later.)
			 */
			vm_page_lock(mt);
			if (object->type == OBJT_SWAP &&
			    pageout_status[i] == VM_PAGER_FAIL) {
				vm_page_unswappable(mt);
				numpagedout++;
			} else
				vm_page_activate(mt);
			vm_page_unlock(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

	atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

	if (swap_pager_nswapdev() == 1)
		atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
static int
vm_pageout_clean(vm_page_t m, int *numpagedout)
{
	struct vnode *vp;
	struct mount *mp;
	vm_object_t object;
	vm_pindex_t pindex;
	int error, lockmode;

	vm_page_assert_locked(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;
	vp = NULL;
	mp = NULL;

	/*
	 * The object is already known NOT to be dead.  It
	 * is possible for the vget() to block the whole
	 * pageout daemon, but the new low-memory handling
	 * code should prevent it.
	 *
	 * We can't wait forever for the vnode lock, we might
	 * deadlock due to a vn_read() getting stuck in
	 * vm_wait while holding this vnode.  We skip the
	 * vnode if we can't get it in a reasonable amount
	 * of time.
	 */
	if (object->type == OBJT_VNODE) {
		vm_page_unlock(m);
		vp = object->handle;
		if (vp->v_type == VREG &&
		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			mp = NULL;
			error = EDEADLK;
			goto unlock_all;
		}
		KASSERT(mp != NULL,
		    ("vp %p with NULL v_mount", vp));
		vm_object_reference_locked(object);
		pindex = m->pindex;
		VM_OBJECT_WUNLOCK(object);
		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
		    LK_SHARED : LK_EXCLUSIVE;
		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
			vp = NULL;
			error = EDEADLK;
			goto unlock_mp;
		}
		VM_OBJECT_WLOCK(object);

		/*
		 * Ensure that the object and vnode were not disassociated
		 * while locks were dropped.
		 */
		if (vp->v_object != object) {
			error = ENOENT;
			goto unlock_all;
		}
		vm_page_lock(m);

		/*
		 * While the object and page were unlocked, the page
		 * may have been:
		 * (1) moved to a different queue,
		 * (2) reallocated to a different object,
		 * (3) reallocated to a different offset, or
		 * (4) cleaned.
		 */
		if (!vm_page_in_laundry(m) || m->object != object ||
		    m->pindex != pindex || m->dirty == 0) {
			vm_page_unlock(m);
			error = ENXIO;
			goto unlock_all;
		}

		/*
		 * The page may have been busied or referenced while the object
		 * and page locks were released.
		 */
		if (vm_page_busied(m) || vm_page_held(m)) {
			vm_page_unlock(m);
			error = EBUSY;
			goto unlock_all;
		}
	}

	/*
	 * If a page is dirty, then it is either being washed
	 * (but not yet cleaned) or it is still in the
	 * laundry.  If it is still in the laundry, then we
	 * start the cleaning operation.
	 */
	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
		error = EIO;

unlock_all:
	VM_OBJECT_WUNLOCK(object);

unlock_mp:
	vm_page_lock_assert(m, MA_NOTOWNED);
	if (mp != NULL) {
		if (vp != NULL)
			vput(vp);
		vm_object_deallocate(object);
		vn_finished_write(mp);
	}

	return (error);
}

/*
 * Attempt to launder the specified number of pages.
 *
 * Returns the number of pages successfully laundered.
 */
static int
vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
	struct scan_state ss;
	struct vm_pagequeue *pq;
	struct mtx *mtx;
	vm_object_t object;
	vm_page_t m, marker;
	int act_delta, error, numpagedout, queue, starting_target;
	int vnodes_skipped;
	bool obj_locked, pageout_ok;

	mtx = NULL;
	obj_locked = false;
	object = NULL;
	starting_target = launder;
	vnodes_skipped = 0;

	/*
	 * Scan the laundry queues for pages eligible to be laundered.  We stop
	 * once the target number of dirty pages have been laundered, or once
	 * we've reached the end of the queue.  A single iteration of this loop
	 * may cause more than one page to be laundered because of clustering.
	 *
	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
	 * swap devices are configured.
	 */
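	/*
	 * swapdev_enabled is maintained by the vm_pageout_swapon() and
	 * vm_pageout_swapoff() eventhandlers registered in
	 * vm_pageout_laundry_worker() below.
	 */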
	if (atomic_load_acq_int(&swapdev_enabled))
		queue = PQ_UNSWAPPABLE;
	else
		queue = PQ_LAUNDRY;

scan:
	marker = &vmd->vmd_markers[queue];
	pq = &vmd->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
		if (__predict_false((m->flags & PG_MARKER) != 0))
			continue;

		vm_page_change_lock(m, &mtx);

recheck:
		/*
		 * The page may have been disassociated from the queue
		 * while locks were dropped.
		 */
		if (vm_page_queue(m) != queue)
			continue;

		/*
		 * A requeue was requested, so this page gets a second
		 * chance.
		 */
		if ((m->aflags & PGA_REQUEUE) != 0) {
			vm_page_requeue(m);
			continue;
		}

		/*
		 * Held pages are essentially stuck in the queue.
		 *
		 * Wired pages may not be freed.  Complete their removal
		 * from the queue now to avoid needless revisits during
		 * future scans.
		 */
		if (m->hold_count != 0)
			continue;
		if (m->wire_count != 0) {
			vm_page_dequeue_deferred(m);
			continue;
		}

		if (object != m->object) {
			if (obj_locked) {
				VM_OBJECT_WUNLOCK(object);
				obj_locked = false;
			}
			object = m->object;
		}
		if (!obj_locked) {
			if (!VM_OBJECT_TRYWLOCK(object)) {
				mtx_unlock(mtx);
				/* Depends on type-stability. */
				VM_OBJECT_WLOCK(object);
				obj_locked = true;
				mtx_lock(mtx);
				goto recheck;
			} else
				obj_locked = true;
		}

		if (vm_page_busied(m))
			continue;

		/*
		 * Invalid pages can be easily freed.  They cannot be
		 * mapped; vm_page_free() asserts this.
		 */
		if (m->valid == 0)
			goto free_page;

		/*
		 * If the page has been referenced and the object is not dead,
		 * reactivate or requeue the page depending on whether the
		 * object is mapped.
		 */
		if ((m->aflags & PGA_REFERENCED) != 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta = 1;
		} else
			act_delta = 0;
		if (object->ref_count != 0)
			act_delta += pmap_ts_referenced(m);
		else {
			KASSERT(!pmap_page_is_mapped(m),
			    ("page %p is mapped", m));
		}
		if (act_delta != 0) {
			if (object->ref_count != 0) {
				VM_CNT_INC(v_reactivated);
				vm_page_activate(m);

				/*
				 * Increase the activation count if the page
				 * was referenced while in the laundry queue.
				 * This makes it less likely that the page will
				 * be returned prematurely to the inactive
				 * queue.
				 */
				m->act_count += act_delta + ACT_ADVANCE;

				/*
				 * If this was a background laundering, count
				 * activated pages towards our target.  The
				 * purpose of background laundering is to ensure
				 * that pages are eventually cycled through the
				 * laundry queue, and an activation is a valid
				 * way out.
				 */
				if (!in_shortfall)
					launder--;
				continue;
			} else if ((object->flags & OBJ_DEAD) == 0) {
				vm_page_requeue(m);
				continue;
			}
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of freeing it.  If, however, any of the page's
		 * mappings allow write access, then the page may still be
		 * modified until the last of those mappings are removed.
		 */
		if (object->ref_count != 0) {
			vm_page_test_dirty(m);
			if (m->dirty == 0)
				pmap_remove_all(m);
		}

		/*
		 * Clean pages are freed, and dirty pages are paged out unless
		 * they belong to a dead object.  Requeueing dirty pages from
		 * dead objects is pointless, as they are being paged out and
		 * freed by the thread that destroyed the object.
		 */
		if (m->dirty == 0) {
free_page:
			vm_page_free(m);
			VM_CNT_INC(v_dfree);
		} else if ((object->flags & OBJ_DEAD) == 0) {
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_DEFAULT)
				pageout_ok = true;
			else if (disable_swap_pageouts)
				pageout_ok = false;
			else
				pageout_ok = true;
			if (!pageout_ok) {
				vm_page_requeue(m);
				continue;
			}

			/*
			 * Form a cluster with adjacent, dirty pages from the
			 * same object, and page out that entire cluster.
			 *
			 * The adjacent, dirty pages must also be in the
			 * laundry.  However, their mappings are not checked
			 * for new references.  Consequently, a recently
			 * referenced page may be paged out.  However, that
			 * page will not be prematurely reclaimed.  After page
			 * out, the page will be placed in the inactive queue,
			 * where any new references will be detected and the
			 * page reactivated.
			 */
			error = vm_pageout_clean(m, &numpagedout);
			if (error == 0) {
				launder -= numpagedout;
				ss.scanned += numpagedout;
			} else if (error == EDEADLK) {
				pageout_lock_miss++;
				vnodes_skipped++;
			}
			mtx = NULL;
			obj_locked = false;
		}
	}
	if (mtx != NULL) {
		mtx_unlock(mtx);
		mtx = NULL;
	}
	if (obj_locked) {
		VM_OBJECT_WUNLOCK(object);
		obj_locked = false;
	}
	vm_pagequeue_lock(pq);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);

	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
		queue = PQ_LAUNDRY;
		goto scan;
	}

	/*
	 * Wakeup the sync daemon if we skipped a vnode in a writeable object
	 * and we didn't launder enough pages.
	 */
	if (vnodes_skipped > 0 && launder > 0)
		(void)speedup_syncer();

	return (starting_target - launder);
}

/*
 * Compute the integer square root.
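 *
 * This is the standard shift-and-subtract ("digit-by-digit") method: "bit"
 * starts at the largest power of four representable in a u_int and is
 * lowered until it does not exceed num; each loop iteration then determines
 * one bit of the result.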
 */
static u_int
isqrt(u_int num)
{
	u_int bit, root, tmp;

	bit = 1u << ((NBBY * sizeof(u_int)) - 2);
	while (bit > num)
		bit >>= 2;
	root = 0;
	while (bit != 0) {
		tmp = root + bit;
		root >>= 1;
		if (num >= tmp) {
			num -= tmp;
			root += bit;
		}
		bit >>= 2;
	}
	return (root);
}

/*
 * Perform the work of the laundry thread: periodically wake up and determine
 * whether any pages need to be laundered.  If so, determine the number of pages
 * that need to be laundered, and launder them.
 */
static void
vm_pageout_laundry_worker(void *arg)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	uint64_t nclean, ndirty, nfreed;
	int domain, last_target, launder, shortfall, shortfall_cycle, target;
	bool in_shortfall;

	domain = (uintptr_t)arg;
	vmd = VM_DOMAIN(domain);
	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));

	shortfall = 0;
	in_shortfall = false;
	shortfall_cycle = 0;
	target = 0;
	nfreed = 0;

	/*
	 * Calls to these handlers are serialized by the swap syscall lock.
	 */
	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
	    EVENTHANDLER_PRI_ANY);
	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
	    EVENTHANDLER_PRI_ANY);

	/*
	 * The pageout laundry worker is never done, so loop forever.
	 */
	for (;;) {
		KASSERT(target >= 0, ("negative target %d", target));
		KASSERT(shortfall_cycle >= 0,
		    ("negative cycle %d", shortfall_cycle));
		launder = 0;

		/*
		 * First determine whether we need to launder pages to meet a
		 * shortage of free pages.
		 */
		if (shortfall > 0) {
			in_shortfall = true;
			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
			target = shortfall;
		} else if (!in_shortfall)
			goto trybackground;
		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
			/*
			 * We recently entered shortfall and began laundering
			 * pages.  If we have completed that laundering run
			 * (and we are no longer in shortfall) or we have met
			 * our laundry target through other activity, then we
			 * can stop laundering pages.
			 */
			in_shortfall = false;
			target = 0;
			goto trybackground;
		}
		launder = target / shortfall_cycle--;
		goto dolaundry;

		/*
		 * There's no immediate need to launder any pages; see if we
		 * meet the conditions to perform background laundering:
		 *
		 * 1. The ratio of dirty to clean inactive pages exceeds the
		 *    background laundering threshold, or
		 * 2. we haven't yet reached the target of the current
		 *    background laundering run.
		 *
		 * The background laundering threshold is not a constant.
		 * Instead, it is a slowly growing function of the number of
		 * clean pages freed by the page daemon since the last
		 * background laundering.  Thus, as the ratio of dirty to
		 * clean inactive pages grows, the amount of memory pressure
		 * required to trigger laundering decreases.  We ensure
		 * that the threshold is non-zero after an inactive queue
		 * scan, even if that scan failed to free a single clean page.
		 */
trybackground:
		nclean = vmd->vmd_free_count +
		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
			target = vmd->vmd_background_launder_target;
		}

		/*
		 * We have a non-zero background laundering target.  If we've
		 * laundered up to our maximum without observing a page daemon
		 * request, just stop.  This is a safety belt that ensures we
		 * don't launder an excessive amount if memory pressure is low
		 * and the ratio of dirty to clean pages is large.  Otherwise,
		 * proceed at the background laundering rate.
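		 *
		 * The per-wakeup installment computed below is derived from
		 * vm_background_launder_rate and VM_LAUNDER_RATE and is
		 * capped at the remaining target, so a large target is paid
		 * out gradually rather than in a single burst of I/O.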
		 */
		if (target > 0) {
			if (nfreed > 0) {
				nfreed = 0;
				last_target = target;
			} else if (last_target - target >=
			    vm_background_launder_max * PAGE_SIZE / 1024) {
				target = 0;
			}
			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
			launder /= VM_LAUNDER_RATE;
			if (launder > target)
				launder = target;
		}

dolaundry:
		if (launder > 0) {
			/*
			 * Because of I/O clustering, the number of laundered
			 * pages could exceed "target" by the maximum size of
			 * a cluster minus one.
			 */
			target -= min(vm_pageout_launder(vmd, launder,
			    in_shortfall), target);
			pause("laundp", hz / VM_LAUNDER_RATE);
		}

		/*
		 * If we're not currently laundering pages and the page daemon
		 * hasn't posted a new request, sleep until the page daemon
		 * kicks us.
		 */
		vm_pagequeue_lock(pq);
		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
			(void)mtx_sleep(&vmd->vmd_laundry_request,
			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);

		/*
		 * If the pagedaemon has indicated that it's in shortfall, start
		 * a shortfall laundering unless we're already in the middle of
		 * one.  This may preempt a background laundering.
		 */
		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
		    (!in_shortfall || shortfall_cycle == 0)) {
			shortfall = vm_laundry_target(vmd) +
			    vmd->vmd_pageout_deficit;
			target = 0;
		} else
			shortfall = 0;

		if (target == 0)
			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
		nfreed += vmd->vmd_clean_pages_freed;
		vmd->vmd_clean_pages_freed = 0;
		vm_pagequeue_unlock(pq);
	}
}

static int
vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m)
{
	struct vm_domain *vmd;

	if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
		return (0);
	vm_page_aflag_set(m, PGA_ENQUEUED);
	if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {
		vmd = vm_pagequeue_domain(m);
		TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
		vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
	} else if ((m->aflags & PGA_REQUEUE) != 0) {
		TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
		vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
	} else
		TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q);
	return (1);
}

/*
 * Re-add stuck pages to the inactive queue.  We will examine them again
 * during the next scan.  If the queue state of a page has changed since
 * it was physically removed from the page queue in
 * vm_pageout_collect_batch(), don't do anything with that page.
 */
static void
vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
    vm_page_t m)
{
	struct vm_pagequeue *pq;
	int delta;

	delta = 0;
	pq = ss->pq;

	if (m != NULL) {
		if (vm_batchqueue_insert(bq, m))
			return;
		vm_pagequeue_lock(pq);
		delta += vm_pageout_reinsert_inactive_page(ss, m);
	} else
		vm_pagequeue_lock(pq);
	while ((m = vm_batchqueue_pop(bq)) != NULL)
		delta += vm_pageout_reinsert_inactive_page(ss, m);
	vm_pagequeue_cnt_add(pq, delta);
	vm_pagequeue_unlock(pq);
	vm_batchqueue_init(bq);
}

/*
Grimes * vm_pageout_scan does the dirty work for the pageout daemon. 1163d9e23210SJeff Roberson * 1164ebcddc72SAlan Cox * pass == 0: Update active LRU/deactivate pages 1165ebcddc72SAlan Cox * pass >= 1: Free inactive pages 1166e57dd910SAlan Cox * 1167e57dd910SAlan Cox * Returns true if pass was zero or enough pages were freed by the inactive 1168e57dd910SAlan Cox * queue scan to meet the target. 1169df8bae1dSRodney W. Grimes */ 1170e57dd910SAlan Cox static bool 11715f8cd1c0SJeff Roberson vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage) 1172df8bae1dSRodney W. Grimes { 11735cd29d0fSMark Johnston struct scan_state ss; 11745cd29d0fSMark Johnston struct vm_batchqueue rq; 11755cd29d0fSMark Johnston struct mtx *mtx; 11765cd29d0fSMark Johnston vm_page_t m, marker; 11778d220203SAlan Cox struct vm_pagequeue *pq; 1178df8bae1dSRodney W. Grimes vm_object_t object; 117922cf98d1SAlan Cox long min_scan; 11805cd29d0fSMark Johnston int act_delta, addl_page_shortage, deficit, inactq_shortage, max_scan; 11815cd29d0fSMark Johnston int page_shortage, scan_tick, starting_page_shortage; 11825cd29d0fSMark Johnston bool obj_locked; 11830d94caffSDavid Greenman 1184df8bae1dSRodney W. Grimes /* 1185d9e23210SJeff Roberson * If we need to reclaim memory, ask kernel caches to return 1186c9612b2dSJeff Roberson * some. We rate limit to avoid thrashing. 1187d9e23210SJeff Roberson */ 1188e2068d0bSJeff Roberson if (vmd == VM_DOMAIN(0) && pass > 0 && 1189a6bf3a9eSRyan Stone (time_uptime - lowmem_uptime) >= lowmem_period) { 1190d9e23210SJeff Roberson /* 1191855a310fSJeff Roberson * Decrease registered cache sizes. 1192855a310fSJeff Roberson */ 119314a0d74eSSteven Hartland SDT_PROBE0(vm, , , vm__lowmem_scan); 11949b43bc27SAndriy Gapon EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES); 1195855a310fSJeff Roberson /* 1196d9e23210SJeff Roberson * We do this explicitly after the caches have been 1197d9e23210SJeff Roberson * drained above. 1198855a310fSJeff Roberson */ 1199855a310fSJeff Roberson uma_reclaim(); 1200a6bf3a9eSRyan Stone lowmem_uptime = time_uptime; 1201d9e23210SJeff Roberson } 12025985940eSJohn Dyson 1203311e34e2SKonstantin Belousov /* 1204*01f04471SMark Johnston * The addl_page_shortage is an estimate of the number of temporarily 1205311e34e2SKonstantin Belousov * stuck pages in the inactive queue. In other words, the 1206449c2e92SKonstantin Belousov * number of pages from the inactive count that should be 1207311e34e2SKonstantin Belousov * discounted in setting the target for the active queue scan. 1208311e34e2SKonstantin Belousov */ 12099099545aSAlan Cox addl_page_shortage = 0; 12109099545aSAlan Cox 12111c7c3c6aSMatthew Dillon /* 1212e57dd910SAlan Cox * Calculate the number of pages that we want to free. This number 1213e57dd910SAlan Cox * can be negative if many pages are freed between the wakeup call to 1214e57dd910SAlan Cox * the page daemon and this calculation.
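 *
 * For example (the numbers are purely illustrative): if the shortage
 * passed in by the page daemon's controller is 1000 pages and
 * vmd_pageout_deficit holds a further 200 pages, the scan target is
 * 1200 pages.  starting_page_shortage remembers that initial value so
 * that we can later judge how much progress the scan actually made.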
12151c7c3c6aSMatthew Dillon */ 121660196cdaSAlan Cox if (pass > 0) { 1217e2068d0bSJeff Roberson deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit); 12185f8cd1c0SJeff Roberson page_shortage = shortage + deficit; 121960196cdaSAlan Cox } else 122060196cdaSAlan Cox page_shortage = deficit = 0; 122176386c7eSKonstantin Belousov starting_page_shortage = page_shortage; 12221c7c3c6aSMatthew Dillon 12235cd29d0fSMark Johnston mtx = NULL; 12245cd29d0fSMark Johnston obj_locked = false; 12255cd29d0fSMark Johnston object = NULL; 12265cd29d0fSMark Johnston vm_batchqueue_init(&rq); 12275cd29d0fSMark Johnston 1228936524aaSMatthew Dillon /* 1229f095d1bbSAlan Cox * Start scanning the inactive queue for pages that we can free. The 1230f095d1bbSAlan Cox * scan will stop when we reach the target or we have scanned the 1231f095d1bbSAlan Cox * entire queue. (Note that m->act_count is not used to make 1232f095d1bbSAlan Cox * decisions for the inactive queue, only for the active queue.) 12338d220203SAlan Cox */ 123464b38930SMark Johnston marker = &vmd->vmd_markers[PQ_INACTIVE]; 12355cd29d0fSMark Johnston pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; 12368d220203SAlan Cox vm_pagequeue_lock(pq); 12375cd29d0fSMark Johnston vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); 12385cd29d0fSMark Johnston while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) { 12395cd29d0fSMark Johnston KASSERT((m->flags & PG_MARKER) == 0, 12405cd29d0fSMark Johnston ("marker page %p was dequeued", m)); 1241df8bae1dSRodney W. Grimes 12425cd29d0fSMark Johnston vm_page_change_lock(m, &mtx); 1243df8bae1dSRodney W. Grimes 12445cd29d0fSMark Johnston recheck: 1245936524aaSMatthew Dillon /* 12465cd29d0fSMark Johnston * The page may have been disassociated from the queue 12475cd29d0fSMark Johnston * while locks were dropped. 1248936524aaSMatthew Dillon */ 124936f8fe9bSMark Johnston if (vm_page_queue(m) != PQ_INACTIVE) { 12505cd29d0fSMark Johnston addl_page_shortage++; 1251936524aaSMatthew Dillon continue; 12525cd29d0fSMark Johnston } 12537900f95dSKonstantin Belousov 12548c616246SKonstantin Belousov /* 12555cd29d0fSMark Johnston * The page was re-enqueued after the page queue lock was 12565cd29d0fSMark Johnston * dropped, or a requeue was requested. This page gets a second 12575cd29d0fSMark Johnston * chance. 12588c616246SKonstantin Belousov */ 12595cd29d0fSMark Johnston if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE | 12605cd29d0fSMark Johnston PGA_REQUEUE_HEAD)) != 0) 12615cd29d0fSMark Johnston goto reinsert; 12625cd29d0fSMark Johnston 12631d3a1bcfSMark Johnston /* 12645cd29d0fSMark Johnston * Held pages are essentially stuck in the queue. So, 12655cd29d0fSMark Johnston * they ought to be discounted from the inactive count. 12665cd29d0fSMark Johnston * See the calculation of inactq_shortage before the 1267a3aeedabSAlan Cox * loop over the active queue below. 12685cd29d0fSMark Johnston * 12695cd29d0fSMark Johnston * Wired pages may not be freed. Complete their removal 12705cd29d0fSMark Johnston * from the queue now to avoid needless revisits during 12715cd29d0fSMark Johnston * future scans. 
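 *
 * To summarize the two cases handled below: a page with a nonzero
 * hold_count is counted in addl_page_shortage and requeued, since the
 * hold is normally transient; a page with a nonzero wire_count is
 * instead removed lazily via vm_page_dequeue_deferred() and not
 * requeued.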
1272a3aeedabSAlan Cox */ 12735cd29d0fSMark Johnston if (m->hold_count != 0) { 1274a3aeedabSAlan Cox addl_page_shortage++; 12755cd29d0fSMark Johnston goto reinsert; 12765cd29d0fSMark Johnston } 12775cd29d0fSMark Johnston if (m->wire_count != 0) { 12785cd29d0fSMark Johnston vm_page_dequeue_deferred(m); 12795cd29d0fSMark Johnston continue; 12805cd29d0fSMark Johnston } 12815cd29d0fSMark Johnston 12825cd29d0fSMark Johnston if (object != m->object) { 12835cd29d0fSMark Johnston if (obj_locked) { 12845cd29d0fSMark Johnston VM_OBJECT_WUNLOCK(object); 12855cd29d0fSMark Johnston obj_locked = false; 1286df8bae1dSRodney W. Grimes } 12879ee2165fSAlan Cox object = m->object; 12885cd29d0fSMark Johnston } 12895cd29d0fSMark Johnston if (!obj_locked) { 1290a3aeedabSAlan Cox if (!VM_OBJECT_TRYWLOCK(object)) { 12915cd29d0fSMark Johnston mtx_unlock(mtx); 12925cd29d0fSMark Johnston /* Depends on type-stability. */ 12935cd29d0fSMark Johnston VM_OBJECT_WLOCK(object); 12945cd29d0fSMark Johnston obj_locked = true; 12955cd29d0fSMark Johnston mtx_lock(mtx); 12965cd29d0fSMark Johnston goto recheck; 12975cd29d0fSMark Johnston } else 12985cd29d0fSMark Johnston obj_locked = true; 1299a3aeedabSAlan Cox } 13005cd29d0fSMark Johnston 1301a3aeedabSAlan Cox if (vm_page_busied(m)) { 1302a3aeedabSAlan Cox /* 1303a3aeedabSAlan Cox * Don't mess with busy pages. Leave them at 1304a3aeedabSAlan Cox * the front of the queue. Most likely, they 1305a3aeedabSAlan Cox * are being paged out and will leave the 1306a3aeedabSAlan Cox * queue shortly after the scan finishes. So, 1307a3aeedabSAlan Cox * they ought to be discounted from the 1308a3aeedabSAlan Cox * inactive count. 1309a3aeedabSAlan Cox */ 1310a3aeedabSAlan Cox addl_page_shortage++; 13115cd29d0fSMark Johnston goto reinsert; 131226f9a767SRodney W. Grimes } 131348cc2fc7SKonstantin Belousov 131448cc2fc7SKonstantin Belousov /* 13158748f58cSKonstantin Belousov * Invalid pages can be easily freed. They cannot be 13168748f58cSKonstantin Belousov * mapped, vm_page_free() asserts this. 1317776f729cSKonstantin Belousov */ 13188748f58cSKonstantin Belousov if (m->valid == 0) 13198748f58cSKonstantin Belousov goto free_page; 1320776f729cSKonstantin Belousov 1321776f729cSKonstantin Belousov /* 1322960810ccSAlan Cox * If the page has been referenced and the object is not dead, 1323960810ccSAlan Cox * reactivate or requeue the page depending on whether the 1324960810ccSAlan Cox * object is mapped. 13257e006499SJohn Dyson */ 1326bb7858eaSJeff Roberson if ((m->aflags & PGA_REFERENCED) != 0) { 1327bb7858eaSJeff Roberson vm_page_aflag_clear(m, PGA_REFERENCED); 1328bb7858eaSJeff Roberson act_delta = 1; 132986fa2471SAlan Cox } else 133086fa2471SAlan Cox act_delta = 0; 1331bb7858eaSJeff Roberson if (object->ref_count != 0) { 1332bb7858eaSJeff Roberson act_delta += pmap_ts_referenced(m); 1333bb7858eaSJeff Roberson } else { 1334bb7858eaSJeff Roberson KASSERT(!pmap_page_is_mapped(m), 1335bb7858eaSJeff Roberson ("vm_pageout_scan: page %p is mapped", m)); 13362fe6e4d7SDavid Greenman } 1337bb7858eaSJeff Roberson if (act_delta != 0) { 133886fa2471SAlan Cox if (object->ref_count != 0) { 133983c9dea1SGleb Smirnoff VM_CNT_INC(v_reactivated); 134026f9a767SRodney W. Grimes vm_page_activate(m); 1341960810ccSAlan Cox 1342960810ccSAlan Cox /* 1343960810ccSAlan Cox * Increase the activation count if the page 1344960810ccSAlan Cox * was referenced while in the inactive queue. 
1345960810ccSAlan Cox * This makes it less likely that the page will 1346960810ccSAlan Cox * be returned prematurely to the inactive 1347960810ccSAlan Cox * queue. 1348960810ccSAlan Cox */ 1349bb7858eaSJeff Roberson m->act_count += act_delta + ACT_ADVANCE; 13505cd29d0fSMark Johnston continue; 1351ebcddc72SAlan Cox } else if ((object->flags & OBJ_DEAD) == 0) { 13525cd29d0fSMark Johnston vm_page_aflag_set(m, PGA_REQUEUE); 13535cd29d0fSMark Johnston goto reinsert; 1354ebcddc72SAlan Cox } 1355960810ccSAlan Cox } 135667bf6868SJohn Dyson 13577e006499SJohn Dyson /* 13589fc4739dSAlan Cox * If the page appears to be clean at the machine-independent 13599fc4739dSAlan Cox * layer, then remove all of its mappings from the pmap in 1360a766ffd0SAlan Cox * anticipation of freeing it. If, however, any of the page's 1361a766ffd0SAlan Cox * mappings allow write access, then the page may still be 1362a766ffd0SAlan Cox * modified until the last of those mappings are removed. 13637e006499SJohn Dyson */ 1364aa044135SAlan Cox if (object->ref_count != 0) { 13659fc4739dSAlan Cox vm_page_test_dirty(m); 1366aa044135SAlan Cox if (m->dirty == 0) 1367b78ddb0bSAlan Cox pmap_remove_all(m); 1368aa044135SAlan Cox } 1369dcbcd518SBruce Evans 13706989c456SAlan Cox /* 1371ebcddc72SAlan Cox * Clean pages can be freed, but dirty pages must be sent back 1372ebcddc72SAlan Cox * to the laundry, unless they belong to a dead object. 1373ebcddc72SAlan Cox * Requeueing dirty pages from dead objects is pointless, as 1374ebcddc72SAlan Cox * they are being paged out and freed by the thread that 1375ebcddc72SAlan Cox * destroyed the object. 13766989c456SAlan Cox */ 1377ebcddc72SAlan Cox if (m->dirty == 0) { 13788748f58cSKonstantin Belousov free_page: 13795cd29d0fSMark Johnston /* 13805cd29d0fSMark Johnston * Because we dequeued the page and have already 13815cd29d0fSMark Johnston * checked for concurrent dequeue and enqueue 13825cd29d0fSMark Johnston * requests, we can safely disassociate the page 13835cd29d0fSMark Johnston * from the inactive queue. 13845cd29d0fSMark Johnston */ 13855cd29d0fSMark Johnston KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0, 13865cd29d0fSMark Johnston ("page %p has queue state", m)); 13875cd29d0fSMark Johnston m->queue = PQ_NONE; 138878afdce6SAlan Cox vm_page_free(m); 13895cd29d0fSMark Johnston page_shortage--; 1390ebcddc72SAlan Cox } else if ((object->flags & OBJ_DEAD) == 0) 1391ebcddc72SAlan Cox vm_page_launder(m); 13925cd29d0fSMark Johnston continue; 13935cd29d0fSMark Johnston reinsert: 13945cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, m); 13955cd29d0fSMark Johnston } 13965cd29d0fSMark Johnston if (mtx != NULL) { 13975cd29d0fSMark Johnston mtx_unlock(mtx); 13985cd29d0fSMark Johnston mtx = NULL; 13995cd29d0fSMark Johnston } 14005cd29d0fSMark Johnston if (obj_locked) { 140189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 14025cd29d0fSMark Johnston obj_locked = false; 14035cd29d0fSMark Johnston } 14045cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &rq, NULL); 14055cd29d0fSMark Johnston vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL); 14068d220203SAlan Cox vm_pagequeue_lock(pq); 14075cd29d0fSMark Johnston vm_pageout_end_scan(&ss); 14088d220203SAlan Cox vm_pagequeue_unlock(pq); 140926f9a767SRodney W. Grimes 14105cd29d0fSMark Johnston VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage); 14115cd29d0fSMark Johnston 1412ebcddc72SAlan Cox /* 1413ebcddc72SAlan Cox * Wake up the laundry thread so that it can perform any needed 1414ebcddc72SAlan Cox * laundering. 
If we didn't meet our target, we're in shortfall and 1415b1fd102eSMark Johnston * need to launder more aggressively. If PQ_LAUNDRY is empty and no 1416b1fd102eSMark Johnston * swap devices are configured, the laundry thread has no work to do, so 1417b1fd102eSMark Johnston * don't bother waking it up. 1418cb35676eSMark Johnston * 1419cb35676eSMark Johnston * The laundry thread uses the number of inactive queue scans elapsed 1420cb35676eSMark Johnston * since the last laundering to determine whether to launder again, so 1421cb35676eSMark Johnston * keep count. 1422ebcddc72SAlan Cox */ 1423cb35676eSMark Johnston if (starting_page_shortage > 0) { 1424e2068d0bSJeff Roberson pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; 1425ebcddc72SAlan Cox vm_pagequeue_lock(pq); 1426e2068d0bSJeff Roberson if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE && 1427cb35676eSMark Johnston (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) { 1428ebcddc72SAlan Cox if (page_shortage > 0) { 1429e2068d0bSJeff Roberson vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL; 143083c9dea1SGleb Smirnoff VM_CNT_INC(v_pdshortfalls); 1431e2068d0bSJeff Roberson } else if (vmd->vmd_laundry_request != 1432e2068d0bSJeff Roberson VM_LAUNDRY_SHORTFALL) 1433e2068d0bSJeff Roberson vmd->vmd_laundry_request = 1434e2068d0bSJeff Roberson VM_LAUNDRY_BACKGROUND; 1435e2068d0bSJeff Roberson wakeup(&vmd->vmd_laundry_request); 1436b1fd102eSMark Johnston } 143760684862SMark Johnston vmd->vmd_clean_pages_freed += 143860684862SMark Johnston starting_page_shortage - page_shortage; 1439ebcddc72SAlan Cox vm_pagequeue_unlock(pq); 1440ebcddc72SAlan Cox } 1441ebcddc72SAlan Cox 14429452b5edSAlan Cox /* 1443f095d1bbSAlan Cox * Wake up the swapout daemon if we didn't free the targeted number of 1444f095d1bbSAlan Cox * pages. 14459452b5edSAlan Cox */ 1446ac04195bSKonstantin Belousov if (page_shortage > 0) 1447ac04195bSKonstantin Belousov vm_swapout_run(); 14489452b5edSAlan Cox 14499452b5edSAlan Cox /* 145076386c7eSKonstantin Belousov * If the inactive queue scan fails repeatedly to meet its 145176386c7eSKonstantin Belousov * target, kill the largest process. 145276386c7eSKonstantin Belousov */ 145376386c7eSKonstantin Belousov vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage); 145476386c7eSKonstantin Belousov 145576386c7eSKonstantin Belousov /* 1456936524aaSMatthew Dillon * Compute the number of pages we want to try to move from the 1457ebcddc72SAlan Cox * active queue to either the inactive or laundry queue. 1458ebcddc72SAlan Cox * 1459ebcddc72SAlan Cox * When scanning active pages, we make clean pages count more heavily 1460ebcddc72SAlan Cox * towards the page shortage than dirty pages. This is because dirty 1461ebcddc72SAlan Cox * pages must be laundered before they can be reused and thus have less 1462ebcddc72SAlan Cox * utility when attempting to quickly alleviate a shortage. However, 1463ebcddc72SAlan Cox * this weighting also causes the scan to deactivate dirty pages more 1464ebcddc72SAlan Cox * aggressively, improving the effectiveness of clustering and 1465ebcddc72SAlan Cox * ensuring that they can eventually be reused.
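 *
 * For instance, with act_scan_laundry_weight at its default of 3 (the
 * default value is assumed here; the knob is defined earlier in this
 * file), the shortage computed below is scaled by 3, each clean page
 * deactivated by the scan then pays off 3 units of it, and each dirty
 * page moved to the laundry pays off only 1.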
14661c7c3c6aSMatthew Dillon */ 1467e2068d0bSJeff Roberson inactq_shortage = vmd->vmd_inactive_target - (pq->pq_cnt + 1468e2068d0bSJeff Roberson vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight) + 146959d3150bSMark Johnston vm_paging_target(vmd) + deficit + addl_page_shortage; 147082e2d06aSMark Johnston inactq_shortage *= act_scan_laundry_weight; 14719099545aSAlan Cox 14725cd29d0fSMark Johnston marker = &vmd->vmd_markers[PQ_ACTIVE]; 1473114f62c6SJeff Roberson pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; 1474114f62c6SJeff Roberson vm_pagequeue_lock(pq); 14759099545aSAlan Cox 1476d9e23210SJeff Roberson /* 1477d9e23210SJeff Roberson * If we're just idle polling, attempt to visit every 1478d9e23210SJeff Roberson * active page within 'update_period' seconds. 1479d9e23210SJeff Roberson */ 148022cf98d1SAlan Cox scan_tick = ticks; 148122cf98d1SAlan Cox if (vm_pageout_update_period != 0) { 148222cf98d1SAlan Cox min_scan = pq->pq_cnt; 148322cf98d1SAlan Cox min_scan *= scan_tick - vmd->vmd_last_active_scan; 148422cf98d1SAlan Cox min_scan /= hz * vm_pageout_update_period; 148522cf98d1SAlan Cox } else 148622cf98d1SAlan Cox min_scan = 0; 14875cd29d0fSMark Johnston if (min_scan > 0 || (inactq_shortage > 0 && pq->pq_cnt > 0)) 148822cf98d1SAlan Cox vmd->vmd_last_active_scan = scan_tick; 14891c7c3c6aSMatthew Dillon 14901c7c3c6aSMatthew Dillon /* 149122cf98d1SAlan Cox * Scan the active queue for pages that can be deactivated. Update 149222cf98d1SAlan Cox * the per-page activity counter and use it to identify deactivation 149379144408SAlan Cox * candidates. Held pages may be deactivated. 14945cd29d0fSMark Johnston * 14955cd29d0fSMark Johnston * To avoid requeuing each page that remains in the active queue, we 14965cd29d0fSMark Johnston * implement the CLOCK algorithm. To maintain consistency in the 14975cd29d0fSMark Johnston * generic page queue code, pages are inserted at the tail of the 14985cd29d0fSMark Johnston * active queue. We thus use two hands, represented by marker pages: 14995cd29d0fSMark Johnston * scans begin at the first hand, which precedes the second hand in 15005cd29d0fSMark Johnston * the queue. When the two hands meet, they are moved back to the 15015cd29d0fSMark Johnston * head and tail of the queue, respectively, and scanning resumes. 15021c7c3c6aSMatthew Dillon */ 15035cd29d0fSMark Johnston max_scan = inactq_shortage > 0 ?
pq->pq_cnt : min_scan; 15045cd29d0fSMark Johnston act_scan: 15055cd29d0fSMark Johnston vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan); 15065cd29d0fSMark Johnston while ((m = vm_pageout_next(&ss, false)) != NULL) { 15075cd29d0fSMark Johnston if (__predict_false(m == &vmd->vmd_clock[1])) { 15085cd29d0fSMark Johnston vm_pagequeue_lock(pq); 15095cd29d0fSMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 15105cd29d0fSMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q); 15115cd29d0fSMark Johnston TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0], 15125cd29d0fSMark Johnston plinks.q); 15135cd29d0fSMark Johnston TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1], 15145cd29d0fSMark Johnston plinks.q); 15155cd29d0fSMark Johnston max_scan -= ss.scanned; 15165cd29d0fSMark Johnston vm_pageout_end_scan(&ss); 15175cd29d0fSMark Johnston goto act_scan; 15182965a453SKip Macy } 15195cd29d0fSMark Johnston if (__predict_false((m->flags & PG_MARKER) != 0)) 15205cd29d0fSMark Johnston continue; 15215cd29d0fSMark Johnston 15225cd29d0fSMark Johnston vm_page_change_lock(m, &mtx); 1523b18bfc3dSJohn Dyson 1524b18bfc3dSJohn Dyson /* 15255cd29d0fSMark Johnston * The page may have been disassociated from the queue 15265cd29d0fSMark Johnston * while locks were dropped. 1527b18bfc3dSJohn Dyson */ 152836f8fe9bSMark Johnston if (vm_page_queue(m) != PQ_ACTIVE) 15295cd29d0fSMark Johnston continue; 1530ef743ce6SJohn Dyson 15317e006499SJohn Dyson /* 15321d3a1bcfSMark Johnston * Wired pages are dequeued lazily. 15331d3a1bcfSMark Johnston */ 15341d3a1bcfSMark Johnston if (m->wire_count != 0) { 15355cd29d0fSMark Johnston vm_page_dequeue_deferred(m); 15361d3a1bcfSMark Johnston continue; 15371d3a1bcfSMark Johnston } 15381d3a1bcfSMark Johnston 15391d3a1bcfSMark Johnston /* 15407e006499SJohn Dyson * Check to see "how much" the page has been used. 15417e006499SJohn Dyson */ 154286fa2471SAlan Cox if ((m->aflags & PGA_REFERENCED) != 0) { 1543bb7858eaSJeff Roberson vm_page_aflag_clear(m, PGA_REFERENCED); 154486fa2471SAlan Cox act_delta = 1; 154586fa2471SAlan Cox } else 154686fa2471SAlan Cox act_delta = 0; 154786fa2471SAlan Cox 1548274132acSJeff Roberson /* 154979144408SAlan Cox * Perform an unsynchronized object ref count check. While 155079144408SAlan Cox * the page lock ensures that the page is not reallocated to 155179144408SAlan Cox * another object, in particular, one with unmanaged mappings 155279144408SAlan Cox * that cannot support pmap_ts_referenced(), two races are, 155379144408SAlan Cox * nonetheless, possible: 155479144408SAlan Cox * 1) The count was transitioning to zero, but we saw a non- 155579144408SAlan Cox * zero value. pmap_ts_referenced() will return zero 155679144408SAlan Cox * because the page is not mapped. 155779144408SAlan Cox * 2) The count was transitioning to one, but we saw zero. 155879144408SAlan Cox * This race delays the detection of a new reference. At 155979144408SAlan Cox * worst, we will deactivate and reactivate the page. 1560274132acSJeff Roberson */ 1561274132acSJeff Roberson if (m->object->ref_count != 0) 1562bb7858eaSJeff Roberson act_delta += pmap_ts_referenced(m); 1563bb7858eaSJeff Roberson 1564bb7858eaSJeff Roberson /* 1565bb7858eaSJeff Roberson * Advance or decay the act_count based on recent usage. 
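 *
 * Illustration, assuming the usual constants from vm_page.h (ACT_ADVANCE
 * is 3, ACT_DECLINE is 1 and ACT_MAX is 64): a page referenced once
 * since the last scan gains 3 + 1 = 4, while an unreferenced page loses
 * only 1 per scan, so a formerly busy page must sit idle for many scan
 * periods before its act_count drains to 0 and it becomes a candidate
 * for deactivation below.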
1566bb7858eaSJeff Roberson */ 156786fa2471SAlan Cox if (act_delta != 0) { 1568bb7858eaSJeff Roberson m->act_count += ACT_ADVANCE + act_delta; 156938efa82bSJohn Dyson if (m->act_count > ACT_MAX) 157038efa82bSJohn Dyson m->act_count = ACT_MAX; 157186fa2471SAlan Cox } else 157238efa82bSJohn Dyson m->act_count -= min(m->act_count, ACT_DECLINE); 1573bb7858eaSJeff Roberson 157486fa2471SAlan Cox if (m->act_count == 0) { 1575ebcddc72SAlan Cox /* 1576ebcddc72SAlan Cox * When not short of inactive pages, let dirty pages go 1577ebcddc72SAlan Cox * through the inactive queue before moving to the 1578ebcddc72SAlan Cox * laundry queues. This gives them some extra time to 1579ebcddc72SAlan Cox * be reactivated, potentially avoiding an expensive 1580ebcddc72SAlan Cox * pageout. During a page shortage, the inactive queue 1581ebcddc72SAlan Cox * is necessarily small, so we may move dirty pages 1582ebcddc72SAlan Cox * directly to the laundry queue. 1583ebcddc72SAlan Cox */ 1584ebcddc72SAlan Cox if (inactq_shortage <= 0) 1585d4a272dbSJohn Dyson vm_page_deactivate(m); 1586ebcddc72SAlan Cox else { 1587ebcddc72SAlan Cox /* 1588ebcddc72SAlan Cox * Calling vm_page_test_dirty() here would 1589ebcddc72SAlan Cox * require acquisition of the object's write 1590ebcddc72SAlan Cox * lock. However, during a page shortage, 1591ebcddc72SAlan Cox * directing dirty pages into the laundry 1592ebcddc72SAlan Cox * queue is only an optimization and not a 1593ebcddc72SAlan Cox * requirement. Therefore, we simply rely on 1594ebcddc72SAlan Cox * the opportunistic updates to the page's 1595ebcddc72SAlan Cox * dirty field by the pmap. 1596ebcddc72SAlan Cox */ 1597ebcddc72SAlan Cox if (m->dirty == 0) { 1598ebcddc72SAlan Cox vm_page_deactivate(m); 1599ebcddc72SAlan Cox inactq_shortage -= 1600ebcddc72SAlan Cox act_scan_laundry_weight; 1601ebcddc72SAlan Cox } else { 1602ebcddc72SAlan Cox vm_page_launder(m); 1603e57dd910SAlan Cox inactq_shortage--; 1604ebcddc72SAlan Cox } 1605ebcddc72SAlan Cox } 160626f9a767SRodney W. Grimes } 16075cd29d0fSMark Johnston } 16085cd29d0fSMark Johnston if (mtx != NULL) { 16095cd29d0fSMark Johnston mtx_unlock(mtx); 16105cd29d0fSMark Johnston mtx = NULL; 16115cd29d0fSMark Johnston } 16125cd29d0fSMark Johnston vm_pagequeue_lock(pq); 16135cd29d0fSMark Johnston TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); 16145cd29d0fSMark Johnston TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q); 16155cd29d0fSMark Johnston vm_pageout_end_scan(&ss); 16168d220203SAlan Cox vm_pagequeue_unlock(pq); 16175cd29d0fSMark Johnston 1618ac04195bSKonstantin Belousov if (pass > 0) 1619ac04195bSKonstantin Belousov vm_swapout_run_idle(); 1620e57dd910SAlan Cox return (page_shortage <= 0); 16212025d69bSKonstantin Belousov } 16222025d69bSKonstantin Belousov 1623449c2e92SKonstantin Belousov static int vm_pageout_oom_vote; 1624449c2e92SKonstantin Belousov 1625449c2e92SKonstantin Belousov /* 1626449c2e92SKonstantin Belousov * The pagedaemon threads randomly select one to perform the 1627449c2e92SKonstantin Belousov * OOM. Trying to kill processes before all pagedaemons 1628449c2e92SKonstantin Belousov * have failed to reach the free target is premature.
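 *
 * Concretely, assuming the usual default of 12 for vm_pageout_oom_seq:
 * a domain casts its vote by bumping vm_pageout_oom_vote only after 12
 * consecutive scans that freed no pages at all, the last domain to vote
 * calls vm_pageout_oom(), and that vote is withdrawn right afterwards so
 * that a further string of failed scans is needed before another kill.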
1629449c2e92SKonstantin Belousov */ 1630449c2e92SKonstantin Belousov static void 163176386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 163276386c7eSKonstantin Belousov int starting_page_shortage) 1633449c2e92SKonstantin Belousov { 1634449c2e92SKonstantin Belousov int old_vote; 1635449c2e92SKonstantin Belousov 163676386c7eSKonstantin Belousov if (starting_page_shortage <= 0 || starting_page_shortage != 163776386c7eSKonstantin Belousov page_shortage) 163876386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 163976386c7eSKonstantin Belousov else 164076386c7eSKonstantin Belousov vmd->vmd_oom_seq++; 164176386c7eSKonstantin Belousov if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { 1642449c2e92SKonstantin Belousov if (vmd->vmd_oom) { 1643449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1644449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1645449c2e92SKonstantin Belousov } 1646449c2e92SKonstantin Belousov return; 1647449c2e92SKonstantin Belousov } 1648449c2e92SKonstantin Belousov 164976386c7eSKonstantin Belousov /* 165076386c7eSKonstantin Belousov * Do not follow the call sequence until OOM condition is 165176386c7eSKonstantin Belousov * cleared. 165276386c7eSKonstantin Belousov */ 165376386c7eSKonstantin Belousov vmd->vmd_oom_seq = 0; 165476386c7eSKonstantin Belousov 1655449c2e92SKonstantin Belousov if (vmd->vmd_oom) 1656449c2e92SKonstantin Belousov return; 1657449c2e92SKonstantin Belousov 1658449c2e92SKonstantin Belousov vmd->vmd_oom = TRUE; 1659449c2e92SKonstantin Belousov old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); 1660449c2e92SKonstantin Belousov if (old_vote != vm_ndomains - 1) 1661449c2e92SKonstantin Belousov return; 1662449c2e92SKonstantin Belousov 1663449c2e92SKonstantin Belousov /* 1664449c2e92SKonstantin Belousov * The current pagedaemon thread is the last in the quorum to 1665449c2e92SKonstantin Belousov * start OOM. Initiate the selection and signaling of the 1666449c2e92SKonstantin Belousov * victim. 1667449c2e92SKonstantin Belousov */ 1668449c2e92SKonstantin Belousov vm_pageout_oom(VM_OOM_MEM); 1669449c2e92SKonstantin Belousov 1670449c2e92SKonstantin Belousov /* 1671449c2e92SKonstantin Belousov * After one round of OOM terror, recall our vote. On the 1672449c2e92SKonstantin Belousov * next pass, current pagedaemon would vote again if the low 1673449c2e92SKonstantin Belousov * memory condition is still there, due to vmd_oom being 1674449c2e92SKonstantin Belousov * false. 1675449c2e92SKonstantin Belousov */ 1676449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1677449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1678449c2e92SKonstantin Belousov } 16792025d69bSKonstantin Belousov 16803949873fSKonstantin Belousov /* 16813949873fSKonstantin Belousov * The OOM killer is the page daemon's action of last resort when 16823949873fSKonstantin Belousov * memory allocation requests have been stalled for a prolonged period 16833949873fSKonstantin Belousov * of time because it cannot reclaim memory. This function computes 16843949873fSKonstantin Belousov * the approximate number of physical pages that could be reclaimed if 16853949873fSKonstantin Belousov * the specified address space is destroyed. 16863949873fSKonstantin Belousov * 16873949873fSKonstantin Belousov * Private, anonymous memory owned by the address space is the 16883949873fSKonstantin Belousov * principal resource that we expect to recover after an OOM kill. 
16893949873fSKonstantin Belousov * Since the physical pages mapped by the address space's COW entries 16903949873fSKonstantin Belousov * are typically shared pages, they are unlikely to be released and so 16913949873fSKonstantin Belousov * they are not counted. 16923949873fSKonstantin Belousov * 16933949873fSKonstantin Belousov * To get to the point where the page daemon runs the OOM killer, its 16943949873fSKonstantin Belousov * efforts to write-back vnode-backed pages may have stalled. This 16953949873fSKonstantin Belousov * could be caused by a memory allocation deadlock in the write path 16963949873fSKonstantin Belousov * that might be resolved by an OOM kill. Therefore, physical pages 16973949873fSKonstantin Belousov * belonging to vnode-backed objects are counted, because they might 16983949873fSKonstantin Belousov * be freed without being written out first if the address space holds 16993949873fSKonstantin Belousov * the last reference to an unlinked vnode. 17003949873fSKonstantin Belousov * 17013949873fSKonstantin Belousov * Similarly, physical pages belonging to OBJT_PHYS objects are 17023949873fSKonstantin Belousov * counted because the address space might hold the last reference to 17033949873fSKonstantin Belousov * the object. 17043949873fSKonstantin Belousov */ 17053949873fSKonstantin Belousov static long 17063949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace) 17073949873fSKonstantin Belousov { 17083949873fSKonstantin Belousov vm_map_t map; 17093949873fSKonstantin Belousov vm_map_entry_t entry; 17103949873fSKonstantin Belousov vm_object_t obj; 17113949873fSKonstantin Belousov long res; 17123949873fSKonstantin Belousov 17133949873fSKonstantin Belousov map = &vmspace->vm_map; 17143949873fSKonstantin Belousov KASSERT(!map->system_map, ("system map")); 17153949873fSKonstantin Belousov sx_assert(&map->lock, SA_LOCKED); 17163949873fSKonstantin Belousov res = 0; 17173949873fSKonstantin Belousov for (entry = map->header.next; entry != &map->header; 17183949873fSKonstantin Belousov entry = entry->next) { 17193949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 17203949873fSKonstantin Belousov continue; 17213949873fSKonstantin Belousov obj = entry->object.vm_object; 17223949873fSKonstantin Belousov if (obj == NULL) 17233949873fSKonstantin Belousov continue; 17243949873fSKonstantin Belousov if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 17253949873fSKonstantin Belousov obj->ref_count != 1) 17263949873fSKonstantin Belousov continue; 17273949873fSKonstantin Belousov switch (obj->type) { 17283949873fSKonstantin Belousov case OBJT_DEFAULT: 17293949873fSKonstantin Belousov case OBJT_SWAP: 17303949873fSKonstantin Belousov case OBJT_PHYS: 17313949873fSKonstantin Belousov case OBJT_VNODE: 17323949873fSKonstantin Belousov res += obj->resident_page_count; 17333949873fSKonstantin Belousov break; 17343949873fSKonstantin Belousov } 17353949873fSKonstantin Belousov } 17363949873fSKonstantin Belousov return (res); 17373949873fSKonstantin Belousov } 17383949873fSKonstantin Belousov 17392025d69bSKonstantin Belousov void 17402025d69bSKonstantin Belousov vm_pageout_oom(int shortage) 17412025d69bSKonstantin Belousov { 17422025d69bSKonstantin Belousov struct proc *p, *bigproc; 17432025d69bSKonstantin Belousov vm_offset_t size, bigsize; 17442025d69bSKonstantin Belousov struct thread *td; 17456bed074cSKonstantin Belousov struct vmspace *vm; 17463e78e983SAlan Cox bool breakout; 17472025d69bSKonstantin Belousov 17482025d69bSKonstantin Belousov /* 
17491c58e4e5SJohn Baldwin * We keep the process bigproc locked once we find it to keep anyone 17501c58e4e5SJohn Baldwin * from messing with it; however, there is a possibility of 175128323addSBryan Drewery * deadlock if process B is bigproc and one of its child processes 17521c58e4e5SJohn Baldwin * attempts to propagate a signal to B while we are waiting for A's 17531c58e4e5SJohn Baldwin * lock while walking this list. To avoid this, we don't block on 17541c58e4e5SJohn Baldwin * the process lock but just skip a process if it is already locked. 17555663e6deSDavid Greenman */ 17565663e6deSDavid Greenman bigproc = NULL; 17575663e6deSDavid Greenman bigsize = 0; 17581005a129SJohn Baldwin sx_slock(&allproc_lock); 1759e602ba25SJulian Elischer FOREACH_PROC_IN_SYSTEM(p) { 176071943c3dSKonstantin Belousov PROC_LOCK(p); 176171943c3dSKonstantin Belousov 17621c58e4e5SJohn Baldwin /* 17633f1c4c4fSKonstantin Belousov * If this is a system, protected or killed process, skip it. 17645663e6deSDavid Greenman */ 176571943c3dSKonstantin Belousov if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 176671943c3dSKonstantin Belousov P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 176771943c3dSKonstantin Belousov p->p_pid == 1 || P_KILLED(p) || 176871943c3dSKonstantin Belousov (p->p_pid < 48 && swap_pager_avail != 0)) { 17698606d880SJohn Baldwin PROC_UNLOCK(p); 17705663e6deSDavid Greenman continue; 17715663e6deSDavid Greenman } 17725663e6deSDavid Greenman /* 1773dcbcd518SBruce Evans * If the process is in a non-running type state, 1774e602ba25SJulian Elischer * don't touch it. Check all the threads individually. 17755663e6deSDavid Greenman */ 17763e78e983SAlan Cox breakout = false; 1777e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1778982d11f8SJeff Roberson thread_lock(td); 177971fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) && 178071fad9fdSJulian Elischer !TD_IS_RUNNING(td) && 1781f497cda2SEdward Tomasz Napierala !TD_IS_SLEEPING(td) && 1782b98acc0aSKonstantin Belousov !TD_IS_SUSPENDED(td) && 1783b98acc0aSKonstantin Belousov !TD_IS_SWAPPED(td)) { 1784982d11f8SJeff Roberson thread_unlock(td); 17853e78e983SAlan Cox breakout = true; 1786e602ba25SJulian Elischer break; 1787e602ba25SJulian Elischer } 1788982d11f8SJeff Roberson thread_unlock(td); 1789e602ba25SJulian Elischer } 1790e602ba25SJulian Elischer if (breakout) { 17911c58e4e5SJohn Baldwin PROC_UNLOCK(p); 17925663e6deSDavid Greenman continue; 17935663e6deSDavid Greenman } 17945663e6deSDavid Greenman /* 17955663e6deSDavid Greenman * get the process size 17965663e6deSDavid Greenman */ 17976bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p); 17986bed074cSKonstantin Belousov if (vm == NULL) { 17996bed074cSKonstantin Belousov PROC_UNLOCK(p); 18006bed074cSKonstantin Belousov continue; 18016bed074cSKonstantin Belousov } 180295e2409aSKonstantin Belousov _PHOLD_LITE(p); 180372d97679SDavid Schultz PROC_UNLOCK(p); 180495e2409aSKonstantin Belousov sx_sunlock(&allproc_lock); 180595e2409aSKonstantin Belousov if (!vm_map_trylock_read(&vm->vm_map)) { 180671943c3dSKonstantin Belousov vmspace_free(vm); 180795e2409aSKonstantin Belousov sx_slock(&allproc_lock); 180895e2409aSKonstantin Belousov PRELE(p); 180972d97679SDavid Schultz continue; 181072d97679SDavid Schultz } 18117981aa24SKonstantin Belousov size = vmspace_swap_count(vm); 18122025d69bSKonstantin Belousov if (shortage == VM_OOM_MEM) 18133949873fSKonstantin Belousov size += vm_pageout_oom_pagecount(vm); 18143949873fSKonstantin Belousov vm_map_unlock_read(&vm->vm_map); 18156bed074cSKonstantin Belousov 
vmspace_free(vm); 181695e2409aSKonstantin Belousov sx_slock(&allproc_lock); 18173949873fSKonstantin Belousov 18185663e6deSDavid Greenman /* 18193949873fSKonstantin Belousov * If this process is bigger than the biggest one, 18205663e6deSDavid Greenman * remember it. 18215663e6deSDavid Greenman */ 18225663e6deSDavid Greenman if (size > bigsize) { 18231c58e4e5SJohn Baldwin if (bigproc != NULL) 182471943c3dSKonstantin Belousov PRELE(bigproc); 18255663e6deSDavid Greenman bigproc = p; 18265663e6deSDavid Greenman bigsize = size; 182771943c3dSKonstantin Belousov } else { 182871943c3dSKonstantin Belousov PRELE(p); 182971943c3dSKonstantin Belousov } 18305663e6deSDavid Greenman } 18311005a129SJohn Baldwin sx_sunlock(&allproc_lock); 18325663e6deSDavid Greenman if (bigproc != NULL) { 18338311a2b8SWill Andrews if (vm_panic_on_oom != 0) 18348311a2b8SWill Andrews panic("out of swap space"); 183571943c3dSKonstantin Belousov PROC_LOCK(bigproc); 1836729b1e51SDavid Greenman killproc(bigproc, "out of swap space"); 1837fa885116SJulian Elischer sched_nice(bigproc, PRIO_MIN); 183871943c3dSKonstantin Belousov _PRELE(bigproc); 18391c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc); 18405663e6deSDavid Greenman } 18415663e6deSDavid Greenman } 184226f9a767SRodney W. Grimes 1843449c2e92SKonstantin Belousov static void 1844449c2e92SKonstantin Belousov vm_pageout_worker(void *arg) 1845449c2e92SKonstantin Belousov { 1846e2068d0bSJeff Roberson struct vm_domain *vmd; 18475f8cd1c0SJeff Roberson int domain, pass, shortage; 1848e57dd910SAlan Cox bool target_met; 1849449c2e92SKonstantin Belousov 1850e2068d0bSJeff Roberson domain = (uintptr_t)arg; 1851e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 185270cf3cedSAlan Cox pass = 0; 18535f8cd1c0SJeff Roberson shortage = 0; 1854e57dd910SAlan Cox target_met = true; 1855449c2e92SKonstantin Belousov 1856449c2e92SKonstantin Belousov /* 1857949c9186SKonstantin Belousov * XXXKIB It could be useful to bind pageout daemon threads to 1858949c9186SKonstantin Belousov * the cores belonging to the domain, from which vm_page_array 1859949c9186SKonstantin Belousov * is allocated. 1860449c2e92SKonstantin Belousov */ 1861449c2e92SKonstantin Belousov 1862e2068d0bSJeff Roberson KASSERT(vmd->vmd_segs != 0, ("domain without segments")); 1863e2068d0bSJeff Roberson vmd->vmd_last_active_scan = ticks; 1864449c2e92SKonstantin Belousov 1865449c2e92SKonstantin Belousov /* 1866449c2e92SKonstantin Belousov * The pageout daemon worker is never done, so loop forever. 1867449c2e92SKonstantin Belousov */ 1868449c2e92SKonstantin Belousov while (TRUE) { 186930fbfddaSJeff Roberson vm_domain_pageout_lock(vmd); 187030fbfddaSJeff Roberson /* 187130fbfddaSJeff Roberson * We need to clear wanted before we check the limits. This 187230fbfddaSJeff Roberson * prevents races with wakers who will check wanted after they 187330fbfddaSJeff Roberson * reach the limit. 187430fbfddaSJeff Roberson */ 187530fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 0); 187656ce0690SAlan Cox 187756ce0690SAlan Cox /* 18785f8cd1c0SJeff Roberson * Might the page daemon need to run again? 1879449c2e92SKonstantin Belousov */ 18805f8cd1c0SJeff Roberson if (vm_paging_needed(vmd, vmd->vmd_free_count)) { 188156ce0690SAlan Cox /* 18825f8cd1c0SJeff Roberson * Yes, the scan failed to free enough pages. If 18835f8cd1c0SJeff Roberson * we have performed a level >= 1 (page reclamation) 18845f8cd1c0SJeff Roberson * scan, then sleep a bit and try again. 
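 *
 * (Recall from vm_pageout_scan() above that pass 0 only updates the
 * active LRU while pass >= 1 actually reclaims pages, and that a pass
 * which misses its target raises the pass number.  The pause below
 * merely rate-limits back-to-back reclamation attempts; assuming
 * VM_INACT_SCAN_RATE is 10, that is at most roughly ten attempts per
 * second.)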
188556ce0690SAlan Cox */ 188630fbfddaSJeff Roberson vm_domain_pageout_unlock(vmd); 18875f8cd1c0SJeff Roberson if (pass > 1) 18886eebec83SMark Johnston pause("pwait", hz / VM_INACT_SCAN_RATE); 1889449c2e92SKonstantin Belousov } else { 1890449c2e92SKonstantin Belousov /* 18915f8cd1c0SJeff Roberson * No, sleep until the next wakeup or until pages 18925f8cd1c0SJeff Roberson * need to have their reference stats updated. 1893449c2e92SKonstantin Belousov */ 18942c0f13aaSKonstantin Belousov if (mtx_sleep(&vmd->vmd_pageout_wanted, 189530fbfddaSJeff Roberson vm_domain_pageout_lockptr(vmd), PDROP | PVM, 18965f8cd1c0SJeff Roberson "psleep", hz / VM_INACT_SCAN_RATE) == 0) 189783c9dea1SGleb Smirnoff VM_CNT_INC(v_pdwakeups); 189856ce0690SAlan Cox } 189930fbfddaSJeff Roberson /* Prevent spurious wakeups by ensuring that wanted is set. */ 190030fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 1); 190130fbfddaSJeff Roberson 190230fbfddaSJeff Roberson /* 190330fbfddaSJeff Roberson * Use the controller to calculate how many pages to free in 190430fbfddaSJeff Roberson * this interval. 190530fbfddaSJeff Roberson */ 19065f8cd1c0SJeff Roberson shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count); 19075f8cd1c0SJeff Roberson if (shortage && pass == 0) 19085f8cd1c0SJeff Roberson pass = 1; 190956ce0690SAlan Cox 19105f8cd1c0SJeff Roberson target_met = vm_pageout_scan(vmd, pass, shortage); 19115f8cd1c0SJeff Roberson /* 19125f8cd1c0SJeff Roberson * If the target was not met we must increase the pass to 19135f8cd1c0SJeff Roberson * more aggressively reclaim. 19145f8cd1c0SJeff Roberson */ 19155f8cd1c0SJeff Roberson if (!target_met) 19165f8cd1c0SJeff Roberson pass++; 1917449c2e92SKonstantin Belousov } 1918449c2e92SKonstantin Belousov } 1919449c2e92SKonstantin Belousov 1920df8bae1dSRodney W. Grimes /* 19214d19f4adSSteven Hartland * vm_pageout_init initialises basic pageout daemon settings. 1922df8bae1dSRodney W. Grimes */ 19232b14f991SJulian Elischer static void 1924e2068d0bSJeff Roberson vm_pageout_init_domain(int domain) 1925df8bae1dSRodney W. Grimes { 1926e2068d0bSJeff Roberson struct vm_domain *vmd; 19275f8cd1c0SJeff Roberson struct sysctl_oid *oid; 1928e2068d0bSJeff Roberson 1929e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 1930e2068d0bSJeff Roberson vmd->vmd_interrupt_free_min = 2; 1931f6b04d2bSDavid Greenman 193245ae1d91SAlan Cox /* 193345ae1d91SAlan Cox * v_free_reserved needs to include enough for the largest 193445ae1d91SAlan Cox * swap pager structures plus enough for any pv_entry structs 193545ae1d91SAlan Cox * when paging. 
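 *
 * Worked example, with the domain size invented for illustration and
 * vm_pageout_page_count and MAXBSIZE assumed to be at their usual
 * values of 32 and 64KB: for a domain of 262144 4KB pages (1GB),
 * free_min = 4 + (262144 - 1024) / 200 = 1309 pages before the reserve
 * is folded in, pageout_free_min = (2 * 65536) / 4096 + 2 = 34,
 * free_reserved = 32 + 34 + 262144 / 768 = 407 pages, and
 * free_target = 4 * 1309 + 407 = 5643 pages, i.e. roughly 22MB kept
 * free.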
193645ae1d91SAlan Cox */ 1937e2068d0bSJeff Roberson if (vmd->vmd_page_count > 1024) 1938e2068d0bSJeff Roberson vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200; 19392feb50bfSAttilio Rao else 1940e2068d0bSJeff Roberson vmd->vmd_free_min = 4; 1941e2068d0bSJeff Roberson vmd->vmd_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 1942e2068d0bSJeff Roberson vmd->vmd_interrupt_free_min; 1943e2068d0bSJeff Roberson vmd->vmd_free_reserved = vm_pageout_page_count + 1944e2068d0bSJeff Roberson vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768); 1945e2068d0bSJeff Roberson vmd->vmd_free_severe = vmd->vmd_free_min / 2; 1946e2068d0bSJeff Roberson vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved; 1947e2068d0bSJeff Roberson vmd->vmd_free_min += vmd->vmd_free_reserved; 1948e2068d0bSJeff Roberson vmd->vmd_free_severe += vmd->vmd_free_reserved; 1949e2068d0bSJeff Roberson vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2; 1950e2068d0bSJeff Roberson if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3) 1951e2068d0bSJeff Roberson vmd->vmd_inactive_target = vmd->vmd_free_count / 3; 1952df8bae1dSRodney W. Grimes 1953d9e23210SJeff Roberson /* 19545f8cd1c0SJeff Roberson * Set the default wakeup threshold to be 10% below the paging 19555f8cd1c0SJeff Roberson * target. This keeps the steady state out of shortfall. 1956d9e23210SJeff Roberson */ 19575f8cd1c0SJeff Roberson vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9; 1958e2068d0bSJeff Roberson 1959e2068d0bSJeff Roberson /* 1960e2068d0bSJeff Roberson * Target amount of memory to move out of the laundry queue during a 1961e2068d0bSJeff Roberson * background laundering. This is proportional to the amount of system 1962e2068d0bSJeff Roberson * memory. 1963e2068d0bSJeff Roberson */ 1964e2068d0bSJeff Roberson vmd->vmd_background_launder_target = (vmd->vmd_free_target - 1965e2068d0bSJeff Roberson vmd->vmd_free_min) / 10; 19665f8cd1c0SJeff Roberson 19675f8cd1c0SJeff Roberson /* Initialize the pageout daemon pid controller. */ 19685f8cd1c0SJeff Roberson pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE, 19695f8cd1c0SJeff Roberson vmd->vmd_free_target, PIDCTRL_BOUND, 19705f8cd1c0SJeff Roberson PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD); 19715f8cd1c0SJeff Roberson oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO, 19725f8cd1c0SJeff Roberson "pidctrl", CTLFLAG_RD, NULL, ""); 19735f8cd1c0SJeff Roberson pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid)); 1974e2068d0bSJeff Roberson } 1975e2068d0bSJeff Roberson 1976e2068d0bSJeff Roberson static void 1977e2068d0bSJeff Roberson vm_pageout_init(void) 1978e2068d0bSJeff Roberson { 1979e2068d0bSJeff Roberson u_int freecount; 1980e2068d0bSJeff Roberson int i; 1981e2068d0bSJeff Roberson 1982e2068d0bSJeff Roberson /* 1983e2068d0bSJeff Roberson * Initialize some paging parameters. 
1984e2068d0bSJeff Roberson */ 1985e2068d0bSJeff Roberson if (vm_cnt.v_page_count < 2000) 1986e2068d0bSJeff Roberson vm_pageout_page_count = 8; 1987e2068d0bSJeff Roberson 1988e2068d0bSJeff Roberson freecount = 0; 1989e2068d0bSJeff Roberson for (i = 0; i < vm_ndomains; i++) { 1990e2068d0bSJeff Roberson struct vm_domain *vmd; 1991e2068d0bSJeff Roberson 1992e2068d0bSJeff Roberson vm_pageout_init_domain(i); 1993e2068d0bSJeff Roberson vmd = VM_DOMAIN(i); 1994e2068d0bSJeff Roberson vm_cnt.v_free_reserved += vmd->vmd_free_reserved; 1995e2068d0bSJeff Roberson vm_cnt.v_free_target += vmd->vmd_free_target; 1996e2068d0bSJeff Roberson vm_cnt.v_free_min += vmd->vmd_free_min; 1997e2068d0bSJeff Roberson vm_cnt.v_inactive_target += vmd->vmd_inactive_target; 1998e2068d0bSJeff Roberson vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min; 1999e2068d0bSJeff Roberson vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min; 2000e2068d0bSJeff Roberson vm_cnt.v_free_severe += vmd->vmd_free_severe; 2001e2068d0bSJeff Roberson freecount += vmd->vmd_free_count; 2002e2068d0bSJeff Roberson } 2003d9e23210SJeff Roberson 2004d9e23210SJeff Roberson /* 2005d9e23210SJeff Roberson * Set interval in seconds for active scan. We want to visit each 2006c9612b2dSJeff Roberson * page at least once every ten minutes. This is to prevent worst 2007c9612b2dSJeff Roberson * case paging behaviors with stale active LRU. 2008d9e23210SJeff Roberson */ 2009d9e23210SJeff Roberson if (vm_pageout_update_period == 0) 2010c9612b2dSJeff Roberson vm_pageout_update_period = 600; 2011d9e23210SJeff Roberson 2012df8bae1dSRodney W. Grimes if (vm_page_max_wired == 0) 2013e2068d0bSJeff Roberson vm_page_max_wired = freecount / 3; 20144d19f4adSSteven Hartland } 20154d19f4adSSteven Hartland 20164d19f4adSSteven Hartland /* 20174d19f4adSSteven Hartland * vm_pageout is the high level pageout daemon. 20184d19f4adSSteven Hartland */ 20194d19f4adSSteven Hartland static void 20204d19f4adSSteven Hartland vm_pageout(void) 20214d19f4adSSteven Hartland { 202244ec2b63SKonstantin Belousov int error; 202344ec2b63SKonstantin Belousov int i; 2024df8bae1dSRodney W. 
Grimes 202524a1cce3SDavid Greenman swap_pager_swap_init(); 20263b8cf4acSMark Johnston snprintf(curthread->td_name, sizeof(curthread->td_name), "dom0"); 2027ebcddc72SAlan Cox error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL, 2028ebcddc72SAlan Cox 0, 0, "laundry: dom0"); 2029ebcddc72SAlan Cox if (error != 0) 2030ebcddc72SAlan Cox panic("starting laundry for domain 0, error %d", error); 2031449c2e92SKonstantin Belousov for (i = 1; i < vm_ndomains; i++) { 2032449c2e92SKonstantin Belousov error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i, 2033449c2e92SKonstantin Belousov curproc, NULL, 0, 0, "dom%d", i); 2034449c2e92SKonstantin Belousov if (error != 0) { 2035449c2e92SKonstantin Belousov panic("starting pageout for domain %d, error %d\n", 2036449c2e92SKonstantin Belousov i, error); 2037dc2efb27SJohn Dyson } 2038e2068d0bSJeff Roberson error = kthread_add(vm_pageout_laundry_worker, 2039e2068d0bSJeff Roberson (void *)(uintptr_t)i, curproc, NULL, 0, 0, 2040e2068d0bSJeff Roberson "laundry: dom%d", i); 2041e2068d0bSJeff Roberson if (error != 0) 2042e2068d0bSJeff Roberson panic("starting laundry for domain %d, error %d", 2043e2068d0bSJeff Roberson i, error); 2044f919ebdeSDavid Greenman } 204544ec2b63SKonstantin Belousov error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL, 204644ec2b63SKonstantin Belousov 0, 0, "uma"); 204744ec2b63SKonstantin Belousov if (error != 0) 204844ec2b63SKonstantin Belousov panic("starting uma_reclaim helper, error %d\n", error); 2049d395270dSDimitry Andric vm_pageout_worker((void *)(uintptr_t)0); 2050df8bae1dSRodney W. Grimes } 205126f9a767SRodney W. Grimes 20526b4b77adSAlan Cox /* 2053280d15cdSMark Johnston * Perform an advisory wakeup of the page daemon. 20546b4b77adSAlan Cox */ 2055e0c5a895SJohn Dyson void 2056e2068d0bSJeff Roberson pagedaemon_wakeup(int domain) 2057e0c5a895SJohn Dyson { 2058e2068d0bSJeff Roberson struct vm_domain *vmd; 2059a1c0a785SAlan Cox 2060e2068d0bSJeff Roberson vmd = VM_DOMAIN(domain); 206130fbfddaSJeff Roberson vm_domain_pageout_assert_unlocked(vmd); 206230fbfddaSJeff Roberson if (curproc == pageproc) 206330fbfddaSJeff Roberson return; 2064280d15cdSMark Johnston 206530fbfddaSJeff Roberson if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) { 206630fbfddaSJeff Roberson vm_domain_pageout_lock(vmd); 206730fbfddaSJeff Roberson atomic_store_int(&vmd->vmd_pageout_wanted, 1); 2068e2068d0bSJeff Roberson wakeup(&vmd->vmd_pageout_wanted); 206930fbfddaSJeff Roberson vm_domain_pageout_unlock(vmd); 2070e0c5a895SJohn Dyson } 2071e0c5a895SJohn Dyson } 2072