xref: /freebsd/sys/vm/vm_pageout.c (revision 41fd4b9422e37d764a0a6fe2f7f2cda3a523d822)
160727d8bSWarner Losh /*-
2796df753SPedro F. Giffuni  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3df57947fSPedro F. Giffuni  *
426f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
726f9a767SRodney W. Grimes  * All rights reserved.
826f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
926f9a767SRodney W. Grimes  * All rights reserved.
108dbca793STor Egge  * Copyright (c) 2005 Yahoo! Technologies Norway AS
118dbca793STor Egge  * All rights reserved.
12df8bae1dSRodney W. Grimes  *
13df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
14df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
15df8bae1dSRodney W. Grimes  *
16df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
17df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
18df8bae1dSRodney W. Grimes  * are met:
19df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
20df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
21df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
22df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
23df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
24df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
255929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
26df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
27df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
28df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
29df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
30df8bae1dSRodney W. Grimes  *    without specific prior written permission.
31df8bae1dSRodney W. Grimes  *
32df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
43df8bae1dSRodney W. Grimes  *
443c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  *
47df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48df8bae1dSRodney W. Grimes  * All rights reserved.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
53df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
54df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
55df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
56df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61df8bae1dSRodney W. Grimes  *
62df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
63df8bae1dSRodney W. Grimes  *
64df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65df8bae1dSRodney W. Grimes  *  School of Computer Science
66df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
67df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
68df8bae1dSRodney W. Grimes  *
69df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
70df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
71df8bae1dSRodney W. Grimes  */
72df8bae1dSRodney W. Grimes 
73df8bae1dSRodney W. Grimes /*
74df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
75df8bae1dSRodney W. Grimes  */
76df8bae1dSRodney W. Grimes 
77874651b1SDavid E. O'Brien #include <sys/cdefs.h>
78874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
79874651b1SDavid E. O'Brien 
80faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
817672ca05SMark Johnston 
82df8bae1dSRodney W. Grimes #include <sys/param.h>
8326f9a767SRodney W. Grimes #include <sys/systm.h>
84b5e8ce9fSBruce Evans #include <sys/kernel.h>
85855a310fSJeff Roberson #include <sys/eventhandler.h>
86fb919e4dSMark Murray #include <sys/lock.h>
87fb919e4dSMark Murray #include <sys/mutex.h>
8826f9a767SRodney W. Grimes #include <sys/proc.h>
899c8b8baaSPeter Wemm #include <sys/kthread.h>
900384fff8SJason Evans #include <sys/ktr.h>
9197824da3SAlan Cox #include <sys/mount.h>
92099e7e95SEdward Tomasz Napierala #include <sys/racct.h>
9326f9a767SRodney W. Grimes #include <sys/resourcevar.h>
94b43179fbSJeff Roberson #include <sys/sched.h>
9514a0d74eSSteven Hartland #include <sys/sdt.h>
96d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
97449c2e92SKonstantin Belousov #include <sys/smp.h>
98a6bf3a9eSRyan Stone #include <sys/time.h>
99f6b04d2bSDavid Greenman #include <sys/vnode.h>
100efeaf95aSDavid Greenman #include <sys/vmmeter.h>
10189f6b863SAttilio Rao #include <sys/rwlock.h>
1021005a129SJohn Baldwin #include <sys/sx.h>
10338efa82bSJohn Dyson #include <sys/sysctl.h>
104df8bae1dSRodney W. Grimes 
105df8bae1dSRodney W. Grimes #include <vm/vm.h>
106efeaf95aSDavid Greenman #include <vm/vm_param.h>
107efeaf95aSDavid Greenman #include <vm/vm_object.h>
108df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
109efeaf95aSDavid Greenman #include <vm/vm_map.h>
110df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
11124a1cce3SDavid Greenman #include <vm/vm_pager.h>
112449c2e92SKonstantin Belousov #include <vm/vm_phys.h>
113e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
11405f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
115efeaf95aSDavid Greenman #include <vm/vm_extern.h>
116670d17b5SJeff Roberson #include <vm/uma.h>
117df8bae1dSRodney W. Grimes 
1182b14f991SJulian Elischer /*
1192b14f991SJulian Elischer  * System initialization
1202b14f991SJulian Elischer  */
1212b14f991SJulian Elischer 
1222b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
12311caded3SAlfred Perlstein static void vm_pageout(void);
1244d19f4adSSteven Hartland static void vm_pageout_init(void);
125ebcddc72SAlan Cox static int vm_pageout_clean(vm_page_t m, int *numpagedout);
12634d8b7eaSJeff Roberson static int vm_pageout_cluster(vm_page_t m);
12776386c7eSKonstantin Belousov static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
12876386c7eSKonstantin Belousov     int starting_page_shortage);
12945ae1d91SAlan Cox 
1304d19f4adSSteven Hartland SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
1314d19f4adSSteven Hartland     NULL);
1324d19f4adSSteven Hartland 
1332b14f991SJulian Elischer struct proc *pageproc;
1342b14f991SJulian Elischer 
1352b14f991SJulian Elischer static struct kproc_desc page_kp = {
1362b14f991SJulian Elischer 	"pagedaemon",
1372b14f991SJulian Elischer 	vm_pageout,
1382b14f991SJulian Elischer 	&pageproc
1392b14f991SJulian Elischer };
1404d19f4adSSteven Hartland SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
141237fdd78SRobert Watson     &page_kp);
1422b14f991SJulian Elischer 
14314a0d74eSSteven Hartland SDT_PROVIDER_DEFINE(vm);
14414a0d74eSSteven Hartland SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
14514a0d74eSSteven Hartland 
146ebcddc72SAlan Cox /* Pagedaemon activity rates, in subdivisions of one second. */
147ebcddc72SAlan Cox #define	VM_LAUNDER_RATE		10
1485f8cd1c0SJeff Roberson #define	VM_INACT_SCAN_RATE	10
1492b14f991SJulian Elischer 
15076386c7eSKonstantin Belousov static int vm_pageout_oom_seq = 12;
151ebcddc72SAlan Cox 
152d9e23210SJeff Roberson static int vm_pageout_update_period;
1534a365329SAndrey Zonov static int disable_swap_pageouts;
154c9612b2dSJeff Roberson static int lowmem_period = 10;
155b1fd102eSMark Johnston static int swapdev_enabled;
15670111b90SJohn Dyson 
1578311a2b8SWill Andrews static int vm_panic_on_oom = 0;
1588311a2b8SWill Andrews 
1598311a2b8SWill Andrews SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
1608311a2b8SWill Andrews 	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
1618311a2b8SWill Andrews 	"panic on out of memory instead of killing the largest process");
1628311a2b8SWill Andrews 
163d9e23210SJeff Roberson SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
164e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
165d9e23210SJeff Roberson 	"Maximum active LRU update period");
16653636869SAndrey Zonov 
167e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
168c9612b2dSJeff Roberson 	"Low memory callback period");
169c9612b2dSJeff Roberson 
170ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
171e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
17212ac6a1dSJohn Dyson 
17323b59018SMatthew Dillon static int pageout_lock_miss;
17423b59018SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
17523b59018SMatthew Dillon 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
17623b59018SMatthew Dillon 
17776386c7eSKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
178e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
17976386c7eSKonstantin Belousov 	"back-to-back calls to oom detector to start OOM");
18076386c7eSKonstantin Belousov 
181ebcddc72SAlan Cox static int act_scan_laundry_weight = 3;
182e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
183ebcddc72SAlan Cox     &act_scan_laundry_weight, 0,
184ebcddc72SAlan Cox     "weight given to clean vs. dirty pages in active queue scans");
185ebcddc72SAlan Cox 
186ebcddc72SAlan Cox static u_int vm_background_launder_rate = 4096;
187e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
188ebcddc72SAlan Cox     &vm_background_launder_rate, 0,
189ebcddc72SAlan Cox     "background laundering rate, in kilobytes per second");
190ebcddc72SAlan Cox 
191ebcddc72SAlan Cox static u_int vm_background_launder_max = 20 * 1024;
192e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
193ebcddc72SAlan Cox     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
194ebcddc72SAlan Cox 
195e2241590SAlan Cox int vm_pageout_page_count = 32;
196df8bae1dSRodney W. Grimes 
19754a3a114SMark Johnston u_long vm_page_max_user_wired;
19854a3a114SMark Johnston SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
19954a3a114SMark Johnston     &vm_page_max_user_wired, 0,
20054a3a114SMark Johnston     "system-wide limit to user-wired page count");
201df8bae1dSRodney W. Grimes 
202ebcddc72SAlan Cox static u_int isqrt(u_int num);
203ebcddc72SAlan Cox static int vm_pageout_launder(struct vm_domain *vmd, int launder,
204ebcddc72SAlan Cox     bool in_shortfall);
205ebcddc72SAlan Cox static void vm_pageout_laundry_worker(void *arg);
206cd41fc12SDavid Greenman 
2075cd29d0fSMark Johnston struct scan_state {
2085cd29d0fSMark Johnston 	struct vm_batchqueue bq;
2098d220203SAlan Cox 	struct vm_pagequeue *pq;
2105cd29d0fSMark Johnston 	vm_page_t	marker;
2115cd29d0fSMark Johnston 	int		maxscan;
2125cd29d0fSMark Johnston 	int		scanned;
2135cd29d0fSMark Johnston };
2148dbca793STor Egge 
2155cd29d0fSMark Johnston static void
2165cd29d0fSMark Johnston vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
2175cd29d0fSMark Johnston     vm_page_t marker, vm_page_t after, int maxscan)
2185cd29d0fSMark Johnston {
2198dbca793STor Egge 
2205cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
221*41fd4b94SMark Johnston 	KASSERT((vm_page_aflags(marker) & PGA_ENQUEUED) == 0,
2225cd29d0fSMark Johnston 	    ("marker %p already enqueued", marker));
2235cd29d0fSMark Johnston 
2245cd29d0fSMark Johnston 	if (after == NULL)
2255cd29d0fSMark Johnston 		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
2265cd29d0fSMark Johnston 	else
2275cd29d0fSMark Johnston 		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
2285cd29d0fSMark Johnston 	vm_page_aflag_set(marker, PGA_ENQUEUED);
2295cd29d0fSMark Johnston 
2305cd29d0fSMark Johnston 	vm_batchqueue_init(&ss->bq);
2315cd29d0fSMark Johnston 	ss->pq = pq;
2325cd29d0fSMark Johnston 	ss->marker = marker;
2335cd29d0fSMark Johnston 	ss->maxscan = maxscan;
2345cd29d0fSMark Johnston 	ss->scanned = 0;
2358d220203SAlan Cox 	vm_pagequeue_unlock(pq);
2365cd29d0fSMark Johnston }
2378dbca793STor Egge 
2385cd29d0fSMark Johnston static void
2395cd29d0fSMark Johnston vm_pageout_end_scan(struct scan_state *ss)
2405cd29d0fSMark Johnston {
2415cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
2425cd29d0fSMark Johnston 
2435cd29d0fSMark Johnston 	pq = ss->pq;
2445cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
245*41fd4b94SMark Johnston 	KASSERT((vm_page_aflags(ss->marker) & PGA_ENQUEUED) != 0,
2465cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2475cd29d0fSMark Johnston 
2485cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
2495cd29d0fSMark Johnston 	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
250899fe184SMark Johnston 	pq->pq_pdpages += ss->scanned;
2518dbca793STor Egge }
2528dbca793STor Egge 
2538dbca793STor Egge /*
2545cd29d0fSMark Johnston  * Add a small number of queued pages to a batch queue for later processing
2555cd29d0fSMark Johnston  * without the corresponding queue lock held.  The caller must have enqueued a
2565cd29d0fSMark Johnston  * marker page at the desired start point for the scan.  Pages will be
2575cd29d0fSMark Johnston  * physically dequeued if the caller so requests.  Otherwise, the returned
2585cd29d0fSMark Johnston  * batch may contain marker pages, and it is up to the caller to handle them.
2595cd29d0fSMark Johnston  *
26036f8fe9bSMark Johnston  * When processing the batch queue, vm_page_queue() must be used to
26136f8fe9bSMark Johnston  * determine whether the page has been logically dequeued by another thread.
26236f8fe9bSMark Johnston  * Once this check is performed, the page lock guarantees that the page will
26336f8fe9bSMark Johnston  * not be disassociated from the queue.
2645cd29d0fSMark Johnston  */
2655cd29d0fSMark Johnston static __always_inline void
2665cd29d0fSMark Johnston vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
2675cd29d0fSMark Johnston {
2688d220203SAlan Cox 	struct vm_pagequeue *pq;
269d70f0ab3SMark Johnston 	vm_page_t m, marker, n;
2708c616246SKonstantin Belousov 
2715cd29d0fSMark Johnston 	marker = ss->marker;
2725cd29d0fSMark Johnston 	pq = ss->pq;
2738c616246SKonstantin Belousov 
274*41fd4b94SMark Johnston 	KASSERT((marker->astate.flags & PGA_ENQUEUED) != 0,
2755cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2768c616246SKonstantin Belousov 
2778d220203SAlan Cox 	vm_pagequeue_lock(pq);
2785cd29d0fSMark Johnston 	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
2795cd29d0fSMark Johnston 	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
280d70f0ab3SMark Johnston 	    m = n, ss->scanned++) {
281d70f0ab3SMark Johnston 		n = TAILQ_NEXT(m, plinks.q);
2825cd29d0fSMark Johnston 		if ((m->flags & PG_MARKER) == 0) {
283*41fd4b94SMark Johnston 			KASSERT((m->astate.flags & PGA_ENQUEUED) != 0,
2845cd29d0fSMark Johnston 			    ("page %p not enqueued", m));
2855cd29d0fSMark Johnston 			KASSERT((m->flags & PG_FICTITIOUS) == 0,
2865cd29d0fSMark Johnston 			    ("Fictitious page %p cannot be in page queue", m));
2875cd29d0fSMark Johnston 			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2885cd29d0fSMark Johnston 			    ("Unmanaged page %p cannot be in page queue", m));
2895cd29d0fSMark Johnston 		} else if (dequeue)
2905cd29d0fSMark Johnston 			continue;
2918c616246SKonstantin Belousov 
2925cd29d0fSMark Johnston 		(void)vm_batchqueue_insert(&ss->bq, m);
2935cd29d0fSMark Johnston 		if (dequeue) {
2945cd29d0fSMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2955cd29d0fSMark Johnston 			vm_page_aflag_clear(m, PGA_ENQUEUED);
2965cd29d0fSMark Johnston 		}
2975cd29d0fSMark Johnston 	}
2985cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
2995cd29d0fSMark Johnston 	if (__predict_true(m != NULL))
3005cd29d0fSMark Johnston 		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
3015cd29d0fSMark Johnston 	else
3025cd29d0fSMark Johnston 		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
3035cd29d0fSMark Johnston 	if (dequeue)
3045cd29d0fSMark Johnston 		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
3055cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
3065cd29d0fSMark Johnston }
3075cd29d0fSMark Johnston 
308fee2a2faSMark Johnston /*
309fee2a2faSMark Johnston  * Return the next page to be scanned, or NULL if the scan is complete.
310fee2a2faSMark Johnston  */
3115cd29d0fSMark Johnston static __always_inline vm_page_t
3125cd29d0fSMark Johnston vm_pageout_next(struct scan_state *ss, const bool dequeue)
3135cd29d0fSMark Johnston {
3145cd29d0fSMark Johnston 
3155cd29d0fSMark Johnston 	if (ss->bq.bq_cnt == 0)
3165cd29d0fSMark Johnston 		vm_pageout_collect_batch(ss, dequeue);
3175cd29d0fSMark Johnston 	return (vm_batchqueue_pop(&ss->bq));
3188c616246SKonstantin Belousov }
3198c616246SKonstantin Belousov 
3208c616246SKonstantin Belousov /*
321248fe642SAlan Cox  * Scan for pages at adjacent offsets within the given page's object that are
322248fe642SAlan Cox  * eligible for laundering, form a cluster of these pages and the given page,
323248fe642SAlan Cox  * and launder that cluster.
32426f9a767SRodney W. Grimes  */
3253af76890SPoul-Henning Kamp static int
32634d8b7eaSJeff Roberson vm_pageout_cluster(vm_page_t m)
32724a1cce3SDavid Greenman {
32854d92145SMatthew Dillon 	vm_object_t object;
329248fe642SAlan Cox 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
330248fe642SAlan Cox 	vm_pindex_t pindex;
331248fe642SAlan Cox 	int ib, is, page_base, pageout_count;
33226f9a767SRodney W. Grimes 
33317f6a17bSAlan Cox 	object = m->object;
33489f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
335248fe642SAlan Cox 	pindex = m->pindex;
3360cddd8f0SMatthew Dillon 
337c7aebda8SAttilio Rao 	vm_page_assert_unbusied(m);
3380d94caffSDavid Greenman 
33991b4f427SAlan Cox 	mc[vm_pageout_page_count] = pb = ps = m;
34026f9a767SRodney W. Grimes 	pageout_count = 1;
341f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
34290ecac61SMatthew Dillon 	ib = 1;
34390ecac61SMatthew Dillon 	is = 1;
34490ecac61SMatthew Dillon 
34524a1cce3SDavid Greenman 	/*
346248fe642SAlan Cox 	 * We can cluster only if the page is not clean, busy, or held, and
347ebcddc72SAlan Cox 	 * the page is in the laundry queue.
34890ecac61SMatthew Dillon 	 *
34990ecac61SMatthew Dillon 	 * During heavy mmap/modification loads the pageout
35090ecac61SMatthew Dillon 	 * daemon can really fragment the underlying file
351248fe642SAlan Cox 	 * due to flushing pages out of order and not trying to
352248fe642SAlan Cox 	 * align the clusters (which leaves sporadic out-of-order
35390ecac61SMatthew Dillon 	 * holes).  To solve this problem we do the reverse scan
35490ecac61SMatthew Dillon 	 * first and attempt to align our cluster, then do a
35590ecac61SMatthew Dillon 	 * forward scan if room remains.
35624a1cce3SDavid Greenman 	 */
35790ecac61SMatthew Dillon more:
358248fe642SAlan Cox 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
35990ecac61SMatthew Dillon 		if (ib > pindex) {
36090ecac61SMatthew Dillon 			ib = 0;
36190ecac61SMatthew Dillon 			break;
362f6b04d2bSDavid Greenman 		}
363fee2a2faSMark Johnston 		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p) ||
364fee2a2faSMark Johnston 		    vm_page_wired(p)) {
36590ecac61SMatthew Dillon 			ib = 0;
36690ecac61SMatthew Dillon 			break;
367f6b04d2bSDavid Greenman 		}
36824a1cce3SDavid Greenman 		vm_page_test_dirty(p);
3691b5c869dSMark Johnston 		if (p->dirty == 0) {
370eb5d3969SAlan Cox 			ib = 0;
371eb5d3969SAlan Cox 			break;
372eb5d3969SAlan Cox 		}
373fee2a2faSMark Johnston 		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
37490ecac61SMatthew Dillon 			ib = 0;
37524a1cce3SDavid Greenman 			break;
376f6b04d2bSDavid Greenman 		}
37791b4f427SAlan Cox 		mc[--page_base] = pb = p;
37890ecac61SMatthew Dillon 		++pageout_count;
37990ecac61SMatthew Dillon 		++ib;
380248fe642SAlan Cox 
38124a1cce3SDavid Greenman 		/*
382248fe642SAlan Cox 		 * We are at an alignment boundary.  Stop here, and switch
383248fe642SAlan Cox 		 * directions.  Do not clear ib.
38424a1cce3SDavid Greenman 		 */
38590ecac61SMatthew Dillon 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
38690ecac61SMatthew Dillon 			break;
38724a1cce3SDavid Greenman 	}
38890ecac61SMatthew Dillon 	while (pageout_count < vm_pageout_page_count &&
38990ecac61SMatthew Dillon 	    pindex + is < object->size) {
390fee2a2faSMark Johnston 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p) ||
391fee2a2faSMark Johnston 		    vm_page_wired(p))
39290ecac61SMatthew Dillon 			break;
39324a1cce3SDavid Greenman 		vm_page_test_dirty(p);
3941b5c869dSMark Johnston 		if (p->dirty == 0)
395eb5d3969SAlan Cox 			break;
396*41fd4b94SMark Johnston 		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p))
39724a1cce3SDavid Greenman 			break;
39891b4f427SAlan Cox 		mc[page_base + pageout_count] = ps = p;
39990ecac61SMatthew Dillon 		++pageout_count;
40090ecac61SMatthew Dillon 		++is;
40124a1cce3SDavid Greenman 	}
40290ecac61SMatthew Dillon 
40390ecac61SMatthew Dillon 	/*
40490ecac61SMatthew Dillon 	 * If we exhausted our forward scan, continue with the reverse scan
405248fe642SAlan Cox 	 * when possible, even past an alignment boundary.  This catches
406248fe642SAlan Cox 	 * boundary conditions.
40790ecac61SMatthew Dillon 	 */
408248fe642SAlan Cox 	if (ib != 0 && pageout_count < vm_pageout_page_count)
40990ecac61SMatthew Dillon 		goto more;
410f6b04d2bSDavid Greenman 
41199e6e193SMark Johnston 	return (vm_pageout_flush(&mc[page_base], pageout_count,
41299e6e193SMark Johnston 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
413aef922f5SJohn Dyson }
414aef922f5SJohn Dyson 
4151c7c3c6aSMatthew Dillon /*
4161c7c3c6aSMatthew Dillon  * vm_pageout_flush() - launder the given pages
4171c7c3c6aSMatthew Dillon  *
4181c7c3c6aSMatthew Dillon  *	The given pages are laundered.  Note that we setup for the start of
4191c7c3c6aSMatthew Dillon  *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
4201c7c3c6aSMatthew Dillon  *	reference count all in here rather then in the parent.  If we want
4211c7c3c6aSMatthew Dillon  *	the parent to do more sophisticated things we may have to change
4221c7c3c6aSMatthew Dillon  *	the ordering.
4231e8a675cSKonstantin Belousov  *
4241e8a675cSKonstantin Belousov  *	Returned runlen is the count of pages between mreq and first
4251e8a675cSKonstantin Belousov  *	page after mreq with status VM_PAGER_AGAIN.
426126d6082SKonstantin Belousov  *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
427126d6082SKonstantin Belousov  *	for any page in runlen set.
4281c7c3c6aSMatthew Dillon  */
429aef922f5SJohn Dyson int
430126d6082SKonstantin Belousov vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
431126d6082SKonstantin Belousov     boolean_t *eio)
432aef922f5SJohn Dyson {
4332e3b314dSAlan Cox 	vm_object_t object = mc[0]->object;
434aef922f5SJohn Dyson 	int pageout_status[count];
43595461b45SJohn Dyson 	int numpagedout = 0;
4361e8a675cSKonstantin Belousov 	int i, runlen;
437aef922f5SJohn Dyson 
43889f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
4397bec141bSKip Macy 
4401c7c3c6aSMatthew Dillon 	/*
441aed9aaaaSMark Johnston 	 * Initiate I/O.  Mark the pages busy and verify that they're valid
442aed9aaaaSMark Johnston 	 * and read-only.
4431c7c3c6aSMatthew Dillon 	 *
4441c7c3c6aSMatthew Dillon 	 * We do not have to fixup the clean/dirty bits here... we can
4451c7c3c6aSMatthew Dillon 	 * allow the pager to do it after the I/O completes.
44602fa91d3SMatthew Dillon 	 *
44702fa91d3SMatthew Dillon 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
44802fa91d3SMatthew Dillon 	 * edge case with file fragments.
4491c7c3c6aSMatthew Dillon 	 */
4508f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
4517a935082SAlan Cox 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
4527a935082SAlan Cox 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
4537a935082SAlan Cox 			mc[i], i, count));
454*41fd4b94SMark Johnston 		KASSERT((vm_page_aflags(mc[i]) & PGA_WRITEABLE) == 0,
455aed9aaaaSMark Johnston 		    ("vm_pageout_flush: writeable page %p", mc[i]));
456c7aebda8SAttilio Rao 		vm_page_sbusy(mc[i]);
4572965a453SKip Macy 	}
458d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
459aef922f5SJohn Dyson 
460d076fbeaSAlan Cox 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
46126f9a767SRodney W. Grimes 
4621e8a675cSKonstantin Belousov 	runlen = count - mreq;
463126d6082SKonstantin Belousov 	if (eio != NULL)
464126d6082SKonstantin Belousov 		*eio = FALSE;
465aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
466aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
46724a1cce3SDavid Greenman 
4684cd45723SAlan Cox 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
4696031c68dSAlan Cox 		    !pmap_page_is_write_mapped(mt),
4709ea8d1a6SAlan Cox 		    ("vm_pageout_flush: page %p is not write protected", mt));
47126f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
47226f9a767SRodney W. Grimes 		case VM_PAGER_OK:
473ebcddc72SAlan Cox 			vm_page_lock(mt);
474ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
475ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
476ebcddc72SAlan Cox 			vm_page_unlock(mt);
477ebcddc72SAlan Cox 			/* FALLTHROUGH */
47826f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
47995461b45SJohn Dyson 			numpagedout++;
48026f9a767SRodney W. Grimes 			break;
48126f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
48226f9a767SRodney W. Grimes 			/*
483ebcddc72SAlan Cox 			 * The page is outside the object's range.  We pretend
484ebcddc72SAlan Cox 			 * that the page out worked and clean the page, so the
485ebcddc72SAlan Cox 			 * changes will be lost if the page is reclaimed by
486ebcddc72SAlan Cox 			 * the page daemon.
48726f9a767SRodney W. Grimes 			 */
48890ecac61SMatthew Dillon 			vm_page_undirty(mt);
489ebcddc72SAlan Cox 			vm_page_lock(mt);
490ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
491ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
492ebcddc72SAlan Cox 			vm_page_unlock(mt);
49326f9a767SRodney W. Grimes 			break;
49426f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
49526f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
49626f9a767SRodney W. Grimes 			/*
497b1fd102eSMark Johnston 			 * If the page couldn't be paged out to swap because the
498b1fd102eSMark Johnston 			 * pager wasn't able to find space, place the page in
499b1fd102eSMark Johnston 			 * the PQ_UNSWAPPABLE holding queue.  This is an
500b1fd102eSMark Johnston 			 * optimization that prevents the page daemon from
501b1fd102eSMark Johnston 			 * wasting CPU cycles on pages that cannot be reclaimed
502b1fd102eSMark Johnston 			 * becase no swap device is configured.
503b1fd102eSMark Johnston 			 *
504b1fd102eSMark Johnston 			 * Otherwise, reactivate the page so that it doesn't
505b1fd102eSMark Johnston 			 * clog the laundry and inactive queues.  (We will try
506b1fd102eSMark Johnston 			 * paging it out again later.)
50726f9a767SRodney W. Grimes 			 */
5083c4a2440SAlan Cox 			vm_page_lock(mt);
509b1fd102eSMark Johnston 			if (object->type == OBJT_SWAP &&
510b1fd102eSMark Johnston 			    pageout_status[i] == VM_PAGER_FAIL) {
511b1fd102eSMark Johnston 				vm_page_unswappable(mt);
512b1fd102eSMark Johnston 				numpagedout++;
513b1fd102eSMark Johnston 			} else
51424a1cce3SDavid Greenman 				vm_page_activate(mt);
5153c4a2440SAlan Cox 			vm_page_unlock(mt);
516126d6082SKonstantin Belousov 			if (eio != NULL && i >= mreq && i - mreq < runlen)
517126d6082SKonstantin Belousov 				*eio = TRUE;
51826f9a767SRodney W. Grimes 			break;
51926f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
5201e8a675cSKonstantin Belousov 			if (i >= mreq && i - mreq < runlen)
5211e8a675cSKonstantin Belousov 				runlen = i - mreq;
52226f9a767SRodney W. Grimes 			break;
52326f9a767SRodney W. Grimes 		}
52426f9a767SRodney W. Grimes 
52526f9a767SRodney W. Grimes 		/*
5260d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
5270d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
5280d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
5290d94caffSDavid Greenman 		 * collapse.
53026f9a767SRodney W. Grimes 		 */
53126f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
532f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
533c7aebda8SAttilio Rao 			vm_page_sunbusy(mt);
5343c4a2440SAlan Cox 		}
5353c4a2440SAlan Cox 	}
5361e8a675cSKonstantin Belousov 	if (prunlen != NULL)
5371e8a675cSKonstantin Belousov 		*prunlen = runlen;
5383c4a2440SAlan Cox 	return (numpagedout);
53926f9a767SRodney W. Grimes }
54026f9a767SRodney W. Grimes 
541b1fd102eSMark Johnston static void
542b1fd102eSMark Johnston vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
543b1fd102eSMark Johnston {
544b1fd102eSMark Johnston 
545b1fd102eSMark Johnston 	atomic_store_rel_int(&swapdev_enabled, 1);
546b1fd102eSMark Johnston }
547b1fd102eSMark Johnston 
548b1fd102eSMark Johnston static void
549b1fd102eSMark Johnston vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
550b1fd102eSMark Johnston {
551b1fd102eSMark Johnston 
552b1fd102eSMark Johnston 	if (swap_pager_nswapdev() == 1)
553b1fd102eSMark Johnston 		atomic_store_rel_int(&swapdev_enabled, 0);
554b1fd102eSMark Johnston }
555b1fd102eSMark Johnston 
5561c7c3c6aSMatthew Dillon /*
55734d8b7eaSJeff Roberson  * Attempt to acquire all of the necessary locks to launder a page and
55834d8b7eaSJeff Roberson  * then call through the clustering layer to PUTPAGES.  Wait a short
55934d8b7eaSJeff Roberson  * time for a vnode lock.
56034d8b7eaSJeff Roberson  *
56134d8b7eaSJeff Roberson  * Requires the page and object lock on entry, releases both before return.
56234d8b7eaSJeff Roberson  * Returns 0 on success and an errno otherwise.
56334d8b7eaSJeff Roberson  */
56434d8b7eaSJeff Roberson static int
565ebcddc72SAlan Cox vm_pageout_clean(vm_page_t m, int *numpagedout)
56634d8b7eaSJeff Roberson {
56734d8b7eaSJeff Roberson 	struct vnode *vp;
56834d8b7eaSJeff Roberson 	struct mount *mp;
56934d8b7eaSJeff Roberson 	vm_object_t object;
57034d8b7eaSJeff Roberson 	vm_pindex_t pindex;
57134d8b7eaSJeff Roberson 	int error, lockmode;
57234d8b7eaSJeff Roberson 
57334d8b7eaSJeff Roberson 	object = m->object;
57434d8b7eaSJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
57534d8b7eaSJeff Roberson 	error = 0;
57634d8b7eaSJeff Roberson 	vp = NULL;
57734d8b7eaSJeff Roberson 	mp = NULL;
57834d8b7eaSJeff Roberson 
57934d8b7eaSJeff Roberson 	/*
58034d8b7eaSJeff Roberson 	 * The object is already known NOT to be dead.   It
58134d8b7eaSJeff Roberson 	 * is possible for the vget() to block the whole
58234d8b7eaSJeff Roberson 	 * pageout daemon, but the new low-memory handling
58334d8b7eaSJeff Roberson 	 * code should prevent it.
58434d8b7eaSJeff Roberson 	 *
58534d8b7eaSJeff Roberson 	 * We can't wait forever for the vnode lock, we might
58634d8b7eaSJeff Roberson 	 * deadlock due to a vn_read() getting stuck in
58734d8b7eaSJeff Roberson 	 * vm_wait while holding this vnode.  We skip the
58834d8b7eaSJeff Roberson 	 * vnode if we can't get it in a reasonable amount
58934d8b7eaSJeff Roberson 	 * of time.
59034d8b7eaSJeff Roberson 	 */
59134d8b7eaSJeff Roberson 	if (object->type == OBJT_VNODE) {
59234d8b7eaSJeff Roberson 		vp = object->handle;
59334d8b7eaSJeff Roberson 		if (vp->v_type == VREG &&
59434d8b7eaSJeff Roberson 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
59534d8b7eaSJeff Roberson 			mp = NULL;
59634d8b7eaSJeff Roberson 			error = EDEADLK;
59734d8b7eaSJeff Roberson 			goto unlock_all;
59834d8b7eaSJeff Roberson 		}
59934d8b7eaSJeff Roberson 		KASSERT(mp != NULL,
60034d8b7eaSJeff Roberson 		    ("vp %p with NULL v_mount", vp));
60134d8b7eaSJeff Roberson 		vm_object_reference_locked(object);
60234d8b7eaSJeff Roberson 		pindex = m->pindex;
60334d8b7eaSJeff Roberson 		VM_OBJECT_WUNLOCK(object);
60434d8b7eaSJeff Roberson 		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
60534d8b7eaSJeff Roberson 		    LK_SHARED : LK_EXCLUSIVE;
60634d8b7eaSJeff Roberson 		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
60734d8b7eaSJeff Roberson 			vp = NULL;
60834d8b7eaSJeff Roberson 			error = EDEADLK;
60934d8b7eaSJeff Roberson 			goto unlock_mp;
61034d8b7eaSJeff Roberson 		}
61134d8b7eaSJeff Roberson 		VM_OBJECT_WLOCK(object);
61257cd81a3SMark Johnston 
61357cd81a3SMark Johnston 		/*
61457cd81a3SMark Johnston 		 * Ensure that the object and vnode were not disassociated
61557cd81a3SMark Johnston 		 * while locks were dropped.
61657cd81a3SMark Johnston 		 */
61757cd81a3SMark Johnston 		if (vp->v_object != object) {
61857cd81a3SMark Johnston 			error = ENOENT;
61957cd81a3SMark Johnston 			goto unlock_all;
62057cd81a3SMark Johnston 		}
62157cd81a3SMark Johnston 
62234d8b7eaSJeff Roberson 		/*
62334d8b7eaSJeff Roberson 		 * While the object and page were unlocked, the page
62434d8b7eaSJeff Roberson 		 * may have been:
62534d8b7eaSJeff Roberson 		 * (1) moved to a different queue,
62634d8b7eaSJeff Roberson 		 * (2) reallocated to a different object,
62734d8b7eaSJeff Roberson 		 * (3) reallocated to a different offset, or
62834d8b7eaSJeff Roberson 		 * (4) cleaned.
62934d8b7eaSJeff Roberson 		 */
630ebcddc72SAlan Cox 		if (!vm_page_in_laundry(m) || m->object != object ||
63134d8b7eaSJeff Roberson 		    m->pindex != pindex || m->dirty == 0) {
63234d8b7eaSJeff Roberson 			vm_page_unlock(m);
63334d8b7eaSJeff Roberson 			error = ENXIO;
63434d8b7eaSJeff Roberson 			goto unlock_all;
63534d8b7eaSJeff Roberson 		}
63634d8b7eaSJeff Roberson 
63734d8b7eaSJeff Roberson 		/*
638fee2a2faSMark Johnston 		 * The page may have been busied while the object and page
639fee2a2faSMark Johnston 		 * locks were released.
64034d8b7eaSJeff Roberson 		 */
641fee2a2faSMark Johnston 		if (vm_page_busied(m)) {
64234d8b7eaSJeff Roberson 			vm_page_unlock(m);
64334d8b7eaSJeff Roberson 			error = EBUSY;
64434d8b7eaSJeff Roberson 			goto unlock_all;
64534d8b7eaSJeff Roberson 		}
64634d8b7eaSJeff Roberson 	}
64734d8b7eaSJeff Roberson 
64834d8b7eaSJeff Roberson 	/*
649fee2a2faSMark Johnston 	 * Remove all writeable mappings, failing if the page is wired.
650fee2a2faSMark Johnston 	 */
651fee2a2faSMark Johnston 	if (!vm_page_try_remove_write(m)) {
652fee2a2faSMark Johnston 		vm_page_unlock(m);
653fee2a2faSMark Johnston 		error = EBUSY;
654fee2a2faSMark Johnston 		goto unlock_all;
655fee2a2faSMark Johnston 	}
656fee2a2faSMark Johnston 
657fee2a2faSMark Johnston 	/*
65834d8b7eaSJeff Roberson 	 * If a page is dirty, then it is either being washed
65934d8b7eaSJeff Roberson 	 * (but not yet cleaned) or it is still in the
66034d8b7eaSJeff Roberson 	 * laundry.  If it is still in the laundry, then we
66134d8b7eaSJeff Roberson 	 * start the cleaning operation.
66234d8b7eaSJeff Roberson 	 */
663ebcddc72SAlan Cox 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
66434d8b7eaSJeff Roberson 		error = EIO;
66534d8b7eaSJeff Roberson 
66634d8b7eaSJeff Roberson unlock_all:
66734d8b7eaSJeff Roberson 	VM_OBJECT_WUNLOCK(object);
66834d8b7eaSJeff Roberson 
66934d8b7eaSJeff Roberson unlock_mp:
67034d8b7eaSJeff Roberson 	vm_page_lock_assert(m, MA_NOTOWNED);
67134d8b7eaSJeff Roberson 	if (mp != NULL) {
67234d8b7eaSJeff Roberson 		if (vp != NULL)
67334d8b7eaSJeff Roberson 			vput(vp);
67434d8b7eaSJeff Roberson 		vm_object_deallocate(object);
67534d8b7eaSJeff Roberson 		vn_finished_write(mp);
67634d8b7eaSJeff Roberson 	}
67734d8b7eaSJeff Roberson 
67834d8b7eaSJeff Roberson 	return (error);
67934d8b7eaSJeff Roberson }
68034d8b7eaSJeff Roberson 
68134d8b7eaSJeff Roberson /*
682ebcddc72SAlan Cox  * Attempt to launder the specified number of pages.
683ebcddc72SAlan Cox  *
684ebcddc72SAlan Cox  * Returns the number of pages successfully laundered.
685ebcddc72SAlan Cox  */
686ebcddc72SAlan Cox static int
687ebcddc72SAlan Cox vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
688ebcddc72SAlan Cox {
6895cd29d0fSMark Johnston 	struct scan_state ss;
690ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
691ebcddc72SAlan Cox 	vm_object_t object;
6925cd29d0fSMark Johnston 	vm_page_t m, marker;
693*41fd4b94SMark Johnston 	vm_page_astate_t old, new;
694*41fd4b94SMark Johnston 	int act_delta, error, numpagedout, queue, refs, starting_target;
695ebcddc72SAlan Cox 	int vnodes_skipped;
69660256604SMark Johnston 	bool pageout_ok;
697ebcddc72SAlan Cox 
6985cd29d0fSMark Johnston 	object = NULL;
699ebcddc72SAlan Cox 	starting_target = launder;
700ebcddc72SAlan Cox 	vnodes_skipped = 0;
701ebcddc72SAlan Cox 
702ebcddc72SAlan Cox 	/*
703b1fd102eSMark Johnston 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
704ebcddc72SAlan Cox 	 * once the target number of dirty pages have been laundered, or once
705ebcddc72SAlan Cox 	 * we've reached the end of the queue.  A single iteration of this loop
706ebcddc72SAlan Cox 	 * may cause more than one page to be laundered because of clustering.
707ebcddc72SAlan Cox 	 *
708b1fd102eSMark Johnston 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
709b1fd102eSMark Johnston 	 * swap devices are configured.
710ebcddc72SAlan Cox 	 */
711b1fd102eSMark Johnston 	if (atomic_load_acq_int(&swapdev_enabled))
71264b38930SMark Johnston 		queue = PQ_UNSWAPPABLE;
713b1fd102eSMark Johnston 	else
71464b38930SMark Johnston 		queue = PQ_LAUNDRY;
715ebcddc72SAlan Cox 
716b1fd102eSMark Johnston scan:
71764b38930SMark Johnston 	marker = &vmd->vmd_markers[queue];
7185cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[queue];
719ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
7205cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
7215cd29d0fSMark Johnston 	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
7225cd29d0fSMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
723ebcddc72SAlan Cox 			continue;
7245cd29d0fSMark Johnston 
7255cd29d0fSMark Johnston 		/*
726*41fd4b94SMark Johnston 		 * Perform some quick and racy checks of the page's queue state.
727*41fd4b94SMark Johnston 		 * Bail if things are not as we expect.
7285cd29d0fSMark Johnston 		 */
729*41fd4b94SMark Johnston 		old = vm_page_astate_load(m);
730*41fd4b94SMark Johnston 		if (old.queue != PQ_LAUNDRY || (old.flags & PGA_ENQUEUED) == 0)
731ebcddc72SAlan Cox 			continue;
732*41fd4b94SMark Johnston 		if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
7337cdeaf33SMark Johnston 			vm_page_pqbatch_submit(m, queue);
734ebcddc72SAlan Cox 			continue;
735ebcddc72SAlan Cox 		}
736ebcddc72SAlan Cox 
7375cd29d0fSMark Johnston 		if (object != m->object) {
73860256604SMark Johnston 			if (object != NULL)
7395cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
740fee2a2faSMark Johnston 			object = (vm_object_t)atomic_load_ptr(&m->object);
741*41fd4b94SMark Johnston 			if (object == NULL)
742fee2a2faSMark Johnston 				continue;
743*41fd4b94SMark Johnston 			VM_OBJECT_WLOCK(object);
744*41fd4b94SMark Johnston 			if (m->object != object) {
745*41fd4b94SMark Johnston 				VM_OBJECT_WUNLOCK(object);
746*41fd4b94SMark Johnston 				object = NULL;
747*41fd4b94SMark Johnston 				continue;
748*41fd4b94SMark Johnston 			}
749*41fd4b94SMark Johnston 		}
7505cd29d0fSMark Johnston 
7515cd29d0fSMark Johnston 		if (vm_page_busied(m))
7525cd29d0fSMark Johnston 			continue;
753ebcddc72SAlan Cox 
754ebcddc72SAlan Cox 		/*
755*41fd4b94SMark Johnston 		 * Check for wirings now that we hold the object lock and have
756*41fd4b94SMark Johnston 		 * verified that the page is unbusied.  If the page is mapped,
757*41fd4b94SMark Johnston 		 * it may still be wired by pmap lookups.  The call to
758fee2a2faSMark Johnston 		 * vm_page_try_remove_all() below atomically checks for such
759fee2a2faSMark Johnston 		 * wirings and removes mappings.  If the page is unmapped, the
760fee2a2faSMark Johnston 		 * wire count is guaranteed not to increase.
761fee2a2faSMark Johnston 		 */
762fee2a2faSMark Johnston 		if (__predict_false(vm_page_wired(m))) {
763*41fd4b94SMark Johnston 			vm_page_pqbatch_submit(m, queue);
764fee2a2faSMark Johnston 			continue;
765fee2a2faSMark Johnston 		}
766fee2a2faSMark Johnston 
767fee2a2faSMark Johnston 		/*
768ebcddc72SAlan Cox 		 * Invalid pages can be easily freed.  They cannot be
769ebcddc72SAlan Cox 		 * mapped; vm_page_free() asserts this.
770ebcddc72SAlan Cox 		 */
771ebcddc72SAlan Cox 		if (m->valid == 0)
772ebcddc72SAlan Cox 			goto free_page;
773ebcddc72SAlan Cox 
774ebcddc72SAlan Cox 		/*
775ebcddc72SAlan Cox 		 * If the page has been referenced and the object is not dead,
776ebcddc72SAlan Cox 		 * reactivate or requeue the page depending on whether the
777ebcddc72SAlan Cox 		 * object is mapped.
778d7aeb429SAlan Cox 		 *
779d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
780d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
781d7aeb429SAlan Cox 		 * observed here and now.
782ebcddc72SAlan Cox 		 */
783*41fd4b94SMark Johnston 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
784*41fd4b94SMark Johnston 
785*41fd4b94SMark Johnston 		for (old = vm_page_astate_load(m);;) {
786*41fd4b94SMark Johnston 			if (old.queue != queue ||
787*41fd4b94SMark Johnston 			    (old.flags & PGA_ENQUEUED) == 0)
788*41fd4b94SMark Johnston 				goto next_page;
789*41fd4b94SMark Johnston 
790*41fd4b94SMark Johnston 			if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
791*41fd4b94SMark Johnston 				vm_page_pqbatch_submit(m, queue);
792*41fd4b94SMark Johnston 				goto next_page;
793d7aeb429SAlan Cox 			}
794*41fd4b94SMark Johnston 
795*41fd4b94SMark Johnston 			new = old;
796*41fd4b94SMark Johnston 			act_delta = refs;
797*41fd4b94SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
798*41fd4b94SMark Johnston 				new.flags &= ~PGA_REFERENCED;
799d7aeb429SAlan Cox 				act_delta++;
800ebcddc72SAlan Cox 			}
801ebcddc72SAlan Cox 			if (act_delta != 0) {
802ebcddc72SAlan Cox 				if (object->ref_count != 0) {
803ebcddc72SAlan Cox 					/*
804*41fd4b94SMark Johnston 					 * Increase the activation count if the
805*41fd4b94SMark Johnston 					 * page was referenced while in the
806*41fd4b94SMark Johnston 					 * laundry queue.  This makes it less
807*41fd4b94SMark Johnston 					 * likely that the page will be returned
808*41fd4b94SMark Johnston 					 * prematurely to the inactive queue.
809ebcddc72SAlan Cox 					 */
810*41fd4b94SMark Johnston 					new.act_count += ACT_ADVANCE +
811*41fd4b94SMark Johnston 					    act_delta;
812*41fd4b94SMark Johnston 					if (new.act_count > ACT_MAX)
813*41fd4b94SMark Johnston 						new.act_count = ACT_MAX;
814*41fd4b94SMark Johnston 
815*41fd4b94SMark Johnston 					new.flags |= PGA_REQUEUE;
816*41fd4b94SMark Johnston 					new.queue = PQ_ACTIVE;
817*41fd4b94SMark Johnston 					if (!vm_page_pqstate_commit(m, &old,
818*41fd4b94SMark Johnston 					    new))
819*41fd4b94SMark Johnston 						continue;
820*41fd4b94SMark Johnston 
821*41fd4b94SMark Johnston 					VM_CNT_INC(v_reactivated);
822ebcddc72SAlan Cox 
823ebcddc72SAlan Cox 					/*
824*41fd4b94SMark Johnston 					 * If this was a background laundering,
825*41fd4b94SMark Johnston 					 * count activated pages towards our
826*41fd4b94SMark Johnston 					 * target.  The purpose of background
827*41fd4b94SMark Johnston 					 * laundering is to ensure that pages
828*41fd4b94SMark Johnston 					 * are eventually cycled through the
829*41fd4b94SMark Johnston 					 * laundry queue, and an activation is a
830*41fd4b94SMark Johnston 					 * valid way out.
831ebcddc72SAlan Cox 					 */
832ebcddc72SAlan Cox 					if (!in_shortfall)
833ebcddc72SAlan Cox 						launder--;
834*41fd4b94SMark Johnston 					goto next_page;
8355cd29d0fSMark Johnston 				} else if ((object->flags & OBJ_DEAD) == 0) {
836*41fd4b94SMark Johnston 					vm_page_launder(m);
837*41fd4b94SMark Johnston 					goto next_page;
8385cd29d0fSMark Johnston 				}
839ebcddc72SAlan Cox 			}
840*41fd4b94SMark Johnston 			break;
841*41fd4b94SMark Johnston 		}
842ebcddc72SAlan Cox 
843ebcddc72SAlan Cox 		/*
844ebcddc72SAlan Cox 		 * If the page appears to be clean at the machine-independent
845ebcddc72SAlan Cox 		 * layer, then remove all of its mappings from the pmap in
846ebcddc72SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
847ebcddc72SAlan Cox 		 * mappings allow write access, then the page may still be
848ebcddc72SAlan Cox 		 * modified until the last of those mappings are removed.
849ebcddc72SAlan Cox 		 */
850ebcddc72SAlan Cox 		if (object->ref_count != 0) {
851ebcddc72SAlan Cox 			vm_page_test_dirty(m);
852fee2a2faSMark Johnston 			if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
853*41fd4b94SMark Johnston 				vm_page_pqbatch_submit(m, queue);
854fee2a2faSMark Johnston 				continue;
855fee2a2faSMark Johnston 			}
856ebcddc72SAlan Cox 		}
857ebcddc72SAlan Cox 
858ebcddc72SAlan Cox 		/*
859ebcddc72SAlan Cox 		 * Clean pages are freed, and dirty pages are paged out unless
860ebcddc72SAlan Cox 		 * they belong to a dead object.  Requeueing dirty pages from
861ebcddc72SAlan Cox 		 * dead objects is pointless, as they are being paged out and
862ebcddc72SAlan Cox 		 * freed by the thread that destroyed the object.
863ebcddc72SAlan Cox 		 */
864ebcddc72SAlan Cox 		if (m->dirty == 0) {
865ebcddc72SAlan Cox free_page:
866ebcddc72SAlan Cox 			vm_page_free(m);
86783c9dea1SGleb Smirnoff 			VM_CNT_INC(v_dfree);
868ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0) {
869ebcddc72SAlan Cox 			if (object->type != OBJT_SWAP &&
870ebcddc72SAlan Cox 			    object->type != OBJT_DEFAULT)
871ebcddc72SAlan Cox 				pageout_ok = true;
872ebcddc72SAlan Cox 			else if (disable_swap_pageouts)
873ebcddc72SAlan Cox 				pageout_ok = false;
874ebcddc72SAlan Cox 			else
875ebcddc72SAlan Cox 				pageout_ok = true;
876ebcddc72SAlan Cox 			if (!pageout_ok) {
877*41fd4b94SMark Johnston 				vm_page_launder(m);
8785cd29d0fSMark Johnston 				continue;
879ebcddc72SAlan Cox 			}
880ebcddc72SAlan Cox 
881ebcddc72SAlan Cox 			/*
882ebcddc72SAlan Cox 			 * Form a cluster with adjacent, dirty pages from the
883ebcddc72SAlan Cox 			 * same object, and page out that entire cluster.
884ebcddc72SAlan Cox 			 *
885ebcddc72SAlan Cox 			 * The adjacent, dirty pages must also be in the
886ebcddc72SAlan Cox 			 * laundry.  However, their mappings are not checked
887ebcddc72SAlan Cox 			 * for new references.  Consequently, a recently
888ebcddc72SAlan Cox 			 * referenced page may be paged out.  However, that
889ebcddc72SAlan Cox 			 * page will not be prematurely reclaimed.  After page
890ebcddc72SAlan Cox 			 * out, the page will be placed in the inactive queue,
891ebcddc72SAlan Cox 			 * where any new references will be detected and the
892ebcddc72SAlan Cox 			 * page reactivated.
893ebcddc72SAlan Cox 			 */
894ebcddc72SAlan Cox 			error = vm_pageout_clean(m, &numpagedout);
895ebcddc72SAlan Cox 			if (error == 0) {
896ebcddc72SAlan Cox 				launder -= numpagedout;
8975cd29d0fSMark Johnston 				ss.scanned += numpagedout;
898ebcddc72SAlan Cox 			} else if (error == EDEADLK) {
899ebcddc72SAlan Cox 				pageout_lock_miss++;
900ebcddc72SAlan Cox 				vnodes_skipped++;
901ebcddc72SAlan Cox 			}
90260256604SMark Johnston 			object = NULL;
903ebcddc72SAlan Cox 		}
904*41fd4b94SMark Johnston next_page:;
90546e39081SMark Johnston 	}
90646e39081SMark Johnston 	if (object != NULL) {
907ebcddc72SAlan Cox 		VM_OBJECT_WUNLOCK(object);
90846e39081SMark Johnston 		object = NULL;
90946e39081SMark Johnston 	}
910ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
9115cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
912ebcddc72SAlan Cox 	vm_pagequeue_unlock(pq);
913ebcddc72SAlan Cox 
91464b38930SMark Johnston 	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
91564b38930SMark Johnston 		queue = PQ_LAUNDRY;
916b1fd102eSMark Johnston 		goto scan;
917b1fd102eSMark Johnston 	}
918b1fd102eSMark Johnston 
919ebcddc72SAlan Cox 	/*
920ebcddc72SAlan Cox 	 * Wakeup the sync daemon if we skipped a vnode in a writeable object
921ebcddc72SAlan Cox 	 * and we didn't launder enough pages.
922ebcddc72SAlan Cox 	 */
923ebcddc72SAlan Cox 	if (vnodes_skipped > 0 && launder > 0)
924ebcddc72SAlan Cox 		(void)speedup_syncer();
925ebcddc72SAlan Cox 
926ebcddc72SAlan Cox 	return (starting_target - launder);
927ebcddc72SAlan Cox }
928ebcddc72SAlan Cox 
929ebcddc72SAlan Cox /*
930ebcddc72SAlan Cox  * Compute the integer square root.
931ebcddc72SAlan Cox  */
932ebcddc72SAlan Cox static u_int
933ebcddc72SAlan Cox isqrt(u_int num)
934ebcddc72SAlan Cox {
935ebcddc72SAlan Cox 	u_int bit, root, tmp;
936ebcddc72SAlan Cox 
93764f8d257SDoug Moore 	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
938ebcddc72SAlan Cox 	root = 0;
939ebcddc72SAlan Cox 	while (bit != 0) {
940ebcddc72SAlan Cox 		tmp = root + bit;
941ebcddc72SAlan Cox 		root >>= 1;
942ebcddc72SAlan Cox 		if (num >= tmp) {
943ebcddc72SAlan Cox 			num -= tmp;
944ebcddc72SAlan Cox 			root += bit;
945ebcddc72SAlan Cox 		}
946ebcddc72SAlan Cox 		bit >>= 2;
947ebcddc72SAlan Cox 	}
948ebcddc72SAlan Cox 	return (root);
949ebcddc72SAlan Cox }
950ebcddc72SAlan Cox 
951ebcddc72SAlan Cox /*
952ebcddc72SAlan Cox  * Perform the work of the laundry thread: periodically wake up and determine
953ebcddc72SAlan Cox  * whether any pages need to be laundered.  If so, determine the number of pages
954ebcddc72SAlan Cox  * that need to be laundered, and launder them.
955ebcddc72SAlan Cox  */
956ebcddc72SAlan Cox static void
957ebcddc72SAlan Cox vm_pageout_laundry_worker(void *arg)
958ebcddc72SAlan Cox {
959e2068d0bSJeff Roberson 	struct vm_domain *vmd;
960ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
96160684862SMark Johnston 	uint64_t nclean, ndirty, nfreed;
962e2068d0bSJeff Roberson 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
963ebcddc72SAlan Cox 	bool in_shortfall;
964ebcddc72SAlan Cox 
965e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
966e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
967e2068d0bSJeff Roberson 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
968e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
969ebcddc72SAlan Cox 
970ebcddc72SAlan Cox 	shortfall = 0;
971ebcddc72SAlan Cox 	in_shortfall = false;
972ebcddc72SAlan Cox 	shortfall_cycle = 0;
9738002c3a4SMark Johnston 	last_target = target = 0;
97460684862SMark Johnston 	nfreed = 0;
975ebcddc72SAlan Cox 
976ebcddc72SAlan Cox 	/*
977b1fd102eSMark Johnston 	 * Calls to these handlers are serialized by the swap syscall lock.
978b1fd102eSMark Johnston 	 */
979e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
980b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
981e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
982b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
983b1fd102eSMark Johnston 
984b1fd102eSMark Johnston 	/*
985ebcddc72SAlan Cox 	 * The pageout laundry worker is never done, so loop forever.
986ebcddc72SAlan Cox 	 */
987ebcddc72SAlan Cox 	for (;;) {
988ebcddc72SAlan Cox 		KASSERT(target >= 0, ("negative target %d", target));
989ebcddc72SAlan Cox 		KASSERT(shortfall_cycle >= 0,
990ebcddc72SAlan Cox 		    ("negative cycle %d", shortfall_cycle));
991ebcddc72SAlan Cox 		launder = 0;
992ebcddc72SAlan Cox 
993ebcddc72SAlan Cox 		/*
994ebcddc72SAlan Cox 		 * First determine whether we need to launder pages to meet a
995ebcddc72SAlan Cox 		 * shortage of free pages.
996ebcddc72SAlan Cox 		 */
997ebcddc72SAlan Cox 		if (shortfall > 0) {
998ebcddc72SAlan Cox 			in_shortfall = true;
999ebcddc72SAlan Cox 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1000ebcddc72SAlan Cox 			target = shortfall;
1001ebcddc72SAlan Cox 		} else if (!in_shortfall)
1002ebcddc72SAlan Cox 			goto trybackground;
1003e2068d0bSJeff Roberson 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1004ebcddc72SAlan Cox 			/*
1005ebcddc72SAlan Cox 			 * We recently entered shortfall and began laundering
1006ebcddc72SAlan Cox 			 * pages.  If we have completed that laundering run
1007ebcddc72SAlan Cox 			 * (and we are no longer in shortfall) or we have met
1008ebcddc72SAlan Cox 			 * our laundry target through other activity, then we
1009ebcddc72SAlan Cox 			 * can stop laundering pages.
1010ebcddc72SAlan Cox 			 */
1011ebcddc72SAlan Cox 			in_shortfall = false;
1012ebcddc72SAlan Cox 			target = 0;
1013ebcddc72SAlan Cox 			goto trybackground;
1014ebcddc72SAlan Cox 		}
1015ebcddc72SAlan Cox 		launder = target / shortfall_cycle--;
1016ebcddc72SAlan Cox 		goto dolaundry;
1017ebcddc72SAlan Cox 
1018ebcddc72SAlan Cox 		/*
1019ebcddc72SAlan Cox 		 * There's no immediate need to launder any pages; see if we
1020ebcddc72SAlan Cox 		 * meet the conditions to perform background laundering:
1021ebcddc72SAlan Cox 		 *
1022ebcddc72SAlan Cox 		 * 1. The ratio of dirty to clean inactive pages exceeds the
102360684862SMark Johnston 		 *    background laundering threshold, or
1024ebcddc72SAlan Cox 		 * 2. we haven't yet reached the target of the current
1025ebcddc72SAlan Cox 		 *    background laundering run.
1026ebcddc72SAlan Cox 		 *
1027ebcddc72SAlan Cox 		 * The background laundering threshold is not a constant.
1028ebcddc72SAlan Cox 		 * Instead, it is a slowly growing function of the number of
102960684862SMark Johnston 		 * clean pages freed by the page daemon since the last
103060684862SMark Johnston 		 * background laundering.  Thus, as the ratio of dirty to
103160684862SMark Johnston 		 * clean inactive pages grows, the amount of memory pressure
1032c098768eSMark Johnston 		 * required to trigger laundering decreases.  We ensure
1033c098768eSMark Johnston 		 * that the threshold is non-zero after an inactive queue
1034c098768eSMark Johnston 		 * scan, even if that scan failed to free a single clean page.
1035ebcddc72SAlan Cox 		 */
1036ebcddc72SAlan Cox trybackground:
1037e2068d0bSJeff Roberson 		nclean = vmd->vmd_free_count +
1038e2068d0bSJeff Roberson 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1039e2068d0bSJeff Roberson 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1040c098768eSMark Johnston 		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1041c098768eSMark Johnston 		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1042e2068d0bSJeff Roberson 			target = vmd->vmd_background_launder_target;
1043ebcddc72SAlan Cox 		}
1044ebcddc72SAlan Cox 
1045ebcddc72SAlan Cox 		/*
1046ebcddc72SAlan Cox 		 * We have a non-zero background laundering target.  If we've
1047ebcddc72SAlan Cox 		 * laundered up to our maximum without observing a page daemon
1048cb35676eSMark Johnston 		 * request, just stop.  This is a safety belt that ensures we
1049ebcddc72SAlan Cox 		 * don't launder an excessive amount if memory pressure is low
1050ebcddc72SAlan Cox 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1051ebcddc72SAlan Cox 		 * proceed at the background laundering rate.
1052ebcddc72SAlan Cox 		 */
1053ebcddc72SAlan Cox 		if (target > 0) {
105460684862SMark Johnston 			if (nfreed > 0) {
105560684862SMark Johnston 				nfreed = 0;
1056ebcddc72SAlan Cox 				last_target = target;
1057ebcddc72SAlan Cox 			} else if (last_target - target >=
1058ebcddc72SAlan Cox 			    vm_background_launder_max * PAGE_SIZE / 1024) {
1059ebcddc72SAlan Cox 				target = 0;
1060ebcddc72SAlan Cox 			}
1061ebcddc72SAlan Cox 			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1062ebcddc72SAlan Cox 			launder /= VM_LAUNDER_RATE;
1063ebcddc72SAlan Cox 			if (launder > target)
1064ebcddc72SAlan Cox 				launder = target;
1065ebcddc72SAlan Cox 		}
1066ebcddc72SAlan Cox 
1067ebcddc72SAlan Cox dolaundry:
1068ebcddc72SAlan Cox 		if (launder > 0) {
1069ebcddc72SAlan Cox 			/*
1070ebcddc72SAlan Cox 			 * Because of I/O clustering, the number of laundered
1071ebcddc72SAlan Cox 			 * pages could exceed "target" by the maximum size of
1072ebcddc72SAlan Cox 			 * a cluster minus one.
1073ebcddc72SAlan Cox 			 */
1074e2068d0bSJeff Roberson 			target -= min(vm_pageout_launder(vmd, launder,
1075ebcddc72SAlan Cox 			    in_shortfall), target);
1076ebcddc72SAlan Cox 			pause("laundp", hz / VM_LAUNDER_RATE);
1077ebcddc72SAlan Cox 		}
1078ebcddc72SAlan Cox 
1079ebcddc72SAlan Cox 		/*
1080ebcddc72SAlan Cox 		 * If we're not currently laundering pages and the page daemon
1081ebcddc72SAlan Cox 		 * hasn't posted a new request, sleep until the page daemon
1082ebcddc72SAlan Cox 		 * kicks us.
1083ebcddc72SAlan Cox 		 */
1084ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1085e2068d0bSJeff Roberson 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1086e2068d0bSJeff Roberson 			(void)mtx_sleep(&vmd->vmd_laundry_request,
1087ebcddc72SAlan Cox 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1088ebcddc72SAlan Cox 
1089ebcddc72SAlan Cox 		/*
1090ebcddc72SAlan Cox 		 * If the pagedaemon has indicated that it's in shortfall, start
1091ebcddc72SAlan Cox 		 * a shortfall laundering unless we're already in the middle of
1092ebcddc72SAlan Cox 		 * one.  This may preempt a background laundering.
1093ebcddc72SAlan Cox 		 */
1094e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1095ebcddc72SAlan Cox 		    (!in_shortfall || shortfall_cycle == 0)) {
1096e2068d0bSJeff Roberson 			shortfall = vm_laundry_target(vmd) +
1097e2068d0bSJeff Roberson 			    vmd->vmd_pageout_deficit;
1098ebcddc72SAlan Cox 			target = 0;
1099ebcddc72SAlan Cox 		} else
1100ebcddc72SAlan Cox 			shortfall = 0;
1101ebcddc72SAlan Cox 
1102ebcddc72SAlan Cox 		if (target == 0)
1103e2068d0bSJeff Roberson 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
110460684862SMark Johnston 		nfreed += vmd->vmd_clean_pages_freed;
110560684862SMark Johnston 		vmd->vmd_clean_pages_freed = 0;
1106ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1107ebcddc72SAlan Cox 	}
1108ebcddc72SAlan Cox }
1109ebcddc72SAlan Cox 
1110be37ee79SMark Johnston /*
1111be37ee79SMark Johnston  * Compute the number of pages we want to try to move from the
1112be37ee79SMark Johnston  * active queue to either the inactive or laundry queue.
1113be37ee79SMark Johnston  *
11147bb4634eSMark Johnston  * When scanning active pages during a shortage, we make clean pages
11157bb4634eSMark Johnston  * count more heavily towards the page shortage than dirty pages.
11167bb4634eSMark Johnston  * This is because dirty pages must be laundered before they can be
11177bb4634eSMark Johnston  * reused and thus have less utility when attempting to quickly
11187bb4634eSMark Johnston  * alleviate a free page shortage.  However, this weighting also
11197bb4634eSMark Johnston  * causes the scan to deactivate dirty pages more aggressively,
11207bb4634eSMark Johnston  * improving the effectiveness of clustering.
1121be37ee79SMark Johnston  */
1122be37ee79SMark Johnston static int
11237bb4634eSMark Johnston vm_pageout_active_target(struct vm_domain *vmd)
1124be37ee79SMark Johnston {
1125be37ee79SMark Johnston 	int shortage;
1126be37ee79SMark Johnston 
1127be37ee79SMark Johnston 	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1128be37ee79SMark Johnston 	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1129be37ee79SMark Johnston 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1130be37ee79SMark Johnston 	shortage *= act_scan_laundry_weight;
1131be37ee79SMark Johnston 	return (shortage);
1132be37ee79SMark Johnston }
1133be37ee79SMark Johnston 
1134be37ee79SMark Johnston /*
1135be37ee79SMark Johnston  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1136be37ee79SMark Johnston  * small portion of the queue in order to maintain quasi-LRU.
1137be37ee79SMark Johnston  */
1138be37ee79SMark Johnston static void
1139be37ee79SMark Johnston vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1140be37ee79SMark Johnston {
1141be37ee79SMark Johnston 	struct scan_state ss;
1142fee2a2faSMark Johnston 	vm_object_t object;
1143be37ee79SMark Johnston 	vm_page_t m, marker;
1144*41fd4b94SMark Johnston 	vm_page_astate_t old, new;
1145be37ee79SMark Johnston 	struct vm_pagequeue *pq;
1146be37ee79SMark Johnston 	long min_scan;
1147*41fd4b94SMark Johnston 	int act_delta, max_scan, ps_delta, refs, scan_tick;
1148*41fd4b94SMark Johnston 	uint8_t nqueue;
1149be37ee79SMark Johnston 
1150be37ee79SMark Johnston 	marker = &vmd->vmd_markers[PQ_ACTIVE];
1151be37ee79SMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1152be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
1153be37ee79SMark Johnston 
1154be37ee79SMark Johnston 	/*
1155be37ee79SMark Johnston 	 * If we're just idle polling attempt to visit every
1156be37ee79SMark Johnston 	 * active page within 'update_period' seconds.
1157be37ee79SMark Johnston 	 */
1158be37ee79SMark Johnston 	scan_tick = ticks;
1159be37ee79SMark Johnston 	if (vm_pageout_update_period != 0) {
1160be37ee79SMark Johnston 		min_scan = pq->pq_cnt;
1161be37ee79SMark Johnston 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1162be37ee79SMark Johnston 		min_scan /= hz * vm_pageout_update_period;
1163be37ee79SMark Johnston 	} else
1164be37ee79SMark Johnston 		min_scan = 0;
1165be37ee79SMark Johnston 	if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1166be37ee79SMark Johnston 		vmd->vmd_last_active_scan = scan_tick;
1167be37ee79SMark Johnston 
1168be37ee79SMark Johnston 	/*
1169be37ee79SMark Johnston 	 * Scan the active queue for pages that can be deactivated.  Update
1170be37ee79SMark Johnston 	 * the per-page activity counter and use it to identify deactivation
1171be37ee79SMark Johnston 	 * candidates.  Held pages may be deactivated.
1172be37ee79SMark Johnston 	 *
1173be37ee79SMark Johnston 	 * To avoid requeuing each page that remains in the active queue, we
11747bb4634eSMark Johnston 	 * implement the CLOCK algorithm.  To keep the implementation of the
11757bb4634eSMark Johnston 	 * enqueue operation consistent for all page queues, we use two hands,
11767bb4634eSMark Johnston 	 * represented by marker pages. Scans begin at the first hand, which
11777bb4634eSMark Johnston 	 * precedes the second hand in the queue.  When the two hands meet,
11787bb4634eSMark Johnston 	 * they are moved back to the head and tail of the queue, respectively,
11797bb4634eSMark Johnston 	 * and scanning resumes.
1180be37ee79SMark Johnston 	 */
1181be37ee79SMark Johnston 	max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1182be37ee79SMark Johnston act_scan:
1183be37ee79SMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1184be37ee79SMark Johnston 	while ((m = vm_pageout_next(&ss, false)) != NULL) {
1185be37ee79SMark Johnston 		if (__predict_false(m == &vmd->vmd_clock[1])) {
1186be37ee79SMark Johnston 			vm_pagequeue_lock(pq);
1187be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1188be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1189be37ee79SMark Johnston 			TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1190be37ee79SMark Johnston 			    plinks.q);
1191be37ee79SMark Johnston 			TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1192be37ee79SMark Johnston 			    plinks.q);
1193be37ee79SMark Johnston 			max_scan -= ss.scanned;
1194be37ee79SMark Johnston 			vm_pageout_end_scan(&ss);
1195be37ee79SMark Johnston 			goto act_scan;
1196be37ee79SMark Johnston 		}
1197be37ee79SMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
1198be37ee79SMark Johnston 			continue;
1199be37ee79SMark Johnston 
1200fee2a2faSMark Johnston 		object = (vm_object_t)atomic_load_ptr(&m->object);
1201fee2a2faSMark Johnston 		if (__predict_false(object == NULL))
1202fee2a2faSMark Johnston 			/*
1203fee2a2faSMark Johnston 			 * The page has been removed from its object.
1204fee2a2faSMark Johnston 			 */
1205fee2a2faSMark Johnston 			continue;
1206fee2a2faSMark Johnston 
1207fee2a2faSMark Johnston 		/*
1208be37ee79SMark Johnston 		 * Check to see "how much" the page has been used.
1209d7aeb429SAlan Cox 		 *
1210d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1211d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
1212d7aeb429SAlan Cox 		 * observed here and now.
1213d7aeb429SAlan Cox 		 *
1214*41fd4b94SMark Johnston 		 * Perform an unsynchronized object ref count check.  While the
1215*41fd4b94SMark Johnston 		 * page lock ensures that the page is not reallocated to another
1216*41fd4b94SMark Johnston 		 * object, in particular, one with unmanaged mappings that
1217*41fd4b94SMark Johnston 		 * cannot support pmap_ts_referenced(), two races are,
1218be37ee79SMark Johnston 		 * nonetheless, possible:
1219*41fd4b94SMark Johnston 		 *
1220be37ee79SMark Johnston 		 * 1) The count was transitioning to zero, but we saw a non-
1221*41fd4b94SMark Johnston 		 *    zero value.  pmap_ts_referenced() will return zero because
1222*41fd4b94SMark Johnston 		 *    the page is not mapped.
1223*41fd4b94SMark Johnston 		 * 2) The count was transitioning to one, but we saw zero.  This
1224*41fd4b94SMark Johnston 		 *    race delays the detection of a new reference.  At worst,
1225*41fd4b94SMark Johnston 		 *    we will deactivate and reactivate the page.
1226be37ee79SMark Johnston 		 */
1227*41fd4b94SMark Johnston 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1228*41fd4b94SMark Johnston 
1229*41fd4b94SMark Johnston 		for (old = vm_page_astate_load(m);;) {
1230*41fd4b94SMark Johnston 			if (old.queue != PQ_ACTIVE ||
1231*41fd4b94SMark Johnston 			    (old.flags & PGA_ENQUEUED) == 0)
1232*41fd4b94SMark Johnston 				/*
1233*41fd4b94SMark Johnston 				 * Something has moved the page out of the
1234*41fd4b94SMark Johnston 				 * active queue.  Don't touch it.
1235*41fd4b94SMark Johnston 				 */
1236*41fd4b94SMark Johnston 				break;
1237*41fd4b94SMark Johnston 			if ((old.flags & PGA_DEQUEUE) != 0) {
1238*41fd4b94SMark Johnston 				vm_page_pqbatch_submit(m, PQ_ACTIVE);
1239*41fd4b94SMark Johnston 				break;
1240*41fd4b94SMark Johnston 			}
1241*41fd4b94SMark Johnston 
1242*41fd4b94SMark Johnston 			new = old;
1243*41fd4b94SMark Johnston 			act_delta = refs;
1244*41fd4b94SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
1245*41fd4b94SMark Johnston 				new.flags &= ~PGA_REFERENCED;
1246d7aeb429SAlan Cox 				act_delta++;
1247d7aeb429SAlan Cox 			}
1248be37ee79SMark Johnston 
1249be37ee79SMark Johnston 			/*
1250be37ee79SMark Johnston 			 * Advance or decay the act_count based on recent usage.
1251be37ee79SMark Johnston 			 */
1252be37ee79SMark Johnston 			if (act_delta != 0) {
1253*41fd4b94SMark Johnston 				new.act_count += ACT_ADVANCE + act_delta;
1254*41fd4b94SMark Johnston 				if (new.act_count > ACT_MAX)
1255*41fd4b94SMark Johnston 					new.act_count = ACT_MAX;
1256*41fd4b94SMark Johnston 			} else {
1257*41fd4b94SMark Johnston 				new.act_count -= min(new.act_count, ACT_DECLINE);
1258*41fd4b94SMark Johnston 			}
1259be37ee79SMark Johnston 
1260*41fd4b94SMark Johnston 			if (new.act_count > 0) {
1261be37ee79SMark Johnston 				/*
1262*41fd4b94SMark Johnston 				 * Adjust the activation count and keep the page
1263*41fd4b94SMark Johnston 				 * in the active queue.  The count might be left
1264*41fd4b94SMark Johnston 				 * unchanged if it is saturated.
1265be37ee79SMark Johnston 				 */
1266*41fd4b94SMark Johnston 				if (new.act_count == old.act_count ||
1267*41fd4b94SMark Johnston 				    vm_page_astate_fcmpset(m, &old, new))
1268*41fd4b94SMark Johnston 					break;
12697cdeaf33SMark Johnston 			} else {
1270be37ee79SMark Johnston 				/*
1271*41fd4b94SMark Johnston 				 * When not short for inactive pages, let dirty
1272*41fd4b94SMark Johnston 				 * pages go through the inactive queue before
1273*41fd4b94SMark Johnston 				 * moving to the laundry queues.  This gives
1274*41fd4b94SMark Johnston 				 * them some extra time to be reactivated,
1275*41fd4b94SMark Johnston 				 * potentially avoiding an expensive pageout.
1276*41fd4b94SMark Johnston 				 * However, during a page shortage, the inactive
1277*41fd4b94SMark Johnston 				 * queue is necessarily small, and so dirty
1278*41fd4b94SMark Johnston 				 * pages would only spend a trivial amount of
1279*41fd4b94SMark Johnston 				 * time in the inactive queue.  Therefore, we
1280*41fd4b94SMark Johnston 				 * might as well place them directly in the
1281*41fd4b94SMark Johnston 				 * laundry queue to reduce queuing overhead.
1282*41fd4b94SMark Johnston 				 *
1283be37ee79SMark Johnston 				 * Calling vm_page_test_dirty() here would
1284be37ee79SMark Johnston 				 * require acquisition of the object's write
1285be37ee79SMark Johnston 				 * lock.  However, during a page shortage,
1286*41fd4b94SMark Johnston 				 * directing dirty pages into the laundry queue
1287*41fd4b94SMark Johnston 				 * is only an optimization and not a
1288be37ee79SMark Johnston 				 * requirement.  Therefore, we simply rely on
1289*41fd4b94SMark Johnston 				 * the opportunistic updates to the page's dirty
1290*41fd4b94SMark Johnston 				 * field by the pmap.
1291be37ee79SMark Johnston 				 */
1292*41fd4b94SMark Johnston 				if (page_shortage <= 0) {
1293*41fd4b94SMark Johnston 					nqueue = PQ_INACTIVE;
1294*41fd4b94SMark Johnston 					ps_delta = 0;
1295*41fd4b94SMark Johnston 				} else if (m->dirty == 0) {
1296*41fd4b94SMark Johnston 					nqueue = PQ_INACTIVE;
1297*41fd4b94SMark Johnston 					ps_delta = act_scan_laundry_weight;
1298be37ee79SMark Johnston 				} else {
1299*41fd4b94SMark Johnston 					nqueue = PQ_LAUNDRY;
1300*41fd4b94SMark Johnston 					ps_delta = 1;
1301*41fd4b94SMark Johnston 				}
1302*41fd4b94SMark Johnston 
1303*41fd4b94SMark Johnston 				new.flags |= PGA_REQUEUE;
1304*41fd4b94SMark Johnston 				new.queue = nqueue;
1305*41fd4b94SMark Johnston 				if (vm_page_pqstate_commit(m, &old, new)) {
1306*41fd4b94SMark Johnston 					page_shortage -= ps_delta;
1307*41fd4b94SMark Johnston 					break;
1308be37ee79SMark Johnston 				}
1309be37ee79SMark Johnston 			}
1310be37ee79SMark Johnston 		}
1311be37ee79SMark Johnston 	}
1312be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
1313be37ee79SMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1314be37ee79SMark Johnston 	TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1315be37ee79SMark Johnston 	vm_pageout_end_scan(&ss);
1316be37ee79SMark Johnston 	vm_pagequeue_unlock(pq);
1317be37ee79SMark Johnston }
1318be37ee79SMark Johnston 
13195cd29d0fSMark Johnston static int
13205cd29d0fSMark Johnston vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m)
13215cd29d0fSMark Johnston {
13225cd29d0fSMark Johnston 	struct vm_domain *vmd;
1323*41fd4b94SMark Johnston 	vm_page_astate_t old, new;
13245cd29d0fSMark Johnston 
1325*41fd4b94SMark Johnston 	for (old = vm_page_astate_load(m);;) {
1326*41fd4b94SMark Johnston 		if (old.queue != PQ_INACTIVE ||
1327*41fd4b94SMark Johnston 		    (old.flags & (PGA_DEQUEUE | PGA_ENQUEUED)) != 0)
1328*41fd4b94SMark Johnston 			break;
1329*41fd4b94SMark Johnston 
1330*41fd4b94SMark Johnston 		new = old;
1331*41fd4b94SMark Johnston 		new.flags |= PGA_ENQUEUED;
1332*41fd4b94SMark Johnston 		new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
1333*41fd4b94SMark Johnston 		if (!vm_page_astate_fcmpset(m, &old, new))
1334*41fd4b94SMark Johnston 			continue;
1335*41fd4b94SMark Johnston 
1336*41fd4b94SMark Johnston 		if ((old.flags & PGA_REQUEUE_HEAD) != 0) {
13375cd29d0fSMark Johnston 			vmd = vm_pagequeue_domain(m);
13385cd29d0fSMark Johnston 			TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
1339*41fd4b94SMark Johnston 		} else if ((old.flags & PGA_REQUEUE) != 0) {
13405cd29d0fSMark Johnston 			TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
1341*41fd4b94SMark Johnston 		} else {
13425cd29d0fSMark Johnston 			TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q);
1343*41fd4b94SMark Johnston 		}
13445cd29d0fSMark Johnston 		return (1);
13455cd29d0fSMark Johnston 	}
1346*41fd4b94SMark Johnston 	return (0);
1347*41fd4b94SMark Johnston }
13485cd29d0fSMark Johnston 
13495cd29d0fSMark Johnston /*
13505cd29d0fSMark Johnston  * Re-add stuck pages to the inactive queue.  We will examine them again
13515cd29d0fSMark Johnston  * during the next scan.  If the queue state of a page has changed since
13525cd29d0fSMark Johnston  * it was physically removed from the page queue in
13535cd29d0fSMark Johnston  * vm_pageout_collect_batch(), don't do anything with that page.
13545cd29d0fSMark Johnston  */
13555cd29d0fSMark Johnston static void
13565cd29d0fSMark Johnston vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
13575cd29d0fSMark Johnston     vm_page_t m)
13585cd29d0fSMark Johnston {
13595cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
13605cd29d0fSMark Johnston 	int delta;
13615cd29d0fSMark Johnston 
13625cd29d0fSMark Johnston 	delta = 0;
13635cd29d0fSMark Johnston 	pq = ss->pq;
13645cd29d0fSMark Johnston 
13655cd29d0fSMark Johnston 	if (m != NULL) {
13665cd29d0fSMark Johnston 		if (vm_batchqueue_insert(bq, m))
13675cd29d0fSMark Johnston 			return;
13685cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
13695cd29d0fSMark Johnston 		delta += vm_pageout_reinsert_inactive_page(ss, m);
13705cd29d0fSMark Johnston 	} else
13715cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
13725cd29d0fSMark Johnston 	while ((m = vm_batchqueue_pop(bq)) != NULL)
13735cd29d0fSMark Johnston 		delta += vm_pageout_reinsert_inactive_page(ss, m);
13745cd29d0fSMark Johnston 	vm_pagequeue_cnt_add(pq, delta);
13755cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
13765cd29d0fSMark Johnston 	vm_batchqueue_init(bq);
13775cd29d0fSMark Johnston }
13785cd29d0fSMark Johnston 
1379ebcddc72SAlan Cox /*
138027e29d10SMark Johnston  * Attempt to reclaim the requested number of pages from the inactive queue.
138127e29d10SMark Johnston  * Returns true if the shortage was addressed.
1382df8bae1dSRodney W. Grimes  */
1383be37ee79SMark Johnston static int
138449a3710cSMark Johnston vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
1385be37ee79SMark Johnston     int *addl_shortage)
1386df8bae1dSRodney W. Grimes {
13875cd29d0fSMark Johnston 	struct scan_state ss;
13885cd29d0fSMark Johnston 	struct vm_batchqueue rq;
13895cd29d0fSMark Johnston 	vm_page_t m, marker;
1390*41fd4b94SMark Johnston 	vm_page_astate_t old, new;
13918d220203SAlan Cox 	struct vm_pagequeue *pq;
1392df8bae1dSRodney W. Grimes 	vm_object_t object;
1393*41fd4b94SMark Johnston 	int act_delta, addl_page_shortage, deficit, page_shortage, refs;
1394be37ee79SMark Johnston 	int starting_page_shortage;
13950d94caffSDavid Greenman 
1396df8bae1dSRodney W. Grimes 	/*
139701f04471SMark Johnston 	 * The addl_page_shortage is an estimate of the number of temporarily
1398311e34e2SKonstantin Belousov 	 * stuck pages in the inactive queue.  In other words, the
1399449c2e92SKonstantin Belousov 	 * number of pages from the inactive count that should be
1400311e34e2SKonstantin Belousov 	 * discounted in setting the target for the active queue scan.
1401311e34e2SKonstantin Belousov 	 */
14029099545aSAlan Cox 	addl_page_shortage = 0;
14039099545aSAlan Cox 
14041c7c3c6aSMatthew Dillon 	/*
140549a3710cSMark Johnston 	 * vmd_pageout_deficit counts the number of pages requested in
140649a3710cSMark Johnston 	 * allocations that failed because of a free page shortage.  We assume
140749a3710cSMark Johnston 	 * that the allocations will be reattempted and thus include the deficit
140849a3710cSMark Johnston 	 * in our scan target.
14091c7c3c6aSMatthew Dillon 	 */
1410e2068d0bSJeff Roberson 	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
141149a3710cSMark Johnston 	starting_page_shortage = page_shortage = shortage + deficit;
14121c7c3c6aSMatthew Dillon 
14135cd29d0fSMark Johnston 	object = NULL;
14145cd29d0fSMark Johnston 	vm_batchqueue_init(&rq);
14155cd29d0fSMark Johnston 
1416936524aaSMatthew Dillon 	/*
1417f095d1bbSAlan Cox 	 * Start scanning the inactive queue for pages that we can free.  The
1418f095d1bbSAlan Cox 	 * scan will stop when we reach the target or we have scanned the
1419f095d1bbSAlan Cox 	 * entire queue.  (Note that m->act_count is not used to make
1420f095d1bbSAlan Cox 	 * decisions for the inactive queue, only for the active queue.)
14218d220203SAlan Cox 	 */
142264b38930SMark Johnston 	marker = &vmd->vmd_markers[PQ_INACTIVE];
14235cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
14248d220203SAlan Cox 	vm_pagequeue_lock(pq);
14255cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
14265cd29d0fSMark Johnston 	while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
14275cd29d0fSMark Johnston 		KASSERT((m->flags & PG_MARKER) == 0,
14285cd29d0fSMark Johnston 		    ("marker page %p was dequeued", m));
1429df8bae1dSRodney W. Grimes 
1430936524aaSMatthew Dillon 		/*
1431*41fd4b94SMark Johnston 		 * Perform some quick and racy checks of the page's queue state.
1432*41fd4b94SMark Johnston 		 * Bail if things are not as we expect.
1433936524aaSMatthew Dillon 		 */
1434*41fd4b94SMark Johnston 		old = vm_page_astate_load(m);
1435*41fd4b94SMark Johnston 		if (old.queue != PQ_INACTIVE || (old.flags & PGA_ENQUEUED) != 0)
1436936524aaSMatthew Dillon 			continue;
1437*41fd4b94SMark Johnston 		if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
1438*41fd4b94SMark Johnston 			vm_page_pqbatch_submit(m, PQ_INACTIVE);
14395cd29d0fSMark Johnston 			continue;
14405cd29d0fSMark Johnston 		}
14415cd29d0fSMark Johnston 
14425cd29d0fSMark Johnston 		if (object != m->object) {
144360256604SMark Johnston 			if (object != NULL)
14445cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
1445fee2a2faSMark Johnston 			object = (vm_object_t)atomic_load_ptr(&m->object);
1446*41fd4b94SMark Johnston 			if (object == NULL)
1447fee2a2faSMark Johnston 				continue;
1448*41fd4b94SMark Johnston 			VM_OBJECT_WLOCK(object);
1449*41fd4b94SMark Johnston 			if (m->object != object) {
1450*41fd4b94SMark Johnston 				VM_OBJECT_WUNLOCK(object);
1451*41fd4b94SMark Johnston 				object = NULL;
1452*41fd4b94SMark Johnston 				goto reinsert;
1453*41fd4b94SMark Johnston 			}
1454*41fd4b94SMark Johnston 		}
14555cd29d0fSMark Johnston 
1456a3aeedabSAlan Cox 		if (vm_page_busied(m)) {
1457a3aeedabSAlan Cox 			/*
1458a3aeedabSAlan Cox 			 * Don't mess with busy pages.  Leave them at
1459a3aeedabSAlan Cox 			 * the front of the queue.  Most likely, they
1460a3aeedabSAlan Cox 			 * are being paged out and will leave the
1461a3aeedabSAlan Cox 			 * queue shortly after the scan finishes.  So,
1462a3aeedabSAlan Cox 			 * they ought to be discounted from the
1463a3aeedabSAlan Cox 			 * inactive count.
1464a3aeedabSAlan Cox 			 */
1465a3aeedabSAlan Cox 			addl_page_shortage++;
14665cd29d0fSMark Johnston 			goto reinsert;
146726f9a767SRodney W. Grimes 		}
146848cc2fc7SKonstantin Belousov 
146948cc2fc7SKonstantin Belousov 		/*
1470*41fd4b94SMark Johnston 		 * Check for wirings now that we hold the object lock and have
1471*41fd4b94SMark Johnston 		 * verified that the page is unbusied.  If the page is mapped,
1472*41fd4b94SMark Johnston 		 * it may still be wired by pmap lookups.  The call to
1473fee2a2faSMark Johnston 		 * vm_page_try_remove_all() below atomically checks for such
1474fee2a2faSMark Johnston 		 * wirings and removes mappings.  If the page is unmapped, the
1475fee2a2faSMark Johnston 		 * wire count is guaranteed not to increase.
1476fee2a2faSMark Johnston 		 */
1477fee2a2faSMark Johnston 		if (__predict_false(vm_page_wired(m))) {
1478*41fd4b94SMark Johnston 			vm_page_pqbatch_submit(m, PQ_INACTIVE);
1479fee2a2faSMark Johnston 			continue;
1480fee2a2faSMark Johnston 		}
1481fee2a2faSMark Johnston 
1482fee2a2faSMark Johnston 		/*
14838748f58cSKonstantin Belousov 		 * Invalid pages can be easily freed. They cannot be
14848748f58cSKonstantin Belousov 		 * mapped, vm_page_free() asserts this.
1485776f729cSKonstantin Belousov 		 */
14868748f58cSKonstantin Belousov 		if (m->valid == 0)
14878748f58cSKonstantin Belousov 			goto free_page;
1488776f729cSKonstantin Belousov 
1489776f729cSKonstantin Belousov 		/*
1490960810ccSAlan Cox 		 * If the page has been referenced and the object is not dead,
1491960810ccSAlan Cox 		 * reactivate or requeue the page depending on whether the
1492960810ccSAlan Cox 		 * object is mapped.
1493d7aeb429SAlan Cox 		 *
1494d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1495d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
1496d7aeb429SAlan Cox 		 * observed here and now.
14977e006499SJohn Dyson 		 */
1498*41fd4b94SMark Johnston 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1499*41fd4b94SMark Johnston 
1500*41fd4b94SMark Johnston 		for (old = vm_page_astate_load(m);;) {
1501*41fd4b94SMark Johnston 			if (old.queue != PQ_INACTIVE ||
1502*41fd4b94SMark Johnston 			    (old.flags & PGA_ENQUEUED) != 0)
1503*41fd4b94SMark Johnston 				goto next_page;
1504*41fd4b94SMark Johnston 
1505*41fd4b94SMark Johnston 			if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
1506*41fd4b94SMark Johnston 				vm_page_pqbatch_submit(m, PQ_INACTIVE);
1507*41fd4b94SMark Johnston 				goto next_page;
1508d7aeb429SAlan Cox 			}
1509*41fd4b94SMark Johnston 
1510*41fd4b94SMark Johnston 			new = old;
1511*41fd4b94SMark Johnston 			act_delta = refs;
1512*41fd4b94SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
1513*41fd4b94SMark Johnston 				new.flags &= ~PGA_REFERENCED;
1514d7aeb429SAlan Cox 				act_delta++;
15152fe6e4d7SDavid Greenman 			}
1516bb7858eaSJeff Roberson 			if (act_delta != 0) {
151786fa2471SAlan Cox 				if (object->ref_count != 0) {
1518960810ccSAlan Cox 					/*
1519*41fd4b94SMark Johnston 					 * Increase the activation count if the
1520*41fd4b94SMark Johnston 					 * page was referenced while in the
1521*41fd4b94SMark Johnston 					 * inactive queue.  This makes it less
1522*41fd4b94SMark Johnston 					 * likely that the page will be returned
1523*41fd4b94SMark Johnston 					 * prematurely to the inactive queue.
1524960810ccSAlan Cox 					 */
1525*41fd4b94SMark Johnston 					new.act_count += ACT_ADVANCE +
1526*41fd4b94SMark Johnston 					    act_delta;
1527*41fd4b94SMark Johnston 					if (new.act_count > ACT_MAX)
1528*41fd4b94SMark Johnston 						new.act_count = ACT_MAX;
1529*41fd4b94SMark Johnston 
1530*41fd4b94SMark Johnston 					new.flags |= PGA_REQUEUE;
1531*41fd4b94SMark Johnston 					new.queue = PQ_ACTIVE;
1532*41fd4b94SMark Johnston 					if (!vm_page_pqstate_commit(m, &old,
1533*41fd4b94SMark Johnston 					    new))
15345cd29d0fSMark Johnston 						continue;
1535*41fd4b94SMark Johnston 
1536*41fd4b94SMark Johnston 					VM_CNT_INC(v_reactivated);
1537*41fd4b94SMark Johnston 					goto next_page;
1538ebcddc72SAlan Cox 				} else if ((object->flags & OBJ_DEAD) == 0) {
15395cd29d0fSMark Johnston 					vm_page_aflag_set(m, PGA_REQUEUE);
15405cd29d0fSMark Johnston 					goto reinsert;
1541ebcddc72SAlan Cox 				}
1542960810ccSAlan Cox 			}
1543*41fd4b94SMark Johnston 			break;
1544*41fd4b94SMark Johnston 		}
154567bf6868SJohn Dyson 
15467e006499SJohn Dyson 		/*
15479fc4739dSAlan Cox 		 * If the page appears to be clean at the machine-independent
15489fc4739dSAlan Cox 		 * layer, then remove all of its mappings from the pmap in
1549a766ffd0SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
1550a766ffd0SAlan Cox 		 * mappings allow write access, then the page may still be
1551a766ffd0SAlan Cox 		 * modified until the last of those mappings are removed.
15527e006499SJohn Dyson 		 */
1553aa044135SAlan Cox 		if (object->ref_count != 0) {
15549fc4739dSAlan Cox 			vm_page_test_dirty(m);
1555fee2a2faSMark Johnston 			if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
1556*41fd4b94SMark Johnston 				vm_page_pqbatch_submit(m, PQ_INACTIVE);
1557fee2a2faSMark Johnston 				continue;
1558fee2a2faSMark Johnston 			}
1559aa044135SAlan Cox 		}
1560dcbcd518SBruce Evans 
15616989c456SAlan Cox 		/*
1562ebcddc72SAlan Cox 		 * Clean pages can be freed, but dirty pages must be sent back
1563ebcddc72SAlan Cox 		 * to the laundry, unless they belong to a dead object.
1564ebcddc72SAlan Cox 		 * Requeueing dirty pages from dead objects is pointless, as
1565ebcddc72SAlan Cox 		 * they are being paged out and freed by the thread that
1566ebcddc72SAlan Cox 		 * destroyed the object.
15676989c456SAlan Cox 		 */
1568ebcddc72SAlan Cox 		if (m->dirty == 0) {
15698748f58cSKonstantin Belousov free_page:
1570*41fd4b94SMark Johnston 			/* XXX comment */
1571*41fd4b94SMark Johnston 			old = vm_page_astate_load(m);
1572*41fd4b94SMark Johnston 			if (old.queue != PQ_INACTIVE ||
1573*41fd4b94SMark Johnston 			    (old.flags & PGA_QUEUE_STATE_MASK) != 0) {
1574*41fd4b94SMark Johnston 				vm_page_pqbatch_submit(m, PQ_INACTIVE);
1575*41fd4b94SMark Johnston 				goto next_page;
1576*41fd4b94SMark Johnston 			}
1577*41fd4b94SMark Johnston 
15785cd29d0fSMark Johnston 			/*
15795cd29d0fSMark Johnston 			 * Because we dequeued the page and have already
15805cd29d0fSMark Johnston 			 * checked for concurrent dequeue and enqueue
15815cd29d0fSMark Johnston 			 * requests, we can safely disassociate the page
15825cd29d0fSMark Johnston 			 * from the inactive queue.
15835cd29d0fSMark Johnston 			 */
1584*41fd4b94SMark Johnston 			m->astate.queue = PQ_NONE;
158578afdce6SAlan Cox 			vm_page_free(m);
15865cd29d0fSMark Johnston 			page_shortage--;
1587ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0)
1588ebcddc72SAlan Cox 			vm_page_launder(m);
1589*41fd4b94SMark Johnston next_page:
15905cd29d0fSMark Johnston 		continue;
15915cd29d0fSMark Johnston reinsert:
15925cd29d0fSMark Johnston 		vm_pageout_reinsert_inactive(&ss, &rq, m);
15935cd29d0fSMark Johnston 	}
159460256604SMark Johnston 	if (object != NULL)
159589f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
15965cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &rq, NULL);
15975cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
15988d220203SAlan Cox 	vm_pagequeue_lock(pq);
15995cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
16008d220203SAlan Cox 	vm_pagequeue_unlock(pq);
160126f9a767SRodney W. Grimes 
16025cd29d0fSMark Johnston 	VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage);
16035cd29d0fSMark Johnston 
1604ebcddc72SAlan Cox 	/*
1605ebcddc72SAlan Cox 	 * Wake up the laundry thread so that it can perform any needed
1606ebcddc72SAlan Cox 	 * laundering.  If we didn't meet our target, we're in shortfall and
1607b1fd102eSMark Johnston 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1608b1fd102eSMark Johnston 	 * swap devices are configured, the laundry thread has no work to do, so
1609b1fd102eSMark Johnston 	 * don't bother waking it up.
1610cb35676eSMark Johnston 	 *
1611cb35676eSMark Johnston 	 * The laundry thread uses the number of inactive queue scans elapsed
1612cb35676eSMark Johnston 	 * since the last laundering to determine whether to launder again, so
1613cb35676eSMark Johnston 	 * keep count.
1614ebcddc72SAlan Cox 	 */
1615cb35676eSMark Johnston 	if (starting_page_shortage > 0) {
1616e2068d0bSJeff Roberson 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1617ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1618e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1619cb35676eSMark Johnston 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1620ebcddc72SAlan Cox 			if (page_shortage > 0) {
1621e2068d0bSJeff Roberson 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
162283c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdshortfalls);
1623e2068d0bSJeff Roberson 			} else if (vmd->vmd_laundry_request !=
1624e2068d0bSJeff Roberson 			    VM_LAUNDRY_SHORTFALL)
1625e2068d0bSJeff Roberson 				vmd->vmd_laundry_request =
1626e2068d0bSJeff Roberson 				    VM_LAUNDRY_BACKGROUND;
1627e2068d0bSJeff Roberson 			wakeup(&vmd->vmd_laundry_request);
1628b1fd102eSMark Johnston 		}
162960684862SMark Johnston 		vmd->vmd_clean_pages_freed +=
163060684862SMark Johnston 		    starting_page_shortage - page_shortage;
1631ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1632ebcddc72SAlan Cox 	}
1633ebcddc72SAlan Cox 
16349452b5edSAlan Cox 	/*
1635f095d1bbSAlan Cox 	 * Wakeup the swapout daemon if we didn't free the targeted number of
1636f095d1bbSAlan Cox 	 * pages.
16379452b5edSAlan Cox 	 */
1638ac04195bSKonstantin Belousov 	if (page_shortage > 0)
1639ac04195bSKonstantin Belousov 		vm_swapout_run();
16409452b5edSAlan Cox 
16419452b5edSAlan Cox 	/*
164276386c7eSKonstantin Belousov 	 * If the inactive queue scan fails repeatedly to meet its
164376386c7eSKonstantin Belousov 	 * target, kill the largest process.
164476386c7eSKonstantin Belousov 	 */
164576386c7eSKonstantin Belousov 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
164676386c7eSKonstantin Belousov 
164776386c7eSKonstantin Belousov 	/*
1648be37ee79SMark Johnston 	 * Reclaim pages by swapping out idle processes, if configured to do so.
16491c7c3c6aSMatthew Dillon 	 */
1650ac04195bSKonstantin Belousov 	vm_swapout_run_idle();
1651be37ee79SMark Johnston 
1652be37ee79SMark Johnston 	/*
1653be37ee79SMark Johnston 	 * See the description of addl_page_shortage above.
1654be37ee79SMark Johnston 	 */
1655be37ee79SMark Johnston 	*addl_shortage = addl_page_shortage + deficit;
1656be37ee79SMark Johnston 
1657e57dd910SAlan Cox 	return (page_shortage <= 0);
16582025d69bSKonstantin Belousov }
16592025d69bSKonstantin Belousov 
1660449c2e92SKonstantin Belousov static int vm_pageout_oom_vote;
1661449c2e92SKonstantin Belousov 
1662449c2e92SKonstantin Belousov /*
1663449c2e92SKonstantin Belousov  * The pagedaemon threads randlomly select one to perform the
1664449c2e92SKonstantin Belousov  * OOM.  Trying to kill processes before all pagedaemons
1665449c2e92SKonstantin Belousov  * failed to reach free target is premature.
1666449c2e92SKonstantin Belousov  */
1667449c2e92SKonstantin Belousov static void
166876386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
166976386c7eSKonstantin Belousov     int starting_page_shortage)
1670449c2e92SKonstantin Belousov {
1671449c2e92SKonstantin Belousov 	int old_vote;
1672449c2e92SKonstantin Belousov 
167376386c7eSKonstantin Belousov 	if (starting_page_shortage <= 0 || starting_page_shortage !=
167476386c7eSKonstantin Belousov 	    page_shortage)
167576386c7eSKonstantin Belousov 		vmd->vmd_oom_seq = 0;
167676386c7eSKonstantin Belousov 	else
167776386c7eSKonstantin Belousov 		vmd->vmd_oom_seq++;
167876386c7eSKonstantin Belousov 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1679449c2e92SKonstantin Belousov 		if (vmd->vmd_oom) {
1680449c2e92SKonstantin Belousov 			vmd->vmd_oom = FALSE;
1681449c2e92SKonstantin Belousov 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1682449c2e92SKonstantin Belousov 		}
1683449c2e92SKonstantin Belousov 		return;
1684449c2e92SKonstantin Belousov 	}
1685449c2e92SKonstantin Belousov 
168676386c7eSKonstantin Belousov 	/*
168776386c7eSKonstantin Belousov 	 * Do not follow the call sequence until OOM condition is
168876386c7eSKonstantin Belousov 	 * cleared.
168976386c7eSKonstantin Belousov 	 */
169076386c7eSKonstantin Belousov 	vmd->vmd_oom_seq = 0;
169176386c7eSKonstantin Belousov 
1692449c2e92SKonstantin Belousov 	if (vmd->vmd_oom)
1693449c2e92SKonstantin Belousov 		return;
1694449c2e92SKonstantin Belousov 
1695449c2e92SKonstantin Belousov 	vmd->vmd_oom = TRUE;
1696449c2e92SKonstantin Belousov 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1697449c2e92SKonstantin Belousov 	if (old_vote != vm_ndomains - 1)
1698449c2e92SKonstantin Belousov 		return;
1699449c2e92SKonstantin Belousov 
1700449c2e92SKonstantin Belousov 	/*
1701449c2e92SKonstantin Belousov 	 * The current pagedaemon thread is the last in the quorum to
1702449c2e92SKonstantin Belousov 	 * start OOM.  Initiate the selection and signaling of the
1703449c2e92SKonstantin Belousov 	 * victim.
1704449c2e92SKonstantin Belousov 	 */
1705449c2e92SKonstantin Belousov 	vm_pageout_oom(VM_OOM_MEM);
1706449c2e92SKonstantin Belousov 
1707449c2e92SKonstantin Belousov 	/*
1708449c2e92SKonstantin Belousov 	 * After one round of OOM terror, recall our vote.  On the
1709449c2e92SKonstantin Belousov 	 * next pass, current pagedaemon would vote again if the low
1710449c2e92SKonstantin Belousov 	 * memory condition is still there, due to vmd_oom being
1711449c2e92SKonstantin Belousov 	 * false.
1712449c2e92SKonstantin Belousov 	 */
1713449c2e92SKonstantin Belousov 	vmd->vmd_oom = FALSE;
1714449c2e92SKonstantin Belousov 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1715449c2e92SKonstantin Belousov }
17162025d69bSKonstantin Belousov 
17173949873fSKonstantin Belousov /*
17183949873fSKonstantin Belousov  * The OOM killer is the page daemon's action of last resort when
17193949873fSKonstantin Belousov  * memory allocation requests have been stalled for a prolonged period
17203949873fSKonstantin Belousov  * of time because it cannot reclaim memory.  This function computes
17213949873fSKonstantin Belousov  * the approximate number of physical pages that could be reclaimed if
17223949873fSKonstantin Belousov  * the specified address space is destroyed.
17233949873fSKonstantin Belousov  *
17243949873fSKonstantin Belousov  * Private, anonymous memory owned by the address space is the
17253949873fSKonstantin Belousov  * principal resource that we expect to recover after an OOM kill.
17263949873fSKonstantin Belousov  * Since the physical pages mapped by the address space's COW entries
17273949873fSKonstantin Belousov  * are typically shared pages, they are unlikely to be released and so
17283949873fSKonstantin Belousov  * they are not counted.
17293949873fSKonstantin Belousov  *
17303949873fSKonstantin Belousov  * To get to the point where the page daemon runs the OOM killer, its
17313949873fSKonstantin Belousov  * efforts to write-back vnode-backed pages may have stalled.  This
17323949873fSKonstantin Belousov  * could be caused by a memory allocation deadlock in the write path
17333949873fSKonstantin Belousov  * that might be resolved by an OOM kill.  Therefore, physical pages
17343949873fSKonstantin Belousov  * belonging to vnode-backed objects are counted, because they might
17353949873fSKonstantin Belousov  * be freed without being written out first if the address space holds
17363949873fSKonstantin Belousov  * the last reference to an unlinked vnode.
17373949873fSKonstantin Belousov  *
17383949873fSKonstantin Belousov  * Similarly, physical pages belonging to OBJT_PHYS objects are
17393949873fSKonstantin Belousov  * counted because the address space might hold the last reference to
17403949873fSKonstantin Belousov  * the object.
17413949873fSKonstantin Belousov  */
17423949873fSKonstantin Belousov static long
17433949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace)
17443949873fSKonstantin Belousov {
17453949873fSKonstantin Belousov 	vm_map_t map;
17463949873fSKonstantin Belousov 	vm_map_entry_t entry;
17473949873fSKonstantin Belousov 	vm_object_t obj;
17483949873fSKonstantin Belousov 	long res;
17493949873fSKonstantin Belousov 
17503949873fSKonstantin Belousov 	map = &vmspace->vm_map;
17513949873fSKonstantin Belousov 	KASSERT(!map->system_map, ("system map"));
17523949873fSKonstantin Belousov 	sx_assert(&map->lock, SA_LOCKED);
17533949873fSKonstantin Belousov 	res = 0;
17543949873fSKonstantin Belousov 	for (entry = map->header.next; entry != &map->header;
17553949873fSKonstantin Belousov 	    entry = entry->next) {
17563949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
17573949873fSKonstantin Belousov 			continue;
17583949873fSKonstantin Belousov 		obj = entry->object.vm_object;
17593949873fSKonstantin Belousov 		if (obj == NULL)
17603949873fSKonstantin Belousov 			continue;
17613949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
17623949873fSKonstantin Belousov 		    obj->ref_count != 1)
17633949873fSKonstantin Belousov 			continue;
17643949873fSKonstantin Belousov 		switch (obj->type) {
17653949873fSKonstantin Belousov 		case OBJT_DEFAULT:
17663949873fSKonstantin Belousov 		case OBJT_SWAP:
17673949873fSKonstantin Belousov 		case OBJT_PHYS:
17683949873fSKonstantin Belousov 		case OBJT_VNODE:
17693949873fSKonstantin Belousov 			res += obj->resident_page_count;
17703949873fSKonstantin Belousov 			break;
17713949873fSKonstantin Belousov 		}
17723949873fSKonstantin Belousov 	}
17733949873fSKonstantin Belousov 	return (res);
17743949873fSKonstantin Belousov }
17753949873fSKonstantin Belousov 
1776245139c6SKonstantin Belousov static int vm_oom_ratelim_last;
1777245139c6SKonstantin Belousov static int vm_oom_pf_secs = 10;
1778245139c6SKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1779245139c6SKonstantin Belousov     "");
1780245139c6SKonstantin Belousov static struct mtx vm_oom_ratelim_mtx;
1781245139c6SKonstantin Belousov 
17822025d69bSKonstantin Belousov void
17832025d69bSKonstantin Belousov vm_pageout_oom(int shortage)
17842025d69bSKonstantin Belousov {
17852025d69bSKonstantin Belousov 	struct proc *p, *bigproc;
17862025d69bSKonstantin Belousov 	vm_offset_t size, bigsize;
17872025d69bSKonstantin Belousov 	struct thread *td;
17886bed074cSKonstantin Belousov 	struct vmspace *vm;
1789245139c6SKonstantin Belousov 	int now;
17903e78e983SAlan Cox 	bool breakout;
17912025d69bSKonstantin Belousov 
17922025d69bSKonstantin Belousov 	/*
1793245139c6SKonstantin Belousov 	 * For OOM requests originating from vm_fault(), there is a high
1794245139c6SKonstantin Belousov 	 * chance that a single large process faults simultaneously in
1795245139c6SKonstantin Belousov 	 * several threads.  Also, on an active system running many
1796245139c6SKonstantin Belousov 	 * processes of middle-size, like buildworld, all of them
1797245139c6SKonstantin Belousov 	 * could fault almost simultaneously as well.
1798245139c6SKonstantin Belousov 	 *
1799245139c6SKonstantin Belousov 	 * To avoid killing too many processes, rate-limit OOMs
1800245139c6SKonstantin Belousov 	 * initiated by vm_fault() time-outs on the waits for free
1801245139c6SKonstantin Belousov 	 * pages.
1802245139c6SKonstantin Belousov 	 */
1803245139c6SKonstantin Belousov 	mtx_lock(&vm_oom_ratelim_mtx);
1804245139c6SKonstantin Belousov 	now = ticks;
1805245139c6SKonstantin Belousov 	if (shortage == VM_OOM_MEM_PF &&
1806245139c6SKonstantin Belousov 	    (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1807245139c6SKonstantin Belousov 		mtx_unlock(&vm_oom_ratelim_mtx);
1808245139c6SKonstantin Belousov 		return;
1809245139c6SKonstantin Belousov 	}
1810245139c6SKonstantin Belousov 	vm_oom_ratelim_last = now;
1811245139c6SKonstantin Belousov 	mtx_unlock(&vm_oom_ratelim_mtx);
1812245139c6SKonstantin Belousov 
1813245139c6SKonstantin Belousov 	/*
18141c58e4e5SJohn Baldwin 	 * We keep the process bigproc locked once we find it to keep anyone
18151c58e4e5SJohn Baldwin 	 * from messing with it; however, there is a possibility of
181628323addSBryan Drewery 	 * deadlock if process B is bigproc and one of its child processes
18171c58e4e5SJohn Baldwin 	 * attempts to propagate a signal to B while we are waiting for A's
18181c58e4e5SJohn Baldwin 	 * lock while walking this list.  To avoid this, we don't block on
18191c58e4e5SJohn Baldwin 	 * the process lock but just skip a process if it is already locked.
18205663e6deSDavid Greenman 	 */
18215663e6deSDavid Greenman 	bigproc = NULL;
18225663e6deSDavid Greenman 	bigsize = 0;
18231005a129SJohn Baldwin 	sx_slock(&allproc_lock);
1824e602ba25SJulian Elischer 	FOREACH_PROC_IN_SYSTEM(p) {
182571943c3dSKonstantin Belousov 		PROC_LOCK(p);
182671943c3dSKonstantin Belousov 
18271c58e4e5SJohn Baldwin 		/*
18283f1c4c4fSKonstantin Belousov 		 * If this is a system, protected or killed process, skip it.
18295663e6deSDavid Greenman 		 */
183071943c3dSKonstantin Belousov 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
183171943c3dSKonstantin Belousov 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
183271943c3dSKonstantin Belousov 		    p->p_pid == 1 || P_KILLED(p) ||
183371943c3dSKonstantin Belousov 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
18348606d880SJohn Baldwin 			PROC_UNLOCK(p);
18355663e6deSDavid Greenman 			continue;
18365663e6deSDavid Greenman 		}
18375663e6deSDavid Greenman 		/*
1838dcbcd518SBruce Evans 		 * If the process is in a non-running type state,
1839e602ba25SJulian Elischer 		 * don't touch it.  Check all the threads individually.
18405663e6deSDavid Greenman 		 */
18413e78e983SAlan Cox 		breakout = false;
1842e602ba25SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
1843982d11f8SJeff Roberson 			thread_lock(td);
184471fad9fdSJulian Elischer 			if (!TD_ON_RUNQ(td) &&
184571fad9fdSJulian Elischer 			    !TD_IS_RUNNING(td) &&
1846f497cda2SEdward Tomasz Napierala 			    !TD_IS_SLEEPING(td) &&
1847b98acc0aSKonstantin Belousov 			    !TD_IS_SUSPENDED(td) &&
1848b98acc0aSKonstantin Belousov 			    !TD_IS_SWAPPED(td)) {
1849982d11f8SJeff Roberson 				thread_unlock(td);
18503e78e983SAlan Cox 				breakout = true;
1851e602ba25SJulian Elischer 				break;
1852e602ba25SJulian Elischer 			}
1853982d11f8SJeff Roberson 			thread_unlock(td);
1854e602ba25SJulian Elischer 		}
1855e602ba25SJulian Elischer 		if (breakout) {
18561c58e4e5SJohn Baldwin 			PROC_UNLOCK(p);
18575663e6deSDavid Greenman 			continue;
18585663e6deSDavid Greenman 		}
18595663e6deSDavid Greenman 		/*
18605663e6deSDavid Greenman 		 * get the process size
18615663e6deSDavid Greenman 		 */
18626bed074cSKonstantin Belousov 		vm = vmspace_acquire_ref(p);
18636bed074cSKonstantin Belousov 		if (vm == NULL) {
18646bed074cSKonstantin Belousov 			PROC_UNLOCK(p);
18656bed074cSKonstantin Belousov 			continue;
18666bed074cSKonstantin Belousov 		}
186795e2409aSKonstantin Belousov 		_PHOLD_LITE(p);
186872d97679SDavid Schultz 		PROC_UNLOCK(p);
186995e2409aSKonstantin Belousov 		sx_sunlock(&allproc_lock);
187095e2409aSKonstantin Belousov 		if (!vm_map_trylock_read(&vm->vm_map)) {
187171943c3dSKonstantin Belousov 			vmspace_free(vm);
187295e2409aSKonstantin Belousov 			sx_slock(&allproc_lock);
187395e2409aSKonstantin Belousov 			PRELE(p);
187472d97679SDavid Schultz 			continue;
187572d97679SDavid Schultz 		}
18767981aa24SKonstantin Belousov 		size = vmspace_swap_count(vm);
1877245139c6SKonstantin Belousov 		if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
18783949873fSKonstantin Belousov 			size += vm_pageout_oom_pagecount(vm);
18793949873fSKonstantin Belousov 		vm_map_unlock_read(&vm->vm_map);
18806bed074cSKonstantin Belousov 		vmspace_free(vm);
188195e2409aSKonstantin Belousov 		sx_slock(&allproc_lock);
18823949873fSKonstantin Belousov 
18835663e6deSDavid Greenman 		/*
18843949873fSKonstantin Belousov 		 * If this process is bigger than the biggest one,
18855663e6deSDavid Greenman 		 * remember it.
18865663e6deSDavid Greenman 		 */
18875663e6deSDavid Greenman 		if (size > bigsize) {
18881c58e4e5SJohn Baldwin 			if (bigproc != NULL)
188971943c3dSKonstantin Belousov 				PRELE(bigproc);
18905663e6deSDavid Greenman 			bigproc = p;
18915663e6deSDavid Greenman 			bigsize = size;
189271943c3dSKonstantin Belousov 		} else {
189371943c3dSKonstantin Belousov 			PRELE(p);
189471943c3dSKonstantin Belousov 		}
18955663e6deSDavid Greenman 	}
18961005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
18975663e6deSDavid Greenman 	if (bigproc != NULL) {
18988311a2b8SWill Andrews 		if (vm_panic_on_oom != 0)
18998311a2b8SWill Andrews 			panic("out of swap space");
190071943c3dSKonstantin Belousov 		PROC_LOCK(bigproc);
1901729b1e51SDavid Greenman 		killproc(bigproc, "out of swap space");
1902fa885116SJulian Elischer 		sched_nice(bigproc, PRIO_MIN);
190371943c3dSKonstantin Belousov 		_PRELE(bigproc);
19041c58e4e5SJohn Baldwin 		PROC_UNLOCK(bigproc);
19055663e6deSDavid Greenman 	}
19065663e6deSDavid Greenman }
190726f9a767SRodney W. Grimes 
1908b50a4ea6SMark Johnston static bool
1909b50a4ea6SMark Johnston vm_pageout_lowmem(void)
191049a3710cSMark Johnston {
1911b50a4ea6SMark Johnston 	static int lowmem_ticks = 0;
1912b50a4ea6SMark Johnston 	int last;
191349a3710cSMark Johnston 
1914b50a4ea6SMark Johnston 	last = atomic_load_int(&lowmem_ticks);
1915b50a4ea6SMark Johnston 	while ((u_int)(ticks - last) / hz >= lowmem_period) {
1916b50a4ea6SMark Johnston 		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
1917b50a4ea6SMark Johnston 			continue;
1918b50a4ea6SMark Johnston 
191949a3710cSMark Johnston 		/*
192049a3710cSMark Johnston 		 * Decrease registered cache sizes.
192149a3710cSMark Johnston 		 */
192249a3710cSMark Johnston 		SDT_PROBE0(vm, , , vm__lowmem_scan);
192349a3710cSMark Johnston 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
192449a3710cSMark Johnston 
192549a3710cSMark Johnston 		/*
192649a3710cSMark Johnston 		 * We do this explicitly after the caches have been
192708cfa56eSMark Johnston 		 * drained above.  If we have a severe page shortage on
192808cfa56eSMark Johnston 		 * our hands, completely drain all UMA zones.  Otherwise,
192908cfa56eSMark Johnston 		 * just prune the caches.
193049a3710cSMark Johnston 		 */
193108cfa56eSMark Johnston 		uma_reclaim(vm_page_count_min() ? UMA_RECLAIM_DRAIN_CPU :
193208cfa56eSMark Johnston 		    UMA_RECLAIM_TRIM);
1933b50a4ea6SMark Johnston 		return (true);
193449a3710cSMark Johnston 	}
1935b50a4ea6SMark Johnston 	return (false);
193649a3710cSMark Johnston }
193749a3710cSMark Johnston 
193849a3710cSMark Johnston static void
1939449c2e92SKonstantin Belousov vm_pageout_worker(void *arg)
1940449c2e92SKonstantin Belousov {
1941e2068d0bSJeff Roberson 	struct vm_domain *vmd;
1942b50a4ea6SMark Johnston 	u_int ofree;
194349a3710cSMark Johnston 	int addl_shortage, domain, shortage;
1944e57dd910SAlan Cox 	bool target_met;
1945449c2e92SKonstantin Belousov 
1946e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
1947e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
19485f8cd1c0SJeff Roberson 	shortage = 0;
1949e57dd910SAlan Cox 	target_met = true;
1950449c2e92SKonstantin Belousov 
1951449c2e92SKonstantin Belousov 	/*
1952949c9186SKonstantin Belousov 	 * XXXKIB It could be useful to bind pageout daemon threads to
1953949c9186SKonstantin Belousov 	 * the cores belonging to the domain, from which vm_page_array
1954949c9186SKonstantin Belousov 	 * is allocated.
1955449c2e92SKonstantin Belousov 	 */
1956449c2e92SKonstantin Belousov 
1957e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1958e2068d0bSJeff Roberson 	vmd->vmd_last_active_scan = ticks;
1959449c2e92SKonstantin Belousov 
1960449c2e92SKonstantin Belousov 	/*
1961449c2e92SKonstantin Belousov 	 * The pageout daemon worker is never done, so loop forever.
1962449c2e92SKonstantin Belousov 	 */
1963449c2e92SKonstantin Belousov 	while (TRUE) {
196430fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
196549a3710cSMark Johnston 
196630fbfddaSJeff Roberson 		/*
196730fbfddaSJeff Roberson 		 * We need to clear wanted before we check the limits.  This
196830fbfddaSJeff Roberson 		 * prevents races with wakers who will check wanted after they
196930fbfddaSJeff Roberson 		 * reach the limit.
197030fbfddaSJeff Roberson 		 */
197130fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
197256ce0690SAlan Cox 
197356ce0690SAlan Cox 		/*
19745f8cd1c0SJeff Roberson 		 * Might the page daemon need to run again?
1975449c2e92SKonstantin Belousov 		 */
19765f8cd1c0SJeff Roberson 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
197756ce0690SAlan Cox 			/*
197849a3710cSMark Johnston 			 * Yes.  If the scan failed to produce enough free
197949a3710cSMark Johnston 			 * pages, sleep uninterruptibly for some time in the
198049a3710cSMark Johnston 			 * hope that the laundry thread will clean some pages.
198156ce0690SAlan Cox 			 */
198230fbfddaSJeff Roberson 			vm_domain_pageout_unlock(vmd);
198349a3710cSMark Johnston 			if (!target_met)
19846eebec83SMark Johnston 				pause("pwait", hz / VM_INACT_SCAN_RATE);
1985449c2e92SKonstantin Belousov 		} else {
1986449c2e92SKonstantin Belousov 			/*
19875f8cd1c0SJeff Roberson 			 * No, sleep until the next wakeup or until pages
19885f8cd1c0SJeff Roberson 			 * need to have their reference stats updated.
1989449c2e92SKonstantin Belousov 			 */
19902c0f13aaSKonstantin Belousov 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
199130fbfddaSJeff Roberson 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
19925f8cd1c0SJeff Roberson 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
199383c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdwakeups);
199456ce0690SAlan Cox 		}
1995be37ee79SMark Johnston 
199630fbfddaSJeff Roberson 		/* Prevent spurious wakeups by ensuring that wanted is set. */
199730fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
199830fbfddaSJeff Roberson 
199930fbfddaSJeff Roberson 		/*
200030fbfddaSJeff Roberson 		 * Use the controller to calculate how many pages to free in
2001b50a4ea6SMark Johnston 		 * this interval, and scan the inactive queue.  If the lowmem
2002b50a4ea6SMark Johnston 		 * handlers appear to have freed up some pages, subtract the
2003b50a4ea6SMark Johnston 		 * difference from the inactive queue scan target.
200430fbfddaSJeff Roberson 		 */
20055f8cd1c0SJeff Roberson 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
200649a3710cSMark Johnston 		if (shortage > 0) {
2007b50a4ea6SMark Johnston 			ofree = vmd->vmd_free_count;
2008b50a4ea6SMark Johnston 			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2009b50a4ea6SMark Johnston 				shortage -= min(vmd->vmd_free_count - ofree,
2010b50a4ea6SMark Johnston 				    (u_int)shortage);
201149a3710cSMark Johnston 			target_met = vm_pageout_scan_inactive(vmd, shortage,
2012be37ee79SMark Johnston 			    &addl_shortage);
201349a3710cSMark Johnston 		} else
201449a3710cSMark Johnston 			addl_shortage = 0;
201556ce0690SAlan Cox 
2016be37ee79SMark Johnston 		/*
2017be37ee79SMark Johnston 		 * Scan the active queue.  A positive value for shortage
2018be37ee79SMark Johnston 		 * indicates that we must aggressively deactivate pages to avoid
2019be37ee79SMark Johnston 		 * a shortfall.
2020be37ee79SMark Johnston 		 */
20217bb4634eSMark Johnston 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
2022be37ee79SMark Johnston 		vm_pageout_scan_active(vmd, shortage);
2023449c2e92SKonstantin Belousov 	}
2024449c2e92SKonstantin Belousov }
2025449c2e92SKonstantin Belousov 
2026df8bae1dSRodney W. Grimes /*
20274d19f4adSSteven Hartland  *	vm_pageout_init initialises basic pageout daemon settings.
2028df8bae1dSRodney W. Grimes  */
20292b14f991SJulian Elischer static void
2030e2068d0bSJeff Roberson vm_pageout_init_domain(int domain)
2031df8bae1dSRodney W. Grimes {
2032e2068d0bSJeff Roberson 	struct vm_domain *vmd;
20335f8cd1c0SJeff Roberson 	struct sysctl_oid *oid;
2034e2068d0bSJeff Roberson 
2035e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
2036e2068d0bSJeff Roberson 	vmd->vmd_interrupt_free_min = 2;
2037f6b04d2bSDavid Greenman 
203845ae1d91SAlan Cox 	/*
203945ae1d91SAlan Cox 	 * v_free_reserved needs to include enough for the largest
204045ae1d91SAlan Cox 	 * swap pager structures plus enough for any pv_entry structs
204145ae1d91SAlan Cox 	 * when paging.
204245ae1d91SAlan Cox 	 */
2043e2068d0bSJeff Roberson 	if (vmd->vmd_page_count > 1024)
2044e2068d0bSJeff Roberson 		vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200;
20452feb50bfSAttilio Rao 	else
2046e2068d0bSJeff Roberson 		vmd->vmd_free_min = 4;
20470cab71bcSDoug Moore 	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2048e2068d0bSJeff Roberson 	    vmd->vmd_interrupt_free_min;
2049e2068d0bSJeff Roberson 	vmd->vmd_free_reserved = vm_pageout_page_count +
2050e2068d0bSJeff Roberson 	    vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768);
2051e2068d0bSJeff Roberson 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2052e2068d0bSJeff Roberson 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2053e2068d0bSJeff Roberson 	vmd->vmd_free_min += vmd->vmd_free_reserved;
2054e2068d0bSJeff Roberson 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
2055e2068d0bSJeff Roberson 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2056e2068d0bSJeff Roberson 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2057e2068d0bSJeff Roberson 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
2058df8bae1dSRodney W. Grimes 
2059d9e23210SJeff Roberson 	/*
20605f8cd1c0SJeff Roberson 	 * Set the default wakeup threshold to be 10% below the paging
20615f8cd1c0SJeff Roberson 	 * target.  This keeps the steady state out of shortfall.
2062d9e23210SJeff Roberson 	 */
20635f8cd1c0SJeff Roberson 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2064e2068d0bSJeff Roberson 
2065e2068d0bSJeff Roberson 	/*
2066e2068d0bSJeff Roberson 	 * Target amount of memory to move out of the laundry queue during a
2067e2068d0bSJeff Roberson 	 * background laundering.  This is proportional to the amount of system
2068e2068d0bSJeff Roberson 	 * memory.
2069e2068d0bSJeff Roberson 	 */
2070e2068d0bSJeff Roberson 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2071e2068d0bSJeff Roberson 	    vmd->vmd_free_min) / 10;
20725f8cd1c0SJeff Roberson 
20735f8cd1c0SJeff Roberson 	/* Initialize the pageout daemon pid controller. */
20745f8cd1c0SJeff Roberson 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
20755f8cd1c0SJeff Roberson 	    vmd->vmd_free_target, PIDCTRL_BOUND,
20765f8cd1c0SJeff Roberson 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
20775f8cd1c0SJeff Roberson 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
20785f8cd1c0SJeff Roberson 	    "pidctrl", CTLFLAG_RD, NULL, "");
20795f8cd1c0SJeff Roberson 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2080e2068d0bSJeff Roberson }
2081e2068d0bSJeff Roberson 
2082e2068d0bSJeff Roberson static void
2083e2068d0bSJeff Roberson vm_pageout_init(void)
2084e2068d0bSJeff Roberson {
2085e2068d0bSJeff Roberson 	u_int freecount;
2086e2068d0bSJeff Roberson 	int i;
2087e2068d0bSJeff Roberson 
2088e2068d0bSJeff Roberson 	/*
2089e2068d0bSJeff Roberson 	 * Initialize some paging parameters.
2090e2068d0bSJeff Roberson 	 */
2091e2068d0bSJeff Roberson 	if (vm_cnt.v_page_count < 2000)
2092e2068d0bSJeff Roberson 		vm_pageout_page_count = 8;
2093e2068d0bSJeff Roberson 
2094e2068d0bSJeff Roberson 	freecount = 0;
2095e2068d0bSJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
2096e2068d0bSJeff Roberson 		struct vm_domain *vmd;
2097e2068d0bSJeff Roberson 
2098e2068d0bSJeff Roberson 		vm_pageout_init_domain(i);
2099e2068d0bSJeff Roberson 		vmd = VM_DOMAIN(i);
2100e2068d0bSJeff Roberson 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2101e2068d0bSJeff Roberson 		vm_cnt.v_free_target += vmd->vmd_free_target;
2102e2068d0bSJeff Roberson 		vm_cnt.v_free_min += vmd->vmd_free_min;
2103e2068d0bSJeff Roberson 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2104e2068d0bSJeff Roberson 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2105e2068d0bSJeff Roberson 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2106e2068d0bSJeff Roberson 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
2107e2068d0bSJeff Roberson 		freecount += vmd->vmd_free_count;
2108e2068d0bSJeff Roberson 	}
2109d9e23210SJeff Roberson 
2110d9e23210SJeff Roberson 	/*
2111d9e23210SJeff Roberson 	 * Set interval in seconds for active scan.  We want to visit each
2112c9612b2dSJeff Roberson 	 * page at least once every ten minutes.  This is to prevent worst
2113c9612b2dSJeff Roberson 	 * case paging behaviors with stale active LRU.
2114d9e23210SJeff Roberson 	 */
2115d9e23210SJeff Roberson 	if (vm_pageout_update_period == 0)
2116c9612b2dSJeff Roberson 		vm_pageout_update_period = 600;
2117d9e23210SJeff Roberson 
211854a3a114SMark Johnston 	if (vm_page_max_user_wired == 0)
211954a3a114SMark Johnston 		vm_page_max_user_wired = freecount / 3;
21204d19f4adSSteven Hartland }
21214d19f4adSSteven Hartland 
21224d19f4adSSteven Hartland /*
21234d19f4adSSteven Hartland  *     vm_pageout is the high level pageout daemon.
21244d19f4adSSteven Hartland  */
21254d19f4adSSteven Hartland static void
21264d19f4adSSteven Hartland vm_pageout(void)
21274d19f4adSSteven Hartland {
2128920239efSMark Johnston 	struct proc *p;
2129920239efSMark Johnston 	struct thread *td;
2130920239efSMark Johnston 	int error, first, i;
2131920239efSMark Johnston 
2132920239efSMark Johnston 	p = curproc;
2133920239efSMark Johnston 	td = curthread;
2134df8bae1dSRodney W. Grimes 
2135245139c6SKonstantin Belousov 	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
213624a1cce3SDavid Greenman 	swap_pager_swap_init();
2137920239efSMark Johnston 	for (first = -1, i = 0; i < vm_ndomains; i++) {
213830c5525bSAndrew Gallatin 		if (VM_DOMAIN_EMPTY(i)) {
213930c5525bSAndrew Gallatin 			if (bootverbose)
214030c5525bSAndrew Gallatin 				printf("domain %d empty; skipping pageout\n",
214130c5525bSAndrew Gallatin 				    i);
214230c5525bSAndrew Gallatin 			continue;
214330c5525bSAndrew Gallatin 		}
2144920239efSMark Johnston 		if (first == -1)
2145920239efSMark Johnston 			first = i;
2146920239efSMark Johnston 		else {
2147920239efSMark Johnston 			error = kthread_add(vm_pageout_worker,
2148920239efSMark Johnston 			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2149920239efSMark Johnston 			if (error != 0)
2150920239efSMark Johnston 				panic("starting pageout for domain %d: %d\n",
2151449c2e92SKonstantin Belousov 				    i, error);
2152dc2efb27SJohn Dyson 		}
2153e2068d0bSJeff Roberson 		error = kthread_add(vm_pageout_laundry_worker,
2154920239efSMark Johnston 		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2155e2068d0bSJeff Roberson 		if (error != 0)
2156920239efSMark Johnston 			panic("starting laundry for domain %d: %d", i, error);
2157f919ebdeSDavid Greenman 	}
2158920239efSMark Johnston 	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
215944ec2b63SKonstantin Belousov 	if (error != 0)
216044ec2b63SKonstantin Belousov 		panic("starting uma_reclaim helper, error %d\n", error);
2161920239efSMark Johnston 
2162920239efSMark Johnston 	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2163920239efSMark Johnston 	vm_pageout_worker((void *)(uintptr_t)first);
2164df8bae1dSRodney W. Grimes }
216526f9a767SRodney W. Grimes 
21666b4b77adSAlan Cox /*
2167280d15cdSMark Johnston  * Perform an advisory wakeup of the page daemon.
21686b4b77adSAlan Cox  */
2169e0c5a895SJohn Dyson void
2170e2068d0bSJeff Roberson pagedaemon_wakeup(int domain)
2171e0c5a895SJohn Dyson {
2172e2068d0bSJeff Roberson 	struct vm_domain *vmd;
2173a1c0a785SAlan Cox 
2174e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
217530fbfddaSJeff Roberson 	vm_domain_pageout_assert_unlocked(vmd);
217630fbfddaSJeff Roberson 	if (curproc == pageproc)
217730fbfddaSJeff Roberson 		return;
2178280d15cdSMark Johnston 
217930fbfddaSJeff Roberson 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
218030fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
218130fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2182e2068d0bSJeff Roberson 		wakeup(&vmd->vmd_pageout_wanted);
218330fbfddaSJeff Roberson 		vm_domain_pageout_unlock(vmd);
2184e0c5a895SJohn Dyson 	}
2185e0c5a895SJohn Dyson }
2186