xref: /freebsd/sys/vm/vm_pageout.c (revision acb4cb33d35838e3e86412202cd63d9021b21ce2)
160727d8bSWarner Losh /*-
2796df753SPedro F. Giffuni  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3df57947fSPedro F. Giffuni  *
426f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
726f9a767SRodney W. Grimes  * All rights reserved.
826f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
926f9a767SRodney W. Grimes  * All rights reserved.
108dbca793STor Egge  * Copyright (c) 2005 Yahoo! Technologies Norway AS
118dbca793STor Egge  * All rights reserved.
12df8bae1dSRodney W. Grimes  *
13df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
14df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
15df8bae1dSRodney W. Grimes  *
16df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
17df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
18df8bae1dSRodney W. Grimes  * are met:
19df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
20df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
21df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
22df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
23df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
24df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
255929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
26df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
27df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
28df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
29df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
30df8bae1dSRodney W. Grimes  *    without specific prior written permission.
31df8bae1dSRodney W. Grimes  *
32df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
43df8bae1dSRodney W. Grimes  *
44df8bae1dSRodney W. Grimes  *
45df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46df8bae1dSRodney W. Grimes  * All rights reserved.
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
51df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
52df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
53df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
54df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
55df8bae1dSRodney W. Grimes  *
56df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
61df8bae1dSRodney W. Grimes  *
62df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
63df8bae1dSRodney W. Grimes  *  School of Computer Science
64df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
65df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
66df8bae1dSRodney W. Grimes  *
67df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
68df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75874651b1SDavid E. O'Brien #include <sys/cdefs.h>
76faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
777672ca05SMark Johnston 
78df8bae1dSRodney W. Grimes #include <sys/param.h>
7926f9a767SRodney W. Grimes #include <sys/systm.h>
80b5e8ce9fSBruce Evans #include <sys/kernel.h>
810292c54bSConrad Meyer #include <sys/blockcount.h>
82855a310fSJeff Roberson #include <sys/eventhandler.h>
83fb919e4dSMark Murray #include <sys/lock.h>
84fb919e4dSMark Murray #include <sys/mutex.h>
8526f9a767SRodney W. Grimes #include <sys/proc.h>
869c8b8baaSPeter Wemm #include <sys/kthread.h>
870384fff8SJason Evans #include <sys/ktr.h>
8897824da3SAlan Cox #include <sys/mount.h>
89099e7e95SEdward Tomasz Napierala #include <sys/racct.h>
9026f9a767SRodney W. Grimes #include <sys/resourcevar.h>
91b43179fbSJeff Roberson #include <sys/sched.h>
9214a0d74eSSteven Hartland #include <sys/sdt.h>
93d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
94449c2e92SKonstantin Belousov #include <sys/smp.h>
95a6bf3a9eSRyan Stone #include <sys/time.h>
96f6b04d2bSDavid Greenman #include <sys/vnode.h>
97efeaf95aSDavid Greenman #include <sys/vmmeter.h>
9889f6b863SAttilio Rao #include <sys/rwlock.h>
991005a129SJohn Baldwin #include <sys/sx.h>
10038efa82bSJohn Dyson #include <sys/sysctl.h>
101df8bae1dSRodney W. Grimes 
102df8bae1dSRodney W. Grimes #include <vm/vm.h>
103efeaf95aSDavid Greenman #include <vm/vm_param.h>
104efeaf95aSDavid Greenman #include <vm/vm_object.h>
105df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
106efeaf95aSDavid Greenman #include <vm/vm_map.h>
107df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
10824a1cce3SDavid Greenman #include <vm/vm_pager.h>
109449c2e92SKonstantin Belousov #include <vm/vm_phys.h>
110e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
11105f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
112efeaf95aSDavid Greenman #include <vm/vm_extern.h>
113670d17b5SJeff Roberson #include <vm/uma.h>
114df8bae1dSRodney W. Grimes 
1152b14f991SJulian Elischer /*
1162b14f991SJulian Elischer  * System initialization
1172b14f991SJulian Elischer  */
1182b14f991SJulian Elischer 
 1192b14f991SJulian Elischer /* The kernel process "vm_pageout". */
12011caded3SAlfred Perlstein static void vm_pageout(void);
1214d19f4adSSteven Hartland static void vm_pageout_init(void);
122ebcddc72SAlan Cox static int vm_pageout_clean(vm_page_t m, int *numpagedout);
12334d8b7eaSJeff Roberson static int vm_pageout_cluster(vm_page_t m);
12476386c7eSKonstantin Belousov static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
12576386c7eSKonstantin Belousov     int starting_page_shortage);
12645ae1d91SAlan Cox 
1274d19f4adSSteven Hartland SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
1284d19f4adSSteven Hartland     NULL);
1294d19f4adSSteven Hartland 
1302b14f991SJulian Elischer struct proc *pageproc;
1312b14f991SJulian Elischer 
1322b14f991SJulian Elischer static struct kproc_desc page_kp = {
1332b14f991SJulian Elischer 	"pagedaemon",
1342b14f991SJulian Elischer 	vm_pageout,
1352b14f991SJulian Elischer 	&pageproc
1362b14f991SJulian Elischer };
1374d19f4adSSteven Hartland SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
138237fdd78SRobert Watson     &page_kp);
1392b14f991SJulian Elischer 
14014a0d74eSSteven Hartland SDT_PROVIDER_DEFINE(vm);
14114a0d74eSSteven Hartland SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
14214a0d74eSSteven Hartland 
143ebcddc72SAlan Cox /* Pagedaemon activity rates, in subdivisions of one second. */
144ebcddc72SAlan Cox #define	VM_LAUNDER_RATE		10
1455f8cd1c0SJeff Roberson #define	VM_INACT_SCAN_RATE	10
1462b14f991SJulian Elischer 
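/* Nonzero while at least one swap device is configured. */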
147b1fd102eSMark Johnston static int swapdev_enabled;
148c4a25e07SMark Johnston int vm_pageout_page_count = 32;
14970111b90SJohn Dyson 
1508311a2b8SWill Andrews static int vm_panic_on_oom = 0;
1518311a2b8SWill Andrews SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
1528311a2b8SWill Andrews     CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
153c4a25e07SMark Johnston     "Panic on the given number of out-of-memory errors instead of "
154c4a25e07SMark Johnston     "killing the largest process");
1558311a2b8SWill Andrews 
156c4a25e07SMark Johnston static int vm_pageout_update_period;
157d9e23210SJeff Roberson SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
158e0b2fc3aSMark Johnston     CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
159d9e23210SJeff Roberson     "Maximum active LRU update period");
16053636869SAndrey Zonov 
16174f5530dSConrad Meyer static int pageout_cpus_per_thread = 16;
16274f5530dSConrad Meyer SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
16374f5530dSConrad Meyer     &pageout_cpus_per_thread, 0,
16474f5530dSConrad Meyer     "Number of CPUs per pagedaemon worker thread");
1650292c54bSConrad Meyer 
166c4a25e07SMark Johnston static int lowmem_period = 10;
167e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
168c9612b2dSJeff Roberson     "Low memory callback period");
169c9612b2dSJeff Roberson 
170c4a25e07SMark Johnston static int disable_swap_pageouts;
171ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
172c4a25e07SMark Johnston     CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
173c4a25e07SMark Johnston     "Disallow swapout of dirty pages");
17412ac6a1dSJohn Dyson 
17523b59018SMatthew Dillon static int pageout_lock_miss;
17623b59018SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
177c4a25e07SMark Johnston     CTLFLAG_RD, &pageout_lock_miss, 0,
178c4a25e07SMark Johnston     "vget() lock misses during pageout");
17923b59018SMatthew Dillon 
180c4a25e07SMark Johnston static int vm_pageout_oom_seq = 12;
18176386c7eSKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
182e0b2fc3aSMark Johnston     CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
18376386c7eSKonstantin Belousov     "back-to-back calls to oom detector to start OOM");
18476386c7eSKonstantin Belousov 
185ebcddc72SAlan Cox static int act_scan_laundry_weight = 3;
186e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
187ebcddc72SAlan Cox     &act_scan_laundry_weight, 0,
188ebcddc72SAlan Cox     "weight given to clean vs. dirty pages in active queue scans");
189ebcddc72SAlan Cox 
190ebcddc72SAlan Cox static u_int vm_background_launder_rate = 4096;
191e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
192ebcddc72SAlan Cox     &vm_background_launder_rate, 0,
193ebcddc72SAlan Cox     "background laundering rate, in kilobytes per second");
194ebcddc72SAlan Cox 
195ebcddc72SAlan Cox static u_int vm_background_launder_max = 20 * 1024;
196e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
197c4a25e07SMark Johnston     &vm_background_launder_max, 0,
198c4a25e07SMark Johnston     "background laundering cap, in kilobytes");
199df8bae1dSRodney W. Grimes 
20054a3a114SMark Johnston u_long vm_page_max_user_wired;
20154a3a114SMark Johnston SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
20254a3a114SMark Johnston     &vm_page_max_user_wired, 0,
20354a3a114SMark Johnston     "system-wide limit to user-wired page count");
204df8bae1dSRodney W. Grimes 
205ebcddc72SAlan Cox static u_int isqrt(u_int num);
206ebcddc72SAlan Cox static int vm_pageout_launder(struct vm_domain *vmd, int launder,
207ebcddc72SAlan Cox     bool in_shortfall);
208ebcddc72SAlan Cox static void vm_pageout_laundry_worker(void *arg);
209cd41fc12SDavid Greenman 
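/*
 * A page queue scan is performed in batches: vm_pageout_init_scan() links a
 * marker page into the queue and drops the queue lock, vm_pageout_next()
 * returns pages one at a time, relocking the queue only to refill a local
 * batch and advance the marker, and vm_pageout_end_scan() unlinks the marker
 * and charges the number of pages scanned to the queue's counters.
 */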
2105cd29d0fSMark Johnston struct scan_state {
2115cd29d0fSMark Johnston 	struct vm_batchqueue bq;
2128d220203SAlan Cox 	struct vm_pagequeue *pq;
2135cd29d0fSMark Johnston 	vm_page_t	marker;
2145cd29d0fSMark Johnston 	int		maxscan;
2155cd29d0fSMark Johnston 	int		scanned;
2165cd29d0fSMark Johnston };
2178dbca793STor Egge 
2185cd29d0fSMark Johnston static void
2195cd29d0fSMark Johnston vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
2205cd29d0fSMark Johnston     vm_page_t marker, vm_page_t after, int maxscan)
2215cd29d0fSMark Johnston {
2228dbca793STor Egge 
2235cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
2245cff1f4dSMark Johnston 	KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
2255cd29d0fSMark Johnston 	    ("marker %p already enqueued", marker));
2265cd29d0fSMark Johnston 
2275cd29d0fSMark Johnston 	if (after == NULL)
2285cd29d0fSMark Johnston 		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
2295cd29d0fSMark Johnston 	else
2305cd29d0fSMark Johnston 		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
2315cd29d0fSMark Johnston 	vm_page_aflag_set(marker, PGA_ENQUEUED);
2325cd29d0fSMark Johnston 
2335cd29d0fSMark Johnston 	vm_batchqueue_init(&ss->bq);
2345cd29d0fSMark Johnston 	ss->pq = pq;
2355cd29d0fSMark Johnston 	ss->marker = marker;
2365cd29d0fSMark Johnston 	ss->maxscan = maxscan;
2375cd29d0fSMark Johnston 	ss->scanned = 0;
2388d220203SAlan Cox 	vm_pagequeue_unlock(pq);
2395cd29d0fSMark Johnston }
2408dbca793STor Egge 
2415cd29d0fSMark Johnston static void
2425cd29d0fSMark Johnston vm_pageout_end_scan(struct scan_state *ss)
2435cd29d0fSMark Johnston {
2445cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
2455cd29d0fSMark Johnston 
2465cd29d0fSMark Johnston 	pq = ss->pq;
2475cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
2485cff1f4dSMark Johnston 	KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
2495cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2505cd29d0fSMark Johnston 
2515cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
2525cd29d0fSMark Johnston 	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
253899fe184SMark Johnston 	pq->pq_pdpages += ss->scanned;
2548dbca793STor Egge }
2558dbca793STor Egge 
2568dbca793STor Egge /*
2575cd29d0fSMark Johnston  * Add a small number of queued pages to a batch queue for later processing
2585cd29d0fSMark Johnston  * without the corresponding queue lock held.  The caller must have enqueued a
2595cd29d0fSMark Johnston  * marker page at the desired start point for the scan.  Pages will be
2605cd29d0fSMark Johnston  * physically dequeued if the caller so requests.  Otherwise, the returned
2615cd29d0fSMark Johnston  * batch may contain marker pages, and it is up to the caller to handle them.
2625cd29d0fSMark Johnston  *
263efec381dSMark Johnston  * When processing the batch queue, vm_pageout_defer() must be used to
264efec381dSMark Johnston  * determine whether the page has been logically dequeued since the batch was
265efec381dSMark Johnston  * collected.
2665cd29d0fSMark Johnston  */
2675cd29d0fSMark Johnston static __always_inline void
2685cd29d0fSMark Johnston vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
2695cd29d0fSMark Johnston {
2708d220203SAlan Cox 	struct vm_pagequeue *pq;
271d70f0ab3SMark Johnston 	vm_page_t m, marker, n;
2728c616246SKonstantin Belousov 
2735cd29d0fSMark Johnston 	marker = ss->marker;
2745cd29d0fSMark Johnston 	pq = ss->pq;
2758c616246SKonstantin Belousov 
2765cff1f4dSMark Johnston 	KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
2775cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2788c616246SKonstantin Belousov 
2798d220203SAlan Cox 	vm_pagequeue_lock(pq);
2805cd29d0fSMark Johnston 	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
2815cd29d0fSMark Johnston 	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
282d70f0ab3SMark Johnston 	    m = n, ss->scanned++) {
283d70f0ab3SMark Johnston 		n = TAILQ_NEXT(m, plinks.q);
2845cd29d0fSMark Johnston 		if ((m->flags & PG_MARKER) == 0) {
2855cff1f4dSMark Johnston 			KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
2865cd29d0fSMark Johnston 			    ("page %p not enqueued", m));
2875cd29d0fSMark Johnston 			KASSERT((m->flags & PG_FICTITIOUS) == 0,
2885cd29d0fSMark Johnston 			    ("Fictitious page %p cannot be in page queue", m));
2895cd29d0fSMark Johnston 			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2905cd29d0fSMark Johnston 			    ("Unmanaged page %p cannot be in page queue", m));
2915cd29d0fSMark Johnston 		} else if (dequeue)
2925cd29d0fSMark Johnston 			continue;
2938c616246SKonstantin Belousov 
2945cd29d0fSMark Johnston 		(void)vm_batchqueue_insert(&ss->bq, m);
2955cd29d0fSMark Johnston 		if (dequeue) {
2965cd29d0fSMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2975cd29d0fSMark Johnston 			vm_page_aflag_clear(m, PGA_ENQUEUED);
2985cd29d0fSMark Johnston 		}
2995cd29d0fSMark Johnston 	}
3005cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
3015cd29d0fSMark Johnston 	if (__predict_true(m != NULL))
3025cd29d0fSMark Johnston 		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
3035cd29d0fSMark Johnston 	else
3045cd29d0fSMark Johnston 		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
3055cd29d0fSMark Johnston 	if (dequeue)
3065cd29d0fSMark Johnston 		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
3075cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
3085cd29d0fSMark Johnston }
3095cd29d0fSMark Johnston 
310fee2a2faSMark Johnston /*
311fee2a2faSMark Johnston  * Return the next page to be scanned, or NULL if the scan is complete.
312fee2a2faSMark Johnston  */
3135cd29d0fSMark Johnston static __always_inline vm_page_t
3145cd29d0fSMark Johnston vm_pageout_next(struct scan_state *ss, const bool dequeue)
3155cd29d0fSMark Johnston {
3165cd29d0fSMark Johnston 
3175cd29d0fSMark Johnston 	if (ss->bq.bq_cnt == 0)
3185cd29d0fSMark Johnston 		vm_pageout_collect_batch(ss, dequeue);
3195cd29d0fSMark Johnston 	return (vm_batchqueue_pop(&ss->bq));
3208c616246SKonstantin Belousov }
3218c616246SKonstantin Belousov 
3228c616246SKonstantin Belousov /*
323b7f30bffSMark Johnston  * Determine whether processing of a page should be deferred and ensure that any
324b7f30bffSMark Johnston  * outstanding queue operations are processed.
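 * The "queue" argument is the index of the queue in which the caller expects
 * to find the page, and "enqueued" indicates whether the caller expects
 * PGA_ENQUEUED to be set.  If either expectation no longer holds, the page has
 * been concurrently dequeued or requeued and the caller should skip it.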
325b7f30bffSMark Johnston  */
326b7f30bffSMark Johnston static __always_inline bool
327b7f30bffSMark Johnston vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
328b7f30bffSMark Johnston {
329b7f30bffSMark Johnston 	vm_page_astate_t as;
330b7f30bffSMark Johnston 
331b7f30bffSMark Johnston 	as = vm_page_astate_load(m);
332b7f30bffSMark Johnston 	if (__predict_false(as.queue != queue ||
333b7f30bffSMark Johnston 	    ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
334b7f30bffSMark Johnston 		return (true);
335b7f30bffSMark Johnston 	if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
336b7f30bffSMark Johnston 		vm_page_pqbatch_submit(m, queue);
337b7f30bffSMark Johnston 		return (true);
338b7f30bffSMark Johnston 	}
339b7f30bffSMark Johnston 	return (false);
340b7f30bffSMark Johnston }
341b7f30bffSMark Johnston 
342b7f30bffSMark Johnston /*
343*acb4cb33SDoug Moore  * We can cluster only if the page is not clean, busy, or held, and the page is
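 * On success the page is returned exclusively busied with its write mappings
 * removed, ready to be added to the cluster; any busy lock acquired here is
 * released on failure.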
344*acb4cb33SDoug Moore  * in the laundry queue.
345*acb4cb33SDoug Moore  */
346*acb4cb33SDoug Moore static bool
347*acb4cb33SDoug Moore vm_pageout_flushable(vm_page_t m)
348*acb4cb33SDoug Moore {
349*acb4cb33SDoug Moore 	if (vm_page_tryxbusy(m) == 0)
350*acb4cb33SDoug Moore 		return (false);
351*acb4cb33SDoug Moore 	if (!vm_page_wired(m)) {
352*acb4cb33SDoug Moore 		vm_page_test_dirty(m);
353*acb4cb33SDoug Moore 		if (m->dirty != 0 && vm_page_in_laundry(m) &&
354*acb4cb33SDoug Moore 		    vm_page_try_remove_write(m))
355*acb4cb33SDoug Moore 			return (true);
356*acb4cb33SDoug Moore 	}
357*acb4cb33SDoug Moore 	vm_page_xunbusy(m);
358*acb4cb33SDoug Moore 	return (false);
359*acb4cb33SDoug Moore }
360*acb4cb33SDoug Moore 
361*acb4cb33SDoug Moore /*
362248fe642SAlan Cox  * Scan for pages at adjacent offsets within the given page's object that are
363248fe642SAlan Cox  * eligible for laundering, form a cluster of these pages and the given page,
364248fe642SAlan Cox  * and launder that cluster.
36526f9a767SRodney W. Grimes  */
3663af76890SPoul-Henning Kamp static int
36734d8b7eaSJeff Roberson vm_pageout_cluster(vm_page_t m)
36824a1cce3SDavid Greenman {
36954d92145SMatthew Dillon 	vm_object_t object;
370*acb4cb33SDoug Moore 	vm_page_t mc[2 * vm_pageout_page_count - 1];
371*acb4cb33SDoug Moore 	int alignment, num_ends, page_base, pageout_count;
37226f9a767SRodney W. Grimes 
37317f6a17bSAlan Cox 	object = m->object;
37489f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
3750cddd8f0SMatthew Dillon 
37663e97555SJeff Roberson 	vm_page_assert_xbusied(m);
3770d94caffSDavid Greenman 
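	/*
	 * mc[] holds the cluster being assembled.  The given page starts in
	 * the middle slot; the backward scan below fills slots toward the
	 * front of the array while the forward scan fills slots toward the
	 * end, so the cluster always occupies a contiguous run of mc[].
	 */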
378*acb4cb33SDoug Moore 	alignment = m->pindex % vm_pageout_page_count;
379*acb4cb33SDoug Moore 	num_ends = 0;
3806d86bdf1SDoug Moore 	page_base = nitems(mc) / 2;
381*acb4cb33SDoug Moore 	pageout_count = 1;
382*acb4cb33SDoug Moore 	mc[page_base] = m;
38390ecac61SMatthew Dillon 
38424a1cce3SDavid Greenman 	/*
38590ecac61SMatthew Dillon 	 * During heavy mmap/modification loads the pageout
38690ecac61SMatthew Dillon 	 * daemon can really fragment the underlying file
387248fe642SAlan Cox 	 * due to flushing pages out of order and not trying to
388248fe642SAlan Cox 	 * align the clusters (which leaves sporadic out-of-order
38990ecac61SMatthew Dillon 	 * holes).  To solve this problem we do the reverse scan
39090ecac61SMatthew Dillon 	 * first and attempt to align our cluster, then do a
39190ecac61SMatthew Dillon 	 * forward scan if room remains.
39224a1cce3SDavid Greenman 	 */
39390ecac61SMatthew Dillon more:
394*acb4cb33SDoug Moore 	m = mc[page_base];
395*acb4cb33SDoug Moore 	while (pageout_count < vm_pageout_page_count) {
39624a1cce3SDavid Greenman 		/*
397*acb4cb33SDoug Moore 		 * If we are at an alignment boundary, and haven't reached the
398*acb4cb33SDoug Moore 		 * last flushable page forward, stop here, and switch
399*acb4cb33SDoug Moore 		 * directions.
40024a1cce3SDavid Greenman 		 */
401*acb4cb33SDoug Moore 		if (alignment == pageout_count - 1 && num_ends == 0)
40290ecac61SMatthew Dillon 			break;
40390ecac61SMatthew Dillon 
404*acb4cb33SDoug Moore 		m = vm_page_prev(m);
405*acb4cb33SDoug Moore 		if (m == NULL || !vm_pageout_flushable(m)) {
406*acb4cb33SDoug Moore 			num_ends++;
407*acb4cb33SDoug Moore 			break;
408*acb4cb33SDoug Moore 		}
409*acb4cb33SDoug Moore 		mc[--page_base] = m;
410*acb4cb33SDoug Moore 		++pageout_count;
411*acb4cb33SDoug Moore 	}
412*acb4cb33SDoug Moore 	m = mc[page_base + pageout_count - 1];
413*acb4cb33SDoug Moore 	while (num_ends != 2 && pageout_count < vm_pageout_page_count) {
414*acb4cb33SDoug Moore 		m = vm_page_next(m);
415*acb4cb33SDoug Moore 		if (m == NULL || !vm_pageout_flushable(m)) {
416*acb4cb33SDoug Moore 			if (num_ends++ == 0)
417*acb4cb33SDoug Moore 				/* Resume the reverse scan. */
41890ecac61SMatthew Dillon 				goto more;
419*acb4cb33SDoug Moore 			break;
420*acb4cb33SDoug Moore 		}
421*acb4cb33SDoug Moore 		mc[page_base + pageout_count] = m;
422*acb4cb33SDoug Moore 		++pageout_count;
423*acb4cb33SDoug Moore 	}
424f6b04d2bSDavid Greenman 
42599e6e193SMark Johnston 	return (vm_pageout_flush(&mc[page_base], pageout_count,
42699e6e193SMark Johnston 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
427aef922f5SJohn Dyson }
428aef922f5SJohn Dyson 
4291c7c3c6aSMatthew Dillon /*
4301c7c3c6aSMatthew Dillon  * vm_pageout_flush() - launder the given pages
4311c7c3c6aSMatthew Dillon  *
 4331c7c3c6aSMatthew Dillon  *	The given pages are laundered.  Note that we set up for the start of
 4341c7c3c6aSMatthew Dillon  *	I/O (i.e., busy the page), mark it read-only, and bump the object
 4351c7c3c6aSMatthew Dillon  *	reference count all in here rather than in the parent.  If we want
4351c7c3c6aSMatthew Dillon  *	the parent to do more sophisticated things we may have to change
4361c7c3c6aSMatthew Dillon  *	the ordering.
4371e8a675cSKonstantin Belousov  *
 4381e8a675cSKonstantin Belousov  *	The returned runlen is the count of pages between mreq and the first
 4391e8a675cSKonstantin Belousov  *	page after mreq with status VM_PAGER_AGAIN.
 440126d6082SKonstantin Belousov  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
 441126d6082SKonstantin Belousov  *	VM_PAGER_FAIL for any page in the runlen set.
4421c7c3c6aSMatthew Dillon  */
443aef922f5SJohn Dyson int
444126d6082SKonstantin Belousov vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
445126d6082SKonstantin Belousov     boolean_t *eio)
446aef922f5SJohn Dyson {
4472e3b314dSAlan Cox 	vm_object_t object = mc[0]->object;
448aef922f5SJohn Dyson 	int pageout_status[count];
44995461b45SJohn Dyson 	int numpagedout = 0;
4501e8a675cSKonstantin Belousov 	int i, runlen;
451aef922f5SJohn Dyson 
45289f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
4537bec141bSKip Macy 
4541c7c3c6aSMatthew Dillon 	/*
45563e97555SJeff Roberson 	 * Initiate I/O.  Mark the pages shared busy and verify that they're
45663e97555SJeff Roberson 	 * valid and read-only.
4571c7c3c6aSMatthew Dillon 	 *
4581c7c3c6aSMatthew Dillon 	 * We do not have to fixup the clean/dirty bits here... we can
4591c7c3c6aSMatthew Dillon 	 * allow the pager to do it after the I/O completes.
46002fa91d3SMatthew Dillon 	 *
46102fa91d3SMatthew Dillon 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
46202fa91d3SMatthew Dillon 	 * edge case with file fragments.
4631c7c3c6aSMatthew Dillon 	 */
4648f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
4650012f373SJeff Roberson 		KASSERT(vm_page_all_valid(mc[i]),
4667a935082SAlan Cox 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
4677a935082SAlan Cox 			mc[i], i, count));
4685cff1f4dSMark Johnston 		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
469aed9aaaaSMark Johnston 		    ("vm_pageout_flush: writeable page %p", mc[i]));
47063e97555SJeff Roberson 		vm_page_busy_downgrade(mc[i]);
4712965a453SKip Macy 	}
472d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
473aef922f5SJohn Dyson 
474d076fbeaSAlan Cox 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
47526f9a767SRodney W. Grimes 
4761e8a675cSKonstantin Belousov 	runlen = count - mreq;
477126d6082SKonstantin Belousov 	if (eio != NULL)
478126d6082SKonstantin Belousov 		*eio = FALSE;
479aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
480aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
48124a1cce3SDavid Greenman 
4824cd45723SAlan Cox 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
4836031c68dSAlan Cox 		    !pmap_page_is_write_mapped(mt),
4849ea8d1a6SAlan Cox 		    ("vm_pageout_flush: page %p is not write protected", mt));
48526f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
48626f9a767SRodney W. Grimes 		case VM_PAGER_OK:
4879f5632e6SMark Johnston 			/*
4889f5632e6SMark Johnston 			 * The page may have moved since laundering started, in
4899f5632e6SMark Johnston 			 * which case it should be left alone.
4909f5632e6SMark Johnston 			 */
491ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
492ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
493ebcddc72SAlan Cox 			/* FALLTHROUGH */
49426f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
49595461b45SJohn Dyson 			numpagedout++;
49626f9a767SRodney W. Grimes 			break;
49726f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
49826f9a767SRodney W. Grimes 			/*
499ebcddc72SAlan Cox 			 * The page is outside the object's range.  We pretend
 500ebcddc72SAlan Cox 			 * that the pageout worked and mark the page clean, so the
501ebcddc72SAlan Cox 			 * changes will be lost if the page is reclaimed by
502ebcddc72SAlan Cox 			 * the page daemon.
50326f9a767SRodney W. Grimes 			 */
50490ecac61SMatthew Dillon 			vm_page_undirty(mt);
505ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
506ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
50726f9a767SRodney W. Grimes 			break;
50826f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
50926f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
51026f9a767SRodney W. Grimes 			/*
511b1fd102eSMark Johnston 			 * If the page couldn't be paged out to swap because the
512b1fd102eSMark Johnston 			 * pager wasn't able to find space, place the page in
513b1fd102eSMark Johnston 			 * the PQ_UNSWAPPABLE holding queue.  This is an
514b1fd102eSMark Johnston 			 * optimization that prevents the page daemon from
515b1fd102eSMark Johnston 			 * wasting CPU cycles on pages that cannot be reclaimed
516fa7a635fSGordon Bergling 			 * because no swap device is configured.
517b1fd102eSMark Johnston 			 *
518b1fd102eSMark Johnston 			 * Otherwise, reactivate the page so that it doesn't
519b1fd102eSMark Johnston 			 * clog the laundry and inactive queues.  (We will try
520b1fd102eSMark Johnston 			 * paging it out again later.)
52126f9a767SRodney W. Grimes 			 */
5224b8365d7SKonstantin Belousov 			if ((object->flags & OBJ_SWAP) != 0 &&
523b1fd102eSMark Johnston 			    pageout_status[i] == VM_PAGER_FAIL) {
524b1fd102eSMark Johnston 				vm_page_unswappable(mt);
525b1fd102eSMark Johnston 				numpagedout++;
526b1fd102eSMark Johnston 			} else
52724a1cce3SDavid Greenman 				vm_page_activate(mt);
528126d6082SKonstantin Belousov 			if (eio != NULL && i >= mreq && i - mreq < runlen)
529126d6082SKonstantin Belousov 				*eio = TRUE;
53026f9a767SRodney W. Grimes 			break;
53126f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
5321e8a675cSKonstantin Belousov 			if (i >= mreq && i - mreq < runlen)
5331e8a675cSKonstantin Belousov 				runlen = i - mreq;
53426f9a767SRodney W. Grimes 			break;
53526f9a767SRodney W. Grimes 		}
53626f9a767SRodney W. Grimes 
53726f9a767SRodney W. Grimes 		/*
5380d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
5390d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
5400d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
5410d94caffSDavid Greenman 		 * collapse.
54226f9a767SRodney W. Grimes 		 */
54326f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
544f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
545c7aebda8SAttilio Rao 			vm_page_sunbusy(mt);
5463c4a2440SAlan Cox 		}
5473c4a2440SAlan Cox 	}
5481e8a675cSKonstantin Belousov 	if (prunlen != NULL)
5491e8a675cSKonstantin Belousov 		*prunlen = runlen;
5503c4a2440SAlan Cox 	return (numpagedout);
55126f9a767SRodney W. Grimes }
55226f9a767SRodney W. Grimes 
553b1fd102eSMark Johnston static void
554b1fd102eSMark Johnston vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
555b1fd102eSMark Johnston {
556b1fd102eSMark Johnston 
557b1fd102eSMark Johnston 	atomic_store_rel_int(&swapdev_enabled, 1);
558b1fd102eSMark Johnston }
559b1fd102eSMark Johnston 
560b1fd102eSMark Johnston static void
561b1fd102eSMark Johnston vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
562b1fd102eSMark Johnston {
563b1fd102eSMark Johnston 
564b1fd102eSMark Johnston 	if (swap_pager_nswapdev() == 1)
565b1fd102eSMark Johnston 		atomic_store_rel_int(&swapdev_enabled, 0);
566b1fd102eSMark Johnston }
567b1fd102eSMark Johnston 
5681c7c3c6aSMatthew Dillon /*
56934d8b7eaSJeff Roberson  * Attempt to acquire all of the necessary locks to launder a page and
57034d8b7eaSJeff Roberson  * then call through the clustering layer to PUTPAGES.  Wait a short
57134d8b7eaSJeff Roberson  * time for a vnode lock.
57234d8b7eaSJeff Roberson  *
57334d8b7eaSJeff Roberson  * Requires the page and object lock on entry, releases both before return.
57434d8b7eaSJeff Roberson  * Returns 0 on success and an errno otherwise.
57534d8b7eaSJeff Roberson  */
57634d8b7eaSJeff Roberson static int
577ebcddc72SAlan Cox vm_pageout_clean(vm_page_t m, int *numpagedout)
57834d8b7eaSJeff Roberson {
57934d8b7eaSJeff Roberson 	struct vnode *vp;
58034d8b7eaSJeff Roberson 	struct mount *mp;
58134d8b7eaSJeff Roberson 	vm_object_t object;
58234d8b7eaSJeff Roberson 	vm_pindex_t pindex;
5830ef5eee9SKonstantin Belousov 	int error;
58434d8b7eaSJeff Roberson 
58534d8b7eaSJeff Roberson 	object = m->object;
58634d8b7eaSJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
58734d8b7eaSJeff Roberson 	error = 0;
58834d8b7eaSJeff Roberson 	vp = NULL;
58934d8b7eaSJeff Roberson 	mp = NULL;
59034d8b7eaSJeff Roberson 
59134d8b7eaSJeff Roberson 	/*
59234d8b7eaSJeff Roberson 	 * The object is already known NOT to be dead.   It
59334d8b7eaSJeff Roberson 	 * is possible for the vget() to block the whole
59434d8b7eaSJeff Roberson 	 * pageout daemon, but the new low-memory handling
59534d8b7eaSJeff Roberson 	 * code should prevent it.
59634d8b7eaSJeff Roberson 	 *
59734d8b7eaSJeff Roberson 	 * We can't wait forever for the vnode lock, we might
59834d8b7eaSJeff Roberson 	 * deadlock due to a vn_read() getting stuck in
59934d8b7eaSJeff Roberson 	 * vm_wait while holding this vnode.  We skip the
60034d8b7eaSJeff Roberson 	 * vnode if we can't get it in a reasonable amount
60134d8b7eaSJeff Roberson 	 * of time.
60234d8b7eaSJeff Roberson 	 */
60334d8b7eaSJeff Roberson 	if (object->type == OBJT_VNODE) {
60463e97555SJeff Roberson 		vm_page_xunbusy(m);
60534d8b7eaSJeff Roberson 		vp = object->handle;
60634d8b7eaSJeff Roberson 		if (vp->v_type == VREG &&
60734d8b7eaSJeff Roberson 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
60834d8b7eaSJeff Roberson 			mp = NULL;
60934d8b7eaSJeff Roberson 			error = EDEADLK;
61034d8b7eaSJeff Roberson 			goto unlock_all;
61134d8b7eaSJeff Roberson 		}
61234d8b7eaSJeff Roberson 		KASSERT(mp != NULL,
61334d8b7eaSJeff Roberson 		    ("vp %p with NULL v_mount", vp));
61434d8b7eaSJeff Roberson 		vm_object_reference_locked(object);
61534d8b7eaSJeff Roberson 		pindex = m->pindex;
61634d8b7eaSJeff Roberson 		VM_OBJECT_WUNLOCK(object);
6170ef5eee9SKonstantin Belousov 		if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
61834d8b7eaSJeff Roberson 			vp = NULL;
61934d8b7eaSJeff Roberson 			error = EDEADLK;
62034d8b7eaSJeff Roberson 			goto unlock_mp;
62134d8b7eaSJeff Roberson 		}
62234d8b7eaSJeff Roberson 		VM_OBJECT_WLOCK(object);
62357cd81a3SMark Johnston 
62457cd81a3SMark Johnston 		/*
62557cd81a3SMark Johnston 		 * Ensure that the object and vnode were not disassociated
62657cd81a3SMark Johnston 		 * while locks were dropped.
62757cd81a3SMark Johnston 		 */
62857cd81a3SMark Johnston 		if (vp->v_object != object) {
62957cd81a3SMark Johnston 			error = ENOENT;
63057cd81a3SMark Johnston 			goto unlock_all;
63157cd81a3SMark Johnston 		}
63257cd81a3SMark Johnston 
63334d8b7eaSJeff Roberson 		/*
6349f5632e6SMark Johnston 		 * While the object was unlocked, the page may have been:
63534d8b7eaSJeff Roberson 		 * (1) moved to a different queue,
63634d8b7eaSJeff Roberson 		 * (2) reallocated to a different object,
63734d8b7eaSJeff Roberson 		 * (3) reallocated to a different offset, or
63834d8b7eaSJeff Roberson 		 * (4) cleaned.
63934d8b7eaSJeff Roberson 		 */
640ebcddc72SAlan Cox 		if (!vm_page_in_laundry(m) || m->object != object ||
64134d8b7eaSJeff Roberson 		    m->pindex != pindex || m->dirty == 0) {
64234d8b7eaSJeff Roberson 			error = ENXIO;
64334d8b7eaSJeff Roberson 			goto unlock_all;
64434d8b7eaSJeff Roberson 		}
64534d8b7eaSJeff Roberson 
64634d8b7eaSJeff Roberson 		/*
6479f5632e6SMark Johnston 		 * The page may have been busied while the object lock was
6489f5632e6SMark Johnston 		 * released.
64934d8b7eaSJeff Roberson 		 */
65063e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0) {
65134d8b7eaSJeff Roberson 			error = EBUSY;
65234d8b7eaSJeff Roberson 			goto unlock_all;
65334d8b7eaSJeff Roberson 		}
65434d8b7eaSJeff Roberson 	}
65534d8b7eaSJeff Roberson 
65634d8b7eaSJeff Roberson 	/*
657fee2a2faSMark Johnston 	 * Remove all writeable mappings, failing if the page is wired.
658fee2a2faSMark Johnston 	 */
659fee2a2faSMark Johnston 	if (!vm_page_try_remove_write(m)) {
66063e97555SJeff Roberson 		vm_page_xunbusy(m);
661fee2a2faSMark Johnston 		error = EBUSY;
662fee2a2faSMark Johnston 		goto unlock_all;
663fee2a2faSMark Johnston 	}
664fee2a2faSMark Johnston 
665fee2a2faSMark Johnston 	/*
66634d8b7eaSJeff Roberson 	 * If a page is dirty, then it is either being washed
66734d8b7eaSJeff Roberson 	 * (but not yet cleaned) or it is still in the
66834d8b7eaSJeff Roberson 	 * laundry.  If it is still in the laundry, then we
66934d8b7eaSJeff Roberson 	 * start the cleaning operation.
67034d8b7eaSJeff Roberson 	 */
671ebcddc72SAlan Cox 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
67234d8b7eaSJeff Roberson 		error = EIO;
67334d8b7eaSJeff Roberson 
67434d8b7eaSJeff Roberson unlock_all:
67534d8b7eaSJeff Roberson 	VM_OBJECT_WUNLOCK(object);
67634d8b7eaSJeff Roberson 
67734d8b7eaSJeff Roberson unlock_mp:
67834d8b7eaSJeff Roberson 	if (mp != NULL) {
67934d8b7eaSJeff Roberson 		if (vp != NULL)
68034d8b7eaSJeff Roberson 			vput(vp);
68134d8b7eaSJeff Roberson 		vm_object_deallocate(object);
68234d8b7eaSJeff Roberson 		vn_finished_write(mp);
68334d8b7eaSJeff Roberson 	}
68434d8b7eaSJeff Roberson 
68534d8b7eaSJeff Roberson 	return (error);
68634d8b7eaSJeff Roberson }
68734d8b7eaSJeff Roberson 
68834d8b7eaSJeff Roberson /*
689ebcddc72SAlan Cox  * Attempt to launder the specified number of pages.
690ebcddc72SAlan Cox  *
691ebcddc72SAlan Cox  * Returns the number of pages successfully laundered.
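 * During a background scan ("in_shortfall" is false), pages that are
 * reactivated rather than laundered are also credited against the target,
 * since activation is a valid way for a page to leave the laundry queue.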
692ebcddc72SAlan Cox  */
693ebcddc72SAlan Cox static int
694ebcddc72SAlan Cox vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
695ebcddc72SAlan Cox {
6965cd29d0fSMark Johnston 	struct scan_state ss;
697ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
698ebcddc72SAlan Cox 	vm_object_t object;
6995cd29d0fSMark Johnston 	vm_page_t m, marker;
700f3f38e25SMark Johnston 	vm_page_astate_t new, old;
701f3f38e25SMark Johnston 	int act_delta, error, numpagedout, queue, refs, starting_target;
702ebcddc72SAlan Cox 	int vnodes_skipped;
70360256604SMark Johnston 	bool pageout_ok;
704ebcddc72SAlan Cox 
7055cd29d0fSMark Johnston 	object = NULL;
706ebcddc72SAlan Cox 	starting_target = launder;
707ebcddc72SAlan Cox 	vnodes_skipped = 0;
708ebcddc72SAlan Cox 
709ebcddc72SAlan Cox 	/*
710b1fd102eSMark Johnston 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
711ebcddc72SAlan Cox 	 * once the target number of dirty pages have been laundered, or once
712ebcddc72SAlan Cox 	 * we've reached the end of the queue.  A single iteration of this loop
713ebcddc72SAlan Cox 	 * may cause more than one page to be laundered because of clustering.
714ebcddc72SAlan Cox 	 *
715b1fd102eSMark Johnston 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
716b1fd102eSMark Johnston 	 * swap devices are configured.
717ebcddc72SAlan Cox 	 */
718b1fd102eSMark Johnston 	if (atomic_load_acq_int(&swapdev_enabled))
71964b38930SMark Johnston 		queue = PQ_UNSWAPPABLE;
720b1fd102eSMark Johnston 	else
72164b38930SMark Johnston 		queue = PQ_LAUNDRY;
722ebcddc72SAlan Cox 
723b1fd102eSMark Johnston scan:
72464b38930SMark Johnston 	marker = &vmd->vmd_markers[queue];
7255cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[queue];
726ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
7275cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
7285cd29d0fSMark Johnston 	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
7295cd29d0fSMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
730ebcddc72SAlan Cox 			continue;
7315cd29d0fSMark Johnston 
7325cd29d0fSMark Johnston 		/*
733b7f30bffSMark Johnston 		 * Don't touch a page that was removed from the queue after the
734b7f30bffSMark Johnston 		 * page queue lock was released.  Otherwise, ensure that any
735b7f30bffSMark Johnston 		 * pending queue operations, such as dequeues for wired pages,
736b7f30bffSMark Johnston 		 * are handled.
7375cd29d0fSMark Johnston 		 */
738b7f30bffSMark Johnston 		if (vm_pageout_defer(m, queue, true))
739ebcddc72SAlan Cox 			continue;
740e8bcf696SMark Johnston 
7419f5632e6SMark Johnston 		/*
7429f5632e6SMark Johnston 		 * Lock the page's object.
7439f5632e6SMark Johnston 		 */
7449f5632e6SMark Johnston 		if (object == NULL || object != m->object) {
74560256604SMark Johnston 			if (object != NULL)
7465cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
74723ed568cSMateusz Guzik 			object = atomic_load_ptr(&m->object);
7489f5632e6SMark Johnston 			if (__predict_false(object == NULL))
7499f5632e6SMark Johnston 				/* The page is being freed by another thread. */
7509f5632e6SMark Johnston 				continue;
7519f5632e6SMark Johnston 
752e8bcf696SMark Johnston 			/* Depends on type-stability. */
75341fd4b94SMark Johnston 			VM_OBJECT_WLOCK(object);
7549f5632e6SMark Johnston 			if (__predict_false(m->object != object)) {
7559f5632e6SMark Johnston 				VM_OBJECT_WUNLOCK(object);
7569f5632e6SMark Johnston 				object = NULL;
75741fd4b94SMark Johnston 				continue;
7589f5632e6SMark Johnston 			}
7599f5632e6SMark Johnston 		}
7605cd29d0fSMark Johnston 
76163e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0)
7625cd29d0fSMark Johnston 			continue;
763ebcddc72SAlan Cox 
764ebcddc72SAlan Cox 		/*
765b7f30bffSMark Johnston 		 * Check for wirings now that we hold the object lock and have
7669f5632e6SMark Johnston 		 * exclusively busied the page.  If the page is mapped, it may
7679f5632e6SMark Johnston 		 * still be wired by pmap lookups.  The call to
768fee2a2faSMark Johnston 		 * vm_page_try_remove_all() below atomically checks for such
769fee2a2faSMark Johnston 		 * wirings and removes mappings.  If the page is unmapped, the
7709f5632e6SMark Johnston 		 * wire count is guaranteed not to increase after this check.
771fee2a2faSMark Johnston 		 */
7729f5632e6SMark Johnston 		if (__predict_false(vm_page_wired(m)))
773f3f38e25SMark Johnston 			goto skip_page;
774fee2a2faSMark Johnston 
775fee2a2faSMark Johnston 		/*
776ebcddc72SAlan Cox 		 * Invalid pages can be easily freed.  They cannot be
777ebcddc72SAlan Cox 		 * mapped; vm_page_free() asserts this.
778ebcddc72SAlan Cox 		 */
7790012f373SJeff Roberson 		if (vm_page_none_valid(m))
780ebcddc72SAlan Cox 			goto free_page;
781ebcddc72SAlan Cox 
782b51927b7SKonstantin Belousov 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
783f3f38e25SMark Johnston 
784f3f38e25SMark Johnston 		for (old = vm_page_astate_load(m);;) {
785ebcddc72SAlan Cox 			/*
786f3f38e25SMark Johnston 			 * Check to see if the page has been removed from the
787f3f38e25SMark Johnston 			 * queue since the first such check.  Leave it alone if
788f3f38e25SMark Johnston 			 * so, discarding any references collected by
789f3f38e25SMark Johnston 			 * pmap_ts_referenced().
790ebcddc72SAlan Cox 			 */
791f3f38e25SMark Johnston 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
792f3f38e25SMark Johnston 				goto skip_page;
793f3f38e25SMark Johnston 
794f3f38e25SMark Johnston 			new = old;
795f3f38e25SMark Johnston 			act_delta = refs;
796f3f38e25SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
797f3f38e25SMark Johnston 				new.flags &= ~PGA_REFERENCED;
798d7aeb429SAlan Cox 				act_delta++;
799ebcddc72SAlan Cox 			}
800f3f38e25SMark Johnston 			if (act_delta == 0) {
801f3f38e25SMark Johnston 				;
802b51927b7SKonstantin Belousov 			} else if (object->ref_count != 0) {
803ebcddc72SAlan Cox 				/*
804f3f38e25SMark Johnston 				 * Increase the activation count if the page was
805f3f38e25SMark Johnston 				 * referenced while in the laundry queue.  This
806f3f38e25SMark Johnston 				 * makes it less likely that the page will be
807f3f38e25SMark Johnston 				 * returned prematurely to the laundry queue.
808e8bcf696SMark Johnston 				 */
809f3f38e25SMark Johnston 				new.act_count += ACT_ADVANCE +
810f3f38e25SMark Johnston 				    act_delta;
811f3f38e25SMark Johnston 				if (new.act_count > ACT_MAX)
812f3f38e25SMark Johnston 					new.act_count = ACT_MAX;
813f3f38e25SMark Johnston 
814f7607c30SMark Johnston 				new.flags &= ~PGA_QUEUE_OP_MASK;
815f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
816f3f38e25SMark Johnston 				new.queue = PQ_ACTIVE;
817f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
818f3f38e25SMark Johnston 					continue;
819e8bcf696SMark Johnston 
820e8bcf696SMark Johnston 				/*
821e8bcf696SMark Johnston 				 * If this was a background laundering, count
822e8bcf696SMark Johnston 				 * activated pages towards our target.  The
823e8bcf696SMark Johnston 				 * purpose of background laundering is to ensure
824e8bcf696SMark Johnston 				 * that pages are eventually cycled through the
825e8bcf696SMark Johnston 				 * laundry queue, and an activation is a valid
826e8bcf696SMark Johnston 				 * way out.
827ebcddc72SAlan Cox 				 */
828ebcddc72SAlan Cox 				if (!in_shortfall)
829ebcddc72SAlan Cox 					launder--;
830f3f38e25SMark Johnston 				VM_CNT_INC(v_reactivated);
831f3f38e25SMark Johnston 				goto skip_page;
8325cd29d0fSMark Johnston 			} else if ((object->flags & OBJ_DEAD) == 0) {
833f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
834f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
835e8bcf696SMark Johnston 					continue;
836f3f38e25SMark Johnston 				goto skip_page;
8375cd29d0fSMark Johnston 			}
838f3f38e25SMark Johnston 			break;
839ebcddc72SAlan Cox 		}
840ebcddc72SAlan Cox 
841ebcddc72SAlan Cox 		/*
842ebcddc72SAlan Cox 		 * If the page appears to be clean at the machine-independent
843ebcddc72SAlan Cox 		 * layer, then remove all of its mappings from the pmap in
844ebcddc72SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
845ebcddc72SAlan Cox 		 * mappings allow write access, then the page may still be
846ebcddc72SAlan Cox 		 * modified until the last of those mappings are removed.
847ebcddc72SAlan Cox 		 */
848ebcddc72SAlan Cox 		if (object->ref_count != 0) {
849ebcddc72SAlan Cox 			vm_page_test_dirty(m);
8509f5632e6SMark Johnston 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
851f3f38e25SMark Johnston 				goto skip_page;
852fee2a2faSMark Johnston 		}
853ebcddc72SAlan Cox 
854ebcddc72SAlan Cox 		/*
855ebcddc72SAlan Cox 		 * Clean pages are freed, and dirty pages are paged out unless
856ebcddc72SAlan Cox 		 * they belong to a dead object.  Requeueing dirty pages from
857ebcddc72SAlan Cox 		 * dead objects is pointless, as they are being paged out and
858ebcddc72SAlan Cox 		 * freed by the thread that destroyed the object.
859ebcddc72SAlan Cox 		 */
860ebcddc72SAlan Cox 		if (m->dirty == 0) {
861ebcddc72SAlan Cox free_page:
 8639f5632e6SMark Johnston 			 * Now that we are guaranteed that no other threads are
8639f5632e6SMark Johnston 			 * Now we are guaranteed that no other threads are
8649f5632e6SMark Johnston 			 * manipulating the page, check for a last-second
8659f5632e6SMark Johnston 			 * reference.
8669f5632e6SMark Johnston 			 */
8679f5632e6SMark Johnston 			if (vm_pageout_defer(m, queue, true))
8689f5632e6SMark Johnston 				goto skip_page;
869ebcddc72SAlan Cox 			vm_page_free(m);
87083c9dea1SGleb Smirnoff 			VM_CNT_INC(v_dfree);
871ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0) {
8720cb2610eSMark Johnston 			if ((object->flags & OBJ_SWAP) != 0)
8730cb2610eSMark Johnston 				pageout_ok = disable_swap_pageouts == 0;
874ebcddc72SAlan Cox 			else
875ebcddc72SAlan Cox 				pageout_ok = true;
876ebcddc72SAlan Cox 			if (!pageout_ok) {
877f3f38e25SMark Johnston 				vm_page_launder(m);
878f3f38e25SMark Johnston 				goto skip_page;
879ebcddc72SAlan Cox 			}
880ebcddc72SAlan Cox 
881ebcddc72SAlan Cox 			/*
882ebcddc72SAlan Cox 			 * Form a cluster with adjacent, dirty pages from the
883ebcddc72SAlan Cox 			 * same object, and page out that entire cluster.
884ebcddc72SAlan Cox 			 *
885ebcddc72SAlan Cox 			 * The adjacent, dirty pages must also be in the
886ebcddc72SAlan Cox 			 * laundry.  However, their mappings are not checked
887ebcddc72SAlan Cox 			 * for new references.  Consequently, a recently
888ebcddc72SAlan Cox 			 * referenced page may be paged out.  However, that
889ebcddc72SAlan Cox 			 * page will not be prematurely reclaimed.  After page
890ebcddc72SAlan Cox 			 * out, the page will be placed in the inactive queue,
891ebcddc72SAlan Cox 			 * where any new references will be detected and the
892ebcddc72SAlan Cox 			 * page reactivated.
893ebcddc72SAlan Cox 			 */
894ebcddc72SAlan Cox 			error = vm_pageout_clean(m, &numpagedout);
895ebcddc72SAlan Cox 			if (error == 0) {
896ebcddc72SAlan Cox 				launder -= numpagedout;
8975cd29d0fSMark Johnston 				ss.scanned += numpagedout;
898ebcddc72SAlan Cox 			} else if (error == EDEADLK) {
899ebcddc72SAlan Cox 				pageout_lock_miss++;
900ebcddc72SAlan Cox 				vnodes_skipped++;
901ebcddc72SAlan Cox 			}
90260256604SMark Johnston 			object = NULL;
903f3f38e25SMark Johnston 		} else {
904f3f38e25SMark Johnston skip_page:
90563e97555SJeff Roberson 			vm_page_xunbusy(m);
906e8bcf696SMark Johnston 		}
907f3f38e25SMark Johnston 	}
90846e39081SMark Johnston 	if (object != NULL) {
909ebcddc72SAlan Cox 		VM_OBJECT_WUNLOCK(object);
91046e39081SMark Johnston 		object = NULL;
91146e39081SMark Johnston 	}
912ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
9135cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
914ebcddc72SAlan Cox 	vm_pagequeue_unlock(pq);
915ebcddc72SAlan Cox 
91664b38930SMark Johnston 	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
91764b38930SMark Johnston 		queue = PQ_LAUNDRY;
918b1fd102eSMark Johnston 		goto scan;
919b1fd102eSMark Johnston 	}
920b1fd102eSMark Johnston 
 922ebcddc72SAlan Cox 	 * Wake up the sync daemon if we skipped a vnode in a writeable object
922ebcddc72SAlan Cox 	 * Wakeup the sync daemon if we skipped a vnode in a writeable object
923ebcddc72SAlan Cox 	 * and we didn't launder enough pages.
924ebcddc72SAlan Cox 	 */
925ebcddc72SAlan Cox 	if (vnodes_skipped > 0 && launder > 0)
926ebcddc72SAlan Cox 		(void)speedup_syncer();
927ebcddc72SAlan Cox 
928ebcddc72SAlan Cox 	return (starting_target - launder);
929ebcddc72SAlan Cox }
930ebcddc72SAlan Cox 
931ebcddc72SAlan Cox /*
932ebcddc72SAlan Cox  * Compute the integer square root.
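 * This is the classic digit-by-digit (base-4) method: "bit" starts at the
 * highest even bit position at or below num's most significant bit, and each
 * iteration determines one bit of the root.  For example, isqrt(27) is 5 and
 * isqrt(1 << 20) is 1 << 10.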
933ebcddc72SAlan Cox  */
934ebcddc72SAlan Cox static u_int
935ebcddc72SAlan Cox isqrt(u_int num)
936ebcddc72SAlan Cox {
937ebcddc72SAlan Cox 	u_int bit, root, tmp;
938ebcddc72SAlan Cox 
93964f8d257SDoug Moore 	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
940ebcddc72SAlan Cox 	root = 0;
941ebcddc72SAlan Cox 	while (bit != 0) {
942ebcddc72SAlan Cox 		tmp = root + bit;
943ebcddc72SAlan Cox 		root >>= 1;
944ebcddc72SAlan Cox 		if (num >= tmp) {
945ebcddc72SAlan Cox 			num -= tmp;
946ebcddc72SAlan Cox 			root += bit;
947ebcddc72SAlan Cox 		}
948ebcddc72SAlan Cox 		bit >>= 2;
949ebcddc72SAlan Cox 	}
950ebcddc72SAlan Cox 	return (root);
951ebcddc72SAlan Cox }
952ebcddc72SAlan Cox 
953ebcddc72SAlan Cox /*
 954ebcddc72SAlan Cox  * Perform the work of the laundry thread: periodically wake up, determine
 955ebcddc72SAlan Cox  * whether any pages need to be laundered and, if so, compute how many pages
 956ebcddc72SAlan Cox  * to launder and launder them.
957ebcddc72SAlan Cox  */
958ebcddc72SAlan Cox static void
959ebcddc72SAlan Cox vm_pageout_laundry_worker(void *arg)
960ebcddc72SAlan Cox {
961e2068d0bSJeff Roberson 	struct vm_domain *vmd;
962ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
96360684862SMark Johnston 	uint64_t nclean, ndirty, nfreed;
964e2068d0bSJeff Roberson 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
965ebcddc72SAlan Cox 	bool in_shortfall;
966ebcddc72SAlan Cox 
967e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
968e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
969e2068d0bSJeff Roberson 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
970e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
971ebcddc72SAlan Cox 
972ebcddc72SAlan Cox 	shortfall = 0;
973ebcddc72SAlan Cox 	in_shortfall = false;
974ebcddc72SAlan Cox 	shortfall_cycle = 0;
9758002c3a4SMark Johnston 	last_target = target = 0;
97660684862SMark Johnston 	nfreed = 0;
977ebcddc72SAlan Cox 
978ebcddc72SAlan Cox 	/*
979b1fd102eSMark Johnston 	 * Calls to these handlers are serialized by the swap syscall lock.
980b1fd102eSMark Johnston 	 */
981e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
982b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
983e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
984b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
985b1fd102eSMark Johnston 
986b1fd102eSMark Johnston 	/*
987ebcddc72SAlan Cox 	 * The pageout laundry worker is never done, so loop forever.
988ebcddc72SAlan Cox 	 */
989ebcddc72SAlan Cox 	for (;;) {
990ebcddc72SAlan Cox 		KASSERT(target >= 0, ("negative target %d", target));
991ebcddc72SAlan Cox 		KASSERT(shortfall_cycle >= 0,
992ebcddc72SAlan Cox 		    ("negative cycle %d", shortfall_cycle));
993ebcddc72SAlan Cox 		launder = 0;
994ebcddc72SAlan Cox 
995ebcddc72SAlan Cox 		/*
996ebcddc72SAlan Cox 		 * First determine whether we need to launder pages to meet a
997ebcddc72SAlan Cox 		 * shortage of free pages.
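		 * A shortfall target is spread over VM_LAUNDER_RATE /
		 * VM_INACT_SCAN_RATE consecutive wakeups, i.e., over roughly
		 * one inactive queue scan period.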
998ebcddc72SAlan Cox 		 */
999ebcddc72SAlan Cox 		if (shortfall > 0) {
1000ebcddc72SAlan Cox 			in_shortfall = true;
1001ebcddc72SAlan Cox 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1002ebcddc72SAlan Cox 			target = shortfall;
1003ebcddc72SAlan Cox 		} else if (!in_shortfall)
1004ebcddc72SAlan Cox 			goto trybackground;
1005e2068d0bSJeff Roberson 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1006ebcddc72SAlan Cox 			/*
1007ebcddc72SAlan Cox 			 * We recently entered shortfall and began laundering
1008ebcddc72SAlan Cox 			 * pages.  If we have completed that laundering run
1009ebcddc72SAlan Cox 			 * (and we are no longer in shortfall) or we have met
1010ebcddc72SAlan Cox 			 * our laundry target through other activity, then we
1011ebcddc72SAlan Cox 			 * can stop laundering pages.
1012ebcddc72SAlan Cox 			 */
1013ebcddc72SAlan Cox 			in_shortfall = false;
1014ebcddc72SAlan Cox 			target = 0;
1015ebcddc72SAlan Cox 			goto trybackground;
1016ebcddc72SAlan Cox 		}
1017ebcddc72SAlan Cox 		launder = target / shortfall_cycle--;
1018ebcddc72SAlan Cox 		goto dolaundry;
1019ebcddc72SAlan Cox 
1020ebcddc72SAlan Cox 		/*
1021ebcddc72SAlan Cox 		 * There's no immediate need to launder any pages; see if we
1022ebcddc72SAlan Cox 		 * meet the conditions to perform background laundering:
1023ebcddc72SAlan Cox 		 *
1024ebcddc72SAlan Cox 		 * 1. The ratio of dirty to clean inactive pages exceeds the
102560684862SMark Johnston 		 *    background laundering threshold, or
1026ebcddc72SAlan Cox 		 * 2. we haven't yet reached the target of the current
1027ebcddc72SAlan Cox 		 *    background laundering run.
1028ebcddc72SAlan Cox 		 *
1029ebcddc72SAlan Cox 		 * The background laundering threshold is not a constant.
1030ebcddc72SAlan Cox 		 * Instead, it is a slowly growing function of the number of
103160684862SMark Johnston 		 * clean pages freed by the page daemon since the last
103260684862SMark Johnston 		 * background laundering.  Thus, as the ratio of dirty to
103360684862SMark Johnston 		 * clean inactive pages grows, the amount of memory pressure
1034c098768eSMark Johnston 		 * required to trigger laundering decreases.  We ensure
1035c098768eSMark Johnston 		 * that the threshold is non-zero after an inactive queue
1036c098768eSMark Johnston 		 * scan, even if that scan failed to free a single clean page.
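		 * For example, immediately after a background run (nfreed is
		 * 0), the multiplier below is 1 and laundering begins once
		 * the laundry queue is at least as large as the pool of clean
		 * pages (free plus inactive).  The multiplier then grows
		 * roughly as the square root of nfreed scaled by
		 * (free_target - free_min), so progressively more dirty pages
		 * are required to trigger another background run.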
1037ebcddc72SAlan Cox 		 */
1038ebcddc72SAlan Cox trybackground:
1039e2068d0bSJeff Roberson 		nclean = vmd->vmd_free_count +
1040e2068d0bSJeff Roberson 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1041e2068d0bSJeff Roberson 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1042c098768eSMark Johnston 		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1043c098768eSMark Johnston 		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1044e2068d0bSJeff Roberson 			target = vmd->vmd_background_launder_target;
1045ebcddc72SAlan Cox 		}
1046ebcddc72SAlan Cox 
1047ebcddc72SAlan Cox 		/*
1048ebcddc72SAlan Cox 		 * We have a non-zero background laundering target.  If we've
1049ebcddc72SAlan Cox 		 * laundered up to our maximum without observing a page daemon
1050cb35676eSMark Johnston 		 * request, just stop.  This is a safety belt that ensures we
1051ebcddc72SAlan Cox 		 * don't launder an excessive amount if memory pressure is low
1052ebcddc72SAlan Cox 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1053ebcddc72SAlan Cox 		 * proceed at the background laundering rate.
1054ebcddc72SAlan Cox 		 */
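		/*
		 * Note that "launder" is a per-wakeup page count: the pause
		 * below sleeps for hz / VM_LAUNDER_RATE ticks between passes,
		 * so the configured background rate is spread over
		 * VM_LAUNDER_RATE passes per second, and a single pass never
		 * launders more than the remaining target.
		 */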
1055ebcddc72SAlan Cox 		if (target > 0) {
105660684862SMark Johnston 			if (nfreed > 0) {
105760684862SMark Johnston 				nfreed = 0;
1058ebcddc72SAlan Cox 				last_target = target;
1059ebcddc72SAlan Cox 			} else if (last_target - target >=
1060ebcddc72SAlan Cox 			    vm_background_launder_max * PAGE_SIZE / 1024) {
1061ebcddc72SAlan Cox 				target = 0;
1062ebcddc72SAlan Cox 			}
1063ebcddc72SAlan Cox 			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1064ebcddc72SAlan Cox 			launder /= VM_LAUNDER_RATE;
1065ebcddc72SAlan Cox 			if (launder > target)
1066ebcddc72SAlan Cox 				launder = target;
1067ebcddc72SAlan Cox 		}
1068ebcddc72SAlan Cox 
1069ebcddc72SAlan Cox dolaundry:
1070ebcddc72SAlan Cox 		if (launder > 0) {
1071ebcddc72SAlan Cox 			/*
1072ebcddc72SAlan Cox 			 * Because of I/O clustering, the number of laundered
1073ebcddc72SAlan Cox 			 * pages could exceed "target" by the maximum size of
1074ebcddc72SAlan Cox 			 * a cluster minus one.
1075ebcddc72SAlan Cox 			 */
1076e2068d0bSJeff Roberson 			target -= min(vm_pageout_launder(vmd, launder,
1077ebcddc72SAlan Cox 			    in_shortfall), target);
1078ebcddc72SAlan Cox 			pause("laundp", hz / VM_LAUNDER_RATE);
1079ebcddc72SAlan Cox 		}
1080ebcddc72SAlan Cox 
1081ebcddc72SAlan Cox 		/*
1082ebcddc72SAlan Cox 		 * If we're not currently laundering pages and the page daemon
1083ebcddc72SAlan Cox 		 * hasn't posted a new request, sleep until the page daemon
1084ebcddc72SAlan Cox 		 * kicks us.
1085ebcddc72SAlan Cox 		 */
1086ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1087e2068d0bSJeff Roberson 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1088e2068d0bSJeff Roberson 			(void)mtx_sleep(&vmd->vmd_laundry_request,
1089ebcddc72SAlan Cox 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1090ebcddc72SAlan Cox 
1091ebcddc72SAlan Cox 		/*
1092ebcddc72SAlan Cox 		 * If the pagedaemon has indicated that it's in shortfall, start
1093ebcddc72SAlan Cox 		 * a shortfall laundering unless we're already in the middle of
1094ebcddc72SAlan Cox 		 * one.  This may preempt a background laundering.
1095ebcddc72SAlan Cox 		 */
1096e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1097ebcddc72SAlan Cox 		    (!in_shortfall || shortfall_cycle == 0)) {
1098e2068d0bSJeff Roberson 			shortfall = vm_laundry_target(vmd) +
1099e2068d0bSJeff Roberson 			    vmd->vmd_pageout_deficit;
1100ebcddc72SAlan Cox 			target = 0;
1101ebcddc72SAlan Cox 		} else
1102ebcddc72SAlan Cox 			shortfall = 0;
1103ebcddc72SAlan Cox 
1104ebcddc72SAlan Cox 		if (target == 0)
1105e2068d0bSJeff Roberson 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
110660684862SMark Johnston 		nfreed += vmd->vmd_clean_pages_freed;
110760684862SMark Johnston 		vmd->vmd_clean_pages_freed = 0;
1108ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1109ebcddc72SAlan Cox 	}
1110ebcddc72SAlan Cox }
1111ebcddc72SAlan Cox 
1112be37ee79SMark Johnston /*
1113be37ee79SMark Johnston  * Compute the number of pages we want to try to move from the
1114be37ee79SMark Johnston  * active queue to either the inactive or laundry queue.
1115be37ee79SMark Johnston  *
11167bb4634eSMark Johnston  * When scanning active pages during a shortage, we make clean pages
11177bb4634eSMark Johnston  * count more heavily towards the page shortage than dirty pages.
11187bb4634eSMark Johnston  * This is because dirty pages must be laundered before they can be
11197bb4634eSMark Johnston  * reused and thus have less utility when attempting to quickly
11207bb4634eSMark Johnston  * alleviate a free page shortage.  However, this weighting also
11217bb4634eSMark Johnston  * causes the scan to deactivate dirty pages more aggressively,
11227bb4634eSMark Johnston  * improving the effectiveness of clustering.
1123be37ee79SMark Johnston  */
1124be37ee79SMark Johnston static int
11257bb4634eSMark Johnston vm_pageout_active_target(struct vm_domain *vmd)
1126be37ee79SMark Johnston {
1127be37ee79SMark Johnston 	int shortage;
1128be37ee79SMark Johnston 
1129be37ee79SMark Johnston 	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1130be37ee79SMark Johnston 	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1131be37ee79SMark Johnston 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
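	/*
	 * Scale the result so that each clean page deactivated by the
	 * active scan counts for act_scan_laundry_weight units of the
	 * shortage, while each dirty page moved to the laundry counts for
	 * one unit, matching the ps_delta values used in
	 * vm_pageout_scan_active().
	 */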
1132be37ee79SMark Johnston 	shortage *= act_scan_laundry_weight;
1133be37ee79SMark Johnston 	return (shortage);
1134be37ee79SMark Johnston }
1135be37ee79SMark Johnston 
1136be37ee79SMark Johnston /*
1137be37ee79SMark Johnston  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1138be37ee79SMark Johnston  * small portion of the queue in order to maintain quasi-LRU.
1139be37ee79SMark Johnston  */
1140be37ee79SMark Johnston static void
1141be37ee79SMark Johnston vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1142be37ee79SMark Johnston {
1143be37ee79SMark Johnston 	struct scan_state ss;
1144fee2a2faSMark Johnston 	vm_object_t object;
1145be37ee79SMark Johnston 	vm_page_t m, marker;
1146be37ee79SMark Johnston 	struct vm_pagequeue *pq;
1147f3f38e25SMark Johnston 	vm_page_astate_t old, new;
1148be37ee79SMark Johnston 	long min_scan;
1149f3f38e25SMark Johnston 	int act_delta, max_scan, ps_delta, refs, scan_tick;
1150f3f38e25SMark Johnston 	uint8_t nqueue;
1151be37ee79SMark Johnston 
1152be37ee79SMark Johnston 	marker = &vmd->vmd_markers[PQ_ACTIVE];
1153be37ee79SMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1154be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
1155be37ee79SMark Johnston 
1156be37ee79SMark Johnston 	/*
1157be37ee79SMark Johnston 	 * If we're just idle polling, attempt to visit every
1158be37ee79SMark Johnston 	 * active page within 'update_period' seconds.
1159be37ee79SMark Johnston 	 */
1160be37ee79SMark Johnston 	scan_tick = ticks;
1161be37ee79SMark Johnston 	if (vm_pageout_update_period != 0) {
1162be37ee79SMark Johnston 		min_scan = pq->pq_cnt;
1163be37ee79SMark Johnston 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1164be37ee79SMark Johnston 		min_scan /= hz * vm_pageout_update_period;
1165be37ee79SMark Johnston 	} else
1166be37ee79SMark Johnston 		min_scan = 0;
1167be37ee79SMark Johnston 	if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1168be37ee79SMark Johnston 		vmd->vmd_last_active_scan = scan_tick;
1169be37ee79SMark Johnston 
1170be37ee79SMark Johnston 	/*
1171be37ee79SMark Johnston 	 * Scan the active queue for pages that can be deactivated.  Update
1172be37ee79SMark Johnston 	 * the per-page activity counter and use it to identify deactivation
1173be37ee79SMark Johnston 	 * candidates.  Held pages may be deactivated.
1174be37ee79SMark Johnston 	 *
1175be37ee79SMark Johnston 	 * To avoid requeuing each page that remains in the active queue, we
11767bb4634eSMark Johnston 	 * implement the CLOCK algorithm.  To keep the implementation of the
11777bb4634eSMark Johnston 	 * enqueue operation consistent for all page queues, we use two hands,
11787bb4634eSMark Johnston 	 * represented by marker pages. Scans begin at the first hand, which
11797bb4634eSMark Johnston 	 * precedes the second hand in the queue.  When the two hands meet,
11807bb4634eSMark Johnston 	 * they are moved back to the head and tail of the queue, respectively,
11817bb4634eSMark Johnston 	 * and scanning resumes.
1182be37ee79SMark Johnston 	 */
1183be37ee79SMark Johnston 	max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1184be37ee79SMark Johnston act_scan:
1185be37ee79SMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1186be37ee79SMark Johnston 	while ((m = vm_pageout_next(&ss, false)) != NULL) {
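		/*
		 * Reaching the second hand means that the scan has wrapped:
		 * move both hands back to the head and tail of the queue and
		 * resume scanning from the first hand, as described above.
		 */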
1187be37ee79SMark Johnston 		if (__predict_false(m == &vmd->vmd_clock[1])) {
1188be37ee79SMark Johnston 			vm_pagequeue_lock(pq);
1189be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1190be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1191be37ee79SMark Johnston 			TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1192be37ee79SMark Johnston 			    plinks.q);
1193be37ee79SMark Johnston 			TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1194be37ee79SMark Johnston 			    plinks.q);
1195be37ee79SMark Johnston 			max_scan -= ss.scanned;
1196be37ee79SMark Johnston 			vm_pageout_end_scan(&ss);
1197be37ee79SMark Johnston 			goto act_scan;
1198be37ee79SMark Johnston 		}
1199be37ee79SMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
1200be37ee79SMark Johnston 			continue;
1201be37ee79SMark Johnston 
1202e8bcf696SMark Johnston 		/*
1203b7f30bffSMark Johnston 		 * Don't touch a page that was removed from the queue after the
1204b7f30bffSMark Johnston 		 * page queue lock was released.  Otherwise, ensure that any
1205b7f30bffSMark Johnston 		 * pending queue operations, such as dequeues for wired pages,
1206b7f30bffSMark Johnston 		 * are handled.
1207e8bcf696SMark Johnston 		 */
1208b7f30bffSMark Johnston 		if (vm_pageout_defer(m, PQ_ACTIVE, true))
1209e8bcf696SMark Johnston 			continue;
1210e8bcf696SMark Johnston 
1211e8bcf696SMark Johnston 		/*
1212e8bcf696SMark Johnston 		 * A page's object pointer may be set to NULL before
1213e8bcf696SMark Johnston 		 * the object lock is acquired.
1214e8bcf696SMark Johnston 		 */
121523ed568cSMateusz Guzik 		object = atomic_load_ptr(&m->object);
1216fee2a2faSMark Johnston 		if (__predict_false(object == NULL))
1217fee2a2faSMark Johnston 			/*
1218fee2a2faSMark Johnston 			 * The page has been removed from its object.
1219fee2a2faSMark Johnston 			 */
1220fee2a2faSMark Johnston 			continue;
1221fee2a2faSMark Johnston 
1222f3f38e25SMark Johnston 		/* Deferred free of swap space. */
1223f3f38e25SMark Johnston 		if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
1224f3f38e25SMark Johnston 		    VM_OBJECT_TRYWLOCK(object)) {
1225f3f38e25SMark Johnston 			if (m->object == object)
1226f3f38e25SMark Johnston 				vm_pager_page_unswapped(m);
1227f3f38e25SMark Johnston 			VM_OBJECT_WUNLOCK(object);
1228f3f38e25SMark Johnston 		}
1229f3f38e25SMark Johnston 
1230fee2a2faSMark Johnston 		/*
1231be37ee79SMark Johnston 		 * Check to see "how much" the page has been used.
1232d7aeb429SAlan Cox 		 *
1233d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1234d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
1235d7aeb429SAlan Cox 		 * observed here and now.
1236b51927b7SKonstantin Belousov 		 *
1237b51927b7SKonstantin Belousov 		 * Perform an unsynchronized object ref count check.  While
1238b51927b7SKonstantin Belousov 		 * the page lock ensures that the page is not reallocated to
1239b51927b7SKonstantin Belousov 		 * another object, in particular, one with unmanaged mappings
1240b51927b7SKonstantin Belousov 		 * that cannot support pmap_ts_referenced(), two races are,
1241b51927b7SKonstantin Belousov 		 * nonetheless, possible:
1242b51927b7SKonstantin Belousov 		 * 1) The count was transitioning to zero, but we saw a non-
1243b51927b7SKonstantin Belousov 		 *    zero value.  pmap_ts_referenced() will return zero
1244b51927b7SKonstantin Belousov 		 *    because the page is not mapped.
1245b51927b7SKonstantin Belousov 		 * 2) The count was transitioning to one, but we saw zero.
1246b51927b7SKonstantin Belousov 		 *    This race delays the detection of a new reference.  At
1247b51927b7SKonstantin Belousov 		 *    worst, we will deactivate and reactivate the page.
1248be37ee79SMark Johnston 		 */
1249b51927b7SKonstantin Belousov 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1250be37ee79SMark Johnston 
1251f3f38e25SMark Johnston 		old = vm_page_astate_load(m);
1252f3f38e25SMark Johnston 		do {
1253f3f38e25SMark Johnston 			/*
1254f3f38e25SMark Johnston 			 * Check to see if the page has been removed from the
1255f3f38e25SMark Johnston 			 * queue since the first such check.  Leave it alone if
1256f3f38e25SMark Johnston 			 * so, discarding any references collected by
1257f3f38e25SMark Johnston 			 * pmap_ts_referenced().
1258f3f38e25SMark Johnston 			 */
1259609de97eSEric van Gyzen 			if (__predict_false(_vm_page_queue(old) == PQ_NONE)) {
1260609de97eSEric van Gyzen 				ps_delta = 0;
1261f3f38e25SMark Johnston 				break;
1262609de97eSEric van Gyzen 			}
1263a8081778SJeff Roberson 
1264be37ee79SMark Johnston 			/*
1265be37ee79SMark Johnston 			 * Advance or decay the act_count based on recent usage.
1266be37ee79SMark Johnston 			 */
1267f3f38e25SMark Johnston 			new = old;
1268f3f38e25SMark Johnston 			act_delta = refs;
1269f3f38e25SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
1270f3f38e25SMark Johnston 				new.flags &= ~PGA_REFERENCED;
1271f3f38e25SMark Johnston 				act_delta++;
1272f3f38e25SMark Johnston 			}
1273be37ee79SMark Johnston 			if (act_delta != 0) {
1274f3f38e25SMark Johnston 				new.act_count += ACT_ADVANCE + act_delta;
1275f3f38e25SMark Johnston 				if (new.act_count > ACT_MAX)
1276f3f38e25SMark Johnston 					new.act_count = ACT_MAX;
1277f3f38e25SMark Johnston 			} else {
1278f3f38e25SMark Johnston 				new.act_count -= min(new.act_count,
1279f3f38e25SMark Johnston 				    ACT_DECLINE);
1280f3f38e25SMark Johnston 			}
1281be37ee79SMark Johnston 
1282f3f38e25SMark Johnston 			if (new.act_count > 0) {
1283be37ee79SMark Johnston 				/*
1284f3f38e25SMark Johnston 				 * Adjust the activation count and keep the page
1285f3f38e25SMark Johnston 				 * in the active queue.  The count might be left
1286f3f38e25SMark Johnston 				 * unchanged if it is saturated.  The page may
1287f3f38e25SMark Johnston 				 * have been moved to a different queue since we
1288f3f38e25SMark Johnston 				 * started the scan, in which case we move it
1289f3f38e25SMark Johnston 				 * back.
1290be37ee79SMark Johnston 				 */
1291f3f38e25SMark Johnston 				ps_delta = 0;
1292f3f38e25SMark Johnston 				if (old.queue != PQ_ACTIVE) {
1293f7607c30SMark Johnston 					new.flags &= ~PGA_QUEUE_OP_MASK;
1294f7607c30SMark Johnston 					new.flags |= PGA_REQUEUE;
1295f7607c30SMark Johnston 					new.queue = PQ_ACTIVE;
1296f3f38e25SMark Johnston 				}
12977cdeaf33SMark Johnston 			} else {
1298be37ee79SMark Johnston 				/*
1299f3f38e25SMark Johnston 				 * When not short for inactive pages, let dirty
1300f3f38e25SMark Johnston 				 * pages go through the inactive queue before
1301f3f38e25SMark Johnston 				 * moving to the laundry queue.  This gives them
1302f3f38e25SMark Johnston 				 * some extra time to be reactivated,
1303f3f38e25SMark Johnston 				 * potentially avoiding an expensive pageout.
1304f3f38e25SMark Johnston 				 * However, during a page shortage, the inactive
1305f3f38e25SMark Johnston 				 * queue is necessarily small, and so dirty
1306f3f38e25SMark Johnston 				 * pages would only spend a trivial amount of
1307f3f38e25SMark Johnston 				 * time in the inactive queue.  Therefore, we
1308f3f38e25SMark Johnston 				 * might as well place them directly in the
1309f3f38e25SMark Johnston 				 * laundry queue to reduce queuing overhead.
1310f3f38e25SMark Johnston 				 *
1311be37ee79SMark Johnston 				 * Calling vm_page_test_dirty() here would
1312be37ee79SMark Johnston 				 * require acquisition of the object's write
1313be37ee79SMark Johnston 				 * lock.  However, during a page shortage,
1314f3f38e25SMark Johnston 				 * directing dirty pages into the laundry queue
1315f3f38e25SMark Johnston 				 * is only an optimization and not a
1316be37ee79SMark Johnston 				 * requirement.  Therefore, we simply rely on
1317f3f38e25SMark Johnston 				 * the opportunistic updates to the page's dirty
1318f3f38e25SMark Johnston 				 * field by the pmap.
1319be37ee79SMark Johnston 				 */
1320f3f38e25SMark Johnston 				if (page_shortage <= 0) {
1321f3f38e25SMark Johnston 					nqueue = PQ_INACTIVE;
1322f3f38e25SMark Johnston 					ps_delta = 0;
1323f3f38e25SMark Johnston 				} else if (m->dirty == 0) {
1324f3f38e25SMark Johnston 					nqueue = PQ_INACTIVE;
1325f3f38e25SMark Johnston 					ps_delta = act_scan_laundry_weight;
1326be37ee79SMark Johnston 				} else {
1327f3f38e25SMark Johnston 					nqueue = PQ_LAUNDRY;
1328f3f38e25SMark Johnston 					ps_delta = 1;
1329be37ee79SMark Johnston 				}
1330f3f38e25SMark Johnston 
1331f7607c30SMark Johnston 				new.flags &= ~PGA_QUEUE_OP_MASK;
1332f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
1333f3f38e25SMark Johnston 				new.queue = nqueue;
1334be37ee79SMark Johnston 			}
1335f3f38e25SMark Johnston 		} while (!vm_page_pqstate_commit(m, &old, new));
1336f3f38e25SMark Johnston 
1337f3f38e25SMark Johnston 		page_shortage -= ps_delta;
1338be37ee79SMark Johnston 	}
1339be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
1340be37ee79SMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1341be37ee79SMark Johnston 	TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1342be37ee79SMark Johnston 	vm_pageout_end_scan(&ss);
1343be37ee79SMark Johnston 	vm_pagequeue_unlock(pq);
1344be37ee79SMark Johnston }
1345be37ee79SMark Johnston 
13465cd29d0fSMark Johnston static int
1347f3f38e25SMark Johnston vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
1348f3f38e25SMark Johnston     vm_page_t m)
13495cd29d0fSMark Johnston {
1350f3f38e25SMark Johnston 	vm_page_astate_t as;
13515cd29d0fSMark Johnston 
1352f3f38e25SMark Johnston 	vm_pagequeue_assert_locked(pq);
1353f3f38e25SMark Johnston 
1354f3f38e25SMark Johnston 	as = vm_page_astate_load(m);
1355f3f38e25SMark Johnston 	if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
1356e8bcf696SMark Johnston 		return (0);
1357e8bcf696SMark Johnston 	vm_page_aflag_set(m, PGA_ENQUEUED);
1358f3f38e25SMark Johnston 	TAILQ_INSERT_BEFORE(marker, m, plinks.q);
13595cd29d0fSMark Johnston 	return (1);
13605cd29d0fSMark Johnston }
13615cd29d0fSMark Johnston 
13625cd29d0fSMark Johnston /*
13635cd29d0fSMark Johnston  * Re-add stuck pages to the inactive queue.  We will examine them again
13645cd29d0fSMark Johnston  * during the next scan.  If the queue state of a page has changed since
13655cd29d0fSMark Johnston  * it was physically removed from the page queue in
13665cd29d0fSMark Johnston  * vm_pageout_collect_batch(), don't do anything with that page.
13675cd29d0fSMark Johnston  */
13685cd29d0fSMark Johnston static void
13695cd29d0fSMark Johnston vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
13705cd29d0fSMark Johnston     vm_page_t m)
13715cd29d0fSMark Johnston {
13725cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
1373f3f38e25SMark Johnston 	vm_page_t marker;
13745cd29d0fSMark Johnston 	int delta;
13755cd29d0fSMark Johnston 
13765cd29d0fSMark Johnston 	delta = 0;
1377f3f38e25SMark Johnston 	marker = ss->marker;
13785cd29d0fSMark Johnston 	pq = ss->pq;
13795cd29d0fSMark Johnston 
13805cd29d0fSMark Johnston 	if (m != NULL) {
13811cac76c9SAndrew Gallatin 		if (vm_batchqueue_insert(bq, m) != 0)
13825cd29d0fSMark Johnston 			return;
13835cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
1384f3f38e25SMark Johnston 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
13855cd29d0fSMark Johnston 	} else
13865cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
13875cd29d0fSMark Johnston 	while ((m = vm_batchqueue_pop(bq)) != NULL)
1388f3f38e25SMark Johnston 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
13895cd29d0fSMark Johnston 	vm_pagequeue_cnt_add(pq, delta);
13905cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
13915cd29d0fSMark Johnston 	vm_batchqueue_init(bq);
13925cd29d0fSMark Johnston }
13935cd29d0fSMark Johnston 
13940292c54bSConrad Meyer static void
13950292c54bSConrad Meyer vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
1396df8bae1dSRodney W. Grimes {
13970292c54bSConrad Meyer 	struct timeval start, end;
13985cd29d0fSMark Johnston 	struct scan_state ss;
13995cd29d0fSMark Johnston 	struct vm_batchqueue rq;
14000292c54bSConrad Meyer 	struct vm_page marker_page;
14015cd29d0fSMark Johnston 	vm_page_t m, marker;
14028d220203SAlan Cox 	struct vm_pagequeue *pq;
1403df8bae1dSRodney W. Grimes 	vm_object_t object;
1404f3f38e25SMark Johnston 	vm_page_astate_t old, new;
14050292c54bSConrad Meyer 	int act_delta, addl_page_shortage, starting_page_shortage, refs;
14060292c54bSConrad Meyer 
14070292c54bSConrad Meyer 	object = NULL;
14080292c54bSConrad Meyer 	vm_batchqueue_init(&rq);
14090292c54bSConrad Meyer 	getmicrouptime(&start);
14100d94caffSDavid Greenman 
1411df8bae1dSRodney W. Grimes 	/*
141201f04471SMark Johnston 	 * The addl_page_shortage is an estimate of the number of temporarily
1413311e34e2SKonstantin Belousov 	 * stuck pages in the inactive queue.  In other words, the
1414449c2e92SKonstantin Belousov 	 * number of pages from the inactive count that should be
1415311e34e2SKonstantin Belousov 	 * discounted in setting the target for the active queue scan.
1416311e34e2SKonstantin Belousov 	 */
14179099545aSAlan Cox 	addl_page_shortage = 0;
14189099545aSAlan Cox 
14191c7c3c6aSMatthew Dillon 	/*
1420f095d1bbSAlan Cox 	 * Start scanning the inactive queue for pages that we can free.  The
1421f095d1bbSAlan Cox 	 * scan will stop when we reach the target or we have scanned the
14225cff1f4dSMark Johnston 	 * entire queue.  (Note that m->a.act_count is not used to make
1423f095d1bbSAlan Cox 	 * decisions for the inactive queue, only for the active queue.)
14248d220203SAlan Cox 	 */
14250292c54bSConrad Meyer 	starting_page_shortage = page_shortage;
14260292c54bSConrad Meyer 	marker = &marker_page;
14270292c54bSConrad Meyer 	vm_page_init_marker(marker, PQ_INACTIVE, 0);
14285cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
14298d220203SAlan Cox 	vm_pagequeue_lock(pq);
14305cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1431a216e311SRyan Libby 	while (page_shortage > 0) {
1432a216e311SRyan Libby 		/*
1433a216e311SRyan Libby 		 * If we need to refill the scan batch queue, release any
1434a216e311SRyan Libby 		 * optimistically held object lock.  This gives someone else a
1435a216e311SRyan Libby 		 * chance to grab the lock, and also avoids holding it while we
1436a216e311SRyan Libby 		 * do unrelated work.
1437a216e311SRyan Libby 		 */
1438a216e311SRyan Libby 		if (object != NULL && vm_batchqueue_empty(&ss.bq)) {
1439a216e311SRyan Libby 			VM_OBJECT_WUNLOCK(object);
1440a216e311SRyan Libby 			object = NULL;
1441a216e311SRyan Libby 		}
1442a216e311SRyan Libby 
1443a216e311SRyan Libby 		m = vm_pageout_next(&ss, true);
1444a216e311SRyan Libby 		if (m == NULL)
1445a216e311SRyan Libby 			break;
14465cd29d0fSMark Johnston 		KASSERT((m->flags & PG_MARKER) == 0,
14475cd29d0fSMark Johnston 		    ("marker page %p was dequeued", m));
1448df8bae1dSRodney W. Grimes 
1449936524aaSMatthew Dillon 		/*
1450b7f30bffSMark Johnston 		 * Don't touch a page that was removed from the queue after the
1451b7f30bffSMark Johnston 		 * page queue lock was released.  Otherwise, ensure that any
1452b7f30bffSMark Johnston 		 * pending queue operations, such as dequeues for wired pages,
1453b7f30bffSMark Johnston 		 * are handled.
1454936524aaSMatthew Dillon 		 */
1455b7f30bffSMark Johnston 		if (vm_pageout_defer(m, PQ_INACTIVE, false))
1456936524aaSMatthew Dillon 			continue;
1457e8bcf696SMark Johnston 
14589f5632e6SMark Johnston 		/*
14599f5632e6SMark Johnston 		 * Lock the page's object.
14609f5632e6SMark Johnston 		 */
14619f5632e6SMark Johnston 		if (object == NULL || object != m->object) {
146260256604SMark Johnston 			if (object != NULL)
14635cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
146423ed568cSMateusz Guzik 			object = atomic_load_ptr(&m->object);
14659f5632e6SMark Johnston 			if (__predict_false(object == NULL))
14669f5632e6SMark Johnston 				/* The page is being freed by another thread. */
14679f5632e6SMark Johnston 				continue;
14689f5632e6SMark Johnston 
1469e8bcf696SMark Johnston 			/* Depends on type-stability. */
147041fd4b94SMark Johnston 			VM_OBJECT_WLOCK(object);
14719f5632e6SMark Johnston 			if (__predict_false(m->object != object)) {
14729f5632e6SMark Johnston 				VM_OBJECT_WUNLOCK(object);
14739f5632e6SMark Johnston 				object = NULL;
14749f5632e6SMark Johnston 				goto reinsert;
147541fd4b94SMark Johnston 			}
147641fd4b94SMark Johnston 		}
14775cd29d0fSMark Johnston 
147863e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0) {
1479a3aeedabSAlan Cox 			/*
1480a3aeedabSAlan Cox 			 * Don't mess with busy pages.  Leave them at
1481a3aeedabSAlan Cox 			 * the front of the queue.  Most likely, they
1482a3aeedabSAlan Cox 			 * are being paged out and will leave the
1483a3aeedabSAlan Cox 			 * queue shortly after the scan finishes.  So,
1484a3aeedabSAlan Cox 			 * they ought to be discounted from the
1485a3aeedabSAlan Cox 			 * inactive count.
1486a3aeedabSAlan Cox 			 */
1487a3aeedabSAlan Cox 			addl_page_shortage++;
14885cd29d0fSMark Johnston 			goto reinsert;
148926f9a767SRodney W. Grimes 		}
149048cc2fc7SKonstantin Belousov 
1491a8081778SJeff Roberson 		/* Deferred free of swap space. */
1492a8081778SJeff Roberson 		if ((m->a.flags & PGA_SWAP_FREE) != 0)
1493a8081778SJeff Roberson 			vm_pager_page_unswapped(m);
1494a8081778SJeff Roberson 
149548cc2fc7SKonstantin Belousov 		/*
14969f5632e6SMark Johnston 		 * Check for wirings now that we hold the object lock and have
14979f5632e6SMark Johnston 		 * exclusively busied the page.  If the page is mapped, it may
14989f5632e6SMark Johnston 		 * still be wired by pmap lookups.  The call to
1499fee2a2faSMark Johnston 		 * vm_page_try_remove_all() below atomically checks for such
1500fee2a2faSMark Johnston 		 * wirings and removes mappings.  If the page is unmapped, the
15019f5632e6SMark Johnston 		 * wire count is guaranteed not to increase after this check.
1502fee2a2faSMark Johnston 		 */
15039f5632e6SMark Johnston 		if (__predict_false(vm_page_wired(m)))
1504f3f38e25SMark Johnston 			goto skip_page;
1505fee2a2faSMark Johnston 
1506fee2a2faSMark Johnston 		/*
15078748f58cSKonstantin Belousov 		 * Invalid pages can be easily freed. They cannot be
15088748f58cSKonstantin Belousov 		 * mapped, vm_page_free() asserts this.
1509776f729cSKonstantin Belousov 		 */
15100012f373SJeff Roberson 		if (vm_page_none_valid(m))
15118748f58cSKonstantin Belousov 			goto free_page;
1512776f729cSKonstantin Belousov 
1513b51927b7SKonstantin Belousov 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1514f3f38e25SMark Johnston 
1515f3f38e25SMark Johnston 		for (old = vm_page_astate_load(m);;) {
1516776f729cSKonstantin Belousov 			/*
1517f3f38e25SMark Johnston 			 * Check to see if the page has been removed from the
1518f3f38e25SMark Johnston 			 * queue since the first such check.  Leave it alone if
1519f3f38e25SMark Johnston 			 * so, discarding any references collected by
1520f3f38e25SMark Johnston 			 * pmap_ts_referenced().
15217e006499SJohn Dyson 			 */
1522f3f38e25SMark Johnston 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1523f3f38e25SMark Johnston 				goto skip_page;
1524f3f38e25SMark Johnston 
1525f3f38e25SMark Johnston 			new = old;
1526f3f38e25SMark Johnston 			act_delta = refs;
1527f3f38e25SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
1528f3f38e25SMark Johnston 				new.flags &= ~PGA_REFERENCED;
1529d7aeb429SAlan Cox 				act_delta++;
15302fe6e4d7SDavid Greenman 			}
1531f3f38e25SMark Johnston 			if (act_delta == 0) {
1532f3f38e25SMark Johnston 				;
1533b51927b7SKonstantin Belousov 			} else if (object->ref_count != 0) {
1534e8bcf696SMark Johnston 				/*
1535f3f38e25SMark Johnston 				 * Increase the activation count if the
1536f3f38e25SMark Johnston 				 * page was referenced while in the
1537f3f38e25SMark Johnston 				 * inactive queue.  This makes it less
1538f3f38e25SMark Johnston 				 * likely that the page will be returned
1539f3f38e25SMark Johnston 				 * prematurely to the inactive queue.
1540e8bcf696SMark Johnston 				 */
1541f3f38e25SMark Johnston 				new.act_count += ACT_ADVANCE +
1542f3f38e25SMark Johnston 				    act_delta;
1543f3f38e25SMark Johnston 				if (new.act_count > ACT_MAX)
1544f3f38e25SMark Johnston 					new.act_count = ACT_MAX;
1545f3f38e25SMark Johnston 
1546f7607c30SMark Johnston 				new.flags &= ~PGA_QUEUE_OP_MASK;
1547f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
1548f3f38e25SMark Johnston 				new.queue = PQ_ACTIVE;
1549f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
1550e8bcf696SMark Johnston 					continue;
1551f3f38e25SMark Johnston 
1552f3f38e25SMark Johnston 				VM_CNT_INC(v_reactivated);
1553f3f38e25SMark Johnston 				goto skip_page;
1554ebcddc72SAlan Cox 			} else if ((object->flags & OBJ_DEAD) == 0) {
1555f3f38e25SMark Johnston 				new.queue = PQ_INACTIVE;
1556f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
1557f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
1558f3f38e25SMark Johnston 					continue;
1559f3f38e25SMark Johnston 				goto skip_page;
1560ebcddc72SAlan Cox 			}
1561f3f38e25SMark Johnston 			break;
1562960810ccSAlan Cox 		}
156367bf6868SJohn Dyson 
15647e006499SJohn Dyson 		/*
15659fc4739dSAlan Cox 		 * If the page appears to be clean at the machine-independent
15669fc4739dSAlan Cox 		 * layer, then remove all of its mappings from the pmap in
1567a766ffd0SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
1568a766ffd0SAlan Cox 		 * mappings allow write access, then the page may still be
1569a766ffd0SAlan Cox 		 * modified until the last of those mappings are removed.
15707e006499SJohn Dyson 		 */
1571b51927b7SKonstantin Belousov 		if (object->ref_count != 0) {
15729fc4739dSAlan Cox 			vm_page_test_dirty(m);
15739f5632e6SMark Johnston 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
1574f3f38e25SMark Johnston 				goto skip_page;
1575fee2a2faSMark Johnston 		}
1576dcbcd518SBruce Evans 
15776989c456SAlan Cox 		/*
1578ebcddc72SAlan Cox 		 * Clean pages can be freed, but dirty pages must be sent back
1579ebcddc72SAlan Cox 		 * to the laundry, unless they belong to a dead object.
1580ebcddc72SAlan Cox 		 * Requeueing dirty pages from dead objects is pointless, as
1581ebcddc72SAlan Cox 		 * they are being paged out and freed by the thread that
1582ebcddc72SAlan Cox 		 * destroyed the object.
15836989c456SAlan Cox 		 */
1584ebcddc72SAlan Cox 		if (m->dirty == 0) {
15858748f58cSKonstantin Belousov free_page:
15865cd29d0fSMark Johnston 			/*
15879f5632e6SMark Johnston 			 * Now we are guaranteed that no other threads are
15889f5632e6SMark Johnston 			 * manipulating the page, check for a last-second
15899f5632e6SMark Johnston 			 * reference that would save it from doom.
15905cd29d0fSMark Johnston 			 */
15919f5632e6SMark Johnston 			if (vm_pageout_defer(m, PQ_INACTIVE, false))
15929f5632e6SMark Johnston 				goto skip_page;
15939f5632e6SMark Johnston 
15949f5632e6SMark Johnston 			/*
15959f5632e6SMark Johnston 			 * Because we dequeued the page and have already checked
15969f5632e6SMark Johnston 			 * for pending dequeue and enqueue requests, we can
15979f5632e6SMark Johnston 			 * safely disassociate the page from the inactive queue
15989f5632e6SMark Johnston 			 * without holding the queue lock.
15999f5632e6SMark Johnston 			 */
16005cff1f4dSMark Johnston 			m->a.queue = PQ_NONE;
160178afdce6SAlan Cox 			vm_page_free(m);
16025cd29d0fSMark Johnston 			page_shortage--;
160363e97555SJeff Roberson 			continue;
160463e97555SJeff Roberson 		}
160563e97555SJeff Roberson 		if ((object->flags & OBJ_DEAD) == 0)
1606ebcddc72SAlan Cox 			vm_page_launder(m);
1607f3f38e25SMark Johnston skip_page:
1608f3f38e25SMark Johnston 		vm_page_xunbusy(m);
16095cd29d0fSMark Johnston 		continue;
16105cd29d0fSMark Johnston reinsert:
16115cd29d0fSMark Johnston 		vm_pageout_reinsert_inactive(&ss, &rq, m);
16125cd29d0fSMark Johnston 	}
161360256604SMark Johnston 	if (object != NULL)
161489f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
16155cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &rq, NULL);
16165cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
16178d220203SAlan Cox 	vm_pagequeue_lock(pq);
16185cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
16198d220203SAlan Cox 	vm_pagequeue_unlock(pq);
162026f9a767SRodney W. Grimes 
16210292c54bSConrad Meyer 	/*
16220292c54bSConrad Meyer 	 * Record the additional shortage, the pages freed, and the scan duration.
16230292c54bSConrad Meyer 	 */
16240292c54bSConrad Meyer 	atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
16250292c54bSConrad Meyer 	getmicrouptime(&end);
16260292c54bSConrad Meyer 	timevalsub(&end, &start);
16270292c54bSConrad Meyer 	atomic_add_int(&vmd->vmd_inactive_us,
16280292c54bSConrad Meyer 	    end.tv_sec * 1000000 + end.tv_usec);
16290292c54bSConrad Meyer 	atomic_add_int(&vmd->vmd_inactive_freed,
16300292c54bSConrad Meyer 	    starting_page_shortage - page_shortage);
16310292c54bSConrad Meyer }
16320292c54bSConrad Meyer 
16330292c54bSConrad Meyer /*
16340292c54bSConrad Meyer  * Dispatch a number of inactive threads according to load and collect the
16352913cc46SMark Johnston  * results to present a coherent view of paging activity on this domain.
16360292c54bSConrad Meyer  */
16370292c54bSConrad Meyer static int
16380292c54bSConrad Meyer vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
16390292c54bSConrad Meyer {
16402913cc46SMark Johnston 	u_int freed, pps, slop, threads, us;
16410292c54bSConrad Meyer 
16420292c54bSConrad Meyer 	vmd->vmd_inactive_shortage = shortage;
16432913cc46SMark Johnston 	slop = 0;
16440292c54bSConrad Meyer 
16450292c54bSConrad Meyer 	/*
16460292c54bSConrad Meyer 	 * If we have more work than we can do in a quarter of our interval, we
16470292c54bSConrad Meyer 	 * fire off multiple threads to process it.
16480292c54bSConrad Meyer 	 */
16490292c54bSConrad Meyer 	threads = vmd->vmd_inactive_threads;
16502913cc46SMark Johnston 	if (threads > 1 && vmd->vmd_inactive_pps != 0 &&
16512913cc46SMark Johnston 	    shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
16520292c54bSConrad Meyer 		vmd->vmd_inactive_shortage /= threads;
16532913cc46SMark Johnston 		slop = shortage % threads;
16542913cc46SMark Johnston 		vm_domain_pageout_lock(vmd);
16550292c54bSConrad Meyer 		blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
16560292c54bSConrad Meyer 		blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
16570292c54bSConrad Meyer 		wakeup(&vmd->vmd_inactive_shortage);
16580292c54bSConrad Meyer 		vm_domain_pageout_unlock(vmd);
16590292c54bSConrad Meyer 	}
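	/*
	 * For example (hypothetical numbers), a shortage of 1001 pages
	 * split across 4 threads gives each helper a target of 250 pages;
	 * the remainder of 1 page (the slop) is added to the local scan
	 * below.
	 */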
16600292c54bSConrad Meyer 
16610292c54bSConrad Meyer 	/* Run the local thread scan. */
16622913cc46SMark Johnston 	vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop);
16630292c54bSConrad Meyer 
16640292c54bSConrad Meyer 	/*
16650292c54bSConrad Meyer 	 * Block until helper threads report results and then accumulate
16660292c54bSConrad Meyer 	 * totals.
16670292c54bSConrad Meyer 	 */
16680292c54bSConrad Meyer 	blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
16690292c54bSConrad Meyer 	freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
16700292c54bSConrad Meyer 	VM_CNT_ADD(v_dfree, freed);
16710292c54bSConrad Meyer 
16720292c54bSConrad Meyer 	/*
16730292c54bSConrad Meyer 	 * Calculate the per-thread paging rate with an exponential decay of
16740292c54bSConrad Meyer 	 * prior results.  Careful to avoid integer rounding errors with large
16750292c54bSConrad Meyer 	 * us values.
16760292c54bSConrad Meyer 	 */
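	/*
	 * For example (hypothetical numbers), freeing 5000 pages in
	 * 250000us gives an instantaneous rate of 20000 pages/s, which is
	 * then averaged 50/50 with the previous value of vmd_inactive_pps.
	 */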
16770292c54bSConrad Meyer 	us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
16780292c54bSConrad Meyer 	if (us > 1000000)
16790292c54bSConrad Meyer 		/* Keep rounding to tenths */
16800292c54bSConrad Meyer 		pps = (freed * 10) / ((us * 10) / 1000000);
16810292c54bSConrad Meyer 	else
16820292c54bSConrad Meyer 		pps = (1000000 / us) * freed;
16830292c54bSConrad Meyer 	vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);
16840292c54bSConrad Meyer 
16850292c54bSConrad Meyer 	return (shortage - freed);
16860292c54bSConrad Meyer }
16870292c54bSConrad Meyer 
16880292c54bSConrad Meyer /*
16890292c54bSConrad Meyer  * Attempt to reclaim the requested number of pages from the inactive queue.
16900292c54bSConrad Meyer  * Returns true if the shortage was addressed.
16910292c54bSConrad Meyer  */
16920292c54bSConrad Meyer static int
16930292c54bSConrad Meyer vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
16940292c54bSConrad Meyer {
16950292c54bSConrad Meyer 	struct vm_pagequeue *pq;
16960292c54bSConrad Meyer 	u_int addl_page_shortage, deficit, page_shortage;
16970292c54bSConrad Meyer 	u_int starting_page_shortage;
16980292c54bSConrad Meyer 
16990292c54bSConrad Meyer 	/*
17000292c54bSConrad Meyer 	 * vmd_pageout_deficit counts the number of pages requested in
17010292c54bSConrad Meyer 	 * allocations that failed because of a free page shortage.  We assume
17020292c54bSConrad Meyer 	 * that the allocations will be reattempted and thus include the deficit
17030292c54bSConrad Meyer 	 * in our scan target.
17040292c54bSConrad Meyer 	 */
17050292c54bSConrad Meyer 	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
17060292c54bSConrad Meyer 	starting_page_shortage = shortage + deficit;
17070292c54bSConrad Meyer 
17080292c54bSConrad Meyer 	/*
17090292c54bSConrad Meyer 	 * Run the inactive scan on as many threads as is necessary.
17100292c54bSConrad Meyer 	 */
17110292c54bSConrad Meyer 	page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
17120292c54bSConrad Meyer 	addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);
17135cd29d0fSMark Johnston 
1714ebcddc72SAlan Cox 	/*
1715ebcddc72SAlan Cox 	 * Wake up the laundry thread so that it can perform any needed
1716ebcddc72SAlan Cox 	 * laundering.  If we didn't meet our target, we're in shortfall and
1717b1fd102eSMark Johnston 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1718b1fd102eSMark Johnston 	 * swap devices are configured, the laundry thread has no work to do, so
1719b1fd102eSMark Johnston 	 * don't bother waking it up.
1720cb35676eSMark Johnston 	 *
1721cb35676eSMark Johnston 	 * The laundry thread uses the number of inactive queue scans elapsed
1722cb35676eSMark Johnston 	 * since the last laundering to determine whether to launder again, so
1723cb35676eSMark Johnston 	 * keep count.
1724ebcddc72SAlan Cox 	 */
1725cb35676eSMark Johnston 	if (starting_page_shortage > 0) {
1726e2068d0bSJeff Roberson 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1727ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1728e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1729cb35676eSMark Johnston 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1730ebcddc72SAlan Cox 			if (page_shortage > 0) {
1731e2068d0bSJeff Roberson 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
173283c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdshortfalls);
1733e2068d0bSJeff Roberson 			} else if (vmd->vmd_laundry_request !=
1734e2068d0bSJeff Roberson 			    VM_LAUNDRY_SHORTFALL)
1735e2068d0bSJeff Roberson 				vmd->vmd_laundry_request =
1736e2068d0bSJeff Roberson 				    VM_LAUNDRY_BACKGROUND;
1737e2068d0bSJeff Roberson 			wakeup(&vmd->vmd_laundry_request);
1738b1fd102eSMark Johnston 		}
173960684862SMark Johnston 		vmd->vmd_clean_pages_freed +=
174060684862SMark Johnston 		    starting_page_shortage - page_shortage;
1741ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1742ebcddc72SAlan Cox 	}
1743ebcddc72SAlan Cox 
17449452b5edSAlan Cox 	/*
174576386c7eSKonstantin Belousov 	 * If the inactive queue scan fails repeatedly to meet its
174676386c7eSKonstantin Belousov 	 * target, kill the largest process.
174776386c7eSKonstantin Belousov 	 */
174876386c7eSKonstantin Belousov 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
174976386c7eSKonstantin Belousov 
175076386c7eSKonstantin Belousov 	/*
1751be37ee79SMark Johnston 	 * See the description of addl_page_shortage above.
1752be37ee79SMark Johnston 	 */
1753be37ee79SMark Johnston 	*addl_shortage = addl_page_shortage + deficit;
1754be37ee79SMark Johnston 
1755e57dd910SAlan Cox 	return (page_shortage <= 0);
17562025d69bSKonstantin Belousov }
17572025d69bSKonstantin Belousov 
1758449c2e92SKonstantin Belousov static int vm_pageout_oom_vote;
1759449c2e92SKonstantin Belousov 
1760449c2e92SKonstantin Belousov /*
1761449c2e92SKonstantin Belousov  * The pagedaemon threads randomly select one to perform the
1762449c2e92SKonstantin Belousov  * OOM.  Trying to kill processes before all pagedaemons have
1763449c2e92SKonstantin Belousov  * failed to reach the free target is premature.
1764449c2e92SKonstantin Belousov  */
1765449c2e92SKonstantin Belousov static void
176676386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
176776386c7eSKonstantin Belousov     int starting_page_shortage)
1768449c2e92SKonstantin Belousov {
1769449c2e92SKonstantin Belousov 	int old_vote;
1770449c2e92SKonstantin Belousov 
177176386c7eSKonstantin Belousov 	if (starting_page_shortage <= 0 || starting_page_shortage !=
177276386c7eSKonstantin Belousov 	    page_shortage)
177376386c7eSKonstantin Belousov 		vmd->vmd_oom_seq = 0;
177476386c7eSKonstantin Belousov 	else
177576386c7eSKonstantin Belousov 		vmd->vmd_oom_seq++;
177676386c7eSKonstantin Belousov 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1777449c2e92SKonstantin Belousov 		if (vmd->vmd_oom) {
1778449c2e92SKonstantin Belousov 			vmd->vmd_oom = FALSE;
1779449c2e92SKonstantin Belousov 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1780449c2e92SKonstantin Belousov 		}
1781449c2e92SKonstantin Belousov 		return;
1782449c2e92SKonstantin Belousov 	}
1783449c2e92SKonstantin Belousov 
178476386c7eSKonstantin Belousov 	/*
178576386c7eSKonstantin Belousov 	 * Reset the sequence so that another full run of failed scans
178676386c7eSKonstantin Belousov 	 * is required before OOM is attempted again.
178776386c7eSKonstantin Belousov 	 */
178876386c7eSKonstantin Belousov 	vmd->vmd_oom_seq = 0;
178976386c7eSKonstantin Belousov 
1790449c2e92SKonstantin Belousov 	if (vmd->vmd_oom)
1791449c2e92SKonstantin Belousov 		return;
1792449c2e92SKonstantin Belousov 
1793449c2e92SKonstantin Belousov 	vmd->vmd_oom = TRUE;
1794449c2e92SKonstantin Belousov 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1795449c2e92SKonstantin Belousov 	if (old_vote != vm_ndomains - 1)
1796449c2e92SKonstantin Belousov 		return;
1797449c2e92SKonstantin Belousov 
1798449c2e92SKonstantin Belousov 	/*
1799449c2e92SKonstantin Belousov 	 * The current pagedaemon thread is the last in the quorum to
1800449c2e92SKonstantin Belousov 	 * start OOM.  Initiate the selection and signaling of the
1801449c2e92SKonstantin Belousov 	 * victim.
1802449c2e92SKonstantin Belousov 	 */
1803449c2e92SKonstantin Belousov 	vm_pageout_oom(VM_OOM_MEM);
1804449c2e92SKonstantin Belousov 
1805449c2e92SKonstantin Belousov 	/*
1806449c2e92SKonstantin Belousov 	 * After one round of OOM terror, recall our vote.  On the
1807449c2e92SKonstantin Belousov 	 * next pass, current pagedaemon would vote again if the low
1808449c2e92SKonstantin Belousov 	 * memory condition is still there, due to vmd_oom being
1809449c2e92SKonstantin Belousov 	 * false.
1810449c2e92SKonstantin Belousov 	 */
1811449c2e92SKonstantin Belousov 	vmd->vmd_oom = FALSE;
1812449c2e92SKonstantin Belousov 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1813449c2e92SKonstantin Belousov }
18142025d69bSKonstantin Belousov 
18153949873fSKonstantin Belousov /*
18163949873fSKonstantin Belousov  * The OOM killer is the page daemon's action of last resort when
18173949873fSKonstantin Belousov  * memory allocation requests have been stalled for a prolonged period
18183949873fSKonstantin Belousov  * of time because it cannot reclaim memory.  This function computes
18193949873fSKonstantin Belousov  * the approximate number of physical pages that could be reclaimed if
18203949873fSKonstantin Belousov  * the specified address space is destroyed.
18213949873fSKonstantin Belousov  *
18223949873fSKonstantin Belousov  * Private, anonymous memory owned by the address space is the
18233949873fSKonstantin Belousov  * principal resource that we expect to recover after an OOM kill.
18243949873fSKonstantin Belousov  * Since the physical pages mapped by the address space's COW entries
18253949873fSKonstantin Belousov  * are typically shared pages, they are unlikely to be released and so
18263949873fSKonstantin Belousov  * they are not counted.
18273949873fSKonstantin Belousov  *
18283949873fSKonstantin Belousov  * To get to the point where the page daemon runs the OOM killer, its
18293949873fSKonstantin Belousov  * efforts to write-back vnode-backed pages may have stalled.  This
18303949873fSKonstantin Belousov  * could be caused by a memory allocation deadlock in the write path
18313949873fSKonstantin Belousov  * that might be resolved by an OOM kill.  Therefore, physical pages
18323949873fSKonstantin Belousov  * belonging to vnode-backed objects are counted, because they might
18333949873fSKonstantin Belousov  * be freed without being written out first if the address space holds
18343949873fSKonstantin Belousov  * the last reference to an unlinked vnode.
18353949873fSKonstantin Belousov  *
18363949873fSKonstantin Belousov  * Similarly, physical pages belonging to OBJT_PHYS objects are
18373949873fSKonstantin Belousov  * counted because the address space might hold the last reference to
18383949873fSKonstantin Belousov  * the object.
18393949873fSKonstantin Belousov  */
18403949873fSKonstantin Belousov static long
18413949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace)
18423949873fSKonstantin Belousov {
18433949873fSKonstantin Belousov 	vm_map_t map;
18443949873fSKonstantin Belousov 	vm_map_entry_t entry;
18453949873fSKonstantin Belousov 	vm_object_t obj;
18463949873fSKonstantin Belousov 	long res;
18473949873fSKonstantin Belousov 
18483949873fSKonstantin Belousov 	map = &vmspace->vm_map;
18493949873fSKonstantin Belousov 	KASSERT(!map->system_map, ("system map"));
18503949873fSKonstantin Belousov 	sx_assert(&map->lock, SA_LOCKED);
18513949873fSKonstantin Belousov 	res = 0;
18522288078cSDoug Moore 	VM_MAP_ENTRY_FOREACH(entry, map) {
18533949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
18543949873fSKonstantin Belousov 			continue;
18553949873fSKonstantin Belousov 		obj = entry->object.vm_object;
18563949873fSKonstantin Belousov 		if (obj == NULL)
18573949873fSKonstantin Belousov 			continue;
18583949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
18593949873fSKonstantin Belousov 		    obj->ref_count != 1)
18603949873fSKonstantin Belousov 			continue;
18610cb2610eSMark Johnston 		if (obj->type == OBJT_PHYS || obj->type == OBJT_VNODE ||
1862e123264eSMark Johnston 		    (obj->flags & OBJ_SWAP) != 0)
18633949873fSKonstantin Belousov 			res += obj->resident_page_count;
18643949873fSKonstantin Belousov 	}
18653949873fSKonstantin Belousov 	return (res);
18663949873fSKonstantin Belousov }
18673949873fSKonstantin Belousov 
1868245139c6SKonstantin Belousov static int vm_oom_ratelim_last;
1869245139c6SKonstantin Belousov static int vm_oom_pf_secs = 10;
1870245139c6SKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1871245139c6SKonstantin Belousov     "");
1872245139c6SKonstantin Belousov static struct mtx vm_oom_ratelim_mtx;
1873245139c6SKonstantin Belousov 
18742025d69bSKonstantin Belousov void
18752025d69bSKonstantin Belousov vm_pageout_oom(int shortage)
18762025d69bSKonstantin Belousov {
18774a864f62SMark Johnston 	const char *reason;
18782025d69bSKonstantin Belousov 	struct proc *p, *bigproc;
18792025d69bSKonstantin Belousov 	vm_offset_t size, bigsize;
18802025d69bSKonstantin Belousov 	struct thread *td;
18816bed074cSKonstantin Belousov 	struct vmspace *vm;
1882245139c6SKonstantin Belousov 	int now;
18833e78e983SAlan Cox 	bool breakout;
18842025d69bSKonstantin Belousov 
18852025d69bSKonstantin Belousov 	/*
1886245139c6SKonstantin Belousov 	 * For OOM requests originating from vm_fault(), there is a high
1887245139c6SKonstantin Belousov 	 * chance that a single large process faults simultaneously in
1888245139c6SKonstantin Belousov 	 * several threads.  Also, on an active system running many
1889245139c6SKonstantin Belousov 	 * processes of middle-size, like buildworld, all of them
1890245139c6SKonstantin Belousov 	 * could fault almost simultaneously as well.
1891245139c6SKonstantin Belousov 	 *
1892245139c6SKonstantin Belousov 	 * To avoid killing too many processes, rate-limit OOMs
1893245139c6SKonstantin Belousov 	 * initiated by vm_fault() time-outs on the waits for free
1894245139c6SKonstantin Belousov 	 * pages.
1895245139c6SKonstantin Belousov 	 */
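	/*
	 * With the default vm_oom_pf_secs of 10, at most one page
	 * fault-driven (VM_OOM_MEM_PF) kill is initiated every 10 seconds;
	 * OOM requests made for other reasons are not rate-limited here.
	 */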
1896245139c6SKonstantin Belousov 	mtx_lock(&vm_oom_ratelim_mtx);
1897245139c6SKonstantin Belousov 	now = ticks;
1898245139c6SKonstantin Belousov 	if (shortage == VM_OOM_MEM_PF &&
1899245139c6SKonstantin Belousov 	    (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1900245139c6SKonstantin Belousov 		mtx_unlock(&vm_oom_ratelim_mtx);
1901245139c6SKonstantin Belousov 		return;
1902245139c6SKonstantin Belousov 	}
1903245139c6SKonstantin Belousov 	vm_oom_ratelim_last = now;
1904245139c6SKonstantin Belousov 	mtx_unlock(&vm_oom_ratelim_mtx);
1905245139c6SKonstantin Belousov 
1906245139c6SKonstantin Belousov 	/*
19071c58e4e5SJohn Baldwin 	 * We keep the process bigproc locked once we find it to keep anyone
19081c58e4e5SJohn Baldwin 	 * from messing with it; however, there is a possibility of
190928323addSBryan Drewery 	 * deadlock if process B is bigproc and one of its child processes
19101c58e4e5SJohn Baldwin 	 * attempts to propagate a signal to B while we are waiting for A's
19111c58e4e5SJohn Baldwin 	 * lock while walking this list.  To avoid this, we don't block on
19121c58e4e5SJohn Baldwin 	 * the process lock but just skip a process if it is already locked.
19135663e6deSDavid Greenman 	 */
19145663e6deSDavid Greenman 	bigproc = NULL;
19155663e6deSDavid Greenman 	bigsize = 0;
19161005a129SJohn Baldwin 	sx_slock(&allproc_lock);
1917e602ba25SJulian Elischer 	FOREACH_PROC_IN_SYSTEM(p) {
191871943c3dSKonstantin Belousov 		PROC_LOCK(p);
191971943c3dSKonstantin Belousov 
19201c58e4e5SJohn Baldwin 		/*
19213f1c4c4fSKonstantin Belousov 		 * If this is a system, protected or killed process, skip it.
19225663e6deSDavid Greenman 		 */
192371943c3dSKonstantin Belousov 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
192471943c3dSKonstantin Belousov 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
192571943c3dSKonstantin Belousov 		    p->p_pid == 1 || P_KILLED(p) ||
192671943c3dSKonstantin Belousov 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
19278606d880SJohn Baldwin 			PROC_UNLOCK(p);
19285663e6deSDavid Greenman 			continue;
19295663e6deSDavid Greenman 		}
19305663e6deSDavid Greenman 		/*
1931dcbcd518SBruce Evans 		 * If the process is in a non-running type state,
1932e602ba25SJulian Elischer 		 * don't touch it.  Check all the threads individually.
19335663e6deSDavid Greenman 		 */
19343e78e983SAlan Cox 		breakout = false;
1935e602ba25SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
1936982d11f8SJeff Roberson 			thread_lock(td);
193771fad9fdSJulian Elischer 			if (!TD_ON_RUNQ(td) &&
193871fad9fdSJulian Elischer 			    !TD_IS_RUNNING(td) &&
1939f497cda2SEdward Tomasz Napierala 			    !TD_IS_SLEEPING(td) &&
1940e24a6552SMark Johnston 			    !TD_IS_SUSPENDED(td)) {
1941982d11f8SJeff Roberson 				thread_unlock(td);
19423e78e983SAlan Cox 				breakout = true;
1943e602ba25SJulian Elischer 				break;
1944e602ba25SJulian Elischer 			}
1945982d11f8SJeff Roberson 			thread_unlock(td);
1946e602ba25SJulian Elischer 		}
1947e602ba25SJulian Elischer 		if (breakout) {
19481c58e4e5SJohn Baldwin 			PROC_UNLOCK(p);
19495663e6deSDavid Greenman 			continue;
19505663e6deSDavid Greenman 		}
19515663e6deSDavid Greenman 		/*
19525663e6deSDavid Greenman 		 * get the process size
19535663e6deSDavid Greenman 		 */
19546bed074cSKonstantin Belousov 		vm = vmspace_acquire_ref(p);
19556bed074cSKonstantin Belousov 		if (vm == NULL) {
19566bed074cSKonstantin Belousov 			PROC_UNLOCK(p);
19576bed074cSKonstantin Belousov 			continue;
19586bed074cSKonstantin Belousov 		}
19598370e9dfSMark Johnston 		_PHOLD(p);
196072d97679SDavid Schultz 		PROC_UNLOCK(p);
196195e2409aSKonstantin Belousov 		sx_sunlock(&allproc_lock);
196295e2409aSKonstantin Belousov 		if (!vm_map_trylock_read(&vm->vm_map)) {
196371943c3dSKonstantin Belousov 			vmspace_free(vm);
196495e2409aSKonstantin Belousov 			sx_slock(&allproc_lock);
196595e2409aSKonstantin Belousov 			PRELE(p);
196672d97679SDavid Schultz 			continue;
196772d97679SDavid Schultz 		}
19687981aa24SKonstantin Belousov 		size = vmspace_swap_count(vm);
1969245139c6SKonstantin Belousov 		if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
19703949873fSKonstantin Belousov 			size += vm_pageout_oom_pagecount(vm);
19713949873fSKonstantin Belousov 		vm_map_unlock_read(&vm->vm_map);
19726bed074cSKonstantin Belousov 		vmspace_free(vm);
197395e2409aSKonstantin Belousov 		sx_slock(&allproc_lock);
19743949873fSKonstantin Belousov 
19755663e6deSDavid Greenman 		/*
19763949873fSKonstantin Belousov 		 * If this process is bigger than the biggest one,
19775663e6deSDavid Greenman 		 * remember it.
19785663e6deSDavid Greenman 		 */
19795663e6deSDavid Greenman 		if (size > bigsize) {
19801c58e4e5SJohn Baldwin 			if (bigproc != NULL)
198171943c3dSKonstantin Belousov 				PRELE(bigproc);
19825663e6deSDavid Greenman 			bigproc = p;
19835663e6deSDavid Greenman 			bigsize = size;
198471943c3dSKonstantin Belousov 		} else {
198571943c3dSKonstantin Belousov 			PRELE(p);
198671943c3dSKonstantin Belousov 		}
19875663e6deSDavid Greenman 	}
19881005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
19894a864f62SMark Johnston 
19905663e6deSDavid Greenman 	if (bigproc != NULL) {
19914a864f62SMark Johnston 		switch (shortage) {
19924a864f62SMark Johnston 		case VM_OOM_MEM:
19934a864f62SMark Johnston 			reason = "failed to reclaim memory";
19944a864f62SMark Johnston 			break;
19954a864f62SMark Johnston 		case VM_OOM_MEM_PF:
19964a864f62SMark Johnston 			reason = "a thread waited too long to allocate a page";
19974a864f62SMark Johnston 			break;
19984a864f62SMark Johnston 		case VM_OOM_SWAPZ:
19994a864f62SMark Johnston 			reason = "out of swap space";
20004a864f62SMark Johnston 			break;
20014a864f62SMark Johnston 		default:
20024a864f62SMark Johnston 			panic("unknown OOM reason %d", shortage);
20034a864f62SMark Johnston 		}
20043c200db9SJonathan T. Looney 		if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
20054a864f62SMark Johnston 			panic("%s", reason);
200671943c3dSKonstantin Belousov 		PROC_LOCK(bigproc);
20074a864f62SMark Johnston 		killproc(bigproc, reason);
2008fa885116SJulian Elischer 		sched_nice(bigproc, PRIO_MIN);
200971943c3dSKonstantin Belousov 		_PRELE(bigproc);
20101c58e4e5SJohn Baldwin 		PROC_UNLOCK(bigproc);
20115663e6deSDavid Greenman 	}
20125663e6deSDavid Greenman }
201326f9a767SRodney W. Grimes 
20148fc25508SMark Johnston /*
20158fc25508SMark Johnston  * Signal a free page shortage to subsystems that have registered an event
20168fc25508SMark Johnston  * handler.  Reclaim memory from UMA in the event of a severe shortage.
20178fc25508SMark Johnston  * Return true if the free page count should be re-evaluated.
20188fc25508SMark Johnston  */
2019b50a4ea6SMark Johnston static bool
2020b50a4ea6SMark Johnston vm_pageout_lowmem(void)
202149a3710cSMark Johnston {
2022b50a4ea6SMark Johnston 	static int lowmem_ticks = 0;
2023b50a4ea6SMark Johnston 	int last;
20248fc25508SMark Johnston 	bool ret;
20258fc25508SMark Johnston 
20268fc25508SMark Johnston 	ret = false;
202749a3710cSMark Johnston 
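	/*
	 * The fcmpset loop below lets only one thread advance lowmem_ticks,
	 * so the lowmem handlers run at most once per lowmem_period seconds
	 * even if several page daemon threads call here concurrently.
	 */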
2028b50a4ea6SMark Johnston 	last = atomic_load_int(&lowmem_ticks);
2029b50a4ea6SMark Johnston 	while ((u_int)(ticks - last) / hz >= lowmem_period) {
2030b50a4ea6SMark Johnston 		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
2031b50a4ea6SMark Johnston 			continue;
2032b50a4ea6SMark Johnston 
203349a3710cSMark Johnston 		/*
203449a3710cSMark Johnston 		 * Decrease registered cache sizes.
203549a3710cSMark Johnston 		 */
203649a3710cSMark Johnston 		SDT_PROBE0(vm, , , vm__lowmem_scan);
203749a3710cSMark Johnston 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
203849a3710cSMark Johnston 
203949a3710cSMark Johnston 		/*
204049a3710cSMark Johnston 		 * We do this explicitly after the caches have been
20418fc25508SMark Johnston 		 * drained above.
204249a3710cSMark Johnston 		 */
20438fc25508SMark Johnston 		uma_reclaim(UMA_RECLAIM_TRIM);
20448fc25508SMark Johnston 		ret = true;
2045ace409ceSAlexander Motin 		break;
204649a3710cSMark Johnston 	}
20478fc25508SMark Johnston 
20488fc25508SMark Johnston 	/*
20498fc25508SMark Johnston 	 * Kick off an asynchronous reclaim of cached memory if one of the
20508fc25508SMark Johnston 	 * page daemons is failing to keep up with demand.  Use the "severe"
20518fc25508SMark Johnston 	 * threshold instead of "min" to ensure that we do not blow away the
20528fc25508SMark Johnston 	 * caches if a subset of the NUMA domains is depleted by kernel memory
20538fc25508SMark Johnston 	 * allocations; the domainset iterators automatically skip domains
20548fc25508SMark Johnston 	 * below the "min" threshold on the first pass.
20558fc25508SMark Johnston 	 *
20568fc25508SMark Johnston 	 * The UMA reclaim worker has its own rate-limiting mechanism, so don't
20578fc25508SMark Johnston 	 * worry about kicking it too often.
20588fc25508SMark Johnston 	 */
20598fc25508SMark Johnston 	if (vm_page_count_severe())
20608fc25508SMark Johnston 		uma_reclaim_wakeup();
20618fc25508SMark Johnston 
20628fc25508SMark Johnston 	return (ret);
206349a3710cSMark Johnston }
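
/*
 * For reference, the notification issued above reaches any subsystem that
 * has registered a vm_lowmem eventhandler.  A minimal, hypothetical sketch
 * (the handler name and the cache it drains are made up for illustration):
 *
 *	static void
 *	foo_lowmem(void *arg __unused, int flags __unused)
 *	{
 *		foo_cache_drain();
 *	}
 *
 *	EVENTHANDLER_REGISTER(vm_lowmem, foo_lowmem, NULL,
 *	    EVENTHANDLER_PRI_FIRST);
 *
 * The flags argument carries the shortage type, VM_LOW_PAGES in the
 * invocation above.
 */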
206449a3710cSMark Johnston 
206549a3710cSMark Johnston static void
2066449c2e92SKonstantin Belousov vm_pageout_worker(void *arg)
2067449c2e92SKonstantin Belousov {
2068e2068d0bSJeff Roberson 	struct vm_domain *vmd;
2069b50a4ea6SMark Johnston 	u_int ofree;
207049a3710cSMark Johnston 	int addl_shortage, domain, shortage;
2071e57dd910SAlan Cox 	bool target_met;
2072449c2e92SKonstantin Belousov 
2073e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
2074e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
20755f8cd1c0SJeff Roberson 	shortage = 0;
2076e57dd910SAlan Cox 	target_met = true;
2077449c2e92SKonstantin Belousov 
2078449c2e92SKonstantin Belousov 	/*
2079949c9186SKonstantin Belousov 	 * XXXKIB It could be useful to bind pageout daemon threads to
2080949c9186SKonstantin Belousov 	 * the cores belonging to the domain, from which vm_page_array
2081949c9186SKonstantin Belousov 	 * is allocated.
2082449c2e92SKonstantin Belousov 	 */
2083449c2e92SKonstantin Belousov 
2084e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2085e2068d0bSJeff Roberson 	vmd->vmd_last_active_scan = ticks;
2086449c2e92SKonstantin Belousov 
2087449c2e92SKonstantin Belousov 	/*
2088449c2e92SKonstantin Belousov 	 * The pageout daemon worker is never done, so loop forever.
2089449c2e92SKonstantin Belousov 	 */
2090449c2e92SKonstantin Belousov 	while (TRUE) {
209130fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
209249a3710cSMark Johnston 
209330fbfddaSJeff Roberson 		/*
209430fbfddaSJeff Roberson 		 * We need to clear wanted before we check the limits.  This
209530fbfddaSJeff Roberson 		 * prevents races with wakers who will check wanted after they
209630fbfddaSJeff Roberson 		 * reach the limit.
209730fbfddaSJeff Roberson 		 */
209830fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
209956ce0690SAlan Cox 
210056ce0690SAlan Cox 		/*
21015f8cd1c0SJeff Roberson 		 * Might the page daemon need to run again?
2102449c2e92SKonstantin Belousov 		 */
21035f8cd1c0SJeff Roberson 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
210456ce0690SAlan Cox 			/*
210549a3710cSMark Johnston 			 * Yes.  If the scan failed to produce enough free
210649a3710cSMark Johnston 			 * pages, sleep uninterruptibly for some time in the
210749a3710cSMark Johnston 			 * hope that the laundry thread will clean some pages.
210856ce0690SAlan Cox 			 */
210930fbfddaSJeff Roberson 			vm_domain_pageout_unlock(vmd);
211049a3710cSMark Johnston 			if (!target_met)
21116eebec83SMark Johnston 				pause("pwait", hz / VM_INACT_SCAN_RATE);
2112449c2e92SKonstantin Belousov 		} else {
2113449c2e92SKonstantin Belousov 			/*
21145f8cd1c0SJeff Roberson 			 * No, sleep until the next wakeup or until pages
21155f8cd1c0SJeff Roberson 			 * need to have their reference stats updated.
2116449c2e92SKonstantin Belousov 			 */
21172c0f13aaSKonstantin Belousov 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
211830fbfddaSJeff Roberson 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
21195f8cd1c0SJeff Roberson 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
212083c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdwakeups);
212156ce0690SAlan Cox 		}
2122be37ee79SMark Johnston 
212330fbfddaSJeff Roberson 		/* Prevent spurious wakeups by ensuring that wanted is set. */
212430fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
212530fbfddaSJeff Roberson 
212630fbfddaSJeff Roberson 		/*
212730fbfddaSJeff Roberson 		 * Use the controller to calculate how many pages to free in
2128b50a4ea6SMark Johnston 		 * this interval, and scan the inactive queue.  If the lowmem
2129b50a4ea6SMark Johnston 		 * handlers appear to have freed up some pages, subtract the
2130b50a4ea6SMark Johnston 		 * difference from the inactive queue scan target.
213130fbfddaSJeff Roberson 		 */
21325f8cd1c0SJeff Roberson 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
213349a3710cSMark Johnston 		if (shortage > 0) {
2134b50a4ea6SMark Johnston 			ofree = vmd->vmd_free_count;
2135b50a4ea6SMark Johnston 			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2136b50a4ea6SMark Johnston 				shortage -= min(vmd->vmd_free_count - ofree,
2137b50a4ea6SMark Johnston 				    (u_int)shortage);
21380292c54bSConrad Meyer 			target_met = vm_pageout_inactive(vmd, shortage,
2139be37ee79SMark Johnston 			    &addl_shortage);
214049a3710cSMark Johnston 		} else
214149a3710cSMark Johnston 			addl_shortage = 0;
214256ce0690SAlan Cox 
2143be37ee79SMark Johnston 		/*
2144be37ee79SMark Johnston 		 * Scan the active queue.  A positive value for shortage
2145be37ee79SMark Johnston 		 * indicates that we must aggressively deactivate pages to avoid
2146be37ee79SMark Johnston 		 * a shortfall.
2147be37ee79SMark Johnston 		 */
21487bb4634eSMark Johnston 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
2149be37ee79SMark Johnston 		vm_pageout_scan_active(vmd, shortage);
2150449c2e92SKonstantin Belousov 	}
2151449c2e92SKonstantin Belousov }
2152449c2e92SKonstantin Belousov 
2153df8bae1dSRodney W. Grimes /*
21540292c54bSConrad Meyer  * vm_pageout_helper runs additional pageout daemons in times of high paging
21550292c54bSConrad Meyer  * activity.
21560292c54bSConrad Meyer  */
21570292c54bSConrad Meyer static void
21580292c54bSConrad Meyer vm_pageout_helper(void *arg)
21590292c54bSConrad Meyer {
21600292c54bSConrad Meyer 	struct vm_domain *vmd;
21610292c54bSConrad Meyer 	int domain;
21620292c54bSConrad Meyer 
21630292c54bSConrad Meyer 	domain = (uintptr_t)arg;
21640292c54bSConrad Meyer 	vmd = VM_DOMAIN(domain);
21650292c54bSConrad Meyer 
21660292c54bSConrad Meyer 	vm_domain_pageout_lock(vmd);
21670292c54bSConrad Meyer 	for (;;) {
21680292c54bSConrad Meyer 		msleep(&vmd->vmd_inactive_shortage,
21690292c54bSConrad Meyer 		    vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0);
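		/*
		 * Record that this helper has picked up the request before
		 * the scan begins; vmd_inactive_running is not released
		 * until the scan below completes.
		 */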
21700292c54bSConrad Meyer 		blockcount_release(&vmd->vmd_inactive_starting, 1);
21710292c54bSConrad Meyer 
21720292c54bSConrad Meyer 		vm_domain_pageout_unlock(vmd);
21730292c54bSConrad Meyer 		vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
21740292c54bSConrad Meyer 		vm_domain_pageout_lock(vmd);
21750292c54bSConrad Meyer 
21760292c54bSConrad Meyer 		/*
21770292c54bSConrad Meyer 		 * Release the running count while the pageout lock is held to
21780292c54bSConrad Meyer 		 * prevent wakeup races.
21790292c54bSConrad Meyer 		 */
21800292c54bSConrad Meyer 		blockcount_release(&vmd->vmd_inactive_running, 1);
21810292c54bSConrad Meyer 	}
21820292c54bSConrad Meyer }
21830292c54bSConrad Meyer 
21840292c54bSConrad Meyer static int
218574f5530dSConrad Meyer get_pageout_threads_per_domain(const struct vm_domain *vmd)
21860292c54bSConrad Meyer {
218774f5530dSConrad Meyer 	unsigned total_pageout_threads, eligible_cpus, domain_cpus;
21880292c54bSConrad Meyer 
218974f5530dSConrad Meyer 	if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
219074f5530dSConrad Meyer 		return (0);
21910292c54bSConrad Meyer 
21920292c54bSConrad Meyer 	/*
21930292c54bSConrad Meyer 	 * Semi-arbitrarily cap the number of pagedaemon threads at roughly
219474f5530dSConrad Meyer 	 * half the total number of CPUs in the system.
21950292c54bSConrad Meyer 	 */
219674f5530dSConrad Meyer 	if (pageout_cpus_per_thread < 2)
219774f5530dSConrad Meyer 		pageout_cpus_per_thread = 2;
219874f5530dSConrad Meyer 	else if (pageout_cpus_per_thread > mp_ncpus)
219974f5530dSConrad Meyer 		pageout_cpus_per_thread = mp_ncpus;
22000292c54bSConrad Meyer 
220174f5530dSConrad Meyer 	total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
220274f5530dSConrad Meyer 	domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);
220374f5530dSConrad Meyer 
220474f5530dSConrad Meyer 	/* Pagedaemons are not run in empty domains. */
220574f5530dSConrad Meyer 	eligible_cpus = mp_ncpus;
220674f5530dSConrad Meyer 	for (unsigned i = 0; i < vm_ndomains; i++)
220774f5530dSConrad Meyer 		if (VM_DOMAIN_EMPTY(i))
220874f5530dSConrad Meyer 			eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);
220974f5530dSConrad Meyer 
221074f5530dSConrad Meyer 	/*
221174f5530dSConrad Meyer 	 * Assign a portion of the total pageout threads to this domain
221274f5530dSConrad Meyer 	 * corresponding to the fraction of pagedaemon-eligible CPUs in the
221374f5530dSConrad Meyer 	 * domain.  In asymmetric NUMA systems, domains with more CPUs may be
221474f5530dSConrad Meyer 	 * allocated more threads than domains with fewer CPUs.
221574f5530dSConrad Meyer 	 */
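	/*
	 * Worked example: on a 64-CPU system split evenly across two
	 * non-empty domains with, say, pageout_cpus_per_thread = 16,
	 * total_pageout_threads = howmany(64, 16) = 4, domain_cpus = 32,
	 * eligible_cpus = 64, and each domain is assigned
	 * howmany(4 * 32, 64) = 2 threads.
	 */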
221674f5530dSConrad Meyer 	return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
22170292c54bSConrad Meyer }
22180292c54bSConrad Meyer 
22190292c54bSConrad Meyer /*
22209c770a27SMark Johnston  * Initialize basic pageout daemon settings.  See the comment above the
22219c770a27SMark Johnston  * definition of vm_domain for some explanation of how these thresholds are
22229c770a27SMark Johnston  * used.
2223df8bae1dSRodney W. Grimes  */
22242b14f991SJulian Elischer static void
2225e2068d0bSJeff Roberson vm_pageout_init_domain(int domain)
2226df8bae1dSRodney W. Grimes {
2227e2068d0bSJeff Roberson 	struct vm_domain *vmd;
22285f8cd1c0SJeff Roberson 	struct sysctl_oid *oid;
2229e2068d0bSJeff Roberson 
2230e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
2231e2068d0bSJeff Roberson 	vmd->vmd_interrupt_free_min = 2;
2232f6b04d2bSDavid Greenman 
223345ae1d91SAlan Cox 	/*
223445ae1d91SAlan Cox 	 * v_free_reserved needs to include enough for the largest
223545ae1d91SAlan Cox 	 * swap pager structures plus enough for any pv_entry structs
223645ae1d91SAlan Cox 	 * when paging.
223745ae1d91SAlan Cox 	 */
22380cab71bcSDoug Moore 	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2239e2068d0bSJeff Roberson 	    vmd->vmd_interrupt_free_min;
2240e2068d0bSJeff Roberson 	vmd->vmd_free_reserved = vm_pageout_page_count +
22419c770a27SMark Johnston 	    vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
22429c770a27SMark Johnston 	vmd->vmd_free_min = vmd->vmd_page_count / 200;
2243e2068d0bSJeff Roberson 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2244e2068d0bSJeff Roberson 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2245e2068d0bSJeff Roberson 	vmd->vmd_free_min += vmd->vmd_free_reserved;
2246e2068d0bSJeff Roberson 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
2247e2068d0bSJeff Roberson 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2248e2068d0bSJeff Roberson 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2249e2068d0bSJeff Roberson 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
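
	/*
	 * Illustrative numbers, assuming 4KB pages, a 64KB MAXBSIZE and
	 * vm_pageout_page_count = 32: a domain with 1M pages (4GB) gets
	 * pageout_free_min = 32 + 2 = 34, free_reserved = 32 + 34 +
	 * 1365 = 1431, free_target = 4 * 5242 + 1431 = 22399, free_min =
	 * 5242 + 1431 = 6673, and inactive_target = 33598 pages (~131MB).
	 */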
2250df8bae1dSRodney W. Grimes 
2251d9e23210SJeff Roberson 	/*
22525f8cd1c0SJeff Roberson 	 * Set the default wakeup threshold to be 10% below the paging
22535f8cd1c0SJeff Roberson 	 * target.  This keeps the steady state out of shortfall.
2254d9e23210SJeff Roberson 	 */
22555f8cd1c0SJeff Roberson 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2256e2068d0bSJeff Roberson 
2257e2068d0bSJeff Roberson 	/*
2258e2068d0bSJeff Roberson 	 * Target amount of memory to move out of the laundry queue during a
2259e2068d0bSJeff Roberson 	 * background laundering.  This is proportional to the amount of system
2260e2068d0bSJeff Roberson 	 * memory.
2261e2068d0bSJeff Roberson 	 */
2262e2068d0bSJeff Roberson 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2263e2068d0bSJeff Roberson 	    vmd->vmd_free_min) / 10;
22645f8cd1c0SJeff Roberson 
22655f8cd1c0SJeff Roberson 	/* Initialize the pageout daemon pid controller. */
22665f8cd1c0SJeff Roberson 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
22675f8cd1c0SJeff Roberson 	    vmd->vmd_free_target, PIDCTRL_BOUND,
22685f8cd1c0SJeff Roberson 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
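	/*
	 * The controller's setpoint is vmd_free_target and its sampling
	 * interval matches the inactive scan rate; pidctrl_daemon() in the
	 * worker loop turns the current free count into a per-interval
	 * page-freeing target for the inactive scan.
	 */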
22695f8cd1c0SJeff Roberson 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
22707029da5cSPawel Biernacki 	    "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
22715f8cd1c0SJeff Roberson 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
22720292c54bSConrad Meyer 
227374f5530dSConrad Meyer 	vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
2274e2068d0bSJeff Roberson }
2275e2068d0bSJeff Roberson 
2276e2068d0bSJeff Roberson static void
2277e2068d0bSJeff Roberson vm_pageout_init(void)
2278e2068d0bSJeff Roberson {
227997458520SMark Johnston 	u_long freecount;
2280e2068d0bSJeff Roberson 	int i;
2281e2068d0bSJeff Roberson 
2282e2068d0bSJeff Roberson 	/*
2283e2068d0bSJeff Roberson 	 * Initialize some paging parameters.
2284e2068d0bSJeff Roberson 	 */
2285e2068d0bSJeff Roberson 	freecount = 0;
2286e2068d0bSJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
2287e2068d0bSJeff Roberson 		struct vm_domain *vmd;
2288e2068d0bSJeff Roberson 
2289e2068d0bSJeff Roberson 		vm_pageout_init_domain(i);
2290e2068d0bSJeff Roberson 		vmd = VM_DOMAIN(i);
2291e2068d0bSJeff Roberson 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2292e2068d0bSJeff Roberson 		vm_cnt.v_free_target += vmd->vmd_free_target;
2293e2068d0bSJeff Roberson 		vm_cnt.v_free_min += vmd->vmd_free_min;
2294e2068d0bSJeff Roberson 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2295e2068d0bSJeff Roberson 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2296e2068d0bSJeff Roberson 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2297e2068d0bSJeff Roberson 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
2298e2068d0bSJeff Roberson 		freecount += vmd->vmd_free_count;
2299e2068d0bSJeff Roberson 	}
2300d9e23210SJeff Roberson 
2301d9e23210SJeff Roberson 	/*
2302d9e23210SJeff Roberson 	 * Set interval in seconds for active scan.  We want to visit each
2303c9612b2dSJeff Roberson 	 * page at least once every ten minutes.  This prevents worst-case
2304c9612b2dSJeff Roberson 	 * paging behavior caused by a stale active LRU.
2305d9e23210SJeff Roberson 	 */
2306d9e23210SJeff Roberson 	if (vm_pageout_update_period == 0)
2307c9612b2dSJeff Roberson 		vm_pageout_update_period = 600;
2308d9e23210SJeff Roberson 
230997458520SMark Johnston 	/*
231097458520SMark Johnston 	 * Set the maximum number of user-wired virtual pages.  Historically the
231197458520SMark Johnston 	 * main source of such pages was mlock(2) and mlockall(2).  Hypervisors
231297458520SMark Johnston 	 * may also request user-wired memory.
231397458520SMark Johnston 	 */
231454a3a114SMark Johnston 	if (vm_page_max_user_wired == 0)
231597458520SMark Johnston 		vm_page_max_user_wired = 4 * freecount / 5;
23164d19f4adSSteven Hartland }
23174d19f4adSSteven Hartland 
23184d19f4adSSteven Hartland /*
23194d19f4adSSteven Hartland  *     vm_pageout is the high level pageout daemon.
23204d19f4adSSteven Hartland  */
23214d19f4adSSteven Hartland static void
23224d19f4adSSteven Hartland vm_pageout(void)
23234d19f4adSSteven Hartland {
2324920239efSMark Johnston 	struct proc *p;
2325920239efSMark Johnston 	struct thread *td;
23260292c54bSConrad Meyer 	int error, first, i, j, pageout_threads;
2327920239efSMark Johnston 
2328920239efSMark Johnston 	p = curproc;
2329920239efSMark Johnston 	td = curthread;
2330df8bae1dSRodney W. Grimes 
2331245139c6SKonstantin Belousov 	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
233224a1cce3SDavid Greenman 	swap_pager_swap_init();
2333920239efSMark Johnston 	for (first = -1, i = 0; i < vm_ndomains; i++) {
233430c5525bSAndrew Gallatin 		if (VM_DOMAIN_EMPTY(i)) {
233530c5525bSAndrew Gallatin 			if (bootverbose)
233630c5525bSAndrew Gallatin 				printf("domain %d empty; skipping pageout\n",
233730c5525bSAndrew Gallatin 				    i);
233830c5525bSAndrew Gallatin 			continue;
233930c5525bSAndrew Gallatin 		}
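		/*
		 * The first non-empty domain is serviced by the current
		 * thread via the vm_pageout_worker() call at the end of this
		 * function; every other domain gets a dedicated kthread.
		 */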
2340920239efSMark Johnston 		if (first == -1)
2341920239efSMark Johnston 			first = i;
2342920239efSMark Johnston 		else {
2343920239efSMark Johnston 			error = kthread_add(vm_pageout_worker,
2344920239efSMark Johnston 			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2345920239efSMark Johnston 			if (error != 0)
2346920239efSMark Johnston 				panic("starting pageout for domain %d: %d\n",
2347449c2e92SKonstantin Belousov 				    i, error);
2348dc2efb27SJohn Dyson 		}
234974f5530dSConrad Meyer 		pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
23500292c54bSConrad Meyer 		for (j = 0; j < pageout_threads - 1; j++) {
23510292c54bSConrad Meyer 			error = kthread_add(vm_pageout_helper,
23520292c54bSConrad Meyer 			    (void *)(uintptr_t)i, p, NULL, 0, 0,
23530292c54bSConrad Meyer 			    "dom%d helper%d", i, j);
23540292c54bSConrad Meyer 			if (error != 0)
23550292c54bSConrad Meyer 				panic("starting pageout helper %d for domain "
23560292c54bSConrad Meyer 				    "%d: %d\n", j, i, error);
23570292c54bSConrad Meyer 		}
2358e2068d0bSJeff Roberson 		error = kthread_add(vm_pageout_laundry_worker,
2359920239efSMark Johnston 		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2360e2068d0bSJeff Roberson 		if (error != 0)
2361920239efSMark Johnston 			panic("starting laundry for domain %d: %d", i, error);
2362f919ebdeSDavid Greenman 	}
2363920239efSMark Johnston 	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
236444ec2b63SKonstantin Belousov 	if (error != 0)
236544ec2b63SKonstantin Belousov 		panic("starting uma_reclaim helper, error %d\n", error);
2366920239efSMark Johnston 
2367920239efSMark Johnston 	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2368920239efSMark Johnston 	vm_pageout_worker((void *)(uintptr_t)first);
2369df8bae1dSRodney W. Grimes }
237026f9a767SRodney W. Grimes 
23716b4b77adSAlan Cox /*
2372280d15cdSMark Johnston  * Perform an advisory wakeup of the page daemon.
23736b4b77adSAlan Cox  */
2374e0c5a895SJohn Dyson void
2375e2068d0bSJeff Roberson pagedaemon_wakeup(int domain)
2376e0c5a895SJohn Dyson {
2377e2068d0bSJeff Roberson 	struct vm_domain *vmd;
2378a1c0a785SAlan Cox 
2379e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
238030fbfddaSJeff Roberson 	vm_domain_pageout_assert_unlocked(vmd);
238130fbfddaSJeff Roberson 	if (curproc == pageproc)
238230fbfddaSJeff Roberson 		return;
2383280d15cdSMark Johnston 
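	/*
	 * Only the first waker, i.e. the one that observes
	 * vmd_pageout_wanted at zero, takes the pageout lock and issues the
	 * wakeup; concurrent callers see a non-zero count and return.  The
	 * worker clears the flag before re-checking the paging targets, so
	 * a request cannot be lost.
	 */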
238430fbfddaSJeff Roberson 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
238530fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
238630fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2387e2068d0bSJeff Roberson 		wakeup(&vmd->vmd_pageout_wanted);
238830fbfddaSJeff Roberson 		vm_domain_pageout_unlock(vmd);
2389e0c5a895SJohn Dyson 	}
2390e0c5a895SJohn Dyson }
2391