xref: /freebsd/sys/vm/vm_pageout.c (revision 4a864f624a7097f1d032a0350ac70fa6c371179e)
160727d8bSWarner Losh /*-
2796df753SPedro F. Giffuni  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3df57947fSPedro F. Giffuni  *
426f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
726f9a767SRodney W. Grimes  * All rights reserved.
826f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
926f9a767SRodney W. Grimes  * All rights reserved.
108dbca793STor Egge  * Copyright (c) 2005 Yahoo! Technologies Norway AS
118dbca793STor Egge  * All rights reserved.
12df8bae1dSRodney W. Grimes  *
13df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
14df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
15df8bae1dSRodney W. Grimes  *
16df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
17df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
18df8bae1dSRodney W. Grimes  * are met:
19df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
20df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
21df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
22df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
23df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
24df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
255929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
26df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
27df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
28df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
29df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
30df8bae1dSRodney W. Grimes  *    without specific prior written permission.
31df8bae1dSRodney W. Grimes  *
32df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
43df8bae1dSRodney W. Grimes  *
443c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  *
47df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48df8bae1dSRodney W. Grimes  * All rights reserved.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
53df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
54df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
55df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
56df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61df8bae1dSRodney W. Grimes  *
62df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
63df8bae1dSRodney W. Grimes  *
64df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65df8bae1dSRodney W. Grimes  *  School of Computer Science
66df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
67df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
68df8bae1dSRodney W. Grimes  *
69df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
70df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
71df8bae1dSRodney W. Grimes  */
72df8bae1dSRodney W. Grimes 
73df8bae1dSRodney W. Grimes /*
74df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
75df8bae1dSRodney W. Grimes  */
76df8bae1dSRodney W. Grimes 
77874651b1SDavid E. O'Brien #include <sys/cdefs.h>
78874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
79874651b1SDavid E. O'Brien 
80faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
817672ca05SMark Johnston 
82df8bae1dSRodney W. Grimes #include <sys/param.h>
8326f9a767SRodney W. Grimes #include <sys/systm.h>
84b5e8ce9fSBruce Evans #include <sys/kernel.h>
850292c54bSConrad Meyer #include <sys/blockcount.h>
86855a310fSJeff Roberson #include <sys/eventhandler.h>
87fb919e4dSMark Murray #include <sys/lock.h>
88fb919e4dSMark Murray #include <sys/mutex.h>
8926f9a767SRodney W. Grimes #include <sys/proc.h>
909c8b8baaSPeter Wemm #include <sys/kthread.h>
910384fff8SJason Evans #include <sys/ktr.h>
9297824da3SAlan Cox #include <sys/mount.h>
93099e7e95SEdward Tomasz Napierala #include <sys/racct.h>
9426f9a767SRodney W. Grimes #include <sys/resourcevar.h>
95b43179fbSJeff Roberson #include <sys/sched.h>
9614a0d74eSSteven Hartland #include <sys/sdt.h>
97d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
98449c2e92SKonstantin Belousov #include <sys/smp.h>
99a6bf3a9eSRyan Stone #include <sys/time.h>
100f6b04d2bSDavid Greenman #include <sys/vnode.h>
101efeaf95aSDavid Greenman #include <sys/vmmeter.h>
10289f6b863SAttilio Rao #include <sys/rwlock.h>
1031005a129SJohn Baldwin #include <sys/sx.h>
10438efa82bSJohn Dyson #include <sys/sysctl.h>
105df8bae1dSRodney W. Grimes 
106df8bae1dSRodney W. Grimes #include <vm/vm.h>
107efeaf95aSDavid Greenman #include <vm/vm_param.h>
108efeaf95aSDavid Greenman #include <vm/vm_object.h>
109df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
110efeaf95aSDavid Greenman #include <vm/vm_map.h>
111df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
11224a1cce3SDavid Greenman #include <vm/vm_pager.h>
113449c2e92SKonstantin Belousov #include <vm/vm_phys.h>
114e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
11505f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
116efeaf95aSDavid Greenman #include <vm/vm_extern.h>
117670d17b5SJeff Roberson #include <vm/uma.h>
118df8bae1dSRodney W. Grimes 
1192b14f991SJulian Elischer /*
1202b14f991SJulian Elischer  * System initialization
1212b14f991SJulian Elischer  */
1222b14f991SJulian Elischer 
1232b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
12411caded3SAlfred Perlstein static void vm_pageout(void);
1254d19f4adSSteven Hartland static void vm_pageout_init(void);
126ebcddc72SAlan Cox static int vm_pageout_clean(vm_page_t m, int *numpagedout);
12734d8b7eaSJeff Roberson static int vm_pageout_cluster(vm_page_t m);
12876386c7eSKonstantin Belousov static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
12976386c7eSKonstantin Belousov     int starting_page_shortage);
13045ae1d91SAlan Cox 
1314d19f4adSSteven Hartland SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
1324d19f4adSSteven Hartland     NULL);
1334d19f4adSSteven Hartland 
1342b14f991SJulian Elischer struct proc *pageproc;
1352b14f991SJulian Elischer 
1362b14f991SJulian Elischer static struct kproc_desc page_kp = {
1372b14f991SJulian Elischer 	"pagedaemon",
1382b14f991SJulian Elischer 	vm_pageout,
1392b14f991SJulian Elischer 	&pageproc
1402b14f991SJulian Elischer };
1414d19f4adSSteven Hartland SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
142237fdd78SRobert Watson     &page_kp);
1432b14f991SJulian Elischer 
14414a0d74eSSteven Hartland SDT_PROVIDER_DEFINE(vm);
14514a0d74eSSteven Hartland SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
14614a0d74eSSteven Hartland 
147ebcddc72SAlan Cox /* Pagedaemon activity rates, in subdivisions of one second. */
148ebcddc72SAlan Cox #define	VM_LAUNDER_RATE		10
1495f8cd1c0SJeff Roberson #define	VM_INACT_SCAN_RATE	10
1502b14f991SJulian Elischer 
151b1fd102eSMark Johnston static int swapdev_enabled;
152c4a25e07SMark Johnston int vm_pageout_page_count = 32;
15370111b90SJohn Dyson 
1548311a2b8SWill Andrews static int vm_panic_on_oom = 0;
1558311a2b8SWill Andrews SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
1568311a2b8SWill Andrews     CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
157c4a25e07SMark Johnston     "Panic on the given number of out-of-memory errors instead of "
158c4a25e07SMark Johnston     "killing the largest process");
1598311a2b8SWill Andrews 
160c4a25e07SMark Johnston static int vm_pageout_update_period;
161d9e23210SJeff Roberson SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
162e0b2fc3aSMark Johnston     CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
163d9e23210SJeff Roberson     "Maximum active LRU update period");
16453636869SAndrey Zonov 
16574f5530dSConrad Meyer static int pageout_cpus_per_thread = 16;
16674f5530dSConrad Meyer SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
16774f5530dSConrad Meyer     &pageout_cpus_per_thread, 0,
16874f5530dSConrad Meyer     "Number of CPUs per pagedaemon worker thread");
1690292c54bSConrad Meyer 
170c4a25e07SMark Johnston static int lowmem_period = 10;
171e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
172c9612b2dSJeff Roberson     "Low memory callback period");
173c9612b2dSJeff Roberson 
174c4a25e07SMark Johnston static int disable_swap_pageouts;
175ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
176c4a25e07SMark Johnston     CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
177c4a25e07SMark Johnston     "Disallow swapout of dirty pages");
17812ac6a1dSJohn Dyson 
17923b59018SMatthew Dillon static int pageout_lock_miss;
18023b59018SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
181c4a25e07SMark Johnston     CTLFLAG_RD, &pageout_lock_miss, 0,
182c4a25e07SMark Johnston     "vget() lock misses during pageout");
18323b59018SMatthew Dillon 
184c4a25e07SMark Johnston static int vm_pageout_oom_seq = 12;
18576386c7eSKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
186e0b2fc3aSMark Johnston     CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
18776386c7eSKonstantin Belousov     "back-to-back calls to oom detector to start OOM");
18876386c7eSKonstantin Belousov 
189ebcddc72SAlan Cox static int act_scan_laundry_weight = 3;
190e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
191ebcddc72SAlan Cox     &act_scan_laundry_weight, 0,
192ebcddc72SAlan Cox     "weight given to clean vs. dirty pages in active queue scans");
193ebcddc72SAlan Cox 
194ebcddc72SAlan Cox static u_int vm_background_launder_rate = 4096;
195e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
196ebcddc72SAlan Cox     &vm_background_launder_rate, 0,
197ebcddc72SAlan Cox     "background laundering rate, in kilobytes per second");
198ebcddc72SAlan Cox 
199ebcddc72SAlan Cox static u_int vm_background_launder_max = 20 * 1024;
200e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
201c4a25e07SMark Johnston     &vm_background_launder_max, 0,
202c4a25e07SMark Johnston     "background laundering cap, in kilobytes");
203df8bae1dSRodney W. Grimes 
20454a3a114SMark Johnston u_long vm_page_max_user_wired;
20554a3a114SMark Johnston SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
20654a3a114SMark Johnston     &vm_page_max_user_wired, 0,
20754a3a114SMark Johnston     "system-wide limit to user-wired page count");
208df8bae1dSRodney W. Grimes 
209ebcddc72SAlan Cox static u_int isqrt(u_int num);
210ebcddc72SAlan Cox static int vm_pageout_launder(struct vm_domain *vmd, int launder,
211ebcddc72SAlan Cox     bool in_shortfall);
212ebcddc72SAlan Cox static void vm_pageout_laundry_worker(void *arg);
213cd41fc12SDavid Greenman 
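/*
 * A page queue scan in progress: the queue being scanned, the marker page
 * that records the scan position, a batch of pages collected from the queue,
 * and the scan's length limit and running count of pages visited.
 */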
2145cd29d0fSMark Johnston struct scan_state {
2155cd29d0fSMark Johnston 	struct vm_batchqueue bq;
2168d220203SAlan Cox 	struct vm_pagequeue *pq;
2175cd29d0fSMark Johnston 	vm_page_t	marker;
2185cd29d0fSMark Johnston 	int		maxscan;
2195cd29d0fSMark Johnston 	int		scanned;
2205cd29d0fSMark Johnston };
2218dbca793STor Egge 
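/*
 * Begin a scan of "pq": insert the marker at the requested starting point,
 * record the scan bounds in "ss", and drop the queue lock.
 */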
2225cd29d0fSMark Johnston static void
2235cd29d0fSMark Johnston vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
2245cd29d0fSMark Johnston     vm_page_t marker, vm_page_t after, int maxscan)
2255cd29d0fSMark Johnston {
2268dbca793STor Egge 
2275cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
2285cff1f4dSMark Johnston 	KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
2295cd29d0fSMark Johnston 	    ("marker %p already enqueued", marker));
2305cd29d0fSMark Johnston 
2315cd29d0fSMark Johnston 	if (after == NULL)
2325cd29d0fSMark Johnston 		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
2335cd29d0fSMark Johnston 	else
2345cd29d0fSMark Johnston 		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
2355cd29d0fSMark Johnston 	vm_page_aflag_set(marker, PGA_ENQUEUED);
2365cd29d0fSMark Johnston 
2375cd29d0fSMark Johnston 	vm_batchqueue_init(&ss->bq);
2385cd29d0fSMark Johnston 	ss->pq = pq;
2395cd29d0fSMark Johnston 	ss->marker = marker;
2405cd29d0fSMark Johnston 	ss->maxscan = maxscan;
2415cd29d0fSMark Johnston 	ss->scanned = 0;
2428d220203SAlan Cox 	vm_pagequeue_unlock(pq);
2435cd29d0fSMark Johnston }
2448dbca793STor Egge 
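/*
 * Finish a scan: remove the marker from the queue and credit the number of
 * pages visited to the queue's pq_pdpages counter.
 */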
2455cd29d0fSMark Johnston static void
2465cd29d0fSMark Johnston vm_pageout_end_scan(struct scan_state *ss)
2475cd29d0fSMark Johnston {
2485cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
2495cd29d0fSMark Johnston 
2505cd29d0fSMark Johnston 	pq = ss->pq;
2515cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
2525cff1f4dSMark Johnston 	KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
2535cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2545cd29d0fSMark Johnston 
2555cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
2565cd29d0fSMark Johnston 	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
257899fe184SMark Johnston 	pq->pq_pdpages += ss->scanned;
2588dbca793STor Egge }
2598dbca793STor Egge 
2608dbca793STor Egge /*
2615cd29d0fSMark Johnston  * Add a small number of queued pages to a batch queue for later processing
2625cd29d0fSMark Johnston  * without the corresponding queue lock held.  The caller must have enqueued a
2635cd29d0fSMark Johnston  * marker page at the desired start point for the scan.  Pages will be
2645cd29d0fSMark Johnston  * physically dequeued if the caller so requests.  Otherwise, the returned
2655cd29d0fSMark Johnston  * batch may contain marker pages, and it is up to the caller to handle them.
2665cd29d0fSMark Johnston  *
267efec381dSMark Johnston  * When processing the batch queue, vm_pageout_defer() must be used to
268efec381dSMark Johnston  * determine whether the page has been logically dequeued since the batch was
269efec381dSMark Johnston  * collected.
2705cd29d0fSMark Johnston  */
2715cd29d0fSMark Johnston static __always_inline void
2725cd29d0fSMark Johnston vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
2735cd29d0fSMark Johnston {
2748d220203SAlan Cox 	struct vm_pagequeue *pq;
275d70f0ab3SMark Johnston 	vm_page_t m, marker, n;
2768c616246SKonstantin Belousov 
2775cd29d0fSMark Johnston 	marker = ss->marker;
2785cd29d0fSMark Johnston 	pq = ss->pq;
2798c616246SKonstantin Belousov 
2805cff1f4dSMark Johnston 	KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
2815cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2828c616246SKonstantin Belousov 
2838d220203SAlan Cox 	vm_pagequeue_lock(pq);
2845cd29d0fSMark Johnston 	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
2855cd29d0fSMark Johnston 	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
286d70f0ab3SMark Johnston 	    m = n, ss->scanned++) {
287d70f0ab3SMark Johnston 		n = TAILQ_NEXT(m, plinks.q);
2885cd29d0fSMark Johnston 		if ((m->flags & PG_MARKER) == 0) {
2895cff1f4dSMark Johnston 			KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
2905cd29d0fSMark Johnston 			    ("page %p not enqueued", m));
2915cd29d0fSMark Johnston 			KASSERT((m->flags & PG_FICTITIOUS) == 0,
2925cd29d0fSMark Johnston 			    ("Fictitious page %p cannot be in page queue", m));
2935cd29d0fSMark Johnston 			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2945cd29d0fSMark Johnston 			    ("Unmanaged page %p cannot be in page queue", m));
2955cd29d0fSMark Johnston 		} else if (dequeue)
2965cd29d0fSMark Johnston 			continue;
2978c616246SKonstantin Belousov 
2985cd29d0fSMark Johnston 		(void)vm_batchqueue_insert(&ss->bq, m);
2995cd29d0fSMark Johnston 		if (dequeue) {
3005cd29d0fSMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3015cd29d0fSMark Johnston 			vm_page_aflag_clear(m, PGA_ENQUEUED);
3025cd29d0fSMark Johnston 		}
3035cd29d0fSMark Johnston 	}
3045cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
3055cd29d0fSMark Johnston 	if (__predict_true(m != NULL))
3065cd29d0fSMark Johnston 		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
3075cd29d0fSMark Johnston 	else
3085cd29d0fSMark Johnston 		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
3095cd29d0fSMark Johnston 	if (dequeue)
3105cd29d0fSMark Johnston 		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
3115cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
3125cd29d0fSMark Johnston }
3135cd29d0fSMark Johnston 
314fee2a2faSMark Johnston /*
315fee2a2faSMark Johnston  * Return the next page to be scanned, or NULL if the scan is complete.
316fee2a2faSMark Johnston  */
3175cd29d0fSMark Johnston static __always_inline vm_page_t
3185cd29d0fSMark Johnston vm_pageout_next(struct scan_state *ss, const bool dequeue)
3195cd29d0fSMark Johnston {
3205cd29d0fSMark Johnston 
3215cd29d0fSMark Johnston 	if (ss->bq.bq_cnt == 0)
3225cd29d0fSMark Johnston 		vm_pageout_collect_batch(ss, dequeue);
3235cd29d0fSMark Johnston 	return (vm_batchqueue_pop(&ss->bq));
3248c616246SKonstantin Belousov }
3258c616246SKonstantin Belousov 
3268c616246SKonstantin Belousov /*
327b7f30bffSMark Johnston  * Determine whether processing of a page should be deferred and ensure that any
328b7f30bffSMark Johnston  * outstanding queue operations are processed.
329b7f30bffSMark Johnston  */
330b7f30bffSMark Johnston static __always_inline bool
331b7f30bffSMark Johnston vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
332b7f30bffSMark Johnston {
333b7f30bffSMark Johnston 	vm_page_astate_t as;
334b7f30bffSMark Johnston 
335b7f30bffSMark Johnston 	as = vm_page_astate_load(m);
336b7f30bffSMark Johnston 	if (__predict_false(as.queue != queue ||
337b7f30bffSMark Johnston 	    ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
338b7f30bffSMark Johnston 		return (true);
339b7f30bffSMark Johnston 	if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
340b7f30bffSMark Johnston 		vm_page_pqbatch_submit(m, queue);
341b7f30bffSMark Johnston 		return (true);
342b7f30bffSMark Johnston 	}
343b7f30bffSMark Johnston 	return (false);
344b7f30bffSMark Johnston }
345b7f30bffSMark Johnston 
346b7f30bffSMark Johnston /*
347248fe642SAlan Cox  * Scan for pages at adjacent offsets within the given page's object that are
348248fe642SAlan Cox  * eligible for laundering, form a cluster of these pages and the given page,
349248fe642SAlan Cox  * and launder that cluster.
35026f9a767SRodney W. Grimes  */
3513af76890SPoul-Henning Kamp static int
35234d8b7eaSJeff Roberson vm_pageout_cluster(vm_page_t m)
35324a1cce3SDavid Greenman {
35454d92145SMatthew Dillon 	vm_object_t object;
355248fe642SAlan Cox 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
356248fe642SAlan Cox 	vm_pindex_t pindex;
357248fe642SAlan Cox 	int ib, is, page_base, pageout_count;
35826f9a767SRodney W. Grimes 
35917f6a17bSAlan Cox 	object = m->object;
36089f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
361248fe642SAlan Cox 	pindex = m->pindex;
3620cddd8f0SMatthew Dillon 
36363e97555SJeff Roberson 	vm_page_assert_xbusied(m);
3640d94caffSDavid Greenman 
36591b4f427SAlan Cox 	mc[vm_pageout_page_count] = pb = ps = m;
36626f9a767SRodney W. Grimes 	pageout_count = 1;
367f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
36890ecac61SMatthew Dillon 	ib = 1;
36990ecac61SMatthew Dillon 	is = 1;
37090ecac61SMatthew Dillon 
37124a1cce3SDavid Greenman 	/*
372248fe642SAlan Cox 	 * We can cluster only if the page is not clean, busy, or held, and
373ebcddc72SAlan Cox 	 * the page is in the laundry queue.
37490ecac61SMatthew Dillon 	 *
37590ecac61SMatthew Dillon 	 * During heavy mmap/modification loads the pageout
37690ecac61SMatthew Dillon 	 * daemon can really fragment the underlying file
377248fe642SAlan Cox 	 * due to flushing pages out of order and not trying to
378248fe642SAlan Cox 	 * align the clusters (which leaves sporadic out-of-order
37990ecac61SMatthew Dillon 	 * holes).  To solve this problem we do the reverse scan
38090ecac61SMatthew Dillon 	 * first and attempt to align our cluster, then do a
38190ecac61SMatthew Dillon 	 * forward scan if room remains.
38224a1cce3SDavid Greenman 	 */
38390ecac61SMatthew Dillon more:
384248fe642SAlan Cox 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
38590ecac61SMatthew Dillon 		if (ib > pindex) {
38690ecac61SMatthew Dillon 			ib = 0;
38790ecac61SMatthew Dillon 			break;
388f6b04d2bSDavid Greenman 		}
38963e97555SJeff Roberson 		if ((p = vm_page_prev(pb)) == NULL ||
39063e97555SJeff Roberson 		    vm_page_tryxbusy(p) == 0) {
39190ecac61SMatthew Dillon 			ib = 0;
39290ecac61SMatthew Dillon 			break;
393f6b04d2bSDavid Greenman 		}
39463e97555SJeff Roberson 		if (vm_page_wired(p)) {
39563e97555SJeff Roberson 			ib = 0;
39663e97555SJeff Roberson 			vm_page_xunbusy(p);
39763e97555SJeff Roberson 			break;
39863e97555SJeff Roberson 		}
39924a1cce3SDavid Greenman 		vm_page_test_dirty(p);
4001b5c869dSMark Johnston 		if (p->dirty == 0) {
401eb5d3969SAlan Cox 			ib = 0;
40263e97555SJeff Roberson 			vm_page_xunbusy(p);
403eb5d3969SAlan Cox 			break;
404eb5d3969SAlan Cox 		}
405fee2a2faSMark Johnston 		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
40663e97555SJeff Roberson 			vm_page_xunbusy(p);
40790ecac61SMatthew Dillon 			ib = 0;
40824a1cce3SDavid Greenman 			break;
409f6b04d2bSDavid Greenman 		}
41091b4f427SAlan Cox 		mc[--page_base] = pb = p;
41190ecac61SMatthew Dillon 		++pageout_count;
41290ecac61SMatthew Dillon 		++ib;
413248fe642SAlan Cox 
41424a1cce3SDavid Greenman 		/*
415248fe642SAlan Cox 		 * We are at an alignment boundary.  Stop here, and switch
416248fe642SAlan Cox 		 * directions.  Do not clear ib.
41724a1cce3SDavid Greenman 		 */
41890ecac61SMatthew Dillon 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
41990ecac61SMatthew Dillon 			break;
42024a1cce3SDavid Greenman 	}
42190ecac61SMatthew Dillon 	while (pageout_count < vm_pageout_page_count &&
42290ecac61SMatthew Dillon 	    pindex + is < object->size) {
42363e97555SJeff Roberson 		if ((p = vm_page_next(ps)) == NULL ||
42463e97555SJeff Roberson 		    vm_page_tryxbusy(p) == 0)
42590ecac61SMatthew Dillon 			break;
42663e97555SJeff Roberson 		if (vm_page_wired(p)) {
42763e97555SJeff Roberson 			vm_page_xunbusy(p);
42863e97555SJeff Roberson 			break;
42963e97555SJeff Roberson 		}
43024a1cce3SDavid Greenman 		vm_page_test_dirty(p);
43163e97555SJeff Roberson 		if (p->dirty == 0) {
43263e97555SJeff Roberson 			vm_page_xunbusy(p);
433eb5d3969SAlan Cox 			break;
43463e97555SJeff Roberson 		}
435e8bcf696SMark Johnston 		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
43663e97555SJeff Roberson 			vm_page_xunbusy(p);
43724a1cce3SDavid Greenman 			break;
438e8bcf696SMark Johnston 		}
43991b4f427SAlan Cox 		mc[page_base + pageout_count] = ps = p;
44090ecac61SMatthew Dillon 		++pageout_count;
44190ecac61SMatthew Dillon 		++is;
44224a1cce3SDavid Greenman 	}
44390ecac61SMatthew Dillon 
44490ecac61SMatthew Dillon 	/*
44590ecac61SMatthew Dillon 	 * If we exhausted our forward scan, continue with the reverse scan
446248fe642SAlan Cox 	 * when possible, even past an alignment boundary.  This catches
447248fe642SAlan Cox 	 * boundary conditions.
44890ecac61SMatthew Dillon 	 */
449248fe642SAlan Cox 	if (ib != 0 && pageout_count < vm_pageout_page_count)
45090ecac61SMatthew Dillon 		goto more;
451f6b04d2bSDavid Greenman 
45299e6e193SMark Johnston 	return (vm_pageout_flush(&mc[page_base], pageout_count,
45399e6e193SMark Johnston 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
454aef922f5SJohn Dyson }
455aef922f5SJohn Dyson 
4561c7c3c6aSMatthew Dillon /*
4571c7c3c6aSMatthew Dillon  * vm_pageout_flush() - launder the given pages
4581c7c3c6aSMatthew Dillon  *
4591c7c3c6aSMatthew Dillon  *	The given pages are laundered.  Note that we set up for the start of
4601c7c3c6aSMatthew Dillon  *	I/O (i.e., busy the page), mark it read-only, and bump the object
4611c7c3c6aSMatthew Dillon  *	reference count all in here rather than in the parent.  If we want
4621c7c3c6aSMatthew Dillon  *	the parent to do more sophisticated things we may have to change
4631c7c3c6aSMatthew Dillon  *	the ordering.
4641e8a675cSKonstantin Belousov  *
4651e8a675cSKonstantin Belousov  *	Returned runlen is the count of pages between mreq and first
4661e8a675cSKonstantin Belousov  *	page after mreq with status VM_PAGER_AGAIN.
467126d6082SKonstantin Belousov  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
468126d6082SKonstantin Belousov  *	VM_PAGER_FAIL for any page in the runlen.
4691c7c3c6aSMatthew Dillon  */
470aef922f5SJohn Dyson int
471126d6082SKonstantin Belousov vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
472126d6082SKonstantin Belousov     boolean_t *eio)
473aef922f5SJohn Dyson {
4742e3b314dSAlan Cox 	vm_object_t object = mc[0]->object;
475aef922f5SJohn Dyson 	int pageout_status[count];
47695461b45SJohn Dyson 	int numpagedout = 0;
4771e8a675cSKonstantin Belousov 	int i, runlen;
478aef922f5SJohn Dyson 
47989f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
4807bec141bSKip Macy 
4811c7c3c6aSMatthew Dillon 	/*
48263e97555SJeff Roberson 	 * Initiate I/O.  Mark the pages shared busy and verify that they're
48363e97555SJeff Roberson 	 * valid and read-only.
4841c7c3c6aSMatthew Dillon 	 *
4851c7c3c6aSMatthew Dillon 	 * We do not have to fixup the clean/dirty bits here... we can
4861c7c3c6aSMatthew Dillon 	 * allow the pager to do it after the I/O completes.
48702fa91d3SMatthew Dillon 	 *
48802fa91d3SMatthew Dillon 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
48902fa91d3SMatthew Dillon 	 * edge case with file fragments.
4901c7c3c6aSMatthew Dillon 	 */
4918f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
4920012f373SJeff Roberson 		KASSERT(vm_page_all_valid(mc[i]),
4937a935082SAlan Cox 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
4947a935082SAlan Cox 			mc[i], i, count));
4955cff1f4dSMark Johnston 		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
496aed9aaaaSMark Johnston 		    ("vm_pageout_flush: writeable page %p", mc[i]));
49763e97555SJeff Roberson 		vm_page_busy_downgrade(mc[i]);
4982965a453SKip Macy 	}
499d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
500aef922f5SJohn Dyson 
501d076fbeaSAlan Cox 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
50226f9a767SRodney W. Grimes 
5031e8a675cSKonstantin Belousov 	runlen = count - mreq;
504126d6082SKonstantin Belousov 	if (eio != NULL)
505126d6082SKonstantin Belousov 		*eio = FALSE;
506aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
507aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
50824a1cce3SDavid Greenman 
5094cd45723SAlan Cox 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
5106031c68dSAlan Cox 		    !pmap_page_is_write_mapped(mt),
5119ea8d1a6SAlan Cox 		    ("vm_pageout_flush: page %p is not write protected", mt));
51226f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
51326f9a767SRodney W. Grimes 		case VM_PAGER_OK:
5149f5632e6SMark Johnston 			/*
5159f5632e6SMark Johnston 			 * The page may have moved since laundering started, in
5169f5632e6SMark Johnston 			 * which case it should be left alone.
5179f5632e6SMark Johnston 			 */
518ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
519ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
520ebcddc72SAlan Cox 			/* FALLTHROUGH */
52126f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
52295461b45SJohn Dyson 			numpagedout++;
52326f9a767SRodney W. Grimes 			break;
52426f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
52526f9a767SRodney W. Grimes 			/*
526ebcddc72SAlan Cox 			 * The page is outside the object's range.  We pretend
527ebcddc72SAlan Cox 			 * that the page out worked and clean the page, so the
528ebcddc72SAlan Cox 			 * changes will be lost if the page is reclaimed by
529ebcddc72SAlan Cox 			 * the page daemon.
53026f9a767SRodney W. Grimes 			 */
53190ecac61SMatthew Dillon 			vm_page_undirty(mt);
532ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
533ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
53426f9a767SRodney W. Grimes 			break;
53526f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
53626f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
53726f9a767SRodney W. Grimes 			/*
538b1fd102eSMark Johnston 			 * If the page couldn't be paged out to swap because the
539b1fd102eSMark Johnston 			 * pager wasn't able to find space, place the page in
540b1fd102eSMark Johnston 			 * the PQ_UNSWAPPABLE holding queue.  This is an
541b1fd102eSMark Johnston 			 * optimization that prevents the page daemon from
542b1fd102eSMark Johnston 			 * wasting CPU cycles on pages that cannot be reclaimed
543fa7a635fSGordon Bergling 			 * because no swap device is configured.
544b1fd102eSMark Johnston 			 *
545b1fd102eSMark Johnston 			 * Otherwise, reactivate the page so that it doesn't
546b1fd102eSMark Johnston 			 * clog the laundry and inactive queues.  (We will try
547b1fd102eSMark Johnston 			 * paging it out again later.)
54826f9a767SRodney W. Grimes 			 */
5494b8365d7SKonstantin Belousov 			if ((object->flags & OBJ_SWAP) != 0 &&
550b1fd102eSMark Johnston 			    pageout_status[i] == VM_PAGER_FAIL) {
551b1fd102eSMark Johnston 				vm_page_unswappable(mt);
552b1fd102eSMark Johnston 				numpagedout++;
553b1fd102eSMark Johnston 			} else
55424a1cce3SDavid Greenman 				vm_page_activate(mt);
555126d6082SKonstantin Belousov 			if (eio != NULL && i >= mreq && i - mreq < runlen)
556126d6082SKonstantin Belousov 				*eio = TRUE;
55726f9a767SRodney W. Grimes 			break;
55826f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
5591e8a675cSKonstantin Belousov 			if (i >= mreq && i - mreq < runlen)
5601e8a675cSKonstantin Belousov 				runlen = i - mreq;
56126f9a767SRodney W. Grimes 			break;
56226f9a767SRodney W. Grimes 		}
56326f9a767SRodney W. Grimes 
56426f9a767SRodney W. Grimes 		/*
5650d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
5660d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
5670d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
5680d94caffSDavid Greenman 		 * collapse.
56926f9a767SRodney W. Grimes 		 */
57026f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
571f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
572c7aebda8SAttilio Rao 			vm_page_sunbusy(mt);
5733c4a2440SAlan Cox 		}
5743c4a2440SAlan Cox 	}
5751e8a675cSKonstantin Belousov 	if (prunlen != NULL)
5761e8a675cSKonstantin Belousov 		*prunlen = runlen;
5773c4a2440SAlan Cox 	return (numpagedout);
57826f9a767SRodney W. Grimes }
57926f9a767SRodney W. Grimes 
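/*
 * Swap device event handlers: keep swapdev_enabled up to date so that the
 * laundry thread knows whether pageouts to swap are possible and whether
 * PQ_UNSWAPPABLE should be scanned.
 */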
580b1fd102eSMark Johnston static void
581b1fd102eSMark Johnston vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
582b1fd102eSMark Johnston {
583b1fd102eSMark Johnston 
584b1fd102eSMark Johnston 	atomic_store_rel_int(&swapdev_enabled, 1);
585b1fd102eSMark Johnston }
586b1fd102eSMark Johnston 
587b1fd102eSMark Johnston static void
588b1fd102eSMark Johnston vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
589b1fd102eSMark Johnston {
590b1fd102eSMark Johnston 
591b1fd102eSMark Johnston 	if (swap_pager_nswapdev() == 1)
592b1fd102eSMark Johnston 		atomic_store_rel_int(&swapdev_enabled, 0);
593b1fd102eSMark Johnston }
594b1fd102eSMark Johnston 
5951c7c3c6aSMatthew Dillon /*
59634d8b7eaSJeff Roberson  * Attempt to acquire all of the necessary locks to launder a page and
59734d8b7eaSJeff Roberson  * then call through the clustering layer to PUTPAGES.  Wait a short
59834d8b7eaSJeff Roberson  * time for a vnode lock.
59934d8b7eaSJeff Roberson  *
60034d8b7eaSJeff Roberson  * Requires the page and object lock on entry, releases both before return.
60134d8b7eaSJeff Roberson  * Returns 0 on success and an errno otherwise.
60234d8b7eaSJeff Roberson  */
60334d8b7eaSJeff Roberson static int
604ebcddc72SAlan Cox vm_pageout_clean(vm_page_t m, int *numpagedout)
60534d8b7eaSJeff Roberson {
60634d8b7eaSJeff Roberson 	struct vnode *vp;
60734d8b7eaSJeff Roberson 	struct mount *mp;
60834d8b7eaSJeff Roberson 	vm_object_t object;
60934d8b7eaSJeff Roberson 	vm_pindex_t pindex;
6100ef5eee9SKonstantin Belousov 	int error;
61134d8b7eaSJeff Roberson 
61234d8b7eaSJeff Roberson 	object = m->object;
61334d8b7eaSJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
61434d8b7eaSJeff Roberson 	error = 0;
61534d8b7eaSJeff Roberson 	vp = NULL;
61634d8b7eaSJeff Roberson 	mp = NULL;
61734d8b7eaSJeff Roberson 
61834d8b7eaSJeff Roberson 	/*
61934d8b7eaSJeff Roberson 	 * The object is already known NOT to be dead.   It
62034d8b7eaSJeff Roberson 	 * is possible for the vget() to block the whole
62134d8b7eaSJeff Roberson 	 * pageout daemon, but the new low-memory handling
62234d8b7eaSJeff Roberson 	 * code should prevent it.
62334d8b7eaSJeff Roberson 	 *
62434d8b7eaSJeff Roberson 	 * We can't wait forever for the vnode lock, we might
62534d8b7eaSJeff Roberson 	 * deadlock due to a vn_read() getting stuck in
62634d8b7eaSJeff Roberson 	 * vm_wait while holding this vnode.  We skip the
62734d8b7eaSJeff Roberson 	 * vnode if we can't get it in a reasonable amount
62834d8b7eaSJeff Roberson 	 * of time.
62934d8b7eaSJeff Roberson 	 */
63034d8b7eaSJeff Roberson 	if (object->type == OBJT_VNODE) {
63163e97555SJeff Roberson 		vm_page_xunbusy(m);
63234d8b7eaSJeff Roberson 		vp = object->handle;
63334d8b7eaSJeff Roberson 		if (vp->v_type == VREG &&
63434d8b7eaSJeff Roberson 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
63534d8b7eaSJeff Roberson 			mp = NULL;
63634d8b7eaSJeff Roberson 			error = EDEADLK;
63734d8b7eaSJeff Roberson 			goto unlock_all;
63834d8b7eaSJeff Roberson 		}
63934d8b7eaSJeff Roberson 		KASSERT(mp != NULL,
64034d8b7eaSJeff Roberson 		    ("vp %p with NULL v_mount", vp));
64134d8b7eaSJeff Roberson 		vm_object_reference_locked(object);
64234d8b7eaSJeff Roberson 		pindex = m->pindex;
64334d8b7eaSJeff Roberson 		VM_OBJECT_WUNLOCK(object);
6440ef5eee9SKonstantin Belousov 		if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
64534d8b7eaSJeff Roberson 			vp = NULL;
64634d8b7eaSJeff Roberson 			error = EDEADLK;
64734d8b7eaSJeff Roberson 			goto unlock_mp;
64834d8b7eaSJeff Roberson 		}
64934d8b7eaSJeff Roberson 		VM_OBJECT_WLOCK(object);
65057cd81a3SMark Johnston 
65157cd81a3SMark Johnston 		/*
65257cd81a3SMark Johnston 		 * Ensure that the object and vnode were not disassociated
65357cd81a3SMark Johnston 		 * while locks were dropped.
65457cd81a3SMark Johnston 		 */
65557cd81a3SMark Johnston 		if (vp->v_object != object) {
65657cd81a3SMark Johnston 			error = ENOENT;
65757cd81a3SMark Johnston 			goto unlock_all;
65857cd81a3SMark Johnston 		}
65957cd81a3SMark Johnston 
66034d8b7eaSJeff Roberson 		/*
6619f5632e6SMark Johnston 		 * While the object was unlocked, the page may have been:
66234d8b7eaSJeff Roberson 		 * (1) moved to a different queue,
66334d8b7eaSJeff Roberson 		 * (2) reallocated to a different object,
66434d8b7eaSJeff Roberson 		 * (3) reallocated to a different offset, or
66534d8b7eaSJeff Roberson 		 * (4) cleaned.
66634d8b7eaSJeff Roberson 		 */
667ebcddc72SAlan Cox 		if (!vm_page_in_laundry(m) || m->object != object ||
66834d8b7eaSJeff Roberson 		    m->pindex != pindex || m->dirty == 0) {
66934d8b7eaSJeff Roberson 			error = ENXIO;
67034d8b7eaSJeff Roberson 			goto unlock_all;
67134d8b7eaSJeff Roberson 		}
67234d8b7eaSJeff Roberson 
67334d8b7eaSJeff Roberson 		/*
6749f5632e6SMark Johnston 		 * The page may have been busied while the object lock was
6759f5632e6SMark Johnston 		 * released.
67634d8b7eaSJeff Roberson 		 */
67763e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0) {
67834d8b7eaSJeff Roberson 			error = EBUSY;
67934d8b7eaSJeff Roberson 			goto unlock_all;
68034d8b7eaSJeff Roberson 		}
68134d8b7eaSJeff Roberson 	}
68234d8b7eaSJeff Roberson 
68334d8b7eaSJeff Roberson 	/*
684fee2a2faSMark Johnston 	 * Remove all writeable mappings, failing if the page is wired.
685fee2a2faSMark Johnston 	 */
686fee2a2faSMark Johnston 	if (!vm_page_try_remove_write(m)) {
68763e97555SJeff Roberson 		vm_page_xunbusy(m);
688fee2a2faSMark Johnston 		error = EBUSY;
689fee2a2faSMark Johnston 		goto unlock_all;
690fee2a2faSMark Johnston 	}
691fee2a2faSMark Johnston 
692fee2a2faSMark Johnston 	/*
69334d8b7eaSJeff Roberson 	 * If a page is dirty, then it is either being washed
69434d8b7eaSJeff Roberson 	 * (but not yet cleaned) or it is still in the
69534d8b7eaSJeff Roberson 	 * laundry.  If it is still in the laundry, then we
69634d8b7eaSJeff Roberson 	 * start the cleaning operation.
69734d8b7eaSJeff Roberson 	 */
698ebcddc72SAlan Cox 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
69934d8b7eaSJeff Roberson 		error = EIO;
70034d8b7eaSJeff Roberson 
70134d8b7eaSJeff Roberson unlock_all:
70234d8b7eaSJeff Roberson 	VM_OBJECT_WUNLOCK(object);
70334d8b7eaSJeff Roberson 
70434d8b7eaSJeff Roberson unlock_mp:
70534d8b7eaSJeff Roberson 	if (mp != NULL) {
70634d8b7eaSJeff Roberson 		if (vp != NULL)
70734d8b7eaSJeff Roberson 			vput(vp);
70834d8b7eaSJeff Roberson 		vm_object_deallocate(object);
70934d8b7eaSJeff Roberson 		vn_finished_write(mp);
71034d8b7eaSJeff Roberson 	}
71134d8b7eaSJeff Roberson 
71234d8b7eaSJeff Roberson 	return (error);
71334d8b7eaSJeff Roberson }
71434d8b7eaSJeff Roberson 
71534d8b7eaSJeff Roberson /*
716ebcddc72SAlan Cox  * Attempt to launder the specified number of pages.
717ebcddc72SAlan Cox  *
718ebcddc72SAlan Cox  * Returns the number of pages successfully laundered.
719ebcddc72SAlan Cox  */
720ebcddc72SAlan Cox static int
721ebcddc72SAlan Cox vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
722ebcddc72SAlan Cox {
7235cd29d0fSMark Johnston 	struct scan_state ss;
724ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
725ebcddc72SAlan Cox 	vm_object_t object;
7265cd29d0fSMark Johnston 	vm_page_t m, marker;
727f3f38e25SMark Johnston 	vm_page_astate_t new, old;
728f3f38e25SMark Johnston 	int act_delta, error, numpagedout, queue, refs, starting_target;
729ebcddc72SAlan Cox 	int vnodes_skipped;
73060256604SMark Johnston 	bool pageout_ok;
731ebcddc72SAlan Cox 
7325cd29d0fSMark Johnston 	object = NULL;
733ebcddc72SAlan Cox 	starting_target = launder;
734ebcddc72SAlan Cox 	vnodes_skipped = 0;
735ebcddc72SAlan Cox 
736ebcddc72SAlan Cox 	/*
737b1fd102eSMark Johnston 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
738ebcddc72SAlan Cox 	 * once the target number of dirty pages have been laundered, or once
739ebcddc72SAlan Cox 	 * we've reached the end of the queue.  A single iteration of this loop
740ebcddc72SAlan Cox 	 * may cause more than one page to be laundered because of clustering.
741ebcddc72SAlan Cox 	 *
742b1fd102eSMark Johnston 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
743b1fd102eSMark Johnston 	 * swap devices are configured.
744ebcddc72SAlan Cox 	 */
745b1fd102eSMark Johnston 	if (atomic_load_acq_int(&swapdev_enabled))
74664b38930SMark Johnston 		queue = PQ_UNSWAPPABLE;
747b1fd102eSMark Johnston 	else
74864b38930SMark Johnston 		queue = PQ_LAUNDRY;
749ebcddc72SAlan Cox 
750b1fd102eSMark Johnston scan:
75164b38930SMark Johnston 	marker = &vmd->vmd_markers[queue];
7525cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[queue];
753ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
7545cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
7555cd29d0fSMark Johnston 	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
7565cd29d0fSMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
757ebcddc72SAlan Cox 			continue;
7585cd29d0fSMark Johnston 
7595cd29d0fSMark Johnston 		/*
760b7f30bffSMark Johnston 		 * Don't touch a page that was removed from the queue after the
761b7f30bffSMark Johnston 		 * page queue lock was released.  Otherwise, ensure that any
762b7f30bffSMark Johnston 		 * pending queue operations, such as dequeues for wired pages,
763b7f30bffSMark Johnston 		 * are handled.
7645cd29d0fSMark Johnston 		 */
765b7f30bffSMark Johnston 		if (vm_pageout_defer(m, queue, true))
766ebcddc72SAlan Cox 			continue;
767e8bcf696SMark Johnston 
7689f5632e6SMark Johnston 		/*
7699f5632e6SMark Johnston 		 * Lock the page's object.
7709f5632e6SMark Johnston 		 */
7719f5632e6SMark Johnston 		if (object == NULL || object != m->object) {
77260256604SMark Johnston 			if (object != NULL)
7735cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
77423ed568cSMateusz Guzik 			object = atomic_load_ptr(&m->object);
7759f5632e6SMark Johnston 			if (__predict_false(object == NULL))
7769f5632e6SMark Johnston 				/* The page is being freed by another thread. */
7779f5632e6SMark Johnston 				continue;
7789f5632e6SMark Johnston 
779e8bcf696SMark Johnston 			/* Depends on type-stability. */
78041fd4b94SMark Johnston 			VM_OBJECT_WLOCK(object);
7819f5632e6SMark Johnston 			if (__predict_false(m->object != object)) {
7829f5632e6SMark Johnston 				VM_OBJECT_WUNLOCK(object);
7839f5632e6SMark Johnston 				object = NULL;
78441fd4b94SMark Johnston 				continue;
7859f5632e6SMark Johnston 			}
7869f5632e6SMark Johnston 		}
7875cd29d0fSMark Johnston 
78863e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0)
7895cd29d0fSMark Johnston 			continue;
790ebcddc72SAlan Cox 
791ebcddc72SAlan Cox 		/*
792b7f30bffSMark Johnston 		 * Check for wirings now that we hold the object lock and have
7939f5632e6SMark Johnston 		 * exclusively busied the page.  If the page is mapped, it may
7949f5632e6SMark Johnston 		 * still be wired by pmap lookups.  The call to
795fee2a2faSMark Johnston 		 * vm_page_try_remove_all() below atomically checks for such
796fee2a2faSMark Johnston 		 * wirings and removes mappings.  If the page is unmapped, the
7979f5632e6SMark Johnston 		 * wire count is guaranteed not to increase after this check.
798fee2a2faSMark Johnston 		 */
7999f5632e6SMark Johnston 		if (__predict_false(vm_page_wired(m)))
800f3f38e25SMark Johnston 			goto skip_page;
801fee2a2faSMark Johnston 
802fee2a2faSMark Johnston 		/*
803ebcddc72SAlan Cox 		 * Invalid pages can be easily freed.  They cannot be
804ebcddc72SAlan Cox 		 * mapped; vm_page_free() asserts this.
805ebcddc72SAlan Cox 		 */
8060012f373SJeff Roberson 		if (vm_page_none_valid(m))
807ebcddc72SAlan Cox 			goto free_page;
808ebcddc72SAlan Cox 
809f3f38e25SMark Johnston 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
810f3f38e25SMark Johnston 
811f3f38e25SMark Johnston 		for (old = vm_page_astate_load(m);;) {
812ebcddc72SAlan Cox 			/*
813f3f38e25SMark Johnston 			 * Check to see if the page has been removed from the
814f3f38e25SMark Johnston 			 * queue since the first such check.  Leave it alone if
815f3f38e25SMark Johnston 			 * so, discarding any references collected by
816f3f38e25SMark Johnston 			 * pmap_ts_referenced().
817ebcddc72SAlan Cox 			 */
818f3f38e25SMark Johnston 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
819f3f38e25SMark Johnston 				goto skip_page;
820f3f38e25SMark Johnston 
821f3f38e25SMark Johnston 			new = old;
822f3f38e25SMark Johnston 			act_delta = refs;
823f3f38e25SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
824f3f38e25SMark Johnston 				new.flags &= ~PGA_REFERENCED;
825d7aeb429SAlan Cox 				act_delta++;
826ebcddc72SAlan Cox 			}
827f3f38e25SMark Johnston 			if (act_delta == 0) {
828f3f38e25SMark Johnston 				;
829f3f38e25SMark Johnston 			} else if (object->ref_count != 0) {
830ebcddc72SAlan Cox 				/*
831f3f38e25SMark Johnston 				 * Increase the activation count if the page was
832f3f38e25SMark Johnston 				 * referenced while in the laundry queue.  This
833f3f38e25SMark Johnston 				 * makes it less likely that the page will be
834f3f38e25SMark Johnston 				 * returned prematurely to the laundry queue.
835e8bcf696SMark Johnston 				 */
836f3f38e25SMark Johnston 				new.act_count += ACT_ADVANCE +
837f3f38e25SMark Johnston 				    act_delta;
838f3f38e25SMark Johnston 				if (new.act_count > ACT_MAX)
839f3f38e25SMark Johnston 					new.act_count = ACT_MAX;
840f3f38e25SMark Johnston 
841f7607c30SMark Johnston 				new.flags &= ~PGA_QUEUE_OP_MASK;
842f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
843f3f38e25SMark Johnston 				new.queue = PQ_ACTIVE;
844f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
845f3f38e25SMark Johnston 					continue;
846e8bcf696SMark Johnston 
847e8bcf696SMark Johnston 				/*
848e8bcf696SMark Johnston 				 * If this was a background laundering, count
849e8bcf696SMark Johnston 				 * activated pages towards our target.  The
850e8bcf696SMark Johnston 				 * purpose of background laundering is to ensure
851e8bcf696SMark Johnston 				 * that pages are eventually cycled through the
852e8bcf696SMark Johnston 				 * laundry queue, and an activation is a valid
853e8bcf696SMark Johnston 				 * way out.
854ebcddc72SAlan Cox 				 */
855ebcddc72SAlan Cox 				if (!in_shortfall)
856ebcddc72SAlan Cox 					launder--;
857f3f38e25SMark Johnston 				VM_CNT_INC(v_reactivated);
858f3f38e25SMark Johnston 				goto skip_page;
8595cd29d0fSMark Johnston 			} else if ((object->flags & OBJ_DEAD) == 0) {
860f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
861f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
862e8bcf696SMark Johnston 					continue;
863f3f38e25SMark Johnston 				goto skip_page;
8645cd29d0fSMark Johnston 			}
865f3f38e25SMark Johnston 			break;
866ebcddc72SAlan Cox 		}
867ebcddc72SAlan Cox 
868ebcddc72SAlan Cox 		/*
869ebcddc72SAlan Cox 		 * If the page appears to be clean at the machine-independent
870ebcddc72SAlan Cox 		 * layer, then remove all of its mappings from the pmap in
871ebcddc72SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
872ebcddc72SAlan Cox 		 * mappings allow write access, then the page may still be
873ebcddc72SAlan Cox 		 * modified until the last of those mappings are removed.
874ebcddc72SAlan Cox 		 */
875ebcddc72SAlan Cox 		if (object->ref_count != 0) {
876ebcddc72SAlan Cox 			vm_page_test_dirty(m);
8779f5632e6SMark Johnston 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
878f3f38e25SMark Johnston 				goto skip_page;
879fee2a2faSMark Johnston 		}
880ebcddc72SAlan Cox 
881ebcddc72SAlan Cox 		/*
882ebcddc72SAlan Cox 		 * Clean pages are freed, and dirty pages are paged out unless
883ebcddc72SAlan Cox 		 * they belong to a dead object.  Requeueing dirty pages from
884ebcddc72SAlan Cox 		 * dead objects is pointless, as they are being paged out and
885ebcddc72SAlan Cox 		 * freed by the thread that destroyed the object.
886ebcddc72SAlan Cox 		 */
887ebcddc72SAlan Cox 		if (m->dirty == 0) {
888ebcddc72SAlan Cox free_page:
8899f5632e6SMark Johnston 			/*
8909f5632e6SMark Johnston 			 * Now we are guaranteed that no other threads are
8919f5632e6SMark Johnston 			 * manipulating the page, check for a last-second
8929f5632e6SMark Johnston 			 * reference.
8939f5632e6SMark Johnston 			 */
8949f5632e6SMark Johnston 			if (vm_pageout_defer(m, queue, true))
8959f5632e6SMark Johnston 				goto skip_page;
896ebcddc72SAlan Cox 			vm_page_free(m);
89783c9dea1SGleb Smirnoff 			VM_CNT_INC(v_dfree);
898ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0) {
8994b8365d7SKonstantin Belousov 			if ((object->flags & OBJ_SWAP) == 0 &&
900ebcddc72SAlan Cox 			    object->type != OBJT_DEFAULT)
901ebcddc72SAlan Cox 				pageout_ok = true;
902ebcddc72SAlan Cox 			else if (disable_swap_pageouts)
903ebcddc72SAlan Cox 				pageout_ok = false;
904ebcddc72SAlan Cox 			else
905ebcddc72SAlan Cox 				pageout_ok = true;
906ebcddc72SAlan Cox 			if (!pageout_ok) {
907f3f38e25SMark Johnston 				vm_page_launder(m);
908f3f38e25SMark Johnston 				goto skip_page;
909ebcddc72SAlan Cox 			}
910ebcddc72SAlan Cox 
911ebcddc72SAlan Cox 			/*
912ebcddc72SAlan Cox 			 * Form a cluster with adjacent, dirty pages from the
913ebcddc72SAlan Cox 			 * same object, and page out that entire cluster.
914ebcddc72SAlan Cox 			 *
915ebcddc72SAlan Cox 			 * The adjacent, dirty pages must also be in the
916ebcddc72SAlan Cox 			 * laundry.  However, their mappings are not checked
917ebcddc72SAlan Cox 			 * for new references.  Consequently, a recently
918ebcddc72SAlan Cox 			 * referenced page may be paged out.  However, that
919ebcddc72SAlan Cox 			 * page will not be prematurely reclaimed.  After page
920ebcddc72SAlan Cox 			 * out, the page will be placed in the inactive queue,
921ebcddc72SAlan Cox 			 * where any new references will be detected and the
922ebcddc72SAlan Cox 			 * page reactivated.
923ebcddc72SAlan Cox 			 */
924ebcddc72SAlan Cox 			error = vm_pageout_clean(m, &numpagedout);
925ebcddc72SAlan Cox 			if (error == 0) {
926ebcddc72SAlan Cox 				launder -= numpagedout;
9275cd29d0fSMark Johnston 				ss.scanned += numpagedout;
928ebcddc72SAlan Cox 			} else if (error == EDEADLK) {
929ebcddc72SAlan Cox 				pageout_lock_miss++;
930ebcddc72SAlan Cox 				vnodes_skipped++;
931ebcddc72SAlan Cox 			}
93260256604SMark Johnston 			object = NULL;
933f3f38e25SMark Johnston 		} else {
934f3f38e25SMark Johnston skip_page:
93563e97555SJeff Roberson 			vm_page_xunbusy(m);
936e8bcf696SMark Johnston 		}
937f3f38e25SMark Johnston 	}
93846e39081SMark Johnston 	if (object != NULL) {
939ebcddc72SAlan Cox 		VM_OBJECT_WUNLOCK(object);
94046e39081SMark Johnston 		object = NULL;
94146e39081SMark Johnston 	}
942ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
9435cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
944ebcddc72SAlan Cox 	vm_pagequeue_unlock(pq);
945ebcddc72SAlan Cox 
94664b38930SMark Johnston 	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
94764b38930SMark Johnston 		queue = PQ_LAUNDRY;
948b1fd102eSMark Johnston 		goto scan;
949b1fd102eSMark Johnston 	}
950b1fd102eSMark Johnston 
951ebcddc72SAlan Cox 	/*
952ebcddc72SAlan Cox 	 * Wakeup the sync daemon if we skipped a vnode in a writeable object
953ebcddc72SAlan Cox 	 * and we didn't launder enough pages.
954ebcddc72SAlan Cox 	 */
955ebcddc72SAlan Cox 	if (vnodes_skipped > 0 && launder > 0)
956ebcddc72SAlan Cox 		(void)speedup_syncer();
957ebcddc72SAlan Cox 
958ebcddc72SAlan Cox 	return (starting_target - launder);
959ebcddc72SAlan Cox }
960ebcddc72SAlan Cox 
961ebcddc72SAlan Cox /*
962ebcddc72SAlan Cox  * Compute the integer square root.
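 * This is the standard bit-by-bit method; the laundry thread uses it to
 * scale the background laundering threshold.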
963ebcddc72SAlan Cox  */
964ebcddc72SAlan Cox static u_int
965ebcddc72SAlan Cox isqrt(u_int num)
966ebcddc72SAlan Cox {
967ebcddc72SAlan Cox 	u_int bit, root, tmp;
968ebcddc72SAlan Cox 
96964f8d257SDoug Moore 	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
970ebcddc72SAlan Cox 	root = 0;
971ebcddc72SAlan Cox 	while (bit != 0) {
972ebcddc72SAlan Cox 		tmp = root + bit;
973ebcddc72SAlan Cox 		root >>= 1;
974ebcddc72SAlan Cox 		if (num >= tmp) {
975ebcddc72SAlan Cox 			num -= tmp;
976ebcddc72SAlan Cox 			root += bit;
977ebcddc72SAlan Cox 		}
978ebcddc72SAlan Cox 		bit >>= 2;
979ebcddc72SAlan Cox 	}
980ebcddc72SAlan Cox 	return (root);
981ebcddc72SAlan Cox }
982ebcddc72SAlan Cox 
983ebcddc72SAlan Cox /*
984ebcddc72SAlan Cox  * Perform the work of the laundry thread: periodically wake up and determine
985ebcddc72SAlan Cox  * whether any pages need to be laundered.  If so, determine the number of pages
986ebcddc72SAlan Cox  * that need to be laundered, and launder them.
987ebcddc72SAlan Cox  */
988ebcddc72SAlan Cox static void
989ebcddc72SAlan Cox vm_pageout_laundry_worker(void *arg)
990ebcddc72SAlan Cox {
991e2068d0bSJeff Roberson 	struct vm_domain *vmd;
992ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
99360684862SMark Johnston 	uint64_t nclean, ndirty, nfreed;
994e2068d0bSJeff Roberson 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
995ebcddc72SAlan Cox 	bool in_shortfall;
996ebcddc72SAlan Cox 
997e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
998e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
999e2068d0bSJeff Roberson 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1000e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1001ebcddc72SAlan Cox 
1002ebcddc72SAlan Cox 	shortfall = 0;
1003ebcddc72SAlan Cox 	in_shortfall = false;
1004ebcddc72SAlan Cox 	shortfall_cycle = 0;
10058002c3a4SMark Johnston 	last_target = target = 0;
100660684862SMark Johnston 	nfreed = 0;
1007ebcddc72SAlan Cox 
1008ebcddc72SAlan Cox 	/*
1009b1fd102eSMark Johnston 	 * Calls to these handlers are serialized by the swap syscall lock.
1010b1fd102eSMark Johnston 	 */
1011e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
1012b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
1013e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
1014b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
1015b1fd102eSMark Johnston 
1016b1fd102eSMark Johnston 	/*
1017ebcddc72SAlan Cox 	 * The pageout laundry worker is never done, so loop forever.
1018ebcddc72SAlan Cox 	 */
1019ebcddc72SAlan Cox 	for (;;) {
1020ebcddc72SAlan Cox 		KASSERT(target >= 0, ("negative target %d", target));
1021ebcddc72SAlan Cox 		KASSERT(shortfall_cycle >= 0,
1022ebcddc72SAlan Cox 		    ("negative cycle %d", shortfall_cycle));
1023ebcddc72SAlan Cox 		launder = 0;
1024ebcddc72SAlan Cox 
1025ebcddc72SAlan Cox 		/*
1026ebcddc72SAlan Cox 		 * First determine whether we need to launder pages to meet a
1027ebcddc72SAlan Cox 		 * shortage of free pages.
1028ebcddc72SAlan Cox 		 */
1029ebcddc72SAlan Cox 		if (shortfall > 0) {
1030ebcddc72SAlan Cox 			in_shortfall = true;
1031ebcddc72SAlan Cox 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1032ebcddc72SAlan Cox 			target = shortfall;
1033ebcddc72SAlan Cox 		} else if (!in_shortfall)
1034ebcddc72SAlan Cox 			goto trybackground;
1035e2068d0bSJeff Roberson 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1036ebcddc72SAlan Cox 			/*
1037ebcddc72SAlan Cox 			 * We recently entered shortfall and began laundering
1038ebcddc72SAlan Cox 			 * pages.  If we have completed that laundering run
1039ebcddc72SAlan Cox 			 * (and we are no longer in shortfall) or we have met
1040ebcddc72SAlan Cox 			 * our laundry target through other activity, then we
1041ebcddc72SAlan Cox 			 * can stop laundering pages.
1042ebcddc72SAlan Cox 			 */
1043ebcddc72SAlan Cox 			in_shortfall = false;
1044ebcddc72SAlan Cox 			target = 0;
1045ebcddc72SAlan Cox 			goto trybackground;
1046ebcddc72SAlan Cox 		}
1047ebcddc72SAlan Cox 		launder = target / shortfall_cycle--;
1048ebcddc72SAlan Cox 		goto dolaundry;
1049ebcddc72SAlan Cox 
1050ebcddc72SAlan Cox 		/*
1051ebcddc72SAlan Cox 		 * There's no immediate need to launder any pages; see if we
1052ebcddc72SAlan Cox 		 * meet the conditions to perform background laundering:
1053ebcddc72SAlan Cox 		 *
1054ebcddc72SAlan Cox 		 * 1. The ratio of dirty to clean inactive pages exceeds the
105560684862SMark Johnston 		 *    background laundering threshold, or
1056ebcddc72SAlan Cox 		 * 2. we haven't yet reached the target of the current
1057ebcddc72SAlan Cox 		 *    background laundering run.
1058ebcddc72SAlan Cox 		 *
1059ebcddc72SAlan Cox 		 * The background laundering threshold is not a constant.
1060ebcddc72SAlan Cox 		 * Instead, it is a slowly growing function of the number of
106160684862SMark Johnston 		 * clean pages freed by the page daemon since the last
106260684862SMark Johnston 		 * background laundering.  Thus, as the ratio of dirty to
106360684862SMark Johnston 		 * clean inactive pages grows, the amount of memory pressure
1064c098768eSMark Johnston 		 * required to trigger laundering decreases.  We ensure
1065c098768eSMark Johnston 		 * that the threshold is non-zero after an inactive queue
1066c098768eSMark Johnston 		 * scan, even if that scan failed to free a single clean page.
1067ebcddc72SAlan Cox 		 */
1068ebcddc72SAlan Cox trybackground:
1069e2068d0bSJeff Roberson 		nclean = vmd->vmd_free_count +
1070e2068d0bSJeff Roberson 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1071e2068d0bSJeff Roberson 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1072c098768eSMark Johnston 		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1073c098768eSMark Johnston 		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1074e2068d0bSJeff Roberson 			target = vmd->vmd_background_launder_target;
1075ebcddc72SAlan Cox 		}
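		/*
		 * Worked example of the threshold above: with nfreed == 0 the
		 * isqrt() factor is 1, so a background run begins only once
		 * ndirty >= nclean.  Once the page daemon has freed roughly
		 * 4 * (vmd_free_target - vmd_free_min) clean pages, the
		 * factor becomes 2 and laundering begins once ndirty reaches
		 * about half of nclean; growing reclamation pressure thus
		 * lowers the dirty/clean ratio needed to start laundering.
		 */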
1076ebcddc72SAlan Cox 
1077ebcddc72SAlan Cox 		/*
1078ebcddc72SAlan Cox 		 * We have a non-zero background laundering target.  If we've
1079ebcddc72SAlan Cox 		 * laundered up to our maximum without observing a page daemon
1080cb35676eSMark Johnston 		 * request, just stop.  This is a safety belt that ensures we
1081ebcddc72SAlan Cox 		 * don't launder an excessive amount if memory pressure is low
1082ebcddc72SAlan Cox 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1083ebcddc72SAlan Cox 		 * proceed at the background laundering rate.
1084ebcddc72SAlan Cox 		 */
1085ebcddc72SAlan Cox 		if (target > 0) {
108660684862SMark Johnston 			if (nfreed > 0) {
108760684862SMark Johnston 				nfreed = 0;
1088ebcddc72SAlan Cox 				last_target = target;
1089ebcddc72SAlan Cox 			} else if (last_target - target >=
1090ebcddc72SAlan Cox 			    vm_background_launder_max * PAGE_SIZE / 1024) {
1091ebcddc72SAlan Cox 				target = 0;
1092ebcddc72SAlan Cox 			}
1093ebcddc72SAlan Cox 			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1094ebcddc72SAlan Cox 			launder /= VM_LAUNDER_RATE;
1095ebcddc72SAlan Cox 			if (launder > target)
1096ebcddc72SAlan Cox 				launder = target;
1097ebcddc72SAlan Cox 		}
1098ebcddc72SAlan Cox 
1099ebcddc72SAlan Cox dolaundry:
1100ebcddc72SAlan Cox 		if (launder > 0) {
1101ebcddc72SAlan Cox 			/*
1102ebcddc72SAlan Cox 			 * Because of I/O clustering, the number of laundered
1103ebcddc72SAlan Cox 			 * pages could exceed "target" by the maximum size of
1104ebcddc72SAlan Cox 			 * a cluster minus one.
1105ebcddc72SAlan Cox 			 */
1106e2068d0bSJeff Roberson 			target -= min(vm_pageout_launder(vmd, launder,
1107ebcddc72SAlan Cox 			    in_shortfall), target);
1108ebcddc72SAlan Cox 			pause("laundp", hz / VM_LAUNDER_RATE);
1109ebcddc72SAlan Cox 		}
1110ebcddc72SAlan Cox 
1111ebcddc72SAlan Cox 		/*
1112ebcddc72SAlan Cox 		 * If we're not currently laundering pages and the page daemon
1113ebcddc72SAlan Cox 		 * hasn't posted a new request, sleep until the page daemon
1114ebcddc72SAlan Cox 		 * kicks us.
1115ebcddc72SAlan Cox 		 */
1116ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1117e2068d0bSJeff Roberson 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1118e2068d0bSJeff Roberson 			(void)mtx_sleep(&vmd->vmd_laundry_request,
1119ebcddc72SAlan Cox 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1120ebcddc72SAlan Cox 
1121ebcddc72SAlan Cox 		/*
1122ebcddc72SAlan Cox 		 * If the pagedaemon has indicated that it's in shortfall, start
1123ebcddc72SAlan Cox 		 * a shortfall laundering unless we're already in the middle of
1124ebcddc72SAlan Cox 		 * one.  This may preempt a background laundering.
1125ebcddc72SAlan Cox 		 */
1126e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1127ebcddc72SAlan Cox 		    (!in_shortfall || shortfall_cycle == 0)) {
1128e2068d0bSJeff Roberson 			shortfall = vm_laundry_target(vmd) +
1129e2068d0bSJeff Roberson 			    vmd->vmd_pageout_deficit;
1130ebcddc72SAlan Cox 			target = 0;
1131ebcddc72SAlan Cox 		} else
1132ebcddc72SAlan Cox 			shortfall = 0;
1133ebcddc72SAlan Cox 
1134ebcddc72SAlan Cox 		if (target == 0)
1135e2068d0bSJeff Roberson 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
113660684862SMark Johnston 		nfreed += vmd->vmd_clean_pages_freed;
113760684862SMark Johnston 		vmd->vmd_clean_pages_freed = 0;
1138ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1139ebcddc72SAlan Cox 	}
1140ebcddc72SAlan Cox }
1141ebcddc72SAlan Cox 
1142be37ee79SMark Johnston /*
1143be37ee79SMark Johnston  * Compute the number of pages we want to try to move from the
1144be37ee79SMark Johnston  * active queue to either the inactive or laundry queue.
1145be37ee79SMark Johnston  *
11467bb4634eSMark Johnston  * When scanning active pages during a shortage, we make clean pages
11477bb4634eSMark Johnston  * count more heavily towards the page shortage than dirty pages.
11487bb4634eSMark Johnston  * This is because dirty pages must be laundered before they can be
11497bb4634eSMark Johnston  * reused and thus have less utility when attempting to quickly
11507bb4634eSMark Johnston  * alleviate a free page shortage.  However, this weighting also
11517bb4634eSMark Johnston  * causes the scan to deactivate dirty pages more aggressively,
11527bb4634eSMark Johnston  * improving the effectiveness of clustering.
1153be37ee79SMark Johnston  */
1154be37ee79SMark Johnston static int
11557bb4634eSMark Johnston vm_pageout_active_target(struct vm_domain *vmd)
1156be37ee79SMark Johnston {
1157be37ee79SMark Johnston 	int shortage;
1158be37ee79SMark Johnston 
1159be37ee79SMark Johnston 	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1160be37ee79SMark Johnston 	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1161be37ee79SMark Johnston 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1162be37ee79SMark Johnston 	shortage *= act_scan_laundry_weight;
1163be37ee79SMark Johnston 	return (shortage);
1164be37ee79SMark Johnston }
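/*
 * Worked example, assuming act_scan_laundry_weight has its usual default of 3:
 * with an inactive target plus paging target of 1000 pages, 400 pages in
 * PQ_INACTIVE, and 300 pages in PQ_LAUNDRY, the unweighted deficit is
 * 1000 - (400 + 300 / 3) = 500, and the returned target is 500 * 3 = 1500.
 * The active scan then counts each clean page it deactivates as
 * act_scan_laundry_weight and each dirty page it moves to the laundry as 1,
 * matching the ps_delta values in vm_pageout_scan_active().
 */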
1165be37ee79SMark Johnston 
1166be37ee79SMark Johnston /*
1167be37ee79SMark Johnston  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1168be37ee79SMark Johnston  * small portion of the queue in order to maintain quasi-LRU.
1169be37ee79SMark Johnston  */
1170be37ee79SMark Johnston static void
1171be37ee79SMark Johnston vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1172be37ee79SMark Johnston {
1173be37ee79SMark Johnston 	struct scan_state ss;
1174fee2a2faSMark Johnston 	vm_object_t object;
1175be37ee79SMark Johnston 	vm_page_t m, marker;
1176be37ee79SMark Johnston 	struct vm_pagequeue *pq;
1177f3f38e25SMark Johnston 	vm_page_astate_t old, new;
1178be37ee79SMark Johnston 	long min_scan;
1179f3f38e25SMark Johnston 	int act_delta, max_scan, ps_delta, refs, scan_tick;
1180f3f38e25SMark Johnston 	uint8_t nqueue;
1181be37ee79SMark Johnston 
1182be37ee79SMark Johnston 	marker = &vmd->vmd_markers[PQ_ACTIVE];
1183be37ee79SMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1184be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
1185be37ee79SMark Johnston 
1186be37ee79SMark Johnston 	/*
1187be37ee79SMark Johnston 	 * If we're just idle polling, attempt to visit every
1188be37ee79SMark Johnston 	 * active page within 'update_period' seconds.
1189be37ee79SMark Johnston 	 */
1190be37ee79SMark Johnston 	scan_tick = ticks;
1191be37ee79SMark Johnston 	if (vm_pageout_update_period != 0) {
1192be37ee79SMark Johnston 		min_scan = pq->pq_cnt;
1193be37ee79SMark Johnston 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1194be37ee79SMark Johnston 		min_scan /= hz * vm_pageout_update_period;
1195be37ee79SMark Johnston 	} else
1196be37ee79SMark Johnston 		min_scan = 0;
1197be37ee79SMark Johnston 	if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1198be37ee79SMark Johnston 		vmd->vmd_last_active_scan = scan_tick;
1199be37ee79SMark Johnston 
1200be37ee79SMark Johnston 	/*
1201be37ee79SMark Johnston 	 * Scan the active queue for pages that can be deactivated.  Update
1202be37ee79SMark Johnston 	 * the per-page activity counter and use it to identify deactivation
1203be37ee79SMark Johnston 	 * candidates.  Held pages may be deactivated.
1204be37ee79SMark Johnston 	 *
1205be37ee79SMark Johnston 	 * To avoid requeuing each page that remains in the active queue, we
12067bb4634eSMark Johnston 	 * implement the CLOCK algorithm.  To keep the implementation of the
12077bb4634eSMark Johnston 	 * enqueue operation consistent for all page queues, we use two hands,
12087bb4634eSMark Johnston 	 * represented by marker pages. Scans begin at the first hand, which
12097bb4634eSMark Johnston 	 * precedes the second hand in the queue.  When the two hands meet,
12107bb4634eSMark Johnston 	 * they are moved back to the head and tail of the queue, respectively,
12117bb4634eSMark Johnston 	 * and scanning resumes.
1212be37ee79SMark Johnston 	 */
1213be37ee79SMark Johnston 	max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1214be37ee79SMark Johnston act_scan:
1215be37ee79SMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1216be37ee79SMark Johnston 	while ((m = vm_pageout_next(&ss, false)) != NULL) {
1217be37ee79SMark Johnston 		if (__predict_false(m == &vmd->vmd_clock[1])) {
1218be37ee79SMark Johnston 			vm_pagequeue_lock(pq);
1219be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1220be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1221be37ee79SMark Johnston 			TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1222be37ee79SMark Johnston 			    plinks.q);
1223be37ee79SMark Johnston 			TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1224be37ee79SMark Johnston 			    plinks.q);
1225be37ee79SMark Johnston 			max_scan -= ss.scanned;
1226be37ee79SMark Johnston 			vm_pageout_end_scan(&ss);
1227be37ee79SMark Johnston 			goto act_scan;
1228be37ee79SMark Johnston 		}
1229be37ee79SMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
1230be37ee79SMark Johnston 			continue;
1231be37ee79SMark Johnston 
1232e8bcf696SMark Johnston 		/*
1233b7f30bffSMark Johnston 		 * Don't touch a page that was removed from the queue after the
1234b7f30bffSMark Johnston 		 * page queue lock was released.  Otherwise, ensure that any
1235b7f30bffSMark Johnston 		 * pending queue operations, such as dequeues for wired pages,
1236b7f30bffSMark Johnston 		 * are handled.
1237e8bcf696SMark Johnston 		 */
1238b7f30bffSMark Johnston 		if (vm_pageout_defer(m, PQ_ACTIVE, true))
1239e8bcf696SMark Johnston 			continue;
1240e8bcf696SMark Johnston 
1241e8bcf696SMark Johnston 		/*
1242e8bcf696SMark Johnston 		 * A page's object pointer may be set to NULL before
1243e8bcf696SMark Johnston 		 * the object lock is acquired.
1244e8bcf696SMark Johnston 		 */
124523ed568cSMateusz Guzik 		object = atomic_load_ptr(&m->object);
1246fee2a2faSMark Johnston 		if (__predict_false(object == NULL))
1247fee2a2faSMark Johnston 			/*
1248fee2a2faSMark Johnston 			 * The page has been removed from its object.
1249fee2a2faSMark Johnston 			 */
1250fee2a2faSMark Johnston 			continue;
1251fee2a2faSMark Johnston 
1252f3f38e25SMark Johnston 		/* Deferred free of swap space. */
1253f3f38e25SMark Johnston 		if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
1254f3f38e25SMark Johnston 		    VM_OBJECT_TRYWLOCK(object)) {
1255f3f38e25SMark Johnston 			if (m->object == object)
1256f3f38e25SMark Johnston 				vm_pager_page_unswapped(m);
1257f3f38e25SMark Johnston 			VM_OBJECT_WUNLOCK(object);
1258f3f38e25SMark Johnston 		}
1259f3f38e25SMark Johnston 
1260fee2a2faSMark Johnston 		/*
1261be37ee79SMark Johnston 		 * Check to see "how much" the page has been used.
1262d7aeb429SAlan Cox 		 *
1263d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1264d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
1265d7aeb429SAlan Cox 		 * observed here and now.
1266d7aeb429SAlan Cox 		 *
1267e8bcf696SMark Johnston 		 * Perform an unsynchronized object ref count check.  While
1268e8bcf696SMark Johnston 		 * the page lock ensures that the page is not reallocated to
1269e8bcf696SMark Johnston 		 * another object, in particular, one with unmanaged mappings
1270e8bcf696SMark Johnston 		 * that cannot support pmap_ts_referenced(), two races are,
1271be37ee79SMark Johnston 		 * nonetheless, possible:
1272be37ee79SMark Johnston 		 * 1) The count was transitioning to zero, but we saw a non-
1273e8bcf696SMark Johnston 		 *    zero value.  pmap_ts_referenced() will return zero
1274e8bcf696SMark Johnston 		 *    because the page is not mapped.
1275e8bcf696SMark Johnston 		 * 2) The count was transitioning to one, but we saw zero.
1276e8bcf696SMark Johnston 		 *    This race delays the detection of a new reference.  At
1277e8bcf696SMark Johnston 		 *    worst, we will deactivate and reactivate the page.
1278be37ee79SMark Johnston 		 */
1279f3f38e25SMark Johnston 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1280be37ee79SMark Johnston 
1281f3f38e25SMark Johnston 		old = vm_page_astate_load(m);
1282f3f38e25SMark Johnston 		do {
1283f3f38e25SMark Johnston 			/*
1284f3f38e25SMark Johnston 			 * Check to see if the page has been removed from the
1285f3f38e25SMark Johnston 			 * queue since the first such check.  Leave it alone if
1286f3f38e25SMark Johnston 			 * so, discarding any references collected by
1287f3f38e25SMark Johnston 			 * pmap_ts_referenced().
1288f3f38e25SMark Johnston 			 */
1289609de97eSEric van Gyzen 			if (__predict_false(_vm_page_queue(old) == PQ_NONE)) {
1290609de97eSEric van Gyzen 				ps_delta = 0;
1291f3f38e25SMark Johnston 				break;
1292609de97eSEric van Gyzen 			}
1293a8081778SJeff Roberson 
1294be37ee79SMark Johnston 			/*
1295be37ee79SMark Johnston 			 * Advance or decay the act_count based on recent usage.
1296be37ee79SMark Johnston 			 */
1297f3f38e25SMark Johnston 			new = old;
1298f3f38e25SMark Johnston 			act_delta = refs;
1299f3f38e25SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
1300f3f38e25SMark Johnston 				new.flags &= ~PGA_REFERENCED;
1301f3f38e25SMark Johnston 				act_delta++;
1302f3f38e25SMark Johnston 			}
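			/*
			 * For example, a page referenced since the last scan
			 * has its count advanced by ACT_ADVANCE plus the
			 * number of observed references, saturating at
			 * ACT_MAX; an unreferenced page decays by ACT_DECLINE
			 * per scan until it reaches zero and becomes a
			 * deactivation candidate below.
			 */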
1303be37ee79SMark Johnston 			if (act_delta != 0) {
1304f3f38e25SMark Johnston 				new.act_count += ACT_ADVANCE + act_delta;
1305f3f38e25SMark Johnston 				if (new.act_count > ACT_MAX)
1306f3f38e25SMark Johnston 					new.act_count = ACT_MAX;
1307f3f38e25SMark Johnston 			} else {
1308f3f38e25SMark Johnston 				new.act_count -= min(new.act_count,
1309f3f38e25SMark Johnston 				    ACT_DECLINE);
1310f3f38e25SMark Johnston 			}
1311be37ee79SMark Johnston 
1312f3f38e25SMark Johnston 			if (new.act_count > 0) {
1313be37ee79SMark Johnston 				/*
1314f3f38e25SMark Johnston 				 * Adjust the activation count and keep the page
1315f3f38e25SMark Johnston 				 * in the active queue.  The count might be left
1316f3f38e25SMark Johnston 				 * unchanged if it is saturated.  The page may
1317f3f38e25SMark Johnston 				 * have been moved to a different queue since we
1318f3f38e25SMark Johnston 				 * started the scan, in which case we move it
1319f3f38e25SMark Johnston 				 * back.
1320be37ee79SMark Johnston 				 */
1321f3f38e25SMark Johnston 				ps_delta = 0;
1322f3f38e25SMark Johnston 				if (old.queue != PQ_ACTIVE) {
1323f7607c30SMark Johnston 					new.flags &= ~PGA_QUEUE_OP_MASK;
1324f7607c30SMark Johnston 					new.flags |= PGA_REQUEUE;
1325f7607c30SMark Johnston 					new.queue = PQ_ACTIVE;
1326f3f38e25SMark Johnston 				}
13277cdeaf33SMark Johnston 			} else {
1328be37ee79SMark Johnston 				/*
1329f3f38e25SMark Johnston 				 * When not short for inactive pages, let dirty
1330f3f38e25SMark Johnston 				 * pages go through the inactive queue before
1331f3f38e25SMark Johnston 				 * moving to the laundry queue.  This gives them
1332f3f38e25SMark Johnston 				 * some extra time to be reactivated,
1333f3f38e25SMark Johnston 				 * potentially avoiding an expensive pageout.
1334f3f38e25SMark Johnston 				 * However, during a page shortage, the inactive
1335f3f38e25SMark Johnston 				 * queue is necessarily small, and so dirty
1336f3f38e25SMark Johnston 				 * pages would only spend a trivial amount of
1337f3f38e25SMark Johnston 				 * time in the inactive queue.  Therefore, we
1338f3f38e25SMark Johnston 				 * might as well place them directly in the
1339f3f38e25SMark Johnston 				 * laundry queue to reduce queuing overhead.
1340f3f38e25SMark Johnston 				 *
1341be37ee79SMark Johnston 				 * Calling vm_page_test_dirty() here would
1342be37ee79SMark Johnston 				 * require acquisition of the object's write
1343be37ee79SMark Johnston 				 * lock.  However, during a page shortage,
1344f3f38e25SMark Johnston 				 * directing dirty pages into the laundry queue
1345f3f38e25SMark Johnston 				 * is only an optimization and not a
1346be37ee79SMark Johnston 				 * requirement.  Therefore, we simply rely on
1347f3f38e25SMark Johnston 				 * the opportunistic updates to the page's dirty
1348f3f38e25SMark Johnston 				 * field by the pmap.
1349be37ee79SMark Johnston 				 */
1350f3f38e25SMark Johnston 				if (page_shortage <= 0) {
1351f3f38e25SMark Johnston 					nqueue = PQ_INACTIVE;
1352f3f38e25SMark Johnston 					ps_delta = 0;
1353f3f38e25SMark Johnston 				} else if (m->dirty == 0) {
1354f3f38e25SMark Johnston 					nqueue = PQ_INACTIVE;
1355f3f38e25SMark Johnston 					ps_delta = act_scan_laundry_weight;
1356be37ee79SMark Johnston 				} else {
1357f3f38e25SMark Johnston 					nqueue = PQ_LAUNDRY;
1358f3f38e25SMark Johnston 					ps_delta = 1;
1359be37ee79SMark Johnston 				}
1360f3f38e25SMark Johnston 
1361f7607c30SMark Johnston 				new.flags &= ~PGA_QUEUE_OP_MASK;
1362f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
1363f3f38e25SMark Johnston 				new.queue = nqueue;
1364be37ee79SMark Johnston 			}
1365f3f38e25SMark Johnston 		} while (!vm_page_pqstate_commit(m, &old, new));
1366f3f38e25SMark Johnston 
1367f3f38e25SMark Johnston 		page_shortage -= ps_delta;
1368be37ee79SMark Johnston 	}
1369be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
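	/*
	 * Park the first clock hand at the point where this scan stopped so
	 * that the next active queue scan can resume from roughly the same
	 * place.
	 */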
1370be37ee79SMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1371be37ee79SMark Johnston 	TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1372be37ee79SMark Johnston 	vm_pageout_end_scan(&ss);
1373be37ee79SMark Johnston 	vm_pagequeue_unlock(pq);
1374be37ee79SMark Johnston }
1375be37ee79SMark Johnston 
13765cd29d0fSMark Johnston static int
1377f3f38e25SMark Johnston vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
1378f3f38e25SMark Johnston     vm_page_t m)
13795cd29d0fSMark Johnston {
1380f3f38e25SMark Johnston 	vm_page_astate_t as;
13815cd29d0fSMark Johnston 
1382f3f38e25SMark Johnston 	vm_pagequeue_assert_locked(pq);
1383f3f38e25SMark Johnston 
1384f3f38e25SMark Johnston 	as = vm_page_astate_load(m);
1385f3f38e25SMark Johnston 	if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
1386e8bcf696SMark Johnston 		return (0);
1387e8bcf696SMark Johnston 	vm_page_aflag_set(m, PGA_ENQUEUED);
1388f3f38e25SMark Johnston 	TAILQ_INSERT_BEFORE(marker, m, plinks.q);
13895cd29d0fSMark Johnston 	return (1);
13905cd29d0fSMark Johnston }
13915cd29d0fSMark Johnston 
13925cd29d0fSMark Johnston /*
13935cd29d0fSMark Johnston  * Re-add stuck pages to the inactive queue.  We will examine them again
13945cd29d0fSMark Johnston  * during the next scan.  If the queue state of a page has changed since
13955cd29d0fSMark Johnston  * it was physically removed from the page queue in
13965cd29d0fSMark Johnston  * vm_pageout_collect_batch(), don't do anything with that page.
13975cd29d0fSMark Johnston  */
13985cd29d0fSMark Johnston static void
13995cd29d0fSMark Johnston vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
14005cd29d0fSMark Johnston     vm_page_t m)
14015cd29d0fSMark Johnston {
14025cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
1403f3f38e25SMark Johnston 	vm_page_t marker;
14045cd29d0fSMark Johnston 	int delta;
14055cd29d0fSMark Johnston 
14065cd29d0fSMark Johnston 	delta = 0;
1407f3f38e25SMark Johnston 	marker = ss->marker;
14085cd29d0fSMark Johnston 	pq = ss->pq;
14095cd29d0fSMark Johnston 
14105cd29d0fSMark Johnston 	if (m != NULL) {
14115cd29d0fSMark Johnston 		if (vm_batchqueue_insert(bq, m))
14125cd29d0fSMark Johnston 			return;
14135cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
1414f3f38e25SMark Johnston 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
14155cd29d0fSMark Johnston 	} else
14165cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
14175cd29d0fSMark Johnston 	while ((m = vm_batchqueue_pop(bq)) != NULL)
1418f3f38e25SMark Johnston 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
14195cd29d0fSMark Johnston 	vm_pagequeue_cnt_add(pq, delta);
14205cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
14215cd29d0fSMark Johnston 	vm_batchqueue_init(bq);
14225cd29d0fSMark Johnston }
14235cd29d0fSMark Johnston 
14240292c54bSConrad Meyer static void
14250292c54bSConrad Meyer vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
1426df8bae1dSRodney W. Grimes {
14270292c54bSConrad Meyer 	struct timeval start, end;
14285cd29d0fSMark Johnston 	struct scan_state ss;
14295cd29d0fSMark Johnston 	struct vm_batchqueue rq;
14300292c54bSConrad Meyer 	struct vm_page marker_page;
14315cd29d0fSMark Johnston 	vm_page_t m, marker;
14328d220203SAlan Cox 	struct vm_pagequeue *pq;
1433df8bae1dSRodney W. Grimes 	vm_object_t object;
1434f3f38e25SMark Johnston 	vm_page_astate_t old, new;
14350292c54bSConrad Meyer 	int act_delta, addl_page_shortage, starting_page_shortage, refs;
14360292c54bSConrad Meyer 
14370292c54bSConrad Meyer 	object = NULL;
14380292c54bSConrad Meyer 	vm_batchqueue_init(&rq);
14390292c54bSConrad Meyer 	getmicrouptime(&start);
14400d94caffSDavid Greenman 
1441df8bae1dSRodney W. Grimes 	/*
144201f04471SMark Johnston 	 * The addl_page_shortage is an estimate of the number of temporarily
1443311e34e2SKonstantin Belousov 	 * stuck pages in the inactive queue.  In other words, the
1444449c2e92SKonstantin Belousov 	 * number of pages from the inactive count that should be
1445311e34e2SKonstantin Belousov 	 * discounted in setting the target for the active queue scan.
1446311e34e2SKonstantin Belousov 	 */
14479099545aSAlan Cox 	addl_page_shortage = 0;
14489099545aSAlan Cox 
14491c7c3c6aSMatthew Dillon 	/*
1450f095d1bbSAlan Cox 	 * Start scanning the inactive queue for pages that we can free.  The
1451f095d1bbSAlan Cox 	 * scan will stop when we reach the target or we have scanned the
14525cff1f4dSMark Johnston 	 * entire queue.  (Note that m->a.act_count is not used to make
1453f095d1bbSAlan Cox 	 * decisions for the inactive queue, only for the active queue.)
14548d220203SAlan Cox 	 */
14550292c54bSConrad Meyer 	starting_page_shortage = page_shortage;
14560292c54bSConrad Meyer 	marker = &marker_page;
14570292c54bSConrad Meyer 	vm_page_init_marker(marker, PQ_INACTIVE, 0);
14585cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
14598d220203SAlan Cox 	vm_pagequeue_lock(pq);
14605cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
14615cd29d0fSMark Johnston 	while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
14625cd29d0fSMark Johnston 		KASSERT((m->flags & PG_MARKER) == 0,
14635cd29d0fSMark Johnston 		    ("marker page %p was dequeued", m));
1464df8bae1dSRodney W. Grimes 
1465936524aaSMatthew Dillon 		/*
1466b7f30bffSMark Johnston 		 * Don't touch a page that was removed from the queue after the
1467b7f30bffSMark Johnston 		 * page queue lock was released.  Otherwise, ensure that any
1468b7f30bffSMark Johnston 		 * pending queue operations, such as dequeues for wired pages,
1469b7f30bffSMark Johnston 		 * are handled.
1470936524aaSMatthew Dillon 		 */
1471b7f30bffSMark Johnston 		if (vm_pageout_defer(m, PQ_INACTIVE, false))
1472936524aaSMatthew Dillon 			continue;
1473e8bcf696SMark Johnston 
14749f5632e6SMark Johnston 		/*
14759f5632e6SMark Johnston 		 * Lock the page's object.
14769f5632e6SMark Johnston 		 */
14779f5632e6SMark Johnston 		if (object == NULL || object != m->object) {
147860256604SMark Johnston 			if (object != NULL)
14795cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
148023ed568cSMateusz Guzik 			object = atomic_load_ptr(&m->object);
14819f5632e6SMark Johnston 			if (__predict_false(object == NULL))
14829f5632e6SMark Johnston 				/* The page is being freed by another thread. */
14839f5632e6SMark Johnston 				continue;
14849f5632e6SMark Johnston 
1485e8bcf696SMark Johnston 			/* Depends on type-stability. */
148641fd4b94SMark Johnston 			VM_OBJECT_WLOCK(object);
14879f5632e6SMark Johnston 			if (__predict_false(m->object != object)) {
14889f5632e6SMark Johnston 				VM_OBJECT_WUNLOCK(object);
14899f5632e6SMark Johnston 				object = NULL;
14909f5632e6SMark Johnston 				goto reinsert;
149141fd4b94SMark Johnston 			}
149241fd4b94SMark Johnston 		}
14935cd29d0fSMark Johnston 
149463e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0) {
1495a3aeedabSAlan Cox 			/*
1496a3aeedabSAlan Cox 			 * Don't mess with busy pages.  Leave them at
1497a3aeedabSAlan Cox 			 * the front of the queue.  Most likely, they
1498a3aeedabSAlan Cox 			 * are being paged out and will leave the
1499a3aeedabSAlan Cox 			 * queue shortly after the scan finishes.  So,
1500a3aeedabSAlan Cox 			 * they ought to be discounted from the
1501a3aeedabSAlan Cox 			 * inactive count.
1502a3aeedabSAlan Cox 			 */
1503a3aeedabSAlan Cox 			addl_page_shortage++;
15045cd29d0fSMark Johnston 			goto reinsert;
150526f9a767SRodney W. Grimes 		}
150648cc2fc7SKonstantin Belousov 
1507a8081778SJeff Roberson 		/* Deferred free of swap space. */
1508a8081778SJeff Roberson 		if ((m->a.flags & PGA_SWAP_FREE) != 0)
1509a8081778SJeff Roberson 			vm_pager_page_unswapped(m);
1510a8081778SJeff Roberson 
151148cc2fc7SKonstantin Belousov 		/*
15129f5632e6SMark Johnston 		 * Check for wirings now that we hold the object lock and have
15139f5632e6SMark Johnston 		 * exclusively busied the page.  If the page is mapped, it may
15149f5632e6SMark Johnston 		 * still be wired by pmap lookups.  The call to
1515fee2a2faSMark Johnston 		 * vm_page_try_remove_all() below atomically checks for such
1516fee2a2faSMark Johnston 		 * wirings and removes mappings.  If the page is unmapped, the
15179f5632e6SMark Johnston 		 * wire count is guaranteed not to increase after this check.
1518fee2a2faSMark Johnston 		 */
15199f5632e6SMark Johnston 		if (__predict_false(vm_page_wired(m)))
1520f3f38e25SMark Johnston 			goto skip_page;
1521fee2a2faSMark Johnston 
1522fee2a2faSMark Johnston 		/*
15238748f58cSKonstantin Belousov 		 * Invalid pages can be easily freed. They cannot be
15248748f58cSKonstantin Belousov 		 * mapped; vm_page_free() asserts this.
1525776f729cSKonstantin Belousov 		 */
15260012f373SJeff Roberson 		if (vm_page_none_valid(m))
15278748f58cSKonstantin Belousov 			goto free_page;
1528776f729cSKonstantin Belousov 
1529f3f38e25SMark Johnston 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1530f3f38e25SMark Johnston 
1531f3f38e25SMark Johnston 		for (old = vm_page_astate_load(m);;) {
1532776f729cSKonstantin Belousov 			/*
1533f3f38e25SMark Johnston 			 * Check to see if the page has been removed from the
1534f3f38e25SMark Johnston 			 * queue since the first such check.  Leave it alone if
1535f3f38e25SMark Johnston 			 * so, discarding any references collected by
1536f3f38e25SMark Johnston 			 * pmap_ts_referenced().
15377e006499SJohn Dyson 			 */
1538f3f38e25SMark Johnston 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1539f3f38e25SMark Johnston 				goto skip_page;
1540f3f38e25SMark Johnston 
1541f3f38e25SMark Johnston 			new = old;
1542f3f38e25SMark Johnston 			act_delta = refs;
1543f3f38e25SMark Johnston 			if ((old.flags & PGA_REFERENCED) != 0) {
1544f3f38e25SMark Johnston 				new.flags &= ~PGA_REFERENCED;
1545d7aeb429SAlan Cox 				act_delta++;
15462fe6e4d7SDavid Greenman 			}
1547f3f38e25SMark Johnston 			if (act_delta == 0) {
1548f3f38e25SMark Johnston 				;
1549f3f38e25SMark Johnston 			} else if (object->ref_count != 0) {
1550e8bcf696SMark Johnston 				/*
1551f3f38e25SMark Johnston 				 * Increase the activation count if the
1552f3f38e25SMark Johnston 				 * page was referenced while in the
1553f3f38e25SMark Johnston 				 * inactive queue.  This makes it less
1554f3f38e25SMark Johnston 				 * likely that the page will be returned
1555f3f38e25SMark Johnston 				 * prematurely to the inactive queue.
1556e8bcf696SMark Johnston 				 */
1557f3f38e25SMark Johnston 				new.act_count += ACT_ADVANCE +
1558f3f38e25SMark Johnston 				    act_delta;
1559f3f38e25SMark Johnston 				if (new.act_count > ACT_MAX)
1560f3f38e25SMark Johnston 					new.act_count = ACT_MAX;
1561f3f38e25SMark Johnston 
1562f7607c30SMark Johnston 				new.flags &= ~PGA_QUEUE_OP_MASK;
1563f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
1564f3f38e25SMark Johnston 				new.queue = PQ_ACTIVE;
1565f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
1566e8bcf696SMark Johnston 					continue;
1567f3f38e25SMark Johnston 
1568f3f38e25SMark Johnston 				VM_CNT_INC(v_reactivated);
1569f3f38e25SMark Johnston 				goto skip_page;
1570ebcddc72SAlan Cox 			} else if ((object->flags & OBJ_DEAD) == 0) {
1571f3f38e25SMark Johnston 				new.queue = PQ_INACTIVE;
1572f3f38e25SMark Johnston 				new.flags |= PGA_REQUEUE;
1573f3f38e25SMark Johnston 				if (!vm_page_pqstate_commit(m, &old, new))
1574f3f38e25SMark Johnston 					continue;
1575f3f38e25SMark Johnston 				goto skip_page;
1576ebcddc72SAlan Cox 			}
1577f3f38e25SMark Johnston 			break;
1578960810ccSAlan Cox 		}
157967bf6868SJohn Dyson 
15807e006499SJohn Dyson 		/*
15819fc4739dSAlan Cox 		 * If the page appears to be clean at the machine-independent
15829fc4739dSAlan Cox 		 * layer, then remove all of its mappings from the pmap in
1583a766ffd0SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
1584a766ffd0SAlan Cox 		 * mappings allow write access, then the page may still be
1585a766ffd0SAlan Cox 		 * modified until the last of those mappings are removed.
15867e006499SJohn Dyson 		 */
1587aa044135SAlan Cox 		if (object->ref_count != 0) {
15889fc4739dSAlan Cox 			vm_page_test_dirty(m);
15899f5632e6SMark Johnston 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
1590f3f38e25SMark Johnston 				goto skip_page;
1591fee2a2faSMark Johnston 		}
1592dcbcd518SBruce Evans 
15936989c456SAlan Cox 		/*
1594ebcddc72SAlan Cox 		 * Clean pages can be freed, but dirty pages must be sent back
1595ebcddc72SAlan Cox 		 * to the laundry, unless they belong to a dead object.
1596ebcddc72SAlan Cox 		 * Requeueing dirty pages from dead objects is pointless, as
1597ebcddc72SAlan Cox 		 * they are being paged out and freed by the thread that
1598ebcddc72SAlan Cox 		 * destroyed the object.
15996989c456SAlan Cox 		 */
1600ebcddc72SAlan Cox 		if (m->dirty == 0) {
16018748f58cSKonstantin Belousov free_page:
16025cd29d0fSMark Johnston 			/*
16039f5632e6SMark Johnston 			 * Now we are guaranteed that no other threads are
16049f5632e6SMark Johnston 			 * manipulating the page, check for a last-second
16059f5632e6SMark Johnston 			 * reference that would save it from doom.
16065cd29d0fSMark Johnston 			 */
16079f5632e6SMark Johnston 			if (vm_pageout_defer(m, PQ_INACTIVE, false))
16089f5632e6SMark Johnston 				goto skip_page;
16099f5632e6SMark Johnston 
16109f5632e6SMark Johnston 			/*
16119f5632e6SMark Johnston 			 * Because we dequeued the page and have already checked
16129f5632e6SMark Johnston 			 * for pending dequeue and enqueue requests, we can
16139f5632e6SMark Johnston 			 * safely disassociate the page from the inactive queue
16149f5632e6SMark Johnston 			 * without holding the queue lock.
16159f5632e6SMark Johnston 			 */
16165cff1f4dSMark Johnston 			m->a.queue = PQ_NONE;
161778afdce6SAlan Cox 			vm_page_free(m);
16185cd29d0fSMark Johnston 			page_shortage--;
161963e97555SJeff Roberson 			continue;
162063e97555SJeff Roberson 		}
162163e97555SJeff Roberson 		if ((object->flags & OBJ_DEAD) == 0)
1622ebcddc72SAlan Cox 			vm_page_launder(m);
1623f3f38e25SMark Johnston skip_page:
1624f3f38e25SMark Johnston 		vm_page_xunbusy(m);
16255cd29d0fSMark Johnston 		continue;
16265cd29d0fSMark Johnston reinsert:
16275cd29d0fSMark Johnston 		vm_pageout_reinsert_inactive(&ss, &rq, m);
16285cd29d0fSMark Johnston 	}
162960256604SMark Johnston 	if (object != NULL)
163089f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
16315cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &rq, NULL);
16325cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
16338d220203SAlan Cox 	vm_pagequeue_lock(pq);
16345cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
16358d220203SAlan Cox 	vm_pagequeue_unlock(pq);
163626f9a767SRodney W. Grimes 
16370292c54bSConrad Meyer 	/*
16380292c54bSConrad Meyer 	 * Record the remaining shortage, the progress made, and the rate at which it was made.
16390292c54bSConrad Meyer 	 */
16400292c54bSConrad Meyer 	atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
16410292c54bSConrad Meyer 	getmicrouptime(&end);
16420292c54bSConrad Meyer 	timevalsub(&end, &start);
16430292c54bSConrad Meyer 	atomic_add_int(&vmd->vmd_inactive_us,
16440292c54bSConrad Meyer 	    end.tv_sec * 1000000 + end.tv_usec);
16450292c54bSConrad Meyer 	atomic_add_int(&vmd->vmd_inactive_freed,
16460292c54bSConrad Meyer 	    starting_page_shortage - page_shortage);
16470292c54bSConrad Meyer }
16480292c54bSConrad Meyer 
16490292c54bSConrad Meyer /*
16500292c54bSConrad Meyer  * Dispatch a number of inactive-queue scan threads according to load and collect
16512913cc46SMark Johnston  * the results to present a coherent view of paging activity on this domain.
16520292c54bSConrad Meyer  */
16530292c54bSConrad Meyer static int
16540292c54bSConrad Meyer vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
16550292c54bSConrad Meyer {
16562913cc46SMark Johnston 	u_int freed, pps, slop, threads, us;
16570292c54bSConrad Meyer 
16580292c54bSConrad Meyer 	vmd->vmd_inactive_shortage = shortage;
16592913cc46SMark Johnston 	slop = 0;
16600292c54bSConrad Meyer 
16610292c54bSConrad Meyer 	/*
16620292c54bSConrad Meyer 	 * If we have more work than we can do in a quarter of our interval, we
16630292c54bSConrad Meyer 	 * fire off multiple threads to process it.
16640292c54bSConrad Meyer 	 */
16650292c54bSConrad Meyer 	threads = vmd->vmd_inactive_threads;
16662913cc46SMark Johnston 	if (threads > 1 && vmd->vmd_inactive_pps != 0 &&
16672913cc46SMark Johnston 	    shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
16680292c54bSConrad Meyer 		vmd->vmd_inactive_shortage /= threads;
16692913cc46SMark Johnston 		slop = shortage % threads;
16702913cc46SMark Johnston 		vm_domain_pageout_lock(vmd);
16710292c54bSConrad Meyer 		blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
16720292c54bSConrad Meyer 		blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
16730292c54bSConrad Meyer 		wakeup(&vmd->vmd_inactive_shortage);
16740292c54bSConrad Meyer 		vm_domain_pageout_unlock(vmd);
16750292c54bSConrad Meyer 	}
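	/*
	 * For example, with a measured per-thread rate of 40000 pages per
	 * second, one thread covers about 40000 / VM_INACT_SCAN_RATE pages
	 * per scan interval; when multiple threads are configured, only a
	 * shortage exceeding a quarter of that is split across the helpers,
	 * and the division remainder ("slop") is handled by the local scan
	 * below.
	 */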
16760292c54bSConrad Meyer 
16770292c54bSConrad Meyer 	/* Run the local thread scan. */
16782913cc46SMark Johnston 	vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop);
16790292c54bSConrad Meyer 
16800292c54bSConrad Meyer 	/*
16810292c54bSConrad Meyer 	 * Block until helper threads report results and then accumulate
16820292c54bSConrad Meyer 	 * totals.
16830292c54bSConrad Meyer 	 */
16840292c54bSConrad Meyer 	blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
16850292c54bSConrad Meyer 	freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
16860292c54bSConrad Meyer 	VM_CNT_ADD(v_dfree, freed);
16870292c54bSConrad Meyer 
16880292c54bSConrad Meyer 	/*
16890292c54bSConrad Meyer 	 * Calculate the per-thread paging rate with an exponential decay of
16900292c54bSConrad Meyer 	 * prior results.  Careful to avoid integer rounding errors with large
16910292c54bSConrad Meyer 	 * us values.
16920292c54bSConrad Meyer 	 */
16930292c54bSConrad Meyer 	us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
16940292c54bSConrad Meyer 	if (us > 1000000)
16950292c54bSConrad Meyer 		/* Keep rounding to tenths */
16960292c54bSConrad Meyer 		pps = (freed * 10) / ((us * 10) / 1000000);
16970292c54bSConrad Meyer 	else
16980292c54bSConrad Meyer 		pps = (1000000 / us) * freed;
16990292c54bSConrad Meyer 	vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);
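	/*
	 * For example, freeing 5000 pages in a combined 250000us of scan time
	 * gives pps == 20000; the stored estimate then moves halfway from its
	 * previous value toward that figure.
	 */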
17000292c54bSConrad Meyer 
17010292c54bSConrad Meyer 	return (shortage - freed);
17020292c54bSConrad Meyer }
17030292c54bSConrad Meyer 
17040292c54bSConrad Meyer /*
17050292c54bSConrad Meyer  * Attempt to reclaim the requested number of pages from the inactive queue.
17060292c54bSConrad Meyer  * Returns true if the shortage was addressed.
17070292c54bSConrad Meyer  */
17080292c54bSConrad Meyer static int
17090292c54bSConrad Meyer vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
17100292c54bSConrad Meyer {
17110292c54bSConrad Meyer 	struct vm_pagequeue *pq;
17120292c54bSConrad Meyer 	u_int addl_page_shortage, deficit, page_shortage;
17130292c54bSConrad Meyer 	u_int starting_page_shortage;
17140292c54bSConrad Meyer 
17150292c54bSConrad Meyer 	/*
17160292c54bSConrad Meyer 	 * vmd_pageout_deficit counts the number of pages requested in
17170292c54bSConrad Meyer 	 * allocations that failed because of a free page shortage.  We assume
17180292c54bSConrad Meyer 	 * that the allocations will be reattempted and thus include the deficit
17190292c54bSConrad Meyer 	 * in our scan target.
17200292c54bSConrad Meyer 	 */
17210292c54bSConrad Meyer 	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
17220292c54bSConrad Meyer 	starting_page_shortage = shortage + deficit;
17230292c54bSConrad Meyer 
17240292c54bSConrad Meyer 	/*
17250292c54bSConrad Meyer 	 * Run the inactive scan on as many threads as is necessary.
17260292c54bSConrad Meyer 	 */
17270292c54bSConrad Meyer 	page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
17280292c54bSConrad Meyer 	addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);
17295cd29d0fSMark Johnston 
1730ebcddc72SAlan Cox 	/*
1731ebcddc72SAlan Cox 	 * Wake up the laundry thread so that it can perform any needed
1732ebcddc72SAlan Cox 	 * laundering.  If we didn't meet our target, we're in shortfall and
1733b1fd102eSMark Johnston 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1734b1fd102eSMark Johnston 	 * swap devices are configured, the laundry thread has no work to do, so
1735b1fd102eSMark Johnston 	 * don't bother waking it up.
1736cb35676eSMark Johnston 	 *
1737cb35676eSMark Johnston 	 * The laundry thread uses the number of inactive queue scans elapsed
1738cb35676eSMark Johnston 	 * since the last laundering to determine whether to launder again, so
1739cb35676eSMark Johnston 	 * keep count.
1740ebcddc72SAlan Cox 	 */
1741cb35676eSMark Johnston 	if (starting_page_shortage > 0) {
1742e2068d0bSJeff Roberson 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1743ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1744e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1745cb35676eSMark Johnston 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1746ebcddc72SAlan Cox 			if (page_shortage > 0) {
1747e2068d0bSJeff Roberson 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
174883c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdshortfalls);
1749e2068d0bSJeff Roberson 			} else if (vmd->vmd_laundry_request !=
1750e2068d0bSJeff Roberson 			    VM_LAUNDRY_SHORTFALL)
1751e2068d0bSJeff Roberson 				vmd->vmd_laundry_request =
1752e2068d0bSJeff Roberson 				    VM_LAUNDRY_BACKGROUND;
1753e2068d0bSJeff Roberson 			wakeup(&vmd->vmd_laundry_request);
1754b1fd102eSMark Johnston 		}
175560684862SMark Johnston 		vmd->vmd_clean_pages_freed +=
175660684862SMark Johnston 		    starting_page_shortage - page_shortage;
1757ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1758ebcddc72SAlan Cox 	}
1759ebcddc72SAlan Cox 
17609452b5edSAlan Cox 	/*
1761f095d1bbSAlan Cox 	 * Wake up the swapout daemon if we didn't free the targeted number of
1762f095d1bbSAlan Cox 	 * pages.
17639452b5edSAlan Cox 	 */
1764ac04195bSKonstantin Belousov 	if (page_shortage > 0)
1765ac04195bSKonstantin Belousov 		vm_swapout_run();
17669452b5edSAlan Cox 
17679452b5edSAlan Cox 	/*
176876386c7eSKonstantin Belousov 	 * If the inactive queue scan fails repeatedly to meet its
176976386c7eSKonstantin Belousov 	 * target, kill the largest process.
177076386c7eSKonstantin Belousov 	 */
177176386c7eSKonstantin Belousov 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
177276386c7eSKonstantin Belousov 
177376386c7eSKonstantin Belousov 	/*
1774be37ee79SMark Johnston 	 * Reclaim pages by swapping out idle processes, if configured to do so.
17751c7c3c6aSMatthew Dillon 	 */
1776ac04195bSKonstantin Belousov 	vm_swapout_run_idle();
1777be37ee79SMark Johnston 
1778be37ee79SMark Johnston 	/*
1779be37ee79SMark Johnston 	 * See the description of addl_page_shortage above.
1780be37ee79SMark Johnston 	 */
1781be37ee79SMark Johnston 	*addl_shortage = addl_page_shortage + deficit;
1782be37ee79SMark Johnston 
1783e57dd910SAlan Cox 	return (page_shortage <= 0);
17842025d69bSKonstantin Belousov }
17852025d69bSKonstantin Belousov 
1786449c2e92SKonstantin Belousov static int vm_pageout_oom_vote;
1787449c2e92SKonstantin Belousov 
1788449c2e92SKonstantin Belousov /*
1789449c2e92SKonstantin Belousov  * The pagedaemon threads randomly select one to perform the
1790449c2e92SKonstantin Belousov  * OOM kill.  Trying to kill processes before all pagedaemons
1791449c2e92SKonstantin Belousov  * have failed to reach the free target is premature.
1792449c2e92SKonstantin Belousov  */
1793449c2e92SKonstantin Belousov static void
179476386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
179576386c7eSKonstantin Belousov     int starting_page_shortage)
1796449c2e92SKonstantin Belousov {
1797449c2e92SKonstantin Belousov 	int old_vote;
1798449c2e92SKonstantin Belousov 
179976386c7eSKonstantin Belousov 	if (starting_page_shortage <= 0 || starting_page_shortage !=
180076386c7eSKonstantin Belousov 	    page_shortage)
180176386c7eSKonstantin Belousov 		vmd->vmd_oom_seq = 0;
180276386c7eSKonstantin Belousov 	else
180376386c7eSKonstantin Belousov 		vmd->vmd_oom_seq++;
180476386c7eSKonstantin Belousov 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1805449c2e92SKonstantin Belousov 		if (vmd->vmd_oom) {
1806449c2e92SKonstantin Belousov 			vmd->vmd_oom = FALSE;
1807449c2e92SKonstantin Belousov 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1808449c2e92SKonstantin Belousov 		}
1809449c2e92SKonstantin Belousov 		return;
1810449c2e92SKonstantin Belousov 	}
1811449c2e92SKonstantin Belousov 
181276386c7eSKonstantin Belousov 	/*
181376386c7eSKonstantin Belousov 	 * Do not follow the call sequence until the OOM condition is
181476386c7eSKonstantin Belousov 	 * cleared.
181576386c7eSKonstantin Belousov 	 */
181676386c7eSKonstantin Belousov 	vmd->vmd_oom_seq = 0;
181776386c7eSKonstantin Belousov 
1818449c2e92SKonstantin Belousov 	if (vmd->vmd_oom)
1819449c2e92SKonstantin Belousov 		return;
1820449c2e92SKonstantin Belousov 
1821449c2e92SKonstantin Belousov 	vmd->vmd_oom = TRUE;
1822449c2e92SKonstantin Belousov 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1823449c2e92SKonstantin Belousov 	if (old_vote != vm_ndomains - 1)
1824449c2e92SKonstantin Belousov 		return;
1825449c2e92SKonstantin Belousov 
1826449c2e92SKonstantin Belousov 	/*
1827449c2e92SKonstantin Belousov 	 * The current pagedaemon thread is the last in the quorum to
1828449c2e92SKonstantin Belousov 	 * start OOM.  Initiate the selection and signaling of the
1829449c2e92SKonstantin Belousov 	 * victim.
1830449c2e92SKonstantin Belousov 	 */
1831449c2e92SKonstantin Belousov 	vm_pageout_oom(VM_OOM_MEM);
1832449c2e92SKonstantin Belousov 
1833449c2e92SKonstantin Belousov 	/*
1834449c2e92SKonstantin Belousov 	 * After one round of OOM terror, recall our vote.  On the
1835449c2e92SKonstantin Belousov 	 * next pass, current pagedaemon would vote again if the low
1836449c2e92SKonstantin Belousov 	 * memory condition is still there, due to vmd_oom being
1837449c2e92SKonstantin Belousov 	 * false.
1838449c2e92SKonstantin Belousov 	 */
1839449c2e92SKonstantin Belousov 	vmd->vmd_oom = FALSE;
1840449c2e92SKonstantin Belousov 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1841449c2e92SKonstantin Belousov }
18422025d69bSKonstantin Belousov 
18433949873fSKonstantin Belousov /*
18443949873fSKonstantin Belousov  * The OOM killer is the page daemon's action of last resort when
18453949873fSKonstantin Belousov  * memory allocation requests have been stalled for a prolonged period
18463949873fSKonstantin Belousov  * of time because it cannot reclaim memory.  This function computes
18473949873fSKonstantin Belousov  * the approximate number of physical pages that could be reclaimed if
18483949873fSKonstantin Belousov  * the specified address space is destroyed.
18493949873fSKonstantin Belousov  *
18503949873fSKonstantin Belousov  * Private, anonymous memory owned by the address space is the
18513949873fSKonstantin Belousov  * principal resource that we expect to recover after an OOM kill.
18523949873fSKonstantin Belousov  * Since the physical pages mapped by the address space's COW entries
18533949873fSKonstantin Belousov  * are typically shared pages, they are unlikely to be released and so
18543949873fSKonstantin Belousov  * they are not counted.
18553949873fSKonstantin Belousov  *
18563949873fSKonstantin Belousov  * To get to the point where the page daemon runs the OOM killer, its
18573949873fSKonstantin Belousov  * efforts to write-back vnode-backed pages may have stalled.  This
18583949873fSKonstantin Belousov  * could be caused by a memory allocation deadlock in the write path
18593949873fSKonstantin Belousov  * that might be resolved by an OOM kill.  Therefore, physical pages
18603949873fSKonstantin Belousov  * belonging to vnode-backed objects are counted, because they might
18613949873fSKonstantin Belousov  * be freed without being written out first if the address space holds
18623949873fSKonstantin Belousov  * the last reference to an unlinked vnode.
18633949873fSKonstantin Belousov  *
18643949873fSKonstantin Belousov  * Similarly, physical pages belonging to OBJT_PHYS objects are
18653949873fSKonstantin Belousov  * counted because the address space might hold the last reference to
18663949873fSKonstantin Belousov  * the object.
18673949873fSKonstantin Belousov  */
18683949873fSKonstantin Belousov static long
18693949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace)
18703949873fSKonstantin Belousov {
18713949873fSKonstantin Belousov 	vm_map_t map;
18723949873fSKonstantin Belousov 	vm_map_entry_t entry;
18733949873fSKonstantin Belousov 	vm_object_t obj;
18743949873fSKonstantin Belousov 	long res;
18753949873fSKonstantin Belousov 
18763949873fSKonstantin Belousov 	map = &vmspace->vm_map;
18773949873fSKonstantin Belousov 	KASSERT(!map->system_map, ("system map"));
18783949873fSKonstantin Belousov 	sx_assert(&map->lock, SA_LOCKED);
18793949873fSKonstantin Belousov 	res = 0;
18802288078cSDoug Moore 	VM_MAP_ENTRY_FOREACH(entry, map) {
18813949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
18823949873fSKonstantin Belousov 			continue;
18833949873fSKonstantin Belousov 		obj = entry->object.vm_object;
18843949873fSKonstantin Belousov 		if (obj == NULL)
18853949873fSKonstantin Belousov 			continue;
18863949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
18873949873fSKonstantin Belousov 		    obj->ref_count != 1)
18883949873fSKonstantin Belousov 			continue;
18897079449bSKonstantin Belousov 		if (obj->type == OBJT_DEFAULT || obj->type == OBJT_PHYS ||
18907079449bSKonstantin Belousov 		    obj->type == OBJT_VNODE || (obj->flags & OBJ_SWAP) != 0)
18913949873fSKonstantin Belousov 			res += obj->resident_page_count;
18923949873fSKonstantin Belousov 	}
18933949873fSKonstantin Belousov 	return (res);
18943949873fSKonstantin Belousov }
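/*
 * As a rough illustration of the estimate above, a victim holding 1 GiB of
 * resident private anonymous memory on a system with 4 KiB pages contributes
 * about 262144 pages, in addition to whatever vmspace_swap_count() reports
 * for its swapped-out pages in vm_pageout_oom() below.
 */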
18953949873fSKonstantin Belousov 
1896245139c6SKonstantin Belousov static int vm_oom_ratelim_last;
1897245139c6SKonstantin Belousov static int vm_oom_pf_secs = 10;
1898245139c6SKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1899245139c6SKonstantin Belousov     "");
1900245139c6SKonstantin Belousov static struct mtx vm_oom_ratelim_mtx;
1901245139c6SKonstantin Belousov 
19022025d69bSKonstantin Belousov void
19032025d69bSKonstantin Belousov vm_pageout_oom(int shortage)
19042025d69bSKonstantin Belousov {
1905*4a864f62SMark Johnston 	const char *reason;
19062025d69bSKonstantin Belousov 	struct proc *p, *bigproc;
19072025d69bSKonstantin Belousov 	vm_offset_t size, bigsize;
19082025d69bSKonstantin Belousov 	struct thread *td;
19096bed074cSKonstantin Belousov 	struct vmspace *vm;
1910245139c6SKonstantin Belousov 	int now;
19113e78e983SAlan Cox 	bool breakout;
19122025d69bSKonstantin Belousov 
19132025d69bSKonstantin Belousov 	/*
1914245139c6SKonstantin Belousov 	 * For OOM requests originating from vm_fault(), there is a high
1915245139c6SKonstantin Belousov 	 * chance that a single large process faults simultaneously in
1916245139c6SKonstantin Belousov 	 * several threads.  Also, on an active system running many
1917245139c6SKonstantin Belousov 	 * medium-sized processes, like buildworld, all of them
1918245139c6SKonstantin Belousov 	 * could fault almost simultaneously as well.
1919245139c6SKonstantin Belousov 	 *
1920245139c6SKonstantin Belousov 	 * To avoid killing too many processes, rate-limit OOMs
1921245139c6SKonstantin Belousov 	 * initiated by vm_fault() time-outs on the waits for free
1922245139c6SKonstantin Belousov 	 * pages.
1923245139c6SKonstantin Belousov 	 */
1924245139c6SKonstantin Belousov 	mtx_lock(&vm_oom_ratelim_mtx);
1925245139c6SKonstantin Belousov 	now = ticks;
1926245139c6SKonstantin Belousov 	if (shortage == VM_OOM_MEM_PF &&
1927245139c6SKonstantin Belousov 	    (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1928245139c6SKonstantin Belousov 		mtx_unlock(&vm_oom_ratelim_mtx);
1929245139c6SKonstantin Belousov 		return;
1930245139c6SKonstantin Belousov 	}
1931245139c6SKonstantin Belousov 	vm_oom_ratelim_last = now;
1932245139c6SKonstantin Belousov 	mtx_unlock(&vm_oom_ratelim_mtx);
1933245139c6SKonstantin Belousov 
1934245139c6SKonstantin Belousov 	/*
19351c58e4e5SJohn Baldwin 	 * We keep the process bigproc locked once we find it to keep anyone
19361c58e4e5SJohn Baldwin 	 * from messing with it; however, there is a possibility of
193728323addSBryan Drewery 	 * deadlock if process B is bigproc and one of its child processes
19381c58e4e5SJohn Baldwin 	 * attempts to propagate a signal to B while we are waiting for the lock of
19391c58e4e5SJohn Baldwin 	 * another process A as we walk this list.  To avoid this, we don't block on
19401c58e4e5SJohn Baldwin 	 * the process lock but just skip a process if it is already locked.
19415663e6deSDavid Greenman 	 */
19425663e6deSDavid Greenman 	bigproc = NULL;
19435663e6deSDavid Greenman 	bigsize = 0;
19441005a129SJohn Baldwin 	sx_slock(&allproc_lock);
1945e602ba25SJulian Elischer 	FOREACH_PROC_IN_SYSTEM(p) {
194671943c3dSKonstantin Belousov 		PROC_LOCK(p);
194771943c3dSKonstantin Belousov 
19481c58e4e5SJohn Baldwin 		/*
19493f1c4c4fSKonstantin Belousov 		 * If this is a system, protected or killed process, skip it.
19505663e6deSDavid Greenman 		 */
195171943c3dSKonstantin Belousov 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
195271943c3dSKonstantin Belousov 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
195371943c3dSKonstantin Belousov 		    p->p_pid == 1 || P_KILLED(p) ||
195471943c3dSKonstantin Belousov 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
19558606d880SJohn Baldwin 			PROC_UNLOCK(p);
19565663e6deSDavid Greenman 			continue;
19575663e6deSDavid Greenman 		}
19585663e6deSDavid Greenman 		/*
1959dcbcd518SBruce Evans 		 * If the process is in a non-running type state,
1960e602ba25SJulian Elischer 		 * don't touch it.  Check all the threads individually.
19615663e6deSDavid Greenman 		 */
19623e78e983SAlan Cox 		breakout = false;
1963e602ba25SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
1964982d11f8SJeff Roberson 			thread_lock(td);
196571fad9fdSJulian Elischer 			if (!TD_ON_RUNQ(td) &&
196671fad9fdSJulian Elischer 			    !TD_IS_RUNNING(td) &&
1967f497cda2SEdward Tomasz Napierala 			    !TD_IS_SLEEPING(td) &&
1968b98acc0aSKonstantin Belousov 			    !TD_IS_SUSPENDED(td) &&
1969b98acc0aSKonstantin Belousov 			    !TD_IS_SWAPPED(td)) {
1970982d11f8SJeff Roberson 				thread_unlock(td);
19713e78e983SAlan Cox 				breakout = true;
1972e602ba25SJulian Elischer 				break;
1973e602ba25SJulian Elischer 			}
1974982d11f8SJeff Roberson 			thread_unlock(td);
1975e602ba25SJulian Elischer 		}
1976e602ba25SJulian Elischer 		if (breakout) {
19771c58e4e5SJohn Baldwin 			PROC_UNLOCK(p);
19785663e6deSDavid Greenman 			continue;
19795663e6deSDavid Greenman 		}
19805663e6deSDavid Greenman 		/*
19815663e6deSDavid Greenman 		 * get the process size
19825663e6deSDavid Greenman 		 */
19836bed074cSKonstantin Belousov 		vm = vmspace_acquire_ref(p);
19846bed074cSKonstantin Belousov 		if (vm == NULL) {
19856bed074cSKonstantin Belousov 			PROC_UNLOCK(p);
19866bed074cSKonstantin Belousov 			continue;
19876bed074cSKonstantin Belousov 		}
198895e2409aSKonstantin Belousov 		_PHOLD_LITE(p);
198972d97679SDavid Schultz 		PROC_UNLOCK(p);
199095e2409aSKonstantin Belousov 		sx_sunlock(&allproc_lock);
199195e2409aSKonstantin Belousov 		if (!vm_map_trylock_read(&vm->vm_map)) {
199271943c3dSKonstantin Belousov 			vmspace_free(vm);
199395e2409aSKonstantin Belousov 			sx_slock(&allproc_lock);
199495e2409aSKonstantin Belousov 			PRELE(p);
199572d97679SDavid Schultz 			continue;
199672d97679SDavid Schultz 		}
19977981aa24SKonstantin Belousov 		size = vmspace_swap_count(vm);
1998245139c6SKonstantin Belousov 		if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
19993949873fSKonstantin Belousov 			size += vm_pageout_oom_pagecount(vm);
20003949873fSKonstantin Belousov 		vm_map_unlock_read(&vm->vm_map);
20016bed074cSKonstantin Belousov 		vmspace_free(vm);
200295e2409aSKonstantin Belousov 		sx_slock(&allproc_lock);
20033949873fSKonstantin Belousov 
20045663e6deSDavid Greenman 		/*
20053949873fSKonstantin Belousov 		 * If this process is bigger than the biggest one,
20065663e6deSDavid Greenman 		 * remember it.
20075663e6deSDavid Greenman 		 */
20085663e6deSDavid Greenman 		if (size > bigsize) {
20091c58e4e5SJohn Baldwin 			if (bigproc != NULL)
201071943c3dSKonstantin Belousov 				PRELE(bigproc);
20115663e6deSDavid Greenman 			bigproc = p;
20125663e6deSDavid Greenman 			bigsize = size;
201371943c3dSKonstantin Belousov 		} else {
201471943c3dSKonstantin Belousov 			PRELE(p);
201571943c3dSKonstantin Belousov 		}
20165663e6deSDavid Greenman 	}
20171005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
2018*4a864f62SMark Johnston 
20195663e6deSDavid Greenman 	if (bigproc != NULL) {
2020*4a864f62SMark Johnston 		switch (shortage) {
2021*4a864f62SMark Johnston 		case VM_OOM_MEM:
2022*4a864f62SMark Johnston 			reason = "failed to reclaim memory";
2023*4a864f62SMark Johnston 			break;
2024*4a864f62SMark Johnston 		case VM_OOM_MEM_PF:
2025*4a864f62SMark Johnston 			reason = "a thread waited too long to allocate a page";
2026*4a864f62SMark Johnston 			break;
2027*4a864f62SMark Johnston 		case VM_OOM_SWAPZ:
2028*4a864f62SMark Johnston 			reason = "out of swap space";
2029*4a864f62SMark Johnston 			break;
2030*4a864f62SMark Johnston 		default:
2031*4a864f62SMark Johnston 			panic("unknown OOM reason %d", shortage);
2032*4a864f62SMark Johnston 		}
20333c200db9SJonathan T. Looney 		if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
2034*4a864f62SMark Johnston 			panic("%s", reason);
203571943c3dSKonstantin Belousov 		PROC_LOCK(bigproc);
2036*4a864f62SMark Johnston 		killproc(bigproc, reason);
2037fa885116SJulian Elischer 		sched_nice(bigproc, PRIO_MIN);
203871943c3dSKonstantin Belousov 		_PRELE(bigproc);
20391c58e4e5SJohn Baldwin 		PROC_UNLOCK(bigproc);
20405663e6deSDavid Greenman 	}
20415663e6deSDavid Greenman }
204226f9a767SRodney W. Grimes 
20438fc25508SMark Johnston /*
20448fc25508SMark Johnston  * Signal a free page shortage to subsystems that have registered an event
20458fc25508SMark Johnston  * handler.  Reclaim memory from UMA in the event of a severe shortage.
20468fc25508SMark Johnston  * Return true if the free page count should be re-evaluated.
20478fc25508SMark Johnston  */
2048b50a4ea6SMark Johnston static bool
2049b50a4ea6SMark Johnston vm_pageout_lowmem(void)
205049a3710cSMark Johnston {
2051b50a4ea6SMark Johnston 	static int lowmem_ticks = 0;
2052b50a4ea6SMark Johnston 	int last;
20538fc25508SMark Johnston 	bool ret;
20548fc25508SMark Johnston 
20558fc25508SMark Johnston 	ret = false;
205649a3710cSMark Johnston 
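	/*
	 * Invoke the handlers at most once every lowmem_period seconds; the
	 * lockless compare-and-set on lowmem_ticks keeps concurrent page
	 * daemon threads from running them back to back.
	 */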
2057b50a4ea6SMark Johnston 	last = atomic_load_int(&lowmem_ticks);
2058b50a4ea6SMark Johnston 	while ((u_int)(ticks - last) / hz >= lowmem_period) {
2059b50a4ea6SMark Johnston 		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
2060b50a4ea6SMark Johnston 			continue;
2061b50a4ea6SMark Johnston 
206249a3710cSMark Johnston 		/*
206349a3710cSMark Johnston 		 * Decrease registered cache sizes.
206449a3710cSMark Johnston 		 */
206549a3710cSMark Johnston 		SDT_PROBE0(vm, , , vm__lowmem_scan);
206649a3710cSMark Johnston 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
206749a3710cSMark Johnston 
206849a3710cSMark Johnston 		/*
206949a3710cSMark Johnston 		 * Trim UMA zones explicitly, after the registered caches
20708fc25508SMark Johnston 		 * have been drained by the handlers above.
207149a3710cSMark Johnston 		 */
20728fc25508SMark Johnston 		uma_reclaim(UMA_RECLAIM_TRIM);
20738fc25508SMark Johnston 		ret = true;
2074ace409ceSAlexander Motin 		break;
207549a3710cSMark Johnston 	}
20768fc25508SMark Johnston 
20778fc25508SMark Johnston 	/*
20788fc25508SMark Johnston 	 * Kick off an asynchronous reclaim of cached memory if one of the
20798fc25508SMark Johnston 	 * page daemons is failing to keep up with demand.  Use the "severe"
20808fc25508SMark Johnston 	 * threshold instead of "min" to ensure that we do not blow away the
20818fc25508SMark Johnston 	 * caches if a subset of the NUMA domains are depleted by kernel memory
20828fc25508SMark Johnston 	 * allocations; the domainset iterators automatically skip domains
20838fc25508SMark Johnston 	 * below the "min" threshold on the first pass.
20848fc25508SMark Johnston 	 *
20858fc25508SMark Johnston 	 * The UMA reclaim worker has its own rate-limiting mechanism, so don't
20868fc25508SMark Johnston 	 * worry about kicking it too often.
20878fc25508SMark Johnston 	 */
20888fc25508SMark Johnston 	if (vm_page_count_severe())
20898fc25508SMark Johnston 		uma_reclaim_wakeup();
20908fc25508SMark Johnston 
20918fc25508SMark Johnston 	return (ret);
209249a3710cSMark Johnston }
209349a3710cSMark Johnston 
209449a3710cSMark Johnston static void
2095449c2e92SKonstantin Belousov vm_pageout_worker(void *arg)
2096449c2e92SKonstantin Belousov {
2097e2068d0bSJeff Roberson 	struct vm_domain *vmd;
2098b50a4ea6SMark Johnston 	u_int ofree;
209949a3710cSMark Johnston 	int addl_shortage, domain, shortage;
2100e57dd910SAlan Cox 	bool target_met;
2101449c2e92SKonstantin Belousov 
2102e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
2103e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
21045f8cd1c0SJeff Roberson 	shortage = 0;
2105e57dd910SAlan Cox 	target_met = true;
2106449c2e92SKonstantin Belousov 
2107449c2e92SKonstantin Belousov 	/*
2108949c9186SKonstantin Belousov 	 * XXXKIB It could be useful to bind pageout daemon threads to
2109949c9186SKonstantin Belousov 	 * the cores belonging to the domain from which vm_page_array
2110949c9186SKonstantin Belousov 	 * is allocated.
2111449c2e92SKonstantin Belousov 	 */
2112449c2e92SKonstantin Belousov 
2113e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2114e2068d0bSJeff Roberson 	vmd->vmd_last_active_scan = ticks;
2115449c2e92SKonstantin Belousov 
2116449c2e92SKonstantin Belousov 	/*
2117449c2e92SKonstantin Belousov 	 * The pageout daemon worker is never done, so loop forever.
2118449c2e92SKonstantin Belousov 	 */
2119449c2e92SKonstantin Belousov 	while (TRUE) {
212030fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
212149a3710cSMark Johnston 
212230fbfddaSJeff Roberson 		/*
212330fbfddaSJeff Roberson 		 * We need to clear wanted before we check the limits.  This
212430fbfddaSJeff Roberson 		 * prevents races with wakers who will check wanted after they
212530fbfddaSJeff Roberson 		 * reach the limit.
212630fbfddaSJeff Roberson 		 */
212730fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
212856ce0690SAlan Cox 
212956ce0690SAlan Cox 		/*
21305f8cd1c0SJeff Roberson 		 * Might the page daemon need to run again?
2131449c2e92SKonstantin Belousov 		 */
21325f8cd1c0SJeff Roberson 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
213356ce0690SAlan Cox 			/*
213449a3710cSMark Johnston 			 * Yes.  If the scan failed to produce enough free
213549a3710cSMark Johnston 			 * pages, sleep uninterruptibly for some time in the
213649a3710cSMark Johnston 			 * hope that the laundry thread will clean some pages.
213756ce0690SAlan Cox 			 */
213830fbfddaSJeff Roberson 			vm_domain_pageout_unlock(vmd);
213949a3710cSMark Johnston 			if (!target_met)
21406eebec83SMark Johnston 				pause("pwait", hz / VM_INACT_SCAN_RATE);
2141449c2e92SKonstantin Belousov 		} else {
2142449c2e92SKonstantin Belousov 			/*
21435f8cd1c0SJeff Roberson 			 * No, sleep until the next wakeup or until pages
21445f8cd1c0SJeff Roberson 			 * need to have their reference stats updated.
2145449c2e92SKonstantin Belousov 			 */
21462c0f13aaSKonstantin Belousov 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
214730fbfddaSJeff Roberson 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
21485f8cd1c0SJeff Roberson 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
214983c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdwakeups);
215056ce0690SAlan Cox 		}
2151be37ee79SMark Johnston 
215230fbfddaSJeff Roberson 		/* Prevent spurious wakeups by ensuring that wanted is set. */
215330fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
215430fbfddaSJeff Roberson 
215530fbfddaSJeff Roberson 		/*
215630fbfddaSJeff Roberson 		 * Use the controller to calculate how many pages to free in
2157b50a4ea6SMark Johnston 		 * this interval, and scan the inactive queue.  If the lowmem
2158b50a4ea6SMark Johnston 		 * handlers appear to have freed up some pages, subtract the
2159b50a4ea6SMark Johnston 		 * difference from the inactive queue scan target.
216030fbfddaSJeff Roberson 		 */
21615f8cd1c0SJeff Roberson 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
216249a3710cSMark Johnston 		if (shortage > 0) {
2163b50a4ea6SMark Johnston 			ofree = vmd->vmd_free_count;
2164b50a4ea6SMark Johnston 			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2165b50a4ea6SMark Johnston 				shortage -= min(vmd->vmd_free_count - ofree,
2166b50a4ea6SMark Johnston 				    (u_int)shortage);
21670292c54bSConrad Meyer 			target_met = vm_pageout_inactive(vmd, shortage,
2168be37ee79SMark Johnston 			    &addl_shortage);
216949a3710cSMark Johnston 		} else
217049a3710cSMark Johnston 			addl_shortage = 0;
217156ce0690SAlan Cox 
2172be37ee79SMark Johnston 		/*
2173be37ee79SMark Johnston 		 * Scan the active queue.  A positive value for shortage
2174be37ee79SMark Johnston 		 * indicates that we must aggressively deactivate pages to avoid
2175be37ee79SMark Johnston 		 * a shortfall.
2176be37ee79SMark Johnston 		 */
21777bb4634eSMark Johnston 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
2178be37ee79SMark Johnston 		vm_pageout_scan_active(vmd, shortage);
2179449c2e92SKonstantin Belousov 	}
2180449c2e92SKonstantin Belousov }
2181449c2e92SKonstantin Belousov 
2182df8bae1dSRodney W. Grimes /*
21830292c54bSConrad Meyer  * vm_pageout_helper runs additional pageout daemons in times of high paging
21840292c54bSConrad Meyer  * activity.
21850292c54bSConrad Meyer  */
21860292c54bSConrad Meyer static void
21870292c54bSConrad Meyer vm_pageout_helper(void *arg)
21880292c54bSConrad Meyer {
21890292c54bSConrad Meyer 	struct vm_domain *vmd;
21900292c54bSConrad Meyer 	int domain;
21910292c54bSConrad Meyer 
21920292c54bSConrad Meyer 	domain = (uintptr_t)arg;
21930292c54bSConrad Meyer 	vmd = VM_DOMAIN(domain);
21940292c54bSConrad Meyer 
21950292c54bSConrad Meyer 	vm_domain_pageout_lock(vmd);
21960292c54bSConrad Meyer 	for (;;) {
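		/*
		 * Wait for the primary pageout thread for this domain to post
		 * an inactive queue shortage, scan a share of the inactive
		 * queue, and then report completion through the blockcount
		 * counters.
		 */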
21970292c54bSConrad Meyer 		msleep(&vmd->vmd_inactive_shortage,
21980292c54bSConrad Meyer 		    vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0);
21990292c54bSConrad Meyer 		blockcount_release(&vmd->vmd_inactive_starting, 1);
22000292c54bSConrad Meyer 
22010292c54bSConrad Meyer 		vm_domain_pageout_unlock(vmd);
22020292c54bSConrad Meyer 		vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
22030292c54bSConrad Meyer 		vm_domain_pageout_lock(vmd);
22040292c54bSConrad Meyer 
22050292c54bSConrad Meyer 		/*
22060292c54bSConrad Meyer 		 * Release the running count while the pageout lock is held to
22070292c54bSConrad Meyer 		 * prevent wakeup races.
22080292c54bSConrad Meyer 		 */
22090292c54bSConrad Meyer 		blockcount_release(&vmd->vmd_inactive_running, 1);
22100292c54bSConrad Meyer 	}
22110292c54bSConrad Meyer }
22120292c54bSConrad Meyer 
22130292c54bSConrad Meyer static int
221474f5530dSConrad Meyer get_pageout_threads_per_domain(const struct vm_domain *vmd)
22150292c54bSConrad Meyer {
221674f5530dSConrad Meyer 	unsigned total_pageout_threads, eligible_cpus, domain_cpus;
22170292c54bSConrad Meyer 
221874f5530dSConrad Meyer 	if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
221974f5530dSConrad Meyer 		return (0);
22200292c54bSConrad Meyer 
22210292c54bSConrad Meyer 	/*
22220292c54bSConrad Meyer 	 * Semi-arbitrarily cap the number of pagedaemon threads at roughly
222374f5530dSConrad Meyer 	 * half the number of CPUs in the system.
22240292c54bSConrad Meyer 	 */
222574f5530dSConrad Meyer 	if (pageout_cpus_per_thread < 2)
222674f5530dSConrad Meyer 		pageout_cpus_per_thread = 2;
222774f5530dSConrad Meyer 	else if (pageout_cpus_per_thread > mp_ncpus)
222874f5530dSConrad Meyer 		pageout_cpus_per_thread = mp_ncpus;
22290292c54bSConrad Meyer 
223074f5530dSConrad Meyer 	total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
223174f5530dSConrad Meyer 	domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);
223274f5530dSConrad Meyer 
223374f5530dSConrad Meyer 	/* Pagedaemons are not run in empty domains. */
223474f5530dSConrad Meyer 	eligible_cpus = mp_ncpus;
223574f5530dSConrad Meyer 	for (unsigned i = 0; i < vm_ndomains; i++)
223674f5530dSConrad Meyer 		if (VM_DOMAIN_EMPTY(i))
223774f5530dSConrad Meyer 			eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);
223874f5530dSConrad Meyer 
223974f5530dSConrad Meyer 	/*
224074f5530dSConrad Meyer 	 * Assign a portion of the total pageout threads to this domain
224174f5530dSConrad Meyer 	 * corresponding to the fraction of pagedaemon-eligible CPUs in the
224274f5530dSConrad Meyer 	 * domain.  In asymmetric NUMA systems, domains with more CPUs may be
224374f5530dSConrad Meyer 	 * allocated more threads than domains with fewer CPUs.
224474f5530dSConrad Meyer 	 */
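	/*
	 * For example, with 64 eligible CPUs and pageout_cpus_per_thread of
	 * 16 there are 4 pageout threads in total, and a domain containing
	 * 32 of those CPUs is assigned howmany(4 * 32, 64) = 2 of them.
	 */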
224574f5530dSConrad Meyer 	return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
22460292c54bSConrad Meyer }
22470292c54bSConrad Meyer 
22480292c54bSConrad Meyer /*
22499c770a27SMark Johnston  * Initialize basic pageout daemon settings.  See the comment above the
22509c770a27SMark Johnston  * definition of vm_domain for some explanation of how these thresholds are
22519c770a27SMark Johnston  * used.
2252df8bae1dSRodney W. Grimes  */
22532b14f991SJulian Elischer static void
2254e2068d0bSJeff Roberson vm_pageout_init_domain(int domain)
2255df8bae1dSRodney W. Grimes {
2256e2068d0bSJeff Roberson 	struct vm_domain *vmd;
22575f8cd1c0SJeff Roberson 	struct sysctl_oid *oid;
2258e2068d0bSJeff Roberson 
2259e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
2260e2068d0bSJeff Roberson 	vmd->vmd_interrupt_free_min = 2;
2261f6b04d2bSDavid Greenman 
226245ae1d91SAlan Cox 	/*
226345ae1d91SAlan Cox 	 * v_free_reserved needs to include enough for the largest
226445ae1d91SAlan Cox 	 * swap pager structures plus enough for any pv_entry structs
226545ae1d91SAlan Cox 	 * when paging.
226645ae1d91SAlan Cox 	 */
22670cab71bcSDoug Moore 	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2268e2068d0bSJeff Roberson 	    vmd->vmd_interrupt_free_min;
2269e2068d0bSJeff Roberson 	vmd->vmd_free_reserved = vm_pageout_page_count +
22709c770a27SMark Johnston 	    vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
22719c770a27SMark Johnston 	vmd->vmd_free_min = vmd->vmd_page_count / 200;
2272e2068d0bSJeff Roberson 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2273e2068d0bSJeff Roberson 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2274e2068d0bSJeff Roberson 	vmd->vmd_free_min += vmd->vmd_free_reserved;
2275e2068d0bSJeff Roberson 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
2276e2068d0bSJeff Roberson 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2277e2068d0bSJeff Roberson 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2278e2068d0bSJeff Roberson 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
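	/*
	 * For a large domain the above works out to roughly 0.6% of the
	 * domain's pages for free_min, 2.1% for free_target and 3.2% for
	 * inactive_target, ignoring the small constant terms.
	 */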
2279df8bae1dSRodney W. Grimes 
2280d9e23210SJeff Roberson 	/*
22815f8cd1c0SJeff Roberson 	 * Set the default wakeup threshold to be 10% below the paging
22825f8cd1c0SJeff Roberson 	 * target.  This keeps the steady state out of shortfall.
2283d9e23210SJeff Roberson 	 */
22845f8cd1c0SJeff Roberson 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2285e2068d0bSJeff Roberson 
2286e2068d0bSJeff Roberson 	/*
2287e2068d0bSJeff Roberson 	 * Target amount of memory to move out of the laundry queue during a
2288e2068d0bSJeff Roberson 	 * background laundering.  This is proportional to the amount of system
2289e2068d0bSJeff Roberson 	 * memory.
2290e2068d0bSJeff Roberson 	 */
2291e2068d0bSJeff Roberson 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2292e2068d0bSJeff Roberson 	    vmd->vmd_free_min) / 10;
22935f8cd1c0SJeff Roberson 
22945f8cd1c0SJeff Roberson 	/* Initialize the pageout daemon pid controller. */
22955f8cd1c0SJeff Roberson 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
22965f8cd1c0SJeff Roberson 	    vmd->vmd_free_target, PIDCTRL_BOUND,
22975f8cd1c0SJeff Roberson 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
22985f8cd1c0SJeff Roberson 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
22997029da5cSPawel Biernacki 	    "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
23005f8cd1c0SJeff Roberson 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
23010292c54bSConrad Meyer 
230274f5530dSConrad Meyer 	vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
2303e2068d0bSJeff Roberson }
2304e2068d0bSJeff Roberson 
2305e2068d0bSJeff Roberson static void
2306e2068d0bSJeff Roberson vm_pageout_init(void)
2307e2068d0bSJeff Roberson {
230897458520SMark Johnston 	u_long freecount;
2309e2068d0bSJeff Roberson 	int i;
2310e2068d0bSJeff Roberson 
2311e2068d0bSJeff Roberson 	/*
2312e2068d0bSJeff Roberson 	 * Initialize some paging parameters.
2313e2068d0bSJeff Roberson 	 */
2314e2068d0bSJeff Roberson 	if (vm_cnt.v_page_count < 2000)
2315e2068d0bSJeff Roberson 		vm_pageout_page_count = 8;
2316e2068d0bSJeff Roberson 
2317e2068d0bSJeff Roberson 	freecount = 0;
2318e2068d0bSJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
2319e2068d0bSJeff Roberson 		struct vm_domain *vmd;
2320e2068d0bSJeff Roberson 
2321e2068d0bSJeff Roberson 		vm_pageout_init_domain(i);
2322e2068d0bSJeff Roberson 		vmd = VM_DOMAIN(i);
2323e2068d0bSJeff Roberson 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2324e2068d0bSJeff Roberson 		vm_cnt.v_free_target += vmd->vmd_free_target;
2325e2068d0bSJeff Roberson 		vm_cnt.v_free_min += vmd->vmd_free_min;
2326e2068d0bSJeff Roberson 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2327e2068d0bSJeff Roberson 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2328e2068d0bSJeff Roberson 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2329e2068d0bSJeff Roberson 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
2330e2068d0bSJeff Roberson 		freecount += vmd->vmd_free_count;
2331e2068d0bSJeff Roberson 	}
2332d9e23210SJeff Roberson 
2333d9e23210SJeff Roberson 	/*
2334d9e23210SJeff Roberson 	 * Set the interval in seconds for the active scan.  We want to visit
2335c9612b2dSJeff Roberson 	 * each page at least once every ten minutes, to prevent worst-case
2336c9612b2dSJeff Roberson 	 * paging behavior caused by a stale active LRU.
2337d9e23210SJeff Roberson 	 */
2338d9e23210SJeff Roberson 	if (vm_pageout_update_period == 0)
2339c9612b2dSJeff Roberson 		vm_pageout_update_period = 600;
2340d9e23210SJeff Roberson 
234197458520SMark Johnston 	/*
234297458520SMark Johnston 	 * Set the maximum number of user-wired virtual pages.  Historically the
234397458520SMark Johnston 	 * main source of such pages was mlock(2) and mlockall(2).  Hypervisors
234497458520SMark Johnston 	 * may also request user-wired memory.
234597458520SMark Johnston 	 */
234654a3a114SMark Johnston 	if (vm_page_max_user_wired == 0)
234797458520SMark Johnston 		vm_page_max_user_wired = 4 * freecount / 5;
23484d19f4adSSteven Hartland }
23494d19f4adSSteven Hartland 
23504d19f4adSSteven Hartland /*
23514d19f4adSSteven Hartland  *     vm_pageout is the high level pageout daemon.
23524d19f4adSSteven Hartland  */
23534d19f4adSSteven Hartland static void
23544d19f4adSSteven Hartland vm_pageout(void)
23554d19f4adSSteven Hartland {
2356920239efSMark Johnston 	struct proc *p;
2357920239efSMark Johnston 	struct thread *td;
23580292c54bSConrad Meyer 	int error, first, i, j, pageout_threads;
2359920239efSMark Johnston 
2360920239efSMark Johnston 	p = curproc;
2361920239efSMark Johnston 	td = curthread;
2362df8bae1dSRodney W. Grimes 
2363245139c6SKonstantin Belousov 	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
236424a1cce3SDavid Greenman 	swap_pager_swap_init();
2365920239efSMark Johnston 	for (first = -1, i = 0; i < vm_ndomains; i++) {
236630c5525bSAndrew Gallatin 		if (VM_DOMAIN_EMPTY(i)) {
236730c5525bSAndrew Gallatin 			if (bootverbose)
236830c5525bSAndrew Gallatin 				printf("domain %d empty; skipping pageout\n",
236930c5525bSAndrew Gallatin 				    i);
237030c5525bSAndrew Gallatin 			continue;
237130c5525bSAndrew Gallatin 		}
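		/*
		 * The first non-empty domain is handled by this thread itself
		 * via the vm_pageout_worker() call at the end of this
		 * function; every other domain gets a dedicated kernel thread.
		 */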
2372920239efSMark Johnston 		if (first == -1)
2373920239efSMark Johnston 			first = i;
2374920239efSMark Johnston 		else {
2375920239efSMark Johnston 			error = kthread_add(vm_pageout_worker,
2376920239efSMark Johnston 			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2377920239efSMark Johnston 			if (error != 0)
2378920239efSMark Johnston 				panic("starting pageout for domain %d: %d\n",
2379449c2e92SKonstantin Belousov 				    i, error);
2380dc2efb27SJohn Dyson 		}
238174f5530dSConrad Meyer 		pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
23820292c54bSConrad Meyer 		for (j = 0; j < pageout_threads - 1; j++) {
23830292c54bSConrad Meyer 			error = kthread_add(vm_pageout_helper,
23840292c54bSConrad Meyer 			    (void *)(uintptr_t)i, p, NULL, 0, 0,
23850292c54bSConrad Meyer 			    "dom%d helper%d", i, j);
23860292c54bSConrad Meyer 			if (error != 0)
23870292c54bSConrad Meyer 				panic("starting pageout helper %d for domain "
23880292c54bSConrad Meyer 				    "%d: %d\n", j, i, error);
23890292c54bSConrad Meyer 		}
2390e2068d0bSJeff Roberson 		error = kthread_add(vm_pageout_laundry_worker,
2391920239efSMark Johnston 		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2392e2068d0bSJeff Roberson 		if (error != 0)
2393920239efSMark Johnston 			panic("starting laundry for domain %d: %d", i, error);
2394f919ebdeSDavid Greenman 	}
2395920239efSMark Johnston 	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
239644ec2b63SKonstantin Belousov 	if (error != 0)
239744ec2b63SKonstantin Belousov 		panic("starting uma_reclaim helper, error %d\n", error);
2398920239efSMark Johnston 
2399920239efSMark Johnston 	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2400920239efSMark Johnston 	vm_pageout_worker((void *)(uintptr_t)first);
2401df8bae1dSRodney W. Grimes }
240226f9a767SRodney W. Grimes 
24036b4b77adSAlan Cox /*
2404280d15cdSMark Johnston  * Perform an advisory wakeup of the page daemon.
24056b4b77adSAlan Cox  */
2406e0c5a895SJohn Dyson void
2407e2068d0bSJeff Roberson pagedaemon_wakeup(int domain)
2408e0c5a895SJohn Dyson {
2409e2068d0bSJeff Roberson 	struct vm_domain *vmd;
2410a1c0a785SAlan Cox 
2411e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
241230fbfddaSJeff Roberson 	vm_domain_pageout_assert_unlocked(vmd);
241330fbfddaSJeff Roberson 	if (curproc == pageproc)
241430fbfddaSJeff Roberson 		return;
2415280d15cdSMark Johnston 
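	/*
	 * Only the first waker since the daemon last cleared
	 * vmd_pageout_wanted needs to take the pageout lock and deliver a
	 * wakeup.
	 */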
241630fbfddaSJeff Roberson 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
241730fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
241830fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2419e2068d0bSJeff Roberson 		wakeup(&vmd->vmd_pageout_wanted);
242030fbfddaSJeff Roberson 		vm_domain_pageout_unlock(vmd);
2421e0c5a895SJohn Dyson 	}
2422e0c5a895SJohn Dyson }
2423