xref: /freebsd/sys/vm/vm_pageout.c (revision 899fe184c718ba810aaacc7fc3852149f409388f)
160727d8bSWarner Losh /*-
2796df753SPedro F. Giffuni  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3df57947fSPedro F. Giffuni  *
426f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
726f9a767SRodney W. Grimes  * All rights reserved.
826f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
926f9a767SRodney W. Grimes  * All rights reserved.
108dbca793STor Egge  * Copyright (c) 2005 Yahoo! Technologies Norway AS
118dbca793STor Egge  * All rights reserved.
12df8bae1dSRodney W. Grimes  *
13df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
14df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
15df8bae1dSRodney W. Grimes  *
16df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
17df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
18df8bae1dSRodney W. Grimes  * are met:
19df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
20df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
21df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
22df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
23df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
24df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
255929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
26df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
27df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
28df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
29df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
30df8bae1dSRodney W. Grimes  *    without specific prior written permission.
31df8bae1dSRodney W. Grimes  *
32df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
43df8bae1dSRodney W. Grimes  *
443c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  *
47df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48df8bae1dSRodney W. Grimes  * All rights reserved.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
53df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
54df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
55df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
56df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61df8bae1dSRodney W. Grimes  *
62df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
63df8bae1dSRodney W. Grimes  *
64df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65df8bae1dSRodney W. Grimes  *  School of Computer Science
66df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
67df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
68df8bae1dSRodney W. Grimes  *
69df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
70df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
71df8bae1dSRodney W. Grimes  */
72df8bae1dSRodney W. Grimes 
73df8bae1dSRodney W. Grimes /*
74df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
75df8bae1dSRodney W. Grimes  */
76df8bae1dSRodney W. Grimes 
77874651b1SDavid E. O'Brien #include <sys/cdefs.h>
78874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
79874651b1SDavid E. O'Brien 
80faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
817672ca05SMark Johnston 
82df8bae1dSRodney W. Grimes #include <sys/param.h>
8326f9a767SRodney W. Grimes #include <sys/systm.h>
84b5e8ce9fSBruce Evans #include <sys/kernel.h>
85855a310fSJeff Roberson #include <sys/eventhandler.h>
86fb919e4dSMark Murray #include <sys/lock.h>
87fb919e4dSMark Murray #include <sys/mutex.h>
8826f9a767SRodney W. Grimes #include <sys/proc.h>
899c8b8baaSPeter Wemm #include <sys/kthread.h>
900384fff8SJason Evans #include <sys/ktr.h>
9197824da3SAlan Cox #include <sys/mount.h>
92099e7e95SEdward Tomasz Napierala #include <sys/racct.h>
9326f9a767SRodney W. Grimes #include <sys/resourcevar.h>
94b43179fbSJeff Roberson #include <sys/sched.h>
9514a0d74eSSteven Hartland #include <sys/sdt.h>
96d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
97449c2e92SKonstantin Belousov #include <sys/smp.h>
98a6bf3a9eSRyan Stone #include <sys/time.h>
99f6b04d2bSDavid Greenman #include <sys/vnode.h>
100efeaf95aSDavid Greenman #include <sys/vmmeter.h>
10189f6b863SAttilio Rao #include <sys/rwlock.h>
1021005a129SJohn Baldwin #include <sys/sx.h>
10338efa82bSJohn Dyson #include <sys/sysctl.h>
104df8bae1dSRodney W. Grimes 
105df8bae1dSRodney W. Grimes #include <vm/vm.h>
106efeaf95aSDavid Greenman #include <vm/vm_param.h>
107efeaf95aSDavid Greenman #include <vm/vm_object.h>
108df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
109efeaf95aSDavid Greenman #include <vm/vm_map.h>
110df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
11124a1cce3SDavid Greenman #include <vm/vm_pager.h>
112449c2e92SKonstantin Belousov #include <vm/vm_phys.h>
113e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
11405f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
115efeaf95aSDavid Greenman #include <vm/vm_extern.h>
116670d17b5SJeff Roberson #include <vm/uma.h>
117df8bae1dSRodney W. Grimes 
1182b14f991SJulian Elischer /*
1192b14f991SJulian Elischer  * System initialization
1202b14f991SJulian Elischer  */
1212b14f991SJulian Elischer 
1222b14f991SJulian Elischer /* the kernel process "vm_pageout" */
12311caded3SAlfred Perlstein static void vm_pageout(void);
1244d19f4adSSteven Hartland static void vm_pageout_init(void);
125ebcddc72SAlan Cox static int vm_pageout_clean(vm_page_t m, int *numpagedout);
12634d8b7eaSJeff Roberson static int vm_pageout_cluster(vm_page_t m);
12776386c7eSKonstantin Belousov static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
12876386c7eSKonstantin Belousov     int starting_page_shortage);
12945ae1d91SAlan Cox 
1304d19f4adSSteven Hartland SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
1314d19f4adSSteven Hartland     NULL);
1324d19f4adSSteven Hartland 
1332b14f991SJulian Elischer struct proc *pageproc;
1342b14f991SJulian Elischer 
1352b14f991SJulian Elischer static struct kproc_desc page_kp = {
1362b14f991SJulian Elischer 	"pagedaemon",
1372b14f991SJulian Elischer 	vm_pageout,
1382b14f991SJulian Elischer 	&pageproc
1392b14f991SJulian Elischer };
1404d19f4adSSteven Hartland SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
141237fdd78SRobert Watson     &page_kp);
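/*
 * The two SYSINITs above share the SI_SUB_KTHREAD_PAGE stage, so their
 * relative order is what guarantees that vm_pageout_init() (SI_ORDER_FIRST)
 * runs before kproc_start() (SI_ORDER_SECOND) creates the "pagedaemon"
 * process.
 */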
1422b14f991SJulian Elischer 
14314a0d74eSSteven Hartland SDT_PROVIDER_DEFINE(vm);
14414a0d74eSSteven Hartland SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
14514a0d74eSSteven Hartland 
146ebcddc72SAlan Cox /* Pagedaemon activity rates, in subdivisions of one second. */
147ebcddc72SAlan Cox #define	VM_LAUNDER_RATE		10
1485f8cd1c0SJeff Roberson #define	VM_INACT_SCAN_RATE	10
1492b14f991SJulian Elischer 
15076386c7eSKonstantin Belousov static int vm_pageout_oom_seq = 12;
151ebcddc72SAlan Cox 
152d9e23210SJeff Roberson static int vm_pageout_update_period;
1534a365329SAndrey Zonov static int disable_swap_pageouts;
154c9612b2dSJeff Roberson static int lowmem_period = 10;
155b1fd102eSMark Johnston static int swapdev_enabled;
15670111b90SJohn Dyson 
1578311a2b8SWill Andrews static int vm_panic_on_oom = 0;
1588311a2b8SWill Andrews 
1598311a2b8SWill Andrews SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
1608311a2b8SWill Andrews 	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
1618311a2b8SWill Andrews 	"panic on out of memory instead of killing the largest process");
1628311a2b8SWill Andrews 
163d9e23210SJeff Roberson SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
164e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
165d9e23210SJeff Roberson 	"Maximum active LRU update period");
16653636869SAndrey Zonov 
167e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
168c9612b2dSJeff Roberson 	"Low memory callback period");
169c9612b2dSJeff Roberson 
170ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
171e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
17212ac6a1dSJohn Dyson 
17323b59018SMatthew Dillon static int pageout_lock_miss;
17423b59018SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
17523b59018SMatthew Dillon 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
17623b59018SMatthew Dillon 
17776386c7eSKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
178e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
17976386c7eSKonstantin Belousov 	"number of back-to-back calls to the OOM detector needed to start OOM");
18076386c7eSKonstantin Belousov 
181ebcddc72SAlan Cox static int act_scan_laundry_weight = 3;
182e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
183ebcddc72SAlan Cox     &act_scan_laundry_weight, 0,
184ebcddc72SAlan Cox     "weight given to clean vs. dirty pages in active queue scans");
185ebcddc72SAlan Cox 
186ebcddc72SAlan Cox static u_int vm_background_launder_rate = 4096;
187e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
188ebcddc72SAlan Cox     &vm_background_launder_rate, 0,
189ebcddc72SAlan Cox     "background laundering rate, in kilobytes per second");
190ebcddc72SAlan Cox 
191ebcddc72SAlan Cox static u_int vm_background_launder_max = 20 * 1024;
192e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
193ebcddc72SAlan Cox     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
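/*
 * Example: because these knobs are CTLFLAG_RWTUN, they can be set as loader
 * tunables or adjusted at run time, e.g. (illustrative values only):
 *
 *	# sysctl vm.background_launder_rate=8192
 *	# sysctl vm.background_launder_max=40960
 *
 * Both values are interpreted in kilobytes, per the descriptions above.
 */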
194ebcddc72SAlan Cox 
195e2241590SAlan Cox int vm_pageout_page_count = 32;
196df8bae1dSRodney W. Grimes 
197c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
1985dfc2870SAlan Cox SYSCTL_INT(_vm, OID_AUTO, max_wired,
1995dfc2870SAlan Cox 	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
200df8bae1dSRodney W. Grimes 
201ebcddc72SAlan Cox static u_int isqrt(u_int num);
202ebcddc72SAlan Cox static int vm_pageout_launder(struct vm_domain *vmd, int launder,
203ebcddc72SAlan Cox     bool in_shortfall);
204ebcddc72SAlan Cox static void vm_pageout_laundry_worker(void *arg);
205cd41fc12SDavid Greenman 
2065cd29d0fSMark Johnston struct scan_state {
2075cd29d0fSMark Johnston 	struct vm_batchqueue bq;
2088d220203SAlan Cox 	struct vm_pagequeue *pq;
2095cd29d0fSMark Johnston 	vm_page_t	marker;
2105cd29d0fSMark Johnston 	int		maxscan;
2115cd29d0fSMark Johnston 	int		scanned;
2125cd29d0fSMark Johnston };
2138dbca793STor Egge 
2145cd29d0fSMark Johnston static void
2155cd29d0fSMark Johnston vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
2165cd29d0fSMark Johnston     vm_page_t marker, vm_page_t after, int maxscan)
2175cd29d0fSMark Johnston {
2188dbca793STor Egge 
2195cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
2205cd29d0fSMark Johnston 	KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
2215cd29d0fSMark Johnston 	    ("marker %p already enqueued", marker));
2225cd29d0fSMark Johnston 
2235cd29d0fSMark Johnston 	if (after == NULL)
2245cd29d0fSMark Johnston 		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
2255cd29d0fSMark Johnston 	else
2265cd29d0fSMark Johnston 		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
2275cd29d0fSMark Johnston 	vm_page_aflag_set(marker, PGA_ENQUEUED);
2285cd29d0fSMark Johnston 
2295cd29d0fSMark Johnston 	vm_batchqueue_init(&ss->bq);
2305cd29d0fSMark Johnston 	ss->pq = pq;
2315cd29d0fSMark Johnston 	ss->marker = marker;
2325cd29d0fSMark Johnston 	ss->maxscan = maxscan;
2335cd29d0fSMark Johnston 	ss->scanned = 0;
2348d220203SAlan Cox 	vm_pagequeue_unlock(pq);
2355cd29d0fSMark Johnston }
2368dbca793STor Egge 
2375cd29d0fSMark Johnston static void
2385cd29d0fSMark Johnston vm_pageout_end_scan(struct scan_state *ss)
2395cd29d0fSMark Johnston {
2405cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
2415cd29d0fSMark Johnston 
2425cd29d0fSMark Johnston 	pq = ss->pq;
2435cd29d0fSMark Johnston 	vm_pagequeue_assert_locked(pq);
2445cd29d0fSMark Johnston 	KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
2455cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2465cd29d0fSMark Johnston 
2475cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
2485cd29d0fSMark Johnston 	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
249*899fe184SMark Johnston 	pq->pq_pdpages += ss->scanned;
2508dbca793STor Egge }
2518dbca793STor Egge 
2528dbca793STor Egge /*
2535cd29d0fSMark Johnston  * Add a small number of queued pages to a batch queue for later processing
2545cd29d0fSMark Johnston  * without the corresponding queue lock held.  The caller must have enqueued a
2555cd29d0fSMark Johnston  * marker page at the desired start point for the scan.  Pages will be
2565cd29d0fSMark Johnston  * physically dequeued if the caller so requests.  Otherwise, the returned
2575cd29d0fSMark Johnston  * batch may contain marker pages, and it is up to the caller to handle them.
2585cd29d0fSMark Johnston  *
25936f8fe9bSMark Johnston  * When processing the batch queue, vm_page_queue() must be used to
26036f8fe9bSMark Johnston  * determine whether the page has been logically dequeued by another thread.
26136f8fe9bSMark Johnston  * Once this check is performed, the page lock guarantees that the page will
26236f8fe9bSMark Johnston  * not be disassociated from the queue.
2635cd29d0fSMark Johnston  */
2645cd29d0fSMark Johnston static __always_inline void
2655cd29d0fSMark Johnston vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
2665cd29d0fSMark Johnston {
2678d220203SAlan Cox 	struct vm_pagequeue *pq;
2685cd29d0fSMark Johnston 	vm_page_t m, marker;
2698c616246SKonstantin Belousov 
2705cd29d0fSMark Johnston 	marker = ss->marker;
2715cd29d0fSMark Johnston 	pq = ss->pq;
2728c616246SKonstantin Belousov 
2735cd29d0fSMark Johnston 	KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
2745cd29d0fSMark Johnston 	    ("marker %p not enqueued", ss->marker));
2758c616246SKonstantin Belousov 
2768d220203SAlan Cox 	vm_pagequeue_lock(pq);
2775cd29d0fSMark Johnston 	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
2785cd29d0fSMark Johnston 	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
2795cd29d0fSMark Johnston 	    m = TAILQ_NEXT(m, plinks.q), ss->scanned++) {
2805cd29d0fSMark Johnston 		if ((m->flags & PG_MARKER) == 0) {
2815cd29d0fSMark Johnston 			KASSERT((m->aflags & PGA_ENQUEUED) != 0,
2825cd29d0fSMark Johnston 			    ("page %p not enqueued", m));
2835cd29d0fSMark Johnston 			KASSERT((m->flags & PG_FICTITIOUS) == 0,
2845cd29d0fSMark Johnston 			    ("Fictitious page %p cannot be in page queue", m));
2855cd29d0fSMark Johnston 			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2865cd29d0fSMark Johnston 			    ("Unmanaged page %p cannot be in page queue", m));
2875cd29d0fSMark Johnston 		} else if (dequeue)
2885cd29d0fSMark Johnston 			continue;
2898c616246SKonstantin Belousov 
2905cd29d0fSMark Johnston 		(void)vm_batchqueue_insert(&ss->bq, m);
2915cd29d0fSMark Johnston 		if (dequeue) {
2925cd29d0fSMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2935cd29d0fSMark Johnston 			vm_page_aflag_clear(m, PGA_ENQUEUED);
2945cd29d0fSMark Johnston 		}
2955cd29d0fSMark Johnston 	}
2965cd29d0fSMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
2975cd29d0fSMark Johnston 	if (__predict_true(m != NULL))
2985cd29d0fSMark Johnston 		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
2995cd29d0fSMark Johnston 	else
3005cd29d0fSMark Johnston 		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
3015cd29d0fSMark Johnston 	if (dequeue)
3025cd29d0fSMark Johnston 		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
3035cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
3045cd29d0fSMark Johnston }
3055cd29d0fSMark Johnston 
3065cd29d0fSMark Johnston /* Return the next page to be scanned, or NULL if the scan is complete. */
3075cd29d0fSMark Johnston static __always_inline vm_page_t
3085cd29d0fSMark Johnston vm_pageout_next(struct scan_state *ss, const bool dequeue)
3095cd29d0fSMark Johnston {
3105cd29d0fSMark Johnston 
3115cd29d0fSMark Johnston 	if (ss->bq.bq_cnt == 0)
3125cd29d0fSMark Johnston 		vm_pageout_collect_batch(ss, dequeue);
3135cd29d0fSMark Johnston 	return (vm_batchqueue_pop(&ss->bq));
3148c616246SKonstantin Belousov }
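/*
 * Illustrative sketch, not part of the kernel build: the intended usage
 * pattern for the scan helpers above, modeled on vm_pageout_launder() below.
 * The queue lock is held across vm_pageout_init_scan(), which drops it;
 * batches are then collected with the lock held only transiently, and the
 * marker is detached under the lock at the end.
 */
#if 0
static void
example_scan(struct vm_domain *vmd, int queue)
{
	struct scan_state ss;
	struct vm_pagequeue *pq;
	vm_page_t m, marker;

	marker = &vmd->vmd_markers[queue];
	pq = &vmd->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
	while ((m = vm_pageout_next(&ss, false)) != NULL) {
		/* Without dequeuing, the batch may contain markers. */
		if (__predict_false((m->flags & PG_MARKER) != 0))
			continue;
		/* Examine or requeue "m" here. */
	}
	vm_pagequeue_lock(pq);
	vm_pageout_end_scan(&ss);
	vm_pagequeue_unlock(pq);
}
#endif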
3158c616246SKonstantin Belousov 
3168c616246SKonstantin Belousov /*
317248fe642SAlan Cox  * Scan for pages at adjacent offsets within the given page's object that are
318248fe642SAlan Cox  * eligible for laundering, form a cluster of these pages and the given page,
319248fe642SAlan Cox  * and launder that cluster.
32026f9a767SRodney W. Grimes  */
3213af76890SPoul-Henning Kamp static int
32234d8b7eaSJeff Roberson vm_pageout_cluster(vm_page_t m)
32324a1cce3SDavid Greenman {
32454d92145SMatthew Dillon 	vm_object_t object;
325248fe642SAlan Cox 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
326248fe642SAlan Cox 	vm_pindex_t pindex;
327248fe642SAlan Cox 	int ib, is, page_base, pageout_count;
32826f9a767SRodney W. Grimes 
329248fe642SAlan Cox 	vm_page_assert_locked(m);
33017f6a17bSAlan Cox 	object = m->object;
33189f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
332248fe642SAlan Cox 	pindex = m->pindex;
3330cddd8f0SMatthew Dillon 
334c7aebda8SAttilio Rao 	vm_page_assert_unbusied(m);
3351d3a1bcfSMark Johnston 	KASSERT(!vm_page_held(m), ("page %p is held", m));
336aed9aaaaSMark Johnston 
337aed9aaaaSMark Johnston 	pmap_remove_write(m);
33817f6a17bSAlan Cox 	vm_page_unlock(m);
3390d94caffSDavid Greenman 
34091b4f427SAlan Cox 	mc[vm_pageout_page_count] = pb = ps = m;
34126f9a767SRodney W. Grimes 	pageout_count = 1;
342f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
34390ecac61SMatthew Dillon 	ib = 1;
34490ecac61SMatthew Dillon 	is = 1;
34590ecac61SMatthew Dillon 
34624a1cce3SDavid Greenman 	/*
347248fe642SAlan Cox 	 * We can cluster only if the page is dirty, is neither busy nor
348ebcddc72SAlan Cox 	 * held, and is in the laundry queue.
34990ecac61SMatthew Dillon 	 *
35090ecac61SMatthew Dillon 	 * During heavy mmap/modification loads the pageout
35190ecac61SMatthew Dillon 	 * daemon can really fragment the underlying file
352248fe642SAlan Cox 	 * due to flushing pages out of order and not trying to
353248fe642SAlan Cox 	 * align the clusters (which leaves sporadic out-of-order
35490ecac61SMatthew Dillon 	 * holes).  To solve this problem we do the reverse scan
35590ecac61SMatthew Dillon 	 * first and attempt to align our cluster, then do a
35690ecac61SMatthew Dillon 	 * forward scan if room remains.
35724a1cce3SDavid Greenman 	 */
35890ecac61SMatthew Dillon more:
359248fe642SAlan Cox 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
36090ecac61SMatthew Dillon 		if (ib > pindex) {
36190ecac61SMatthew Dillon 			ib = 0;
36290ecac61SMatthew Dillon 			break;
363f6b04d2bSDavid Greenman 		}
364c7aebda8SAttilio Rao 		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
36590ecac61SMatthew Dillon 			ib = 0;
36690ecac61SMatthew Dillon 			break;
367f6b04d2bSDavid Greenman 		}
36824a1cce3SDavid Greenman 		vm_page_test_dirty(p);
3691b5c869dSMark Johnston 		if (p->dirty == 0) {
370eb5d3969SAlan Cox 			ib = 0;
371eb5d3969SAlan Cox 			break;
372eb5d3969SAlan Cox 		}
373eb5d3969SAlan Cox 		vm_page_lock(p);
3741b5c869dSMark Johnston 		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
3752965a453SKip Macy 			vm_page_unlock(p);
37690ecac61SMatthew Dillon 			ib = 0;
37724a1cce3SDavid Greenman 			break;
378f6b04d2bSDavid Greenman 		}
379aed9aaaaSMark Johnston 		pmap_remove_write(p);
3802965a453SKip Macy 		vm_page_unlock(p);
38191b4f427SAlan Cox 		mc[--page_base] = pb = p;
38290ecac61SMatthew Dillon 		++pageout_count;
38390ecac61SMatthew Dillon 		++ib;
384248fe642SAlan Cox 
38524a1cce3SDavid Greenman 		/*
386248fe642SAlan Cox 		 * We are at an alignment boundary.  Stop here, and switch
387248fe642SAlan Cox 		 * directions.  Do not clear ib.
38824a1cce3SDavid Greenman 		 */
38990ecac61SMatthew Dillon 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
39090ecac61SMatthew Dillon 			break;
39124a1cce3SDavid Greenman 	}
39290ecac61SMatthew Dillon 	while (pageout_count < vm_pageout_page_count &&
39390ecac61SMatthew Dillon 	    pindex + is < object->size) {
394c7aebda8SAttilio Rao 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
39590ecac61SMatthew Dillon 			break;
39624a1cce3SDavid Greenman 		vm_page_test_dirty(p);
3971b5c869dSMark Johnston 		if (p->dirty == 0)
398eb5d3969SAlan Cox 			break;
399eb5d3969SAlan Cox 		vm_page_lock(p);
4001b5c869dSMark Johnston 		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
4012965a453SKip Macy 			vm_page_unlock(p);
40224a1cce3SDavid Greenman 			break;
40324a1cce3SDavid Greenman 		}
404aed9aaaaSMark Johnston 		pmap_remove_write(p);
4052965a453SKip Macy 		vm_page_unlock(p);
40691b4f427SAlan Cox 		mc[page_base + pageout_count] = ps = p;
40790ecac61SMatthew Dillon 		++pageout_count;
40890ecac61SMatthew Dillon 		++is;
40924a1cce3SDavid Greenman 	}
41090ecac61SMatthew Dillon 
41190ecac61SMatthew Dillon 	/*
41290ecac61SMatthew Dillon 	 * If we exhausted our forward scan, continue with the reverse scan
413248fe642SAlan Cox 	 * when possible, even past an alignment boundary.  This catches
414248fe642SAlan Cox 	 * boundary conditions.
41590ecac61SMatthew Dillon 	 */
416248fe642SAlan Cox 	if (ib != 0 && pageout_count < vm_pageout_page_count)
41790ecac61SMatthew Dillon 		goto more;
418f6b04d2bSDavid Greenman 
41999e6e193SMark Johnston 	return (vm_pageout_flush(&mc[page_base], pageout_count,
42099e6e193SMark Johnston 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
421aef922f5SJohn Dyson }
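/*
 * Worked example of the scan above, assuming the default
 * vm_pageout_page_count of 32: for a dirty page at pindex 70, the backward
 * scan can collect pages 69 down through 64 and then stops, because
 * (70 - (ib - 1)) is then a multiple of 32; the forward scan may extend the
 * cluster from pindex 71 onward until it is full, a clean, busy, or held
 * page is found, or the object ends.  If room remains afterward, the
 * backward scan resumes past the alignment boundary.
 */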
422aef922f5SJohn Dyson 
4231c7c3c6aSMatthew Dillon /*
4241c7c3c6aSMatthew Dillon  * vm_pageout_flush() - launder the given pages
4251c7c3c6aSMatthew Dillon  *
4261c7c3c6aSMatthew Dillon  *	The given pages are laundered.  Note that we set up for the start of
4271c7c3c6aSMatthew Dillon  *	I/O (i.e., busy the page), mark it read-only, and bump the object
4281c7c3c6aSMatthew Dillon  *	reference count all in here rather than in the parent.  If we want
4291c7c3c6aSMatthew Dillon  *	the parent to do more sophisticated things we may have to change
4301c7c3c6aSMatthew Dillon  *	the ordering.
4311e8a675cSKonstantin Belousov  *
4321e8a675cSKonstantin Belousov  *	The returned runlen is the count of pages between mreq and the
4331e8a675cSKonstantin Belousov  *	first page after mreq with status VM_PAGER_AGAIN.
434126d6082SKonstantin Belousov  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
435126d6082SKonstantin Belousov  *	VM_PAGER_FAIL for any page in that run.
4361c7c3c6aSMatthew Dillon  */
437aef922f5SJohn Dyson int
438126d6082SKonstantin Belousov vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
439126d6082SKonstantin Belousov     boolean_t *eio)
440aef922f5SJohn Dyson {
4412e3b314dSAlan Cox 	vm_object_t object = mc[0]->object;
442aef922f5SJohn Dyson 	int pageout_status[count];
44395461b45SJohn Dyson 	int numpagedout = 0;
4441e8a675cSKonstantin Belousov 	int i, runlen;
445aef922f5SJohn Dyson 
44689f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
4477bec141bSKip Macy 
4481c7c3c6aSMatthew Dillon 	/*
449aed9aaaaSMark Johnston 	 * Initiate I/O.  Mark the pages busy and verify that they're valid
450aed9aaaaSMark Johnston 	 * and read-only.
4511c7c3c6aSMatthew Dillon 	 *
4521c7c3c6aSMatthew Dillon 	 * We do not have to fixup the clean/dirty bits here... we can
4531c7c3c6aSMatthew Dillon 	 * allow the pager to do it after the I/O completes.
45402fa91d3SMatthew Dillon 	 *
45502fa91d3SMatthew Dillon 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
45602fa91d3SMatthew Dillon 	 * edge case with file fragments.
4571c7c3c6aSMatthew Dillon 	 */
4588f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
4597a935082SAlan Cox 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
4607a935082SAlan Cox 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
4617a935082SAlan Cox 			mc[i], i, count));
462aed9aaaaSMark Johnston 		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
463aed9aaaaSMark Johnston 		    ("vm_pageout_flush: writeable page %p", mc[i]));
464c7aebda8SAttilio Rao 		vm_page_sbusy(mc[i]);
4652965a453SKip Macy 	}
466d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
467aef922f5SJohn Dyson 
468d076fbeaSAlan Cox 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
46926f9a767SRodney W. Grimes 
4701e8a675cSKonstantin Belousov 	runlen = count - mreq;
471126d6082SKonstantin Belousov 	if (eio != NULL)
472126d6082SKonstantin Belousov 		*eio = FALSE;
473aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
474aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
47524a1cce3SDavid Greenman 
4764cd45723SAlan Cox 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
4776031c68dSAlan Cox 		    !pmap_page_is_write_mapped(mt),
4789ea8d1a6SAlan Cox 		    ("vm_pageout_flush: page %p is not write protected", mt));
47926f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
48026f9a767SRodney W. Grimes 		case VM_PAGER_OK:
481ebcddc72SAlan Cox 			vm_page_lock(mt);
482ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
483ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
484ebcddc72SAlan Cox 			vm_page_unlock(mt);
485ebcddc72SAlan Cox 			/* FALLTHROUGH */
48626f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
48795461b45SJohn Dyson 			numpagedout++;
48826f9a767SRodney W. Grimes 			break;
48926f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
49026f9a767SRodney W. Grimes 			/*
491ebcddc72SAlan Cox 			 * The page is outside the object's range.  We pretend
492ebcddc72SAlan Cox 			 * that the page out worked and clean the page, so the
493ebcddc72SAlan Cox 			 * changes will be lost if the page is reclaimed by
494ebcddc72SAlan Cox 			 * the page daemon.
49526f9a767SRodney W. Grimes 			 */
49690ecac61SMatthew Dillon 			vm_page_undirty(mt);
497ebcddc72SAlan Cox 			vm_page_lock(mt);
498ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
499ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
500ebcddc72SAlan Cox 			vm_page_unlock(mt);
50126f9a767SRodney W. Grimes 			break;
50226f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
50326f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
50426f9a767SRodney W. Grimes 			/*
505b1fd102eSMark Johnston 			 * If the page couldn't be paged out to swap because the
506b1fd102eSMark Johnston 			 * pager wasn't able to find space, place the page in
507b1fd102eSMark Johnston 			 * the PQ_UNSWAPPABLE holding queue.  This is an
508b1fd102eSMark Johnston 			 * optimization that prevents the page daemon from
509b1fd102eSMark Johnston 			 * wasting CPU cycles on pages that cannot be reclaimed
510b1fd102eSMark Johnston 			 * because no swap device is configured.
511b1fd102eSMark Johnston 			 *
512b1fd102eSMark Johnston 			 * Otherwise, reactivate the page so that it doesn't
513b1fd102eSMark Johnston 			 * clog the laundry and inactive queues.  (We will try
514b1fd102eSMark Johnston 			 * paging it out again later.)
51526f9a767SRodney W. Grimes 			 */
5163c4a2440SAlan Cox 			vm_page_lock(mt);
517b1fd102eSMark Johnston 			if (object->type == OBJT_SWAP &&
518b1fd102eSMark Johnston 			    pageout_status[i] == VM_PAGER_FAIL) {
519b1fd102eSMark Johnston 				vm_page_unswappable(mt);
520b1fd102eSMark Johnston 				numpagedout++;
521b1fd102eSMark Johnston 			} else
52224a1cce3SDavid Greenman 				vm_page_activate(mt);
5233c4a2440SAlan Cox 			vm_page_unlock(mt);
524126d6082SKonstantin Belousov 			if (eio != NULL && i >= mreq && i - mreq < runlen)
525126d6082SKonstantin Belousov 				*eio = TRUE;
52626f9a767SRodney W. Grimes 			break;
52726f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
5281e8a675cSKonstantin Belousov 			if (i >= mreq && i - mreq < runlen)
5291e8a675cSKonstantin Belousov 				runlen = i - mreq;
53026f9a767SRodney W. Grimes 			break;
53126f9a767SRodney W. Grimes 		}
53226f9a767SRodney W. Grimes 
53326f9a767SRodney W. Grimes 		/*
5340d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
5350d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
5360d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
5370d94caffSDavid Greenman 		 * collapse.
53826f9a767SRodney W. Grimes 		 */
53926f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
540f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
541c7aebda8SAttilio Rao 			vm_page_sunbusy(mt);
5423c4a2440SAlan Cox 		}
5433c4a2440SAlan Cox 	}
5441e8a675cSKonstantin Belousov 	if (prunlen != NULL)
5451e8a675cSKonstantin Belousov 		*prunlen = runlen;
5463c4a2440SAlan Cox 	return (numpagedout);
54726f9a767SRodney W. Grimes }
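/*
 * Illustrative sketch, not part of the kernel build: how a caller that
 * flushes a cluster centered on index "mreq" might interpret the runlen and
 * eio outputs (vm_object_page_collect_flush() does something similar).
 */
#if 0
	int runlen;
	boolean_t eio;

	(void)vm_pageout_flush(mc, count, VM_PAGER_PUT_SYNC, mreq, &runlen,
	    &eio);
	if (eio) {
		/* The pager reported a hard error within the run. */
	} else if (runlen < count - mreq) {
		/* Pages from mc[mreq + runlen] onward returned AGAIN. */
	}
#endif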
54826f9a767SRodney W. Grimes 
549b1fd102eSMark Johnston static void
550b1fd102eSMark Johnston vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
551b1fd102eSMark Johnston {
552b1fd102eSMark Johnston 
553b1fd102eSMark Johnston 	atomic_store_rel_int(&swapdev_enabled, 1);
554b1fd102eSMark Johnston }
555b1fd102eSMark Johnston 
556b1fd102eSMark Johnston static void
557b1fd102eSMark Johnston vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
558b1fd102eSMark Johnston {
559b1fd102eSMark Johnston 
560b1fd102eSMark Johnston 	if (swap_pager_nswapdev() == 1)
561b1fd102eSMark Johnston 		atomic_store_rel_int(&swapdev_enabled, 0);
562b1fd102eSMark Johnston }
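/*
 * The release stores above pair with the atomic_load_acq_int() of
 * swapdev_enabled in vm_pageout_launder(), which uses the flag to decide
 * whether scanning PQ_UNSWAPPABLE is worthwhile.
 */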
563b1fd102eSMark Johnston 
5641c7c3c6aSMatthew Dillon /*
56534d8b7eaSJeff Roberson  * Attempt to acquire all of the necessary locks to launder a page and
56634d8b7eaSJeff Roberson  * then call through the clustering layer to PUTPAGES.  Wait a short
56734d8b7eaSJeff Roberson  * time for a vnode lock.
56834d8b7eaSJeff Roberson  *
56934d8b7eaSJeff Roberson  * Requires the page and object lock on entry, releases both before return.
57034d8b7eaSJeff Roberson  * Returns 0 on success and an errno otherwise.
57134d8b7eaSJeff Roberson  */
57234d8b7eaSJeff Roberson static int
573ebcddc72SAlan Cox vm_pageout_clean(vm_page_t m, int *numpagedout)
57434d8b7eaSJeff Roberson {
57534d8b7eaSJeff Roberson 	struct vnode *vp;
57634d8b7eaSJeff Roberson 	struct mount *mp;
57734d8b7eaSJeff Roberson 	vm_object_t object;
57834d8b7eaSJeff Roberson 	vm_pindex_t pindex;
57934d8b7eaSJeff Roberson 	int error, lockmode;
58034d8b7eaSJeff Roberson 
58134d8b7eaSJeff Roberson 	vm_page_assert_locked(m);
58234d8b7eaSJeff Roberson 	object = m->object;
58334d8b7eaSJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
58434d8b7eaSJeff Roberson 	error = 0;
58534d8b7eaSJeff Roberson 	vp = NULL;
58634d8b7eaSJeff Roberson 	mp = NULL;
58734d8b7eaSJeff Roberson 
58834d8b7eaSJeff Roberson 	/*
58934d8b7eaSJeff Roberson 	 * The object is already known NOT to be dead.   It
59034d8b7eaSJeff Roberson 	 * is possible for the vget() to block the whole
59134d8b7eaSJeff Roberson 	 * pageout daemon, but the new low-memory handling
59234d8b7eaSJeff Roberson 	 * code should prevent it.
59334d8b7eaSJeff Roberson 	 *
59434d8b7eaSJeff Roberson 	 * We can't wait forever for the vnode lock, we might
59534d8b7eaSJeff Roberson 	 * deadlock due to a vn_read() getting stuck in
59634d8b7eaSJeff Roberson 	 * vm_wait while holding this vnode.  We skip the
59734d8b7eaSJeff Roberson 	 * vnode if we can't get it in a reasonable amount
59834d8b7eaSJeff Roberson 	 * of time.
59934d8b7eaSJeff Roberson 	 */
60034d8b7eaSJeff Roberson 	if (object->type == OBJT_VNODE) {
60134d8b7eaSJeff Roberson 		vm_page_unlock(m);
60234d8b7eaSJeff Roberson 		vp = object->handle;
60334d8b7eaSJeff Roberson 		if (vp->v_type == VREG &&
60434d8b7eaSJeff Roberson 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
60534d8b7eaSJeff Roberson 			mp = NULL;
60634d8b7eaSJeff Roberson 			error = EDEADLK;
60734d8b7eaSJeff Roberson 			goto unlock_all;
60834d8b7eaSJeff Roberson 		}
60934d8b7eaSJeff Roberson 		KASSERT(mp != NULL,
61034d8b7eaSJeff Roberson 		    ("vp %p with NULL v_mount", vp));
61134d8b7eaSJeff Roberson 		vm_object_reference_locked(object);
61234d8b7eaSJeff Roberson 		pindex = m->pindex;
61334d8b7eaSJeff Roberson 		VM_OBJECT_WUNLOCK(object);
61434d8b7eaSJeff Roberson 		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
61534d8b7eaSJeff Roberson 		    LK_SHARED : LK_EXCLUSIVE;
61634d8b7eaSJeff Roberson 		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
61734d8b7eaSJeff Roberson 			vp = NULL;
61834d8b7eaSJeff Roberson 			error = EDEADLK;
61934d8b7eaSJeff Roberson 			goto unlock_mp;
62034d8b7eaSJeff Roberson 		}
62134d8b7eaSJeff Roberson 		VM_OBJECT_WLOCK(object);
62257cd81a3SMark Johnston 
62357cd81a3SMark Johnston 		/*
62457cd81a3SMark Johnston 		 * Ensure that the object and vnode were not disassociated
62557cd81a3SMark Johnston 		 * while locks were dropped.
62657cd81a3SMark Johnston 		 */
62757cd81a3SMark Johnston 		if (vp->v_object != object) {
62857cd81a3SMark Johnston 			error = ENOENT;
62957cd81a3SMark Johnston 			goto unlock_all;
63057cd81a3SMark Johnston 		}
63134d8b7eaSJeff Roberson 		vm_page_lock(m);
63257cd81a3SMark Johnston 
63334d8b7eaSJeff Roberson 		/*
63434d8b7eaSJeff Roberson 		 * While the object and page were unlocked, the page
63534d8b7eaSJeff Roberson 		 * may have been:
63634d8b7eaSJeff Roberson 		 * (1) moved to a different queue,
63734d8b7eaSJeff Roberson 		 * (2) reallocated to a different object,
63834d8b7eaSJeff Roberson 		 * (3) reallocated to a different offset, or
63934d8b7eaSJeff Roberson 		 * (4) cleaned.
64034d8b7eaSJeff Roberson 		 */
641ebcddc72SAlan Cox 		if (!vm_page_in_laundry(m) || m->object != object ||
64234d8b7eaSJeff Roberson 		    m->pindex != pindex || m->dirty == 0) {
64334d8b7eaSJeff Roberson 			vm_page_unlock(m);
64434d8b7eaSJeff Roberson 			error = ENXIO;
64534d8b7eaSJeff Roberson 			goto unlock_all;
64634d8b7eaSJeff Roberson 		}
64734d8b7eaSJeff Roberson 
64834d8b7eaSJeff Roberson 		/*
6491d3a1bcfSMark Johnston 		 * The page may have been busied or referenced while the object
65034d8b7eaSJeff Roberson 		 * and page locks were released.
65134d8b7eaSJeff Roberson 		 */
6521d3a1bcfSMark Johnston 		if (vm_page_busied(m) || vm_page_held(m)) {
65334d8b7eaSJeff Roberson 			vm_page_unlock(m);
65434d8b7eaSJeff Roberson 			error = EBUSY;
65534d8b7eaSJeff Roberson 			goto unlock_all;
65634d8b7eaSJeff Roberson 		}
65734d8b7eaSJeff Roberson 	}
65834d8b7eaSJeff Roberson 
65934d8b7eaSJeff Roberson 	/*
66034d8b7eaSJeff Roberson 	 * If a page is dirty, then it is either being washed
66134d8b7eaSJeff Roberson 	 * (but not yet cleaned) or it is still in the
66234d8b7eaSJeff Roberson 	 * laundry.  If it is still in the laundry, then we
66334d8b7eaSJeff Roberson 	 * start the cleaning operation.
66434d8b7eaSJeff Roberson 	 */
665ebcddc72SAlan Cox 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
66634d8b7eaSJeff Roberson 		error = EIO;
66734d8b7eaSJeff Roberson 
66834d8b7eaSJeff Roberson unlock_all:
66934d8b7eaSJeff Roberson 	VM_OBJECT_WUNLOCK(object);
67034d8b7eaSJeff Roberson 
67134d8b7eaSJeff Roberson unlock_mp:
67234d8b7eaSJeff Roberson 	vm_page_lock_assert(m, MA_NOTOWNED);
67334d8b7eaSJeff Roberson 	if (mp != NULL) {
67434d8b7eaSJeff Roberson 		if (vp != NULL)
67534d8b7eaSJeff Roberson 			vput(vp);
67634d8b7eaSJeff Roberson 		vm_object_deallocate(object);
67734d8b7eaSJeff Roberson 		vn_finished_write(mp);
67834d8b7eaSJeff Roberson 	}
67934d8b7eaSJeff Roberson 
68034d8b7eaSJeff Roberson 	return (error);
68134d8b7eaSJeff Roberson }
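/*
 * Summary of the error returns above: 0 means a cluster was started;
 * EDEADLK means the vnode lock or mount point could not be acquired in time
 * (counted as a pageout lock miss by the caller); ENOENT and ENXIO mean the
 * object or page changed identity while the locks were dropped; EBUSY means
 * the page was busied or held in the interim; and EIO means clustering
 * found nothing to page out.
 */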
68234d8b7eaSJeff Roberson 
68334d8b7eaSJeff Roberson /*
684ebcddc72SAlan Cox  * Attempt to launder the specified number of pages.
685ebcddc72SAlan Cox  *
686ebcddc72SAlan Cox  * Returns the number of pages successfully laundered.
687ebcddc72SAlan Cox  */
688ebcddc72SAlan Cox static int
689ebcddc72SAlan Cox vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
690ebcddc72SAlan Cox {
6915cd29d0fSMark Johnston 	struct scan_state ss;
692ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
6935cd29d0fSMark Johnston 	struct mtx *mtx;
694ebcddc72SAlan Cox 	vm_object_t object;
6955cd29d0fSMark Johnston 	vm_page_t m, marker;
6965cd29d0fSMark Johnston 	int act_delta, error, numpagedout, queue, starting_target;
697ebcddc72SAlan Cox 	int vnodes_skipped;
6985cd29d0fSMark Johnston 	bool obj_locked, pageout_ok;
699ebcddc72SAlan Cox 
7005cd29d0fSMark Johnston 	mtx = NULL;
7015cd29d0fSMark Johnston 	obj_locked = false;
7025cd29d0fSMark Johnston 	object = NULL;
703ebcddc72SAlan Cox 	starting_target = launder;
704ebcddc72SAlan Cox 	vnodes_skipped = 0;
705ebcddc72SAlan Cox 
706ebcddc72SAlan Cox 	/*
707b1fd102eSMark Johnston 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
708ebcddc72SAlan Cox 	 * once the target number of dirty pages have been laundered, or once
709ebcddc72SAlan Cox 	 * we've reached the end of the queue.  A single iteration of this loop
710ebcddc72SAlan Cox 	 * may cause more than one page to be laundered because of clustering.
711ebcddc72SAlan Cox 	 *
712b1fd102eSMark Johnston 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
713b1fd102eSMark Johnston 	 * swap devices are configured.
714ebcddc72SAlan Cox 	 */
715b1fd102eSMark Johnston 	if (atomic_load_acq_int(&swapdev_enabled))
71664b38930SMark Johnston 		queue = PQ_UNSWAPPABLE;
717b1fd102eSMark Johnston 	else
71864b38930SMark Johnston 		queue = PQ_LAUNDRY;
719ebcddc72SAlan Cox 
720b1fd102eSMark Johnston scan:
72164b38930SMark Johnston 	marker = &vmd->vmd_markers[queue];
7225cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[queue];
723ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
7245cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
7255cd29d0fSMark Johnston 	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
7265cd29d0fSMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
727ebcddc72SAlan Cox 			continue;
7285cd29d0fSMark Johnston 
7295cd29d0fSMark Johnston 		vm_page_change_lock(m, &mtx);
7305cd29d0fSMark Johnston 
7315cd29d0fSMark Johnston recheck:
7325cd29d0fSMark Johnston 		/*
7335cd29d0fSMark Johnston 		 * The page may have been disassociated from the queue
7345cd29d0fSMark Johnston 		 * while locks were dropped.
7355cd29d0fSMark Johnston 		 */
73636f8fe9bSMark Johnston 		if (vm_page_queue(m) != queue)
737ebcddc72SAlan Cox 			continue;
7385cd29d0fSMark Johnston 
7395cd29d0fSMark Johnston 		/*
7405cd29d0fSMark Johnston 		 * A requeue was requested, so this page gets a second
7415cd29d0fSMark Johnston 		 * chance.
7425cd29d0fSMark Johnston 		 */
7435cd29d0fSMark Johnston 		if ((m->aflags & PGA_REQUEUE) != 0) {
7445cd29d0fSMark Johnston 			vm_page_requeue(m);
745ebcddc72SAlan Cox 			continue;
746ebcddc72SAlan Cox 		}
747ebcddc72SAlan Cox 
748ebcddc72SAlan Cox 		/*
7495cd29d0fSMark Johnston 		 * Held pages are essentially stuck in the queue.
7505cd29d0fSMark Johnston 		 *
7515cd29d0fSMark Johnston 		 * Wired pages may not be freed.  Complete their removal
7525cd29d0fSMark Johnston 		 * from the queue now to avoid needless revisits during
7535cd29d0fSMark Johnston 		 * future scans.
754ebcddc72SAlan Cox 		 */
7555cd29d0fSMark Johnston 		if (m->hold_count != 0)
7565cd29d0fSMark Johnston 			continue;
7575cd29d0fSMark Johnston 		if (m->wire_count != 0) {
7585cd29d0fSMark Johnston 			vm_page_dequeue_deferred(m);
7595cd29d0fSMark Johnston 			continue;
7605cd29d0fSMark Johnston 		}
7615cd29d0fSMark Johnston 
7625cd29d0fSMark Johnston 		if (object != m->object) {
7635cd29d0fSMark Johnston 			if (obj_locked) {
7645cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
7655cd29d0fSMark Johnston 				obj_locked = false;
7665cd29d0fSMark Johnston 			}
7675cd29d0fSMark Johnston 			object = m->object;
7685cd29d0fSMark Johnston 		}
7695cd29d0fSMark Johnston 		if (!obj_locked) {
7705cd29d0fSMark Johnston 			if (!VM_OBJECT_TRYWLOCK(object)) {
7715cd29d0fSMark Johnston 				mtx_unlock(mtx);
7725cd29d0fSMark Johnston 				/* Depends on type-stability. */
7735cd29d0fSMark Johnston 				VM_OBJECT_WLOCK(object);
7745cd29d0fSMark Johnston 				obj_locked = true;
7755cd29d0fSMark Johnston 				mtx_lock(mtx);
7765cd29d0fSMark Johnston 				goto recheck;
7775cd29d0fSMark Johnston 			} else
7785cd29d0fSMark Johnston 				obj_locked = true;
7795cd29d0fSMark Johnston 		}
7805cd29d0fSMark Johnston 
7815cd29d0fSMark Johnston 		if (vm_page_busied(m))
7825cd29d0fSMark Johnston 			continue;
783ebcddc72SAlan Cox 
784ebcddc72SAlan Cox 		/*
785ebcddc72SAlan Cox 		 * Invalid pages can be easily freed.  They cannot be
786ebcddc72SAlan Cox 		 * mapped; vm_page_free() asserts this.
787ebcddc72SAlan Cox 		 */
788ebcddc72SAlan Cox 		if (m->valid == 0)
789ebcddc72SAlan Cox 			goto free_page;
790ebcddc72SAlan Cox 
791ebcddc72SAlan Cox 		/*
792ebcddc72SAlan Cox 		 * If the page has been referenced and the object is not dead,
793ebcddc72SAlan Cox 		 * reactivate or requeue the page depending on whether the
794ebcddc72SAlan Cox 		 * object is mapped.
795d7aeb429SAlan Cox 		 *
796d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
797d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
798d7aeb429SAlan Cox 		 * observed here and now.
799ebcddc72SAlan Cox 		 */
800ebcddc72SAlan Cox 		if (object->ref_count != 0)
801d7aeb429SAlan Cox 			act_delta = pmap_ts_referenced(m);
802ebcddc72SAlan Cox 		else {
803ebcddc72SAlan Cox 			KASSERT(!pmap_page_is_mapped(m),
804ebcddc72SAlan Cox 			    ("page %p is mapped", m));
805d7aeb429SAlan Cox 			act_delta = 0;
806d7aeb429SAlan Cox 		}
807d7aeb429SAlan Cox 		if ((m->aflags & PGA_REFERENCED) != 0) {
808d7aeb429SAlan Cox 			vm_page_aflag_clear(m, PGA_REFERENCED);
809d7aeb429SAlan Cox 			act_delta++;
810ebcddc72SAlan Cox 		}
811ebcddc72SAlan Cox 		if (act_delta != 0) {
812ebcddc72SAlan Cox 			if (object->ref_count != 0) {
81383c9dea1SGleb Smirnoff 				VM_CNT_INC(v_reactivated);
814ebcddc72SAlan Cox 				vm_page_activate(m);
815ebcddc72SAlan Cox 
816ebcddc72SAlan Cox 				/*
817ebcddc72SAlan Cox 				 * Increase the activation count if the page
818ebcddc72SAlan Cox 				 * was referenced while in the laundry queue.
819ebcddc72SAlan Cox 				 * This makes it less likely that the page will
820ebcddc72SAlan Cox 				 * be returned prematurely to the inactive
821ebcddc72SAlan Cox 				 * queue.
822ebcddc72SAlan Cox  				 */
823ebcddc72SAlan Cox 				m->act_count += act_delta + ACT_ADVANCE;
824ebcddc72SAlan Cox 
825ebcddc72SAlan Cox 				/*
826ebcddc72SAlan Cox 				 * If this was a background laundering, count
827ebcddc72SAlan Cox 				 * activated pages towards our target.  The
828ebcddc72SAlan Cox 				 * purpose of background laundering is to ensure
829ebcddc72SAlan Cox 				 * that pages are eventually cycled through the
830ebcddc72SAlan Cox 				 * laundry queue, and an activation is a valid
831ebcddc72SAlan Cox 				 * way out.
832ebcddc72SAlan Cox 				 */
833ebcddc72SAlan Cox 				if (!in_shortfall)
834ebcddc72SAlan Cox 					launder--;
8355cd29d0fSMark Johnston 				continue;
8365cd29d0fSMark Johnston 			} else if ((object->flags & OBJ_DEAD) == 0) {
8375cd29d0fSMark Johnston 				vm_page_requeue(m);
8385cd29d0fSMark Johnston 				continue;
8395cd29d0fSMark Johnston 			}
840ebcddc72SAlan Cox 		}
841ebcddc72SAlan Cox 
842ebcddc72SAlan Cox 		/*
843ebcddc72SAlan Cox 		 * If the page appears to be clean at the machine-independent
844ebcddc72SAlan Cox 		 * layer, then remove all of its mappings from the pmap in
845ebcddc72SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
846ebcddc72SAlan Cox 		 * mappings allow write access, then the page may still be
847ebcddc72SAlan Cox 		 * modified until the last of those mappings are removed.
848ebcddc72SAlan Cox 		 */
849ebcddc72SAlan Cox 		if (object->ref_count != 0) {
850ebcddc72SAlan Cox 			vm_page_test_dirty(m);
851ebcddc72SAlan Cox 			if (m->dirty == 0)
852ebcddc72SAlan Cox 				pmap_remove_all(m);
853ebcddc72SAlan Cox 		}
854ebcddc72SAlan Cox 
855ebcddc72SAlan Cox 		/*
856ebcddc72SAlan Cox 		 * Clean pages are freed, and dirty pages are paged out unless
857ebcddc72SAlan Cox 		 * they belong to a dead object.  Requeueing dirty pages from
858ebcddc72SAlan Cox 		 * dead objects is pointless, as they are being paged out and
859ebcddc72SAlan Cox 		 * freed by the thread that destroyed the object.
860ebcddc72SAlan Cox 		 */
861ebcddc72SAlan Cox 		if (m->dirty == 0) {
862ebcddc72SAlan Cox free_page:
863ebcddc72SAlan Cox 			vm_page_free(m);
86483c9dea1SGleb Smirnoff 			VM_CNT_INC(v_dfree);
865ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0) {
866ebcddc72SAlan Cox 			if (object->type != OBJT_SWAP &&
867ebcddc72SAlan Cox 			    object->type != OBJT_DEFAULT)
868ebcddc72SAlan Cox 				pageout_ok = true;
869ebcddc72SAlan Cox 			else if (disable_swap_pageouts)
870ebcddc72SAlan Cox 				pageout_ok = false;
871ebcddc72SAlan Cox 			else
872ebcddc72SAlan Cox 				pageout_ok = true;
873ebcddc72SAlan Cox 			if (!pageout_ok) {
8745cd29d0fSMark Johnston 				vm_page_requeue(m);
8755cd29d0fSMark Johnston 				continue;
876ebcddc72SAlan Cox 			}
877ebcddc72SAlan Cox 
878ebcddc72SAlan Cox 			/*
879ebcddc72SAlan Cox 			 * Form a cluster with adjacent, dirty pages from the
880ebcddc72SAlan Cox 			 * same object, and page out that entire cluster.
881ebcddc72SAlan Cox 			 *
882ebcddc72SAlan Cox 			 * The adjacent, dirty pages must also be in the
883ebcddc72SAlan Cox 			 * laundry.  However, their mappings are not checked
884ebcddc72SAlan Cox 			 * for new references.  Consequently, a recently
885ebcddc72SAlan Cox 			 * referenced page may be paged out.  However, that
886ebcddc72SAlan Cox 			 * page will not be prematurely reclaimed.  After page
887ebcddc72SAlan Cox 			 * out, the page will be placed in the inactive queue,
888ebcddc72SAlan Cox 			 * where any new references will be detected and the
889ebcddc72SAlan Cox 			 * page reactivated.
890ebcddc72SAlan Cox 			 */
891ebcddc72SAlan Cox 			error = vm_pageout_clean(m, &numpagedout);
892ebcddc72SAlan Cox 			if (error == 0) {
893ebcddc72SAlan Cox 				launder -= numpagedout;
8945cd29d0fSMark Johnston 				ss.scanned += numpagedout;
895ebcddc72SAlan Cox 			} else if (error == EDEADLK) {
896ebcddc72SAlan Cox 				pageout_lock_miss++;
897ebcddc72SAlan Cox 				vnodes_skipped++;
898ebcddc72SAlan Cox 			}
8995cd29d0fSMark Johnston 			mtx = NULL;
9005cd29d0fSMark Johnston 			obj_locked = false;
901ebcddc72SAlan Cox 		}
9025cd29d0fSMark Johnston 	}
9035cd29d0fSMark Johnston 	if (mtx != NULL) {
9045cd29d0fSMark Johnston 		mtx_unlock(mtx);
9055cd29d0fSMark Johnston 		mtx = NULL;
9065cd29d0fSMark Johnston 	}
9075cd29d0fSMark Johnston 	if (obj_locked) {
908ebcddc72SAlan Cox 		VM_OBJECT_WUNLOCK(object);
9095cd29d0fSMark Johnston 		obj_locked = false;
9105cd29d0fSMark Johnston 	}
911ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
9125cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
913ebcddc72SAlan Cox 	vm_pagequeue_unlock(pq);
914ebcddc72SAlan Cox 
91564b38930SMark Johnston 	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
91664b38930SMark Johnston 		queue = PQ_LAUNDRY;
917b1fd102eSMark Johnston 		goto scan;
918b1fd102eSMark Johnston 	}
919b1fd102eSMark Johnston 
920ebcddc72SAlan Cox 	/*
921ebcddc72SAlan Cox 	 * Wake up the sync daemon if we skipped a vnode in a writeable object
922ebcddc72SAlan Cox 	 * and we didn't launder enough pages.
923ebcddc72SAlan Cox 	 */
924ebcddc72SAlan Cox 	if (vnodes_skipped > 0 && launder > 0)
925ebcddc72SAlan Cox 		(void)speedup_syncer();
926ebcddc72SAlan Cox 
927ebcddc72SAlan Cox 	return (starting_target - launder);
928ebcddc72SAlan Cox }
929ebcddc72SAlan Cox 
930ebcddc72SAlan Cox /*
931ebcddc72SAlan Cox  * Compute the integer square root.
932ebcddc72SAlan Cox  */
933ebcddc72SAlan Cox static u_int
934ebcddc72SAlan Cox isqrt(u_int num)
935ebcddc72SAlan Cox {
936ebcddc72SAlan Cox 	u_int bit, root, tmp;
937ebcddc72SAlan Cox 
938ebcddc72SAlan Cox 	bit = 1u << ((NBBY * sizeof(u_int)) - 2);
939ebcddc72SAlan Cox 	while (bit > num)
940ebcddc72SAlan Cox 		bit >>= 2;
941ebcddc72SAlan Cox 	root = 0;
942ebcddc72SAlan Cox 	while (bit != 0) {
943ebcddc72SAlan Cox 		tmp = root + bit;
944ebcddc72SAlan Cox 		root >>= 1;
945ebcddc72SAlan Cox 		if (num >= tmp) {
946ebcddc72SAlan Cox 			num -= tmp;
947ebcddc72SAlan Cox 			root += bit;
948ebcddc72SAlan Cox 		}
949ebcddc72SAlan Cox 		bit >>= 2;
950ebcddc72SAlan Cox 	}
951ebcddc72SAlan Cox 	return (root);
952ebcddc72SAlan Cox }
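/*
 * Worked example of the digit-by-digit method above: isqrt(1000).  "bit" is
 * shifted down from 2^30 to 256, the largest power of four not exceeding
 * the argument.  Successive iterations then leave (root, num) at
 * (256, 744), (192, 424), (112, 216), (60, 100), and (31, 39), and the
 * function returns 31, since 31 * 31 = 961 <= 1000 < 1024 = 32 * 32.
 */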
953ebcddc72SAlan Cox 
954ebcddc72SAlan Cox /*
955ebcddc72SAlan Cox  * Perform the work of the laundry thread: periodically wake up and determine
956ebcddc72SAlan Cox  * whether any pages need to be laundered.  If so, determine the number of pages
957ebcddc72SAlan Cox  * that need to be laundered, and launder them.
958ebcddc72SAlan Cox  */
959ebcddc72SAlan Cox static void
960ebcddc72SAlan Cox vm_pageout_laundry_worker(void *arg)
961ebcddc72SAlan Cox {
962e2068d0bSJeff Roberson 	struct vm_domain *vmd;
963ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
96460684862SMark Johnston 	uint64_t nclean, ndirty, nfreed;
965e2068d0bSJeff Roberson 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
966ebcddc72SAlan Cox 	bool in_shortfall;
967ebcddc72SAlan Cox 
968e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
969e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
970e2068d0bSJeff Roberson 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
971e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
972ebcddc72SAlan Cox 
973ebcddc72SAlan Cox 	shortfall = 0;
974ebcddc72SAlan Cox 	in_shortfall = false;
975ebcddc72SAlan Cox 	shortfall_cycle = 0;
976ebcddc72SAlan Cox 	target = 0;
97760684862SMark Johnston 	nfreed = 0;
978ebcddc72SAlan Cox 
979ebcddc72SAlan Cox 	/*
980b1fd102eSMark Johnston 	 * Calls to these handlers are serialized by the swap syscall lock.
981b1fd102eSMark Johnston 	 */
982e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
983b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
984e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
985b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
986b1fd102eSMark Johnston 
987b1fd102eSMark Johnston 	/*
988ebcddc72SAlan Cox 	 * The pageout laundry worker is never done, so loop forever.
989ebcddc72SAlan Cox 	 */
990ebcddc72SAlan Cox 	for (;;) {
991ebcddc72SAlan Cox 		KASSERT(target >= 0, ("negative target %d", target));
992ebcddc72SAlan Cox 		KASSERT(shortfall_cycle >= 0,
993ebcddc72SAlan Cox 		    ("negative cycle %d", shortfall_cycle));
994ebcddc72SAlan Cox 		launder = 0;
995ebcddc72SAlan Cox 
996ebcddc72SAlan Cox 		/*
997ebcddc72SAlan Cox 		 * First determine whether we need to launder pages to meet a
998ebcddc72SAlan Cox 		 * shortage of free pages.
999ebcddc72SAlan Cox 		 */
1000ebcddc72SAlan Cox 		if (shortfall > 0) {
1001ebcddc72SAlan Cox 			in_shortfall = true;
1002ebcddc72SAlan Cox 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1003ebcddc72SAlan Cox 			target = shortfall;
1004ebcddc72SAlan Cox 		} else if (!in_shortfall)
1005ebcddc72SAlan Cox 			goto trybackground;
1006e2068d0bSJeff Roberson 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1007ebcddc72SAlan Cox 			/*
1008ebcddc72SAlan Cox 			 * We recently entered shortfall and began laundering
1009ebcddc72SAlan Cox 			 * pages.  If we have completed that laundering run
1010ebcddc72SAlan Cox 			 * (and we are no longer in shortfall) or we have met
1011ebcddc72SAlan Cox 			 * our laundry target through other activity, then we
1012ebcddc72SAlan Cox 			 * can stop laundering pages.
1013ebcddc72SAlan Cox 			 */
1014ebcddc72SAlan Cox 			in_shortfall = false;
1015ebcddc72SAlan Cox 			target = 0;
1016ebcddc72SAlan Cox 			goto trybackground;
1017ebcddc72SAlan Cox 		}
1018ebcddc72SAlan Cox 		launder = target / shortfall_cycle--;
1019ebcddc72SAlan Cox 		goto dolaundry;
1020ebcddc72SAlan Cox 
1021ebcddc72SAlan Cox 		/*
1022ebcddc72SAlan Cox 		 * There's no immediate need to launder any pages; see if we
1023ebcddc72SAlan Cox 		 * meet the conditions to perform background laundering:
1024ebcddc72SAlan Cox 		 *
1025ebcddc72SAlan Cox 		 * 1. The ratio of dirty to clean inactive pages exceeds the
102660684862SMark Johnston 		 *    background laundering threshold, or
1027ebcddc72SAlan Cox 		 * 2. we haven't yet reached the target of the current
1028ebcddc72SAlan Cox 		 *    background laundering run.
1029ebcddc72SAlan Cox 		 *
1030ebcddc72SAlan Cox 		 * The background laundering threshold is not a constant.
1031ebcddc72SAlan Cox 		 * Instead, it is a slowly growing function of the number of
103260684862SMark Johnston 		 * clean pages freed by the page daemon since the last
103360684862SMark Johnston 		 * background laundering.  Thus, as the ratio of dirty to
103460684862SMark Johnston 		 * clean inactive pages grows, the amount of memory pressure
1035c098768eSMark Johnston 		 * required to trigger laundering decreases.  We ensure
1036c098768eSMark Johnston 		 * that the threshold is non-zero after an inactive queue
1037c098768eSMark Johnston 		 * scan, even if that scan failed to free a single clean page.
1038ebcddc72SAlan Cox 		 */
1039ebcddc72SAlan Cox trybackground:
1040e2068d0bSJeff Roberson 		nclean = vmd->vmd_free_count +
1041e2068d0bSJeff Roberson 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1042e2068d0bSJeff Roberson 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1043c098768eSMark Johnston 		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1044c098768eSMark Johnston 		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1045e2068d0bSJeff Roberson 			target = vmd->vmd_background_launder_target;
1046ebcddc72SAlan Cox 		}
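		/*
		 * Worked example of the trigger above, with assumed numbers:
		 * if vmd_free_target - vmd_free_min is 10000 pages and the
		 * page daemon has freed nfreed = 40000 clean pages since the
		 * last background run, the multiplier is
		 * isqrt(howmany(40001, 10000)) = isqrt(5) = 2, so laundering
		 * starts once the laundry queue holds at least half as many
		 * pages as the clean total (free + inactive).
		 */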
1047ebcddc72SAlan Cox 
1048ebcddc72SAlan Cox 		/*
1049ebcddc72SAlan Cox 		 * We have a non-zero background laundering target.  If we've
1050ebcddc72SAlan Cox 		 * laundered up to our maximum without observing a page daemon
1051cb35676eSMark Johnston 		 * request, just stop.  This is a safety belt that ensures we
1052ebcddc72SAlan Cox 		 * don't launder an excessive amount if memory pressure is low
1053ebcddc72SAlan Cox 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1054ebcddc72SAlan Cox 		 * proceed at the background laundering rate.
1055ebcddc72SAlan Cox 		 */
1056ebcddc72SAlan Cox 		if (target > 0) {
105760684862SMark Johnston 			if (nfreed > 0) {
105860684862SMark Johnston 				nfreed = 0;
1059ebcddc72SAlan Cox 				last_target = target;
1060ebcddc72SAlan Cox 			} else if (last_target - target >=
1061ebcddc72SAlan Cox 			    vm_background_launder_max * 1024 / PAGE_SIZE) {
1062ebcddc72SAlan Cox 				target = 0;
1063ebcddc72SAlan Cox 			}
1064ebcddc72SAlan Cox 			launder = vm_background_launder_rate * 1024 / PAGE_SIZE;
1065ebcddc72SAlan Cox 			launder /= VM_LAUNDER_RATE;
1066ebcddc72SAlan Cox 			if (launder > target)
1067ebcddc72SAlan Cox 				launder = target;
1068ebcddc72SAlan Cox 		}
1069ebcddc72SAlan Cox 
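		/*
		 * Unit check for the conversions above, assuming 4 KB pages,
		 * hypothetical defaults of a 4096 KB/s background rate and a
		 * 20480 KB cap, and VM_LAUNDER_RATE wakeups per second (10,
		 * say): the rate converts to 4096 * 1024 / 4096 = 1024
		 * pages/s, or about 102 pages per wakeup, and the cap to
		 * 20480 * 1024 / 4096 = 5120 pages, i.e., 20 MB.
		 */
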
1070ebcddc72SAlan Cox dolaundry:
1071ebcddc72SAlan Cox 		if (launder > 0) {
1072ebcddc72SAlan Cox 			/*
1073ebcddc72SAlan Cox 			 * Because of I/O clustering, the number of laundered
1074ebcddc72SAlan Cox 			 * pages could exceed "target" by the maximum size of
1075ebcddc72SAlan Cox 			 * a cluster minus one.
1076ebcddc72SAlan Cox 			 */
1077e2068d0bSJeff Roberson 			target -= min(vm_pageout_launder(vmd, launder,
1078ebcddc72SAlan Cox 			    in_shortfall), target);
1079ebcddc72SAlan Cox 			pause("laundp", hz / VM_LAUNDER_RATE);
1080ebcddc72SAlan Cox 		}
1081ebcddc72SAlan Cox 
1082ebcddc72SAlan Cox 		/*
1083ebcddc72SAlan Cox 		 * If we're not currently laundering pages and the page daemon
1084ebcddc72SAlan Cox 		 * hasn't posted a new request, sleep until the page daemon
1085ebcddc72SAlan Cox 		 * kicks us.
1086ebcddc72SAlan Cox 		 */
1087ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1088e2068d0bSJeff Roberson 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1089e2068d0bSJeff Roberson 			(void)mtx_sleep(&vmd->vmd_laundry_request,
1090ebcddc72SAlan Cox 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1091ebcddc72SAlan Cox 
1092ebcddc72SAlan Cox 		/*
1093ebcddc72SAlan Cox 		 * If the pagedaemon has indicated that it's in shortfall, start
1094ebcddc72SAlan Cox 		 * a shortfall laundering unless we're already in the middle of
1095ebcddc72SAlan Cox 		 * one.  This may preempt a background laundering.
1096ebcddc72SAlan Cox 		 */
1097e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1098ebcddc72SAlan Cox 		    (!in_shortfall || shortfall_cycle == 0)) {
1099e2068d0bSJeff Roberson 			shortfall = vm_laundry_target(vmd) +
1100e2068d0bSJeff Roberson 			    vmd->vmd_pageout_deficit;
1101ebcddc72SAlan Cox 			target = 0;
1102ebcddc72SAlan Cox 		} else
1103ebcddc72SAlan Cox 			shortfall = 0;
1104ebcddc72SAlan Cox 
1105ebcddc72SAlan Cox 		if (target == 0)
1106e2068d0bSJeff Roberson 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
110760684862SMark Johnston 		nfreed += vmd->vmd_clean_pages_freed;
110860684862SMark Johnston 		vmd->vmd_clean_pages_freed = 0;
1109ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1110ebcddc72SAlan Cox 	}
1111ebcddc72SAlan Cox }
1112ebcddc72SAlan Cox 
1113be37ee79SMark Johnston /*
1114be37ee79SMark Johnston  * Compute the number of pages we want to try to move from the
1115be37ee79SMark Johnston  * active queue to either the inactive or laundry queue.
1116be37ee79SMark Johnston  *
11177bb4634eSMark Johnston  * When scanning active pages during a shortage, we make clean pages
11187bb4634eSMark Johnston  * count more heavily towards the page shortage than dirty pages.
11197bb4634eSMark Johnston  * This is because dirty pages must be laundered before they can be
11207bb4634eSMark Johnston  * reused and thus have less utility when attempting to quickly
11217bb4634eSMark Johnston  * alleviate a free page shortage.  However, this weighting also
11227bb4634eSMark Johnston  * causes the scan to deactivate dirty pages more aggressively,
11237bb4634eSMark Johnston  * improving the effectiveness of clustering.
1124be37ee79SMark Johnston  */
1125be37ee79SMark Johnston static int
11267bb4634eSMark Johnston vm_pageout_active_target(struct vm_domain *vmd)
1127be37ee79SMark Johnston {
1128be37ee79SMark Johnston 	int shortage;
1129be37ee79SMark Johnston 
1130be37ee79SMark Johnston 	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1131be37ee79SMark Johnston 	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1132be37ee79SMark Johnston 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1133be37ee79SMark Johnston 	shortage *= act_scan_laundry_weight;
1134be37ee79SMark Johnston 	return (shortage);
1135be37ee79SMark Johnston }
1136be37ee79SMark Johnston 
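/*
 * Worked example of the computation above (numbers hypothetical, assuming
 * an act_scan_laundry_weight of 3): with an inactive target of 8000 pages,
 * a vm_paging_target() of 1000, 5000 inactive pages, and 3000 laundry
 * pages, the raw shortage is 8000 + 1000 - (5000 + 3000 / 3) = 3000 pages.
 * Scaling by the weight yields 9000 shortage units; the active scan then
 * debits 3 units for each clean page it deactivates and 1 unit for each
 * dirty page, implementing the weighting described above.
 */
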
1137be37ee79SMark Johnston /*
1138be37ee79SMark Johnston  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1139be37ee79SMark Johnston  * small portion of the queue in order to maintain quasi-LRU.
1140be37ee79SMark Johnston  */
1141be37ee79SMark Johnston static void
1142be37ee79SMark Johnston vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1143be37ee79SMark Johnston {
1144be37ee79SMark Johnston 	struct scan_state ss;
1145be37ee79SMark Johnston 	struct mtx *mtx;
1146be37ee79SMark Johnston 	vm_page_t m, marker;
1147be37ee79SMark Johnston 	struct vm_pagequeue *pq;
1148be37ee79SMark Johnston 	long min_scan;
1149be37ee79SMark Johnston 	int act_delta, max_scan, scan_tick;
1150be37ee79SMark Johnston 
1151be37ee79SMark Johnston 	marker = &vmd->vmd_markers[PQ_ACTIVE];
1152be37ee79SMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1153be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
1154be37ee79SMark Johnston 
1155be37ee79SMark Johnston 	/*
1156be37ee79SMark Johnston 	 * If we're just idle polling, attempt to visit every
1157be37ee79SMark Johnston 	 * active page within 'update_period' seconds.
1158be37ee79SMark Johnston 	 */
1159be37ee79SMark Johnston 	scan_tick = ticks;
1160be37ee79SMark Johnston 	if (vm_pageout_update_period != 0) {
1161be37ee79SMark Johnston 		min_scan = pq->pq_cnt;
1162be37ee79SMark Johnston 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1163be37ee79SMark Johnston 		min_scan /= hz * vm_pageout_update_period;
1164be37ee79SMark Johnston 	} else
1165be37ee79SMark Johnston 		min_scan = 0;
1166be37ee79SMark Johnston 	if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1167be37ee79SMark Johnston 		vmd->vmd_last_active_scan = scan_tick;
1168be37ee79SMark Johnston 
1169be37ee79SMark Johnston 	/*
1170be37ee79SMark Johnston 	 * Scan the active queue for pages that can be deactivated.  Update
1171be37ee79SMark Johnston 	 * the per-page activity counter and use it to identify deactivation
1172be37ee79SMark Johnston 	 * candidates.  Held pages may be deactivated.
1173be37ee79SMark Johnston 	 *
1174be37ee79SMark Johnston 	 * To avoid requeuing each page that remains in the active queue, we
11757bb4634eSMark Johnston 	 * implement the CLOCK algorithm.  To keep the implementation of the
11767bb4634eSMark Johnston 	 * enqueue operation consistent for all page queues, we use two hands,
11777bb4634eSMark Johnston 	 * represented by marker pages. Scans begin at the first hand, which
11787bb4634eSMark Johnston 	 * precedes the second hand in the queue.  When the two hands meet,
11797bb4634eSMark Johnston 	 * they are moved back to the head and tail of the queue, respectively,
11807bb4634eSMark Johnston 	 * and scanning resumes.
1181be37ee79SMark Johnston 	 */
1182be37ee79SMark Johnston 	max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1183be37ee79SMark Johnston 	mtx = NULL;
1184be37ee79SMark Johnston act_scan:
1185be37ee79SMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1186be37ee79SMark Johnston 	while ((m = vm_pageout_next(&ss, false)) != NULL) {
1187be37ee79SMark Johnston 		if (__predict_false(m == &vmd->vmd_clock[1])) {
1188be37ee79SMark Johnston 			vm_pagequeue_lock(pq);
1189be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1190be37ee79SMark Johnston 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1191be37ee79SMark Johnston 			TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1192be37ee79SMark Johnston 			    plinks.q);
1193be37ee79SMark Johnston 			TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1194be37ee79SMark Johnston 			    plinks.q);
1195be37ee79SMark Johnston 			max_scan -= ss.scanned;
1196be37ee79SMark Johnston 			vm_pageout_end_scan(&ss);
1197be37ee79SMark Johnston 			goto act_scan;
1198be37ee79SMark Johnston 		}
1199be37ee79SMark Johnston 		if (__predict_false((m->flags & PG_MARKER) != 0))
1200be37ee79SMark Johnston 			continue;
1201be37ee79SMark Johnston 
1202be37ee79SMark Johnston 		vm_page_change_lock(m, &mtx);
1203be37ee79SMark Johnston 
1204be37ee79SMark Johnston 		/*
1205be37ee79SMark Johnston 		 * The page may have been disassociated from the queue
1206be37ee79SMark Johnston 		 * while locks were dropped.
1207be37ee79SMark Johnston 		 */
1208be37ee79SMark Johnston 		if (vm_page_queue(m) != PQ_ACTIVE)
1209be37ee79SMark Johnston 			continue;
1210be37ee79SMark Johnston 
1211be37ee79SMark Johnston 		/*
1212be37ee79SMark Johnston 		 * Wired pages are dequeued lazily.
1213be37ee79SMark Johnston 		 */
1214be37ee79SMark Johnston 		if (m->wire_count != 0) {
1215be37ee79SMark Johnston 			vm_page_dequeue_deferred(m);
1216be37ee79SMark Johnston 			continue;
1217be37ee79SMark Johnston 		}
1218be37ee79SMark Johnston 
1219be37ee79SMark Johnston 		/*
1220be37ee79SMark Johnston 		 * Check to see "how much" the page has been used.
1221d7aeb429SAlan Cox 		 *
1222d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1223d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
1224d7aeb429SAlan Cox 		 * observed here and now.
1225d7aeb429SAlan Cox 		 *
1226be37ee79SMark Johnston 		 * Perform an unsynchronized object ref count check.  While
1227be37ee79SMark Johnston 		 * the page lock ensures that the page is not reallocated to
1228be37ee79SMark Johnston 		 * another object, in particular, one with unmanaged mappings
1229be37ee79SMark Johnston 		 * that cannot support pmap_ts_referenced(), two races are,
1230be37ee79SMark Johnston 		 * nonetheless, possible:
1231be37ee79SMark Johnston 		 * 1) The count was transitioning to zero, but we saw a non-
1232be37ee79SMark Johnston 		 *    zero value.  pmap_ts_referenced() will return zero
1233be37ee79SMark Johnston 		 *    because the page is not mapped.
1234be37ee79SMark Johnston 		 * 2) The count was transitioning to one, but we saw zero.
1235be37ee79SMark Johnston 		 *    This race delays the detection of a new reference.  At
1236be37ee79SMark Johnston 		 *    worst, we will deactivate and reactivate the page.
1237be37ee79SMark Johnston 		 */
1238be37ee79SMark Johnston 		if (m->object->ref_count != 0)
1239d7aeb429SAlan Cox 			act_delta = pmap_ts_referenced(m);
1240d7aeb429SAlan Cox 		else
1241d7aeb429SAlan Cox 			act_delta = 0;
1242d7aeb429SAlan Cox 		if ((m->aflags & PGA_REFERENCED) != 0) {
1243d7aeb429SAlan Cox 			vm_page_aflag_clear(m, PGA_REFERENCED);
1244d7aeb429SAlan Cox 			act_delta++;
1245d7aeb429SAlan Cox 		}
1246be37ee79SMark Johnston 
1247be37ee79SMark Johnston 		/*
1248be37ee79SMark Johnston 		 * Advance or decay the act_count based on recent usage.
1249be37ee79SMark Johnston 		 */
1250be37ee79SMark Johnston 		if (act_delta != 0) {
1251be37ee79SMark Johnston 			m->act_count += ACT_ADVANCE + act_delta;
1252be37ee79SMark Johnston 			if (m->act_count > ACT_MAX)
1253be37ee79SMark Johnston 				m->act_count = ACT_MAX;
1254be37ee79SMark Johnston 		} else
1255be37ee79SMark Johnston 			m->act_count -= min(m->act_count, ACT_DECLINE);
1256be37ee79SMark Johnston 
1257be37ee79SMark Johnston 		if (m->act_count == 0) {
1258be37ee79SMark Johnston 			/*
1259be37ee79SMark Johnston 			 * When not short for inactive pages, let dirty pages go
1260be37ee79SMark Johnston 			 * through the inactive queue before moving to the
1261be37ee79SMark Johnston 			 * laundry queues.  This gives them some extra time to
1262be37ee79SMark Johnston 			 * be reactivated, potentially avoiding an expensive
12637bb4634eSMark Johnston 			 * pageout.  However, during a page shortage, the
12647bb4634eSMark Johnston 			 * inactive queue is necessarily small, and so dirty
12657bb4634eSMark Johnston 			 * pages would only spend a trivial amount of time in
12667bb4634eSMark Johnston 			 * the inactive queue.  Therefore, we might as well
12677bb4634eSMark Johnston 			 * place them directly in the laundry queue to reduce
12687bb4634eSMark Johnston 			 * queuing overhead.
1269be37ee79SMark Johnston 			 */
1270be37ee79SMark Johnston 			if (page_shortage <= 0)
1271be37ee79SMark Johnston 				vm_page_deactivate(m);
1272be37ee79SMark Johnston 			else {
1273be37ee79SMark Johnston 				/*
1274be37ee79SMark Johnston 				 * Calling vm_page_test_dirty() here would
1275be37ee79SMark Johnston 				 * require acquisition of the object's write
1276be37ee79SMark Johnston 				 * lock.  However, during a page shortage,
1277be37ee79SMark Johnston 				 * directing dirty pages into the laundry
1278be37ee79SMark Johnston 				 * queue is only an optimization and not a
1279be37ee79SMark Johnston 				 * requirement.  Therefore, we simply rely on
1280be37ee79SMark Johnston 				 * the opportunistic updates to the page's
1281be37ee79SMark Johnston 				 * dirty field by the pmap.
1282be37ee79SMark Johnston 				 */
1283be37ee79SMark Johnston 				if (m->dirty == 0) {
1284be37ee79SMark Johnston 					vm_page_deactivate(m);
1285be37ee79SMark Johnston 					page_shortage -=
1286be37ee79SMark Johnston 					    act_scan_laundry_weight;
1287be37ee79SMark Johnston 				} else {
1288be37ee79SMark Johnston 					vm_page_launder(m);
1289be37ee79SMark Johnston 					page_shortage--;
1290be37ee79SMark Johnston 				}
1291be37ee79SMark Johnston 			}
1292be37ee79SMark Johnston 		}
1293be37ee79SMark Johnston 	}
1294be37ee79SMark Johnston 	if (mtx != NULL) {
1295be37ee79SMark Johnston 		mtx_unlock(mtx);
1296be37ee79SMark Johnston 		mtx = NULL;
1297be37ee79SMark Johnston 	}
1298be37ee79SMark Johnston 	vm_pagequeue_lock(pq);
1299be37ee79SMark Johnston 	TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1300be37ee79SMark Johnston 	TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1301be37ee79SMark Johnston 	vm_pageout_end_scan(&ss);
1302be37ee79SMark Johnston 	vm_pagequeue_unlock(pq);
1303be37ee79SMark Johnston }
1304be37ee79SMark Johnston 
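/*
 * The marker-based scan above is a variant of the classic CLOCK policy.
 * The sketch below (a standalone userland program, not kernel code; the
 * page count and reference pattern are made up) shows the one-hand core of
 * CLOCK: a referenced page gets its bit cleared and a second chance, and
 * the first unreferenced page encountered is the eviction candidate.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define NPAGES	8

static bool referenced[NPAGES];		/* stand-in for PGA_REFERENCED */

static int
clock_evict(int *hand)
{
	for (;;) {
		if (referenced[*hand]) {
			/* Second chance: clear the bit and advance. */
			referenced[*hand] = false;
			*hand = (*hand + 1) % NPAGES;
		} else {
			int victim = *hand;

			*hand = (*hand + 1) % NPAGES;
			return (victim);
		}
	}
}

int
main(void)
{
	int hand = 0;

	referenced[0] = referenced[2] = true;
	printf("evict page %d\n", clock_evict(&hand));	/* prints 1 */
	return (0);
}
#endif
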
13055cd29d0fSMark Johnston static int
13065cd29d0fSMark Johnston vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m)
13075cd29d0fSMark Johnston {
13085cd29d0fSMark Johnston 	struct vm_domain *vmd;
13095cd29d0fSMark Johnston 
13101b5c869dSMark Johnston 	if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
13115cd29d0fSMark Johnston 		return (0);
13125cd29d0fSMark Johnston 	vm_page_aflag_set(m, PGA_ENQUEUED);
13135cd29d0fSMark Johnston 	if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {
13145cd29d0fSMark Johnston 		vmd = vm_pagequeue_domain(m);
13155cd29d0fSMark Johnston 		TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
13165cd29d0fSMark Johnston 		vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
13175cd29d0fSMark Johnston 	} else if ((m->aflags & PGA_REQUEUE) != 0) {
13185cd29d0fSMark Johnston 		TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
13195cd29d0fSMark Johnston 		vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
13205cd29d0fSMark Johnston 	} else
13215cd29d0fSMark Johnston 		TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q);
13225cd29d0fSMark Johnston 	return (1);
13235cd29d0fSMark Johnston }
13245cd29d0fSMark Johnston 
13255cd29d0fSMark Johnston /*
13265cd29d0fSMark Johnston  * Re-add stuck pages to the inactive queue.  We will examine them again
13275cd29d0fSMark Johnston  * during the next scan.  If the queue state of a page has changed since
13285cd29d0fSMark Johnston  * it was physically removed from the page queue in
13295cd29d0fSMark Johnston  * vm_pageout_collect_batch(), don't do anything with that page.
13305cd29d0fSMark Johnston  */
13315cd29d0fSMark Johnston static void
13325cd29d0fSMark Johnston vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
13335cd29d0fSMark Johnston     vm_page_t m)
13345cd29d0fSMark Johnston {
13355cd29d0fSMark Johnston 	struct vm_pagequeue *pq;
13365cd29d0fSMark Johnston 	int delta;
13375cd29d0fSMark Johnston 
13385cd29d0fSMark Johnston 	delta = 0;
13395cd29d0fSMark Johnston 	pq = ss->pq;
13405cd29d0fSMark Johnston 
13415cd29d0fSMark Johnston 	if (m != NULL) {
13425cd29d0fSMark Johnston 		if (vm_batchqueue_insert(bq, m))
13435cd29d0fSMark Johnston 			return;
13445cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
13455cd29d0fSMark Johnston 		delta += vm_pageout_reinsert_inactive_page(ss, m);
13465cd29d0fSMark Johnston 	} else
13475cd29d0fSMark Johnston 		vm_pagequeue_lock(pq);
13485cd29d0fSMark Johnston 	while ((m = vm_batchqueue_pop(bq)) != NULL)
13495cd29d0fSMark Johnston 		delta += vm_pageout_reinsert_inactive_page(ss, m);
13505cd29d0fSMark Johnston 	vm_pagequeue_cnt_add(pq, delta);
13515cd29d0fSMark Johnston 	vm_pagequeue_unlock(pq);
13525cd29d0fSMark Johnston 	vm_batchqueue_init(bq);
13535cd29d0fSMark Johnston }
13545cd29d0fSMark Johnston 
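/*
 * The two functions above amortize page queue lock acquisitions by staging
 * pages in a batch and requeuing them under a single lock hold.  The
 * sketch below (standalone userland code, not the kernel's vm_batchqueue
 * KPI; the names and sizes are made up) shows the same idiom.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

#define BATCH	8

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct batch {
	int	items[BATCH];
	int	cnt;
};

static void
flush(struct batch *b)
{
	/* One lock acquisition covers up to BATCH requeue operations. */
	pthread_mutex_lock(&queue_lock);
	for (int i = 0; i < b->cnt; i++)
		printf("requeue item %d\n", b->items[i]);
	pthread_mutex_unlock(&queue_lock);
	b->cnt = 0;
}

static void
stage(struct batch *b, int item)
{
	if (b->cnt == BATCH)
		flush(b);
	b->items[b->cnt++] = item;
}

int
main(void)
{
	struct batch b = { .cnt = 0 };

	for (int i = 0; i < 20; i++)
		stage(&b, i);
	flush(&b);		/* drain the remainder */
	return (0);
}
#endif
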
1355ebcddc72SAlan Cox /*
135627e29d10SMark Johnston  * Attempt to reclaim the requested number of pages from the inactive queue.
135727e29d10SMark Johnston  * Returns true if the shortage was addressed.
1358df8bae1dSRodney W. Grimes  */
1359be37ee79SMark Johnston static int
136049a3710cSMark Johnston vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
1361be37ee79SMark Johnston     int *addl_shortage)
1362df8bae1dSRodney W. Grimes {
13635cd29d0fSMark Johnston 	struct scan_state ss;
13645cd29d0fSMark Johnston 	struct vm_batchqueue rq;
13655cd29d0fSMark Johnston 	struct mtx *mtx;
13665cd29d0fSMark Johnston 	vm_page_t m, marker;
13678d220203SAlan Cox 	struct vm_pagequeue *pq;
1368df8bae1dSRodney W. Grimes 	vm_object_t object;
1369be37ee79SMark Johnston 	int act_delta, addl_page_shortage, deficit, page_shortage;
1370be37ee79SMark Johnston 	int starting_page_shortage;
13715cd29d0fSMark Johnston 	bool obj_locked;
13720d94caffSDavid Greenman 
1373df8bae1dSRodney W. Grimes 	/*
137401f04471SMark Johnston 	 * The addl_page_shortage is an estimate of the number of temporarily
1375311e34e2SKonstantin Belousov 	 * stuck pages in the inactive queue.  In other words, the
1376449c2e92SKonstantin Belousov 	 * number of pages from the inactive count that should be
1377311e34e2SKonstantin Belousov 	 * discounted in setting the target for the active queue scan.
1378311e34e2SKonstantin Belousov 	 */
13799099545aSAlan Cox 	addl_page_shortage = 0;
13809099545aSAlan Cox 
13811c7c3c6aSMatthew Dillon 	/*
138249a3710cSMark Johnston 	 * vmd_pageout_deficit counts the number of pages requested in
138349a3710cSMark Johnston 	 * allocations that failed because of a free page shortage.  We assume
138449a3710cSMark Johnston 	 * that the allocations will be reattempted and thus include the deficit
138549a3710cSMark Johnston 	 * in our scan target.
13861c7c3c6aSMatthew Dillon 	 */
1387e2068d0bSJeff Roberson 	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
138849a3710cSMark Johnston 	starting_page_shortage = page_shortage = shortage + deficit;
13891c7c3c6aSMatthew Dillon 
13905cd29d0fSMark Johnston 	mtx = NULL;
13915cd29d0fSMark Johnston 	obj_locked = false;
13925cd29d0fSMark Johnston 	object = NULL;
13935cd29d0fSMark Johnston 	vm_batchqueue_init(&rq);
13945cd29d0fSMark Johnston 
1395936524aaSMatthew Dillon 	/*
1396f095d1bbSAlan Cox 	 * Start scanning the inactive queue for pages that we can free.  The
1397f095d1bbSAlan Cox 	 * scan will stop when we reach the target or we have scanned the
1398f095d1bbSAlan Cox 	 * entire queue.  (Note that m->act_count is not used to make
1399f095d1bbSAlan Cox 	 * decisions for the inactive queue, only for the active queue.)
14008d220203SAlan Cox 	 */
140164b38930SMark Johnston 	marker = &vmd->vmd_markers[PQ_INACTIVE];
14025cd29d0fSMark Johnston 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
14038d220203SAlan Cox 	vm_pagequeue_lock(pq);
14045cd29d0fSMark Johnston 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
14055cd29d0fSMark Johnston 	while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
14065cd29d0fSMark Johnston 		KASSERT((m->flags & PG_MARKER) == 0,
14075cd29d0fSMark Johnston 		    ("marker page %p was dequeued", m));
1408df8bae1dSRodney W. Grimes 
14095cd29d0fSMark Johnston 		vm_page_change_lock(m, &mtx);
1410df8bae1dSRodney W. Grimes 
14115cd29d0fSMark Johnston recheck:
1412936524aaSMatthew Dillon 		/*
14135cd29d0fSMark Johnston 		 * The page may have been disassociated from the queue
14145cd29d0fSMark Johnston 		 * while locks were dropped.
1415936524aaSMatthew Dillon 		 */
141636f8fe9bSMark Johnston 		if (vm_page_queue(m) != PQ_INACTIVE) {
14175cd29d0fSMark Johnston 			addl_page_shortage++;
1418936524aaSMatthew Dillon 			continue;
14195cd29d0fSMark Johnston 		}
14207900f95dSKonstantin Belousov 
14218c616246SKonstantin Belousov 		/*
14225cd29d0fSMark Johnston 		 * The page was re-enqueued after the page queue lock was
14235cd29d0fSMark Johnston 		 * dropped, or a requeue was requested.  This page gets a second
14245cd29d0fSMark Johnston 		 * chance.
14258c616246SKonstantin Belousov 		 */
14265cd29d0fSMark Johnston 		if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE |
14275cd29d0fSMark Johnston 		    PGA_REQUEUE_HEAD)) != 0)
14285cd29d0fSMark Johnston 			goto reinsert;
14295cd29d0fSMark Johnston 
14301d3a1bcfSMark Johnston 		/*
14315cd29d0fSMark Johnston 		 * Held pages are essentially stuck in the queue.  So,
14325cd29d0fSMark Johnston 		 * they ought to be discounted from the inactive count.
1433be37ee79SMark Johnston 		 * See the description of addl_page_shortage above.
14345cd29d0fSMark Johnston 		 *
14355cd29d0fSMark Johnston 		 * Wired pages may not be freed.  Complete their removal
14365cd29d0fSMark Johnston 		 * from the queue now to avoid needless revisits during
14375cd29d0fSMark Johnston 		 * future scans.
1438a3aeedabSAlan Cox 		 */
14395cd29d0fSMark Johnston 		if (m->hold_count != 0) {
1440a3aeedabSAlan Cox 			addl_page_shortage++;
14415cd29d0fSMark Johnston 			goto reinsert;
14425cd29d0fSMark Johnston 		}
14435cd29d0fSMark Johnston 		if (m->wire_count != 0) {
14445cd29d0fSMark Johnston 			vm_page_dequeue_deferred(m);
14455cd29d0fSMark Johnston 			continue;
14465cd29d0fSMark Johnston 		}
14475cd29d0fSMark Johnston 
14485cd29d0fSMark Johnston 		if (object != m->object) {
14495cd29d0fSMark Johnston 			if (obj_locked) {
14505cd29d0fSMark Johnston 				VM_OBJECT_WUNLOCK(object);
14515cd29d0fSMark Johnston 				obj_locked = false;
1452df8bae1dSRodney W. Grimes 			}
14539ee2165fSAlan Cox 			object = m->object;
14545cd29d0fSMark Johnston 		}
14555cd29d0fSMark Johnston 		if (!obj_locked) {
1456a3aeedabSAlan Cox 			if (!VM_OBJECT_TRYWLOCK(object)) {
14575cd29d0fSMark Johnston 				mtx_unlock(mtx);
14585cd29d0fSMark Johnston 				/* Depends on type-stability. */
14595cd29d0fSMark Johnston 				VM_OBJECT_WLOCK(object);
14605cd29d0fSMark Johnston 				obj_locked = true;
14615cd29d0fSMark Johnston 				mtx_lock(mtx);
14625cd29d0fSMark Johnston 				goto recheck;
14635cd29d0fSMark Johnston 			} else
14645cd29d0fSMark Johnston 				obj_locked = true;
1465a3aeedabSAlan Cox 		}
14665cd29d0fSMark Johnston 
1467a3aeedabSAlan Cox 		if (vm_page_busied(m)) {
1468a3aeedabSAlan Cox 			/*
1469a3aeedabSAlan Cox 			 * Don't mess with busy pages.  Leave them at
1470a3aeedabSAlan Cox 			 * the front of the queue.  Most likely, they
1471a3aeedabSAlan Cox 			 * are being paged out and will leave the
1472a3aeedabSAlan Cox 			 * queue shortly after the scan finishes.  So,
1473a3aeedabSAlan Cox 			 * they ought to be discounted from the
1474a3aeedabSAlan Cox 			 * inactive count.
1475a3aeedabSAlan Cox 			 */
1476a3aeedabSAlan Cox 			addl_page_shortage++;
14775cd29d0fSMark Johnston 			goto reinsert;
147826f9a767SRodney W. Grimes 		}
147948cc2fc7SKonstantin Belousov 
148048cc2fc7SKonstantin Belousov 		/*
14818748f58cSKonstantin Belousov 		 * Invalid pages can be easily freed. They cannot be
14828748f58cSKonstantin Belousov 		 * mapped; vm_page_free() asserts this.
1483776f729cSKonstantin Belousov 		 */
14848748f58cSKonstantin Belousov 		if (m->valid == 0)
14858748f58cSKonstantin Belousov 			goto free_page;
1486776f729cSKonstantin Belousov 
1487776f729cSKonstantin Belousov 		/*
1488960810ccSAlan Cox 		 * If the page has been referenced and the object is not dead,
1489960810ccSAlan Cox 		 * reactivate or requeue the page depending on whether the
1490960810ccSAlan Cox 		 * object is mapped.
1491d7aeb429SAlan Cox 		 *
1492d7aeb429SAlan Cox 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1493d7aeb429SAlan Cox 		 * that a reference from a concurrently destroyed mapping is
1494d7aeb429SAlan Cox 		 * observed here and now.
14957e006499SJohn Dyson 		 */
1496d7aeb429SAlan Cox 		if (object->ref_count != 0)
1497d7aeb429SAlan Cox 			act_delta = pmap_ts_referenced(m);
1498d7aeb429SAlan Cox 		else {
1499bb7858eaSJeff Roberson 			KASSERT(!pmap_page_is_mapped(m),
1500be37ee79SMark Johnston 			    ("page %p is mapped", m));
1501d7aeb429SAlan Cox 			act_delta = 0;
1502d7aeb429SAlan Cox 		}
1503d7aeb429SAlan Cox 		if ((m->aflags & PGA_REFERENCED) != 0) {
1504d7aeb429SAlan Cox 			vm_page_aflag_clear(m, PGA_REFERENCED);
1505d7aeb429SAlan Cox 			act_delta++;
15062fe6e4d7SDavid Greenman 		}
1507bb7858eaSJeff Roberson 		if (act_delta != 0) {
150886fa2471SAlan Cox 			if (object->ref_count != 0) {
150983c9dea1SGleb Smirnoff 				VM_CNT_INC(v_reactivated);
151026f9a767SRodney W. Grimes 				vm_page_activate(m);
1511960810ccSAlan Cox 
1512960810ccSAlan Cox 				/*
1513960810ccSAlan Cox 				 * Increase the activation count if the page
1514960810ccSAlan Cox 				 * was referenced while in the inactive queue.
1515960810ccSAlan Cox 				 * This makes it less likely that the page will
1516960810ccSAlan Cox 				 * be returned prematurely to the inactive
1517960810ccSAlan Cox 				 * queue.
1518960810ccSAlan Cox  				 */
1519bb7858eaSJeff Roberson 				m->act_count += act_delta + ACT_ADVANCE;
15205cd29d0fSMark Johnston 				continue;
1521ebcddc72SAlan Cox 			} else if ((object->flags & OBJ_DEAD) == 0) {
15225cd29d0fSMark Johnston 				vm_page_aflag_set(m, PGA_REQUEUE);
15235cd29d0fSMark Johnston 				goto reinsert;
1524ebcddc72SAlan Cox 			}
1525960810ccSAlan Cox 		}
152667bf6868SJohn Dyson 
15277e006499SJohn Dyson 		/*
15289fc4739dSAlan Cox 		 * If the page appears to be clean at the machine-independent
15299fc4739dSAlan Cox 		 * layer, then remove all of its mappings from the pmap in
1530a766ffd0SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
1531a766ffd0SAlan Cox 		 * mappings allow write access, then the page may still be
1532a766ffd0SAlan Cox 		 * modified until the last of those mappings is removed.
15337e006499SJohn Dyson 		 */
1534aa044135SAlan Cox 		if (object->ref_count != 0) {
15359fc4739dSAlan Cox 			vm_page_test_dirty(m);
1536aa044135SAlan Cox 			if (m->dirty == 0)
1537b78ddb0bSAlan Cox 				pmap_remove_all(m);
1538aa044135SAlan Cox 		}
1539dcbcd518SBruce Evans 
15406989c456SAlan Cox 		/*
1541ebcddc72SAlan Cox 		 * Clean pages can be freed, but dirty pages must be sent back
1542ebcddc72SAlan Cox 		 * to the laundry, unless they belong to a dead object.
1543ebcddc72SAlan Cox 		 * Requeueing dirty pages from dead objects is pointless, as
1544ebcddc72SAlan Cox 		 * they are being paged out and freed by the thread that
1545ebcddc72SAlan Cox 		 * destroyed the object.
15466989c456SAlan Cox 		 */
1547ebcddc72SAlan Cox 		if (m->dirty == 0) {
15488748f58cSKonstantin Belousov free_page:
15495cd29d0fSMark Johnston 			/*
15505cd29d0fSMark Johnston 			 * Because we dequeued the page and have already
15515cd29d0fSMark Johnston 			 * checked for concurrent dequeue and enqueue
15525cd29d0fSMark Johnston 			 * requests, we can safely disassociate the page
15535cd29d0fSMark Johnston 			 * from the inactive queue.
15545cd29d0fSMark Johnston 			 */
15555cd29d0fSMark Johnston 			KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0,
15565cd29d0fSMark Johnston 			    ("page %p has queue state", m));
15575cd29d0fSMark Johnston 			m->queue = PQ_NONE;
155878afdce6SAlan Cox 			vm_page_free(m);
15595cd29d0fSMark Johnston 			page_shortage--;
1560ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0)
1561ebcddc72SAlan Cox 			vm_page_launder(m);
15625cd29d0fSMark Johnston 		continue;
15635cd29d0fSMark Johnston reinsert:
15645cd29d0fSMark Johnston 		vm_pageout_reinsert_inactive(&ss, &rq, m);
15655cd29d0fSMark Johnston 	}
15665cd29d0fSMark Johnston 	if (mtx != NULL) {
15675cd29d0fSMark Johnston 		mtx_unlock(mtx);
15685cd29d0fSMark Johnston 		mtx = NULL;
15695cd29d0fSMark Johnston 	}
15705cd29d0fSMark Johnston 	if (obj_locked) {
157189f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
15725cd29d0fSMark Johnston 		obj_locked = false;
15735cd29d0fSMark Johnston 	}
15745cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &rq, NULL);
15755cd29d0fSMark Johnston 	vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
15768d220203SAlan Cox 	vm_pagequeue_lock(pq);
15775cd29d0fSMark Johnston 	vm_pageout_end_scan(&ss);
15788d220203SAlan Cox 	vm_pagequeue_unlock(pq);
157926f9a767SRodney W. Grimes 
15805cd29d0fSMark Johnston 	VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage);
15815cd29d0fSMark Johnston 
1582ebcddc72SAlan Cox 	/*
1583ebcddc72SAlan Cox 	 * Wake up the laundry thread so that it can perform any needed
1584ebcddc72SAlan Cox 	 * laundering.  If we didn't meet our target, we're in shortfall and
1585b1fd102eSMark Johnston 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1586b1fd102eSMark Johnston 	 * swap devices are configured, the laundry thread has no work to do, so
1587b1fd102eSMark Johnston 	 * don't bother waking it up.
1588cb35676eSMark Johnston 	 *
1589cb35676eSMark Johnston 	 * The laundry thread uses the number of inactive queue scans elapsed
1590cb35676eSMark Johnston 	 * since the last laundering to determine whether to launder again, so
1591cb35676eSMark Johnston 	 * keep count.
1592ebcddc72SAlan Cox 	 */
1593cb35676eSMark Johnston 	if (starting_page_shortage > 0) {
1594e2068d0bSJeff Roberson 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1595ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1596e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1597cb35676eSMark Johnston 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1598ebcddc72SAlan Cox 			if (page_shortage > 0) {
1599e2068d0bSJeff Roberson 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
160083c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdshortfalls);
1601e2068d0bSJeff Roberson 			} else if (vmd->vmd_laundry_request !=
1602e2068d0bSJeff Roberson 			    VM_LAUNDRY_SHORTFALL)
1603e2068d0bSJeff Roberson 				vmd->vmd_laundry_request =
1604e2068d0bSJeff Roberson 				    VM_LAUNDRY_BACKGROUND;
1605e2068d0bSJeff Roberson 			wakeup(&vmd->vmd_laundry_request);
1606b1fd102eSMark Johnston 		}
160760684862SMark Johnston 		vmd->vmd_clean_pages_freed +=
160860684862SMark Johnston 		    starting_page_shortage - page_shortage;
1609ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1610ebcddc72SAlan Cox 	}
1611ebcddc72SAlan Cox 
16129452b5edSAlan Cox 	/*
1613f095d1bbSAlan Cox 	 * Wakeup the swapout daemon if we didn't free the targeted number of
1614f095d1bbSAlan Cox 	 * pages.
16159452b5edSAlan Cox 	 */
1616ac04195bSKonstantin Belousov 	if (page_shortage > 0)
1617ac04195bSKonstantin Belousov 		vm_swapout_run();
16189452b5edSAlan Cox 
16199452b5edSAlan Cox 	/*
162076386c7eSKonstantin Belousov 	 * If the inactive queue scan fails repeatedly to meet its
162176386c7eSKonstantin Belousov 	 * target, kill the largest process.
162276386c7eSKonstantin Belousov 	 */
162376386c7eSKonstantin Belousov 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
162476386c7eSKonstantin Belousov 
162576386c7eSKonstantin Belousov 	/*
1626be37ee79SMark Johnston 	 * Reclaim pages by swapping out idle processes, if configured to do so.
16271c7c3c6aSMatthew Dillon 	 */
1628ac04195bSKonstantin Belousov 	vm_swapout_run_idle();
1629be37ee79SMark Johnston 
1630be37ee79SMark Johnston 	/*
1631be37ee79SMark Johnston 	 * See the description of addl_page_shortage above.
1632be37ee79SMark Johnston 	 */
1633be37ee79SMark Johnston 	*addl_shortage = addl_page_shortage + deficit;
1634be37ee79SMark Johnston 
1635e57dd910SAlan Cox 	return (page_shortage <= 0);
16362025d69bSKonstantin Belousov }
16372025d69bSKonstantin Belousov 
1638449c2e92SKonstantin Belousov static int vm_pageout_oom_vote;
1639449c2e92SKonstantin Belousov 
1640449c2e92SKonstantin Belousov /*
1641449c2e92SKonstantin Belousov  * The pagedaemon threads randomly select one to perform the
1642449c2e92SKonstantin Belousov  * OOM kill.  Trying to kill processes before all pagedaemons
1643449c2e92SKonstantin Belousov  * have failed to reach the free page target is premature.
1644449c2e92SKonstantin Belousov  */
1645449c2e92SKonstantin Belousov static void
164676386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
164776386c7eSKonstantin Belousov     int starting_page_shortage)
1648449c2e92SKonstantin Belousov {
1649449c2e92SKonstantin Belousov 	int old_vote;
1650449c2e92SKonstantin Belousov 
165176386c7eSKonstantin Belousov 	if (starting_page_shortage <= 0 || starting_page_shortage !=
165276386c7eSKonstantin Belousov 	    page_shortage)
165376386c7eSKonstantin Belousov 		vmd->vmd_oom_seq = 0;
165476386c7eSKonstantin Belousov 	else
165576386c7eSKonstantin Belousov 		vmd->vmd_oom_seq++;
165676386c7eSKonstantin Belousov 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1657449c2e92SKonstantin Belousov 		if (vmd->vmd_oom) {
1658449c2e92SKonstantin Belousov 			vmd->vmd_oom = FALSE;
1659449c2e92SKonstantin Belousov 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1660449c2e92SKonstantin Belousov 		}
1661449c2e92SKonstantin Belousov 		return;
1662449c2e92SKonstantin Belousov 	}
1663449c2e92SKonstantin Belousov 
166476386c7eSKonstantin Belousov 	/*
166576386c7eSKonstantin Belousov 	 * Do not follow the call sequence until OOM condition is
166576386c7eSKonstantin Belousov 	 * Reset the sequence counter so that another OOM kill is not
166676386c7eSKonstantin Belousov 	 * started until the OOM condition has cleared.
166876386c7eSKonstantin Belousov 	vmd->vmd_oom_seq = 0;
166976386c7eSKonstantin Belousov 
1670449c2e92SKonstantin Belousov 	if (vmd->vmd_oom)
1671449c2e92SKonstantin Belousov 		return;
1672449c2e92SKonstantin Belousov 
1673449c2e92SKonstantin Belousov 	vmd->vmd_oom = TRUE;
1674449c2e92SKonstantin Belousov 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1675449c2e92SKonstantin Belousov 	if (old_vote != vm_ndomains - 1)
1676449c2e92SKonstantin Belousov 		return;
1677449c2e92SKonstantin Belousov 
1678449c2e92SKonstantin Belousov 	/*
1679449c2e92SKonstantin Belousov 	 * The current pagedaemon thread is the last in the quorum to
1680449c2e92SKonstantin Belousov 	 * start OOM.  Initiate the selection and signaling of the
1681449c2e92SKonstantin Belousov 	 * victim.
1682449c2e92SKonstantin Belousov 	 */
1683449c2e92SKonstantin Belousov 	vm_pageout_oom(VM_OOM_MEM);
1684449c2e92SKonstantin Belousov 
1685449c2e92SKonstantin Belousov 	/*
1686449c2e92SKonstantin Belousov 	 * After one round of OOM terror, recall our vote.  On the
1687449c2e92SKonstantin Belousov 	 * next pass, current pagedaemon would vote again if the low
1688449c2e92SKonstantin Belousov 	 * memory condition is still there, due to vmd_oom being
1689449c2e92SKonstantin Belousov 	 * false.
1690449c2e92SKonstantin Belousov 	 */
1691449c2e92SKonstantin Belousov 	vmd->vmd_oom = FALSE;
1692449c2e92SKonstantin Belousov 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1693449c2e92SKonstantin Belousov }
16942025d69bSKonstantin Belousov 
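/*
 * A minimal standalone sketch of the voting scheme implemented above
 * (userland C11, not kernel code; the domain count is made up): each
 * domain casts one vote, and only the thread whose vote completes the
 * quorum runs the OOM action, after which it recalls its vote.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define NDOMAINS	4

static atomic_int oom_vote;

static void
vote(int domain)
{
	int old;

	old = atomic_fetch_add(&oom_vote, 1);
	if (old != NDOMAINS - 1)
		return;			/* quorum not yet complete */
	printf("domain %d completes the quorum: run the OOM killer\n",
	    domain);
	atomic_fetch_sub(&oom_vote, 1);	/* recall our vote */
}

int
main(void)
{
	/* In the kernel these calls race from separate daemon threads. */
	for (int d = 0; d < NDOMAINS; d++)
		vote(d);
	return (0);
}
#endif
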
16953949873fSKonstantin Belousov /*
16963949873fSKonstantin Belousov  * The OOM killer is the page daemon's action of last resort when
16973949873fSKonstantin Belousov  * memory allocation requests have been stalled for a prolonged period
16983949873fSKonstantin Belousov  * of time because it cannot reclaim memory.  This function computes
16993949873fSKonstantin Belousov  * the approximate number of physical pages that could be reclaimed if
17003949873fSKonstantin Belousov  * the specified address space is destroyed.
17013949873fSKonstantin Belousov  *
17023949873fSKonstantin Belousov  * Private, anonymous memory owned by the address space is the
17033949873fSKonstantin Belousov  * principal resource that we expect to recover after an OOM kill.
17043949873fSKonstantin Belousov  * Since the physical pages mapped by the address space's COW entries
17053949873fSKonstantin Belousov  * are typically shared pages, they are unlikely to be released and so
17063949873fSKonstantin Belousov  * they are not counted.
17073949873fSKonstantin Belousov  *
17083949873fSKonstantin Belousov  * To get to the point where the page daemon runs the OOM killer, its
17093949873fSKonstantin Belousov  * efforts to write-back vnode-backed pages may have stalled.  This
17103949873fSKonstantin Belousov  * could be caused by a memory allocation deadlock in the write path
17113949873fSKonstantin Belousov  * that might be resolved by an OOM kill.  Therefore, physical pages
17123949873fSKonstantin Belousov  * belonging to vnode-backed objects are counted, because they might
17133949873fSKonstantin Belousov  * be freed without being written out first if the address space holds
17143949873fSKonstantin Belousov  * the last reference to an unlinked vnode.
17153949873fSKonstantin Belousov  *
17163949873fSKonstantin Belousov  * Similarly, physical pages belonging to OBJT_PHYS objects are
17173949873fSKonstantin Belousov  * counted because the address space might hold the last reference to
17183949873fSKonstantin Belousov  * the object.
17193949873fSKonstantin Belousov  */
17203949873fSKonstantin Belousov static long
17213949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace)
17223949873fSKonstantin Belousov {
17233949873fSKonstantin Belousov 	vm_map_t map;
17243949873fSKonstantin Belousov 	vm_map_entry_t entry;
17253949873fSKonstantin Belousov 	vm_object_t obj;
17263949873fSKonstantin Belousov 	long res;
17273949873fSKonstantin Belousov 
17283949873fSKonstantin Belousov 	map = &vmspace->vm_map;
17293949873fSKonstantin Belousov 	KASSERT(!map->system_map, ("system map"));
17303949873fSKonstantin Belousov 	sx_assert(&map->lock, SA_LOCKED);
17313949873fSKonstantin Belousov 	res = 0;
17323949873fSKonstantin Belousov 	for (entry = map->header.next; entry != &map->header;
17333949873fSKonstantin Belousov 	    entry = entry->next) {
17343949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
17353949873fSKonstantin Belousov 			continue;
17363949873fSKonstantin Belousov 		obj = entry->object.vm_object;
17373949873fSKonstantin Belousov 		if (obj == NULL)
17383949873fSKonstantin Belousov 			continue;
17393949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
17403949873fSKonstantin Belousov 		    obj->ref_count != 1)
17413949873fSKonstantin Belousov 			continue;
17423949873fSKonstantin Belousov 		switch (obj->type) {
17433949873fSKonstantin Belousov 		case OBJT_DEFAULT:
17443949873fSKonstantin Belousov 		case OBJT_SWAP:
17453949873fSKonstantin Belousov 		case OBJT_PHYS:
17463949873fSKonstantin Belousov 		case OBJT_VNODE:
17473949873fSKonstantin Belousov 			res += obj->resident_page_count;
17483949873fSKonstantin Belousov 			break;
17493949873fSKonstantin Belousov 		}
17503949873fSKonstantin Belousov 	}
17513949873fSKonstantin Belousov 	return (res);
17523949873fSKonstantin Belousov }
17533949873fSKonstantin Belousov 
17542025d69bSKonstantin Belousov void
17552025d69bSKonstantin Belousov vm_pageout_oom(int shortage)
17562025d69bSKonstantin Belousov {
17572025d69bSKonstantin Belousov 	struct proc *p, *bigproc;
17582025d69bSKonstantin Belousov 	vm_offset_t size, bigsize;
17592025d69bSKonstantin Belousov 	struct thread *td;
17606bed074cSKonstantin Belousov 	struct vmspace *vm;
17613e78e983SAlan Cox 	bool breakout;
17622025d69bSKonstantin Belousov 
17632025d69bSKonstantin Belousov 	/*
17641c58e4e5SJohn Baldwin 	 * We keep the process bigproc locked once we find it to keep anyone
17651c58e4e5SJohn Baldwin 	 * from messing with it; however, there is a possibility of deadlock
176628323addSBryan Drewery 	 * if process B is bigproc and one of its child processes attempts
17671c58e4e5SJohn Baldwin 	 * to propagate a signal to B while we are waiting for the child's
17681c58e4e5SJohn Baldwin 	 * process lock while walking this list.  To avoid this, we don't
17691c58e4e5SJohn Baldwin 	 * block on the process lock but just skip any already-locked process.
17705663e6deSDavid Greenman 	 */
17715663e6deSDavid Greenman 	bigproc = NULL;
17725663e6deSDavid Greenman 	bigsize = 0;
17731005a129SJohn Baldwin 	sx_slock(&allproc_lock);
1774e602ba25SJulian Elischer 	FOREACH_PROC_IN_SYSTEM(p) {
177571943c3dSKonstantin Belousov 		PROC_LOCK(p);
177671943c3dSKonstantin Belousov 
17771c58e4e5SJohn Baldwin 		/*
17783f1c4c4fSKonstantin Belousov 		 * If this is a system, protected or killed process, skip it.
17795663e6deSDavid Greenman 		 */
178071943c3dSKonstantin Belousov 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
178171943c3dSKonstantin Belousov 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
178271943c3dSKonstantin Belousov 		    p->p_pid == 1 || P_KILLED(p) ||
178371943c3dSKonstantin Belousov 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
17848606d880SJohn Baldwin 			PROC_UNLOCK(p);
17855663e6deSDavid Greenman 			continue;
17865663e6deSDavid Greenman 		}
17875663e6deSDavid Greenman 		/*
1788dcbcd518SBruce Evans 		 * If the process is in a non-running type of state,
1789e602ba25SJulian Elischer 		 * don't touch it.  Check all of its threads individually.
17905663e6deSDavid Greenman 		 */
17913e78e983SAlan Cox 		breakout = false;
1792e602ba25SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
1793982d11f8SJeff Roberson 			thread_lock(td);
179471fad9fdSJulian Elischer 			if (!TD_ON_RUNQ(td) &&
179571fad9fdSJulian Elischer 			    !TD_IS_RUNNING(td) &&
1796f497cda2SEdward Tomasz Napierala 			    !TD_IS_SLEEPING(td) &&
1797b98acc0aSKonstantin Belousov 			    !TD_IS_SUSPENDED(td) &&
1798b98acc0aSKonstantin Belousov 			    !TD_IS_SWAPPED(td)) {
1799982d11f8SJeff Roberson 				thread_unlock(td);
18003e78e983SAlan Cox 				breakout = true;
1801e602ba25SJulian Elischer 				break;
1802e602ba25SJulian Elischer 			}
1803982d11f8SJeff Roberson 			thread_unlock(td);
1804e602ba25SJulian Elischer 		}
1805e602ba25SJulian Elischer 		if (breakout) {
18061c58e4e5SJohn Baldwin 			PROC_UNLOCK(p);
18075663e6deSDavid Greenman 			continue;
18085663e6deSDavid Greenman 		}
18095663e6deSDavid Greenman 		/*
18105663e6deSDavid Greenman 		 * get the process size
18115663e6deSDavid Greenman 		 */
18126bed074cSKonstantin Belousov 		vm = vmspace_acquire_ref(p);
18136bed074cSKonstantin Belousov 		if (vm == NULL) {
18146bed074cSKonstantin Belousov 			PROC_UNLOCK(p);
18156bed074cSKonstantin Belousov 			continue;
18166bed074cSKonstantin Belousov 		}
181795e2409aSKonstantin Belousov 		_PHOLD_LITE(p);
181872d97679SDavid Schultz 		PROC_UNLOCK(p);
181995e2409aSKonstantin Belousov 		sx_sunlock(&allproc_lock);
182095e2409aSKonstantin Belousov 		if (!vm_map_trylock_read(&vm->vm_map)) {
182171943c3dSKonstantin Belousov 			vmspace_free(vm);
182295e2409aSKonstantin Belousov 			sx_slock(&allproc_lock);
182395e2409aSKonstantin Belousov 			PRELE(p);
182472d97679SDavid Schultz 			continue;
182572d97679SDavid Schultz 		}
18267981aa24SKonstantin Belousov 		size = vmspace_swap_count(vm);
18272025d69bSKonstantin Belousov 		if (shortage == VM_OOM_MEM)
18283949873fSKonstantin Belousov 			size += vm_pageout_oom_pagecount(vm);
18293949873fSKonstantin Belousov 		vm_map_unlock_read(&vm->vm_map);
18306bed074cSKonstantin Belousov 		vmspace_free(vm);
183195e2409aSKonstantin Belousov 		sx_slock(&allproc_lock);
18323949873fSKonstantin Belousov 
18335663e6deSDavid Greenman 		/*
18343949873fSKonstantin Belousov 		 * If this process is bigger than the biggest one,
18355663e6deSDavid Greenman 		 * remember it.
18365663e6deSDavid Greenman 		 */
18375663e6deSDavid Greenman 		if (size > bigsize) {
18381c58e4e5SJohn Baldwin 			if (bigproc != NULL)
183971943c3dSKonstantin Belousov 				PRELE(bigproc);
18405663e6deSDavid Greenman 			bigproc = p;
18415663e6deSDavid Greenman 			bigsize = size;
184271943c3dSKonstantin Belousov 		} else {
184371943c3dSKonstantin Belousov 			PRELE(p);
184471943c3dSKonstantin Belousov 		}
18455663e6deSDavid Greenman 	}
18461005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
18475663e6deSDavid Greenman 	if (bigproc != NULL) {
18488311a2b8SWill Andrews 		if (vm_panic_on_oom != 0)
18498311a2b8SWill Andrews 			panic("out of swap space");
185071943c3dSKonstantin Belousov 		PROC_LOCK(bigproc);
1851729b1e51SDavid Greenman 		killproc(bigproc, "out of swap space");
1852fa885116SJulian Elischer 		sched_nice(bigproc, PRIO_MIN);
185371943c3dSKonstantin Belousov 		_PRELE(bigproc);
18541c58e4e5SJohn Baldwin 		PROC_UNLOCK(bigproc);
18555663e6deSDavid Greenman 	}
18565663e6deSDavid Greenman }
185726f9a767SRodney W. Grimes 
1858b50a4ea6SMark Johnston static bool
1859b50a4ea6SMark Johnston vm_pageout_lowmem(void)
186049a3710cSMark Johnston {
1861b50a4ea6SMark Johnston 	static int lowmem_ticks = 0;
1862b50a4ea6SMark Johnston 	int last;
186349a3710cSMark Johnston 
1864b50a4ea6SMark Johnston 	last = atomic_load_int(&lowmem_ticks);
1865b50a4ea6SMark Johnston 	while ((u_int)(ticks - last) / hz >= lowmem_period) {
1866b50a4ea6SMark Johnston 		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
1867b50a4ea6SMark Johnston 			continue;
1868b50a4ea6SMark Johnston 
186949a3710cSMark Johnston 		/*
187049a3710cSMark Johnston 		 * Decrease registered cache sizes.
187149a3710cSMark Johnston 		 */
187249a3710cSMark Johnston 		SDT_PROBE0(vm, , , vm__lowmem_scan);
187349a3710cSMark Johnston 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
187449a3710cSMark Johnston 
187549a3710cSMark Johnston 		/*
187649a3710cSMark Johnston 		 * We do this explicitly after the caches have been
187749a3710cSMark Johnston 		 * drained above.
187849a3710cSMark Johnston 		 */
187949a3710cSMark Johnston 		uma_reclaim();
1880b50a4ea6SMark Johnston 		return (true);
188149a3710cSMark Johnston 	}
1882b50a4ea6SMark Johnston 	return (false);
188349a3710cSMark Johnston }
188449a3710cSMark Johnston 
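/*
 * A standalone sketch of the lockless rate limiter above (userland C11,
 * not kernel code; fcmpset corresponds to compare_exchange below):
 * whichever thread advances the shared timestamp owns this period's work,
 * and racing threads observe the new timestamp and back off.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static atomic_long last_run;

static bool
run_once_per(long period, long now)
{
	long last = atomic_load(&last_run);

	while (now - last >= period) {
		/* On failure, 'last' is reloaded and the check repeats. */
		if (atomic_compare_exchange_weak(&last_run, &last, now))
			return (true);	/* we claimed this period */
	}
	return (false);
}
#endif
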
188549a3710cSMark Johnston static void
1886449c2e92SKonstantin Belousov vm_pageout_worker(void *arg)
1887449c2e92SKonstantin Belousov {
1888e2068d0bSJeff Roberson 	struct vm_domain *vmd;
1889b50a4ea6SMark Johnston 	u_int ofree;
189049a3710cSMark Johnston 	int addl_shortage, domain, shortage;
1891e57dd910SAlan Cox 	bool target_met;
1892449c2e92SKonstantin Belousov 
1893e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
1894e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
18955f8cd1c0SJeff Roberson 	shortage = 0;
1896e57dd910SAlan Cox 	target_met = true;
1897449c2e92SKonstantin Belousov 
1898449c2e92SKonstantin Belousov 	/*
1899949c9186SKonstantin Belousov 	 * XXXKIB It could be useful to bind pageout daemon threads to
1900949c9186SKonstantin Belousov 	 * the cores belonging to the domain from which vm_page_array
1901949c9186SKonstantin Belousov 	 * is allocated.
1902449c2e92SKonstantin Belousov 	 */
1903449c2e92SKonstantin Belousov 
1904e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1905e2068d0bSJeff Roberson 	vmd->vmd_last_active_scan = ticks;
1906449c2e92SKonstantin Belousov 
1907449c2e92SKonstantin Belousov 	/*
1908449c2e92SKonstantin Belousov 	 * The pageout daemon worker is never done, so loop forever.
1909449c2e92SKonstantin Belousov 	 */
1910449c2e92SKonstantin Belousov 	while (TRUE) {
191130fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
191249a3710cSMark Johnston 
191330fbfddaSJeff Roberson 		/*
191430fbfddaSJeff Roberson 		 * We need to clear wanted before we check the limits.  This
191530fbfddaSJeff Roberson 		 * prevents races with wakers who will check wanted after they
191630fbfddaSJeff Roberson 		 * reach the limit.
191730fbfddaSJeff Roberson 		 */
191830fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
191956ce0690SAlan Cox 
192056ce0690SAlan Cox 		/*
19215f8cd1c0SJeff Roberson 		 * Might the page daemon need to run again?
1922449c2e92SKonstantin Belousov 		 */
19235f8cd1c0SJeff Roberson 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
192456ce0690SAlan Cox 			/*
192549a3710cSMark Johnston 			 * Yes.  If the scan failed to produce enough free
192649a3710cSMark Johnston 			 * pages, sleep uninterruptibly for some time in the
192749a3710cSMark Johnston 			 * hope that the laundry thread will clean some pages.
192856ce0690SAlan Cox 			 */
192930fbfddaSJeff Roberson 			vm_domain_pageout_unlock(vmd);
193049a3710cSMark Johnston 			if (!target_met)
19316eebec83SMark Johnston 				pause("pwait", hz / VM_INACT_SCAN_RATE);
1932449c2e92SKonstantin Belousov 		} else {
1933449c2e92SKonstantin Belousov 			/*
19345f8cd1c0SJeff Roberson 			 * No, sleep until the next wakeup or until pages
19355f8cd1c0SJeff Roberson 			 * need to have their reference stats updated.
1936449c2e92SKonstantin Belousov 			 */
19372c0f13aaSKonstantin Belousov 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
193830fbfddaSJeff Roberson 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
19395f8cd1c0SJeff Roberson 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
194083c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdwakeups);
194156ce0690SAlan Cox 		}
1942be37ee79SMark Johnston 
194330fbfddaSJeff Roberson 		/* Prevent spurious wakeups by ensuring that wanted is set. */
194430fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
194530fbfddaSJeff Roberson 
194630fbfddaSJeff Roberson 		/*
194730fbfddaSJeff Roberson 		 * Use the controller to calculate how many pages to free in
1948b50a4ea6SMark Johnston 		 * this interval, and scan the inactive queue.  If the lowmem
1949b50a4ea6SMark Johnston 		 * handlers appear to have freed up some pages, subtract the
1950b50a4ea6SMark Johnston 		 * difference from the inactive queue scan target.
195130fbfddaSJeff Roberson 		 */
19525f8cd1c0SJeff Roberson 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
195349a3710cSMark Johnston 		if (shortage > 0) {
1954b50a4ea6SMark Johnston 			ofree = vmd->vmd_free_count;
1955b50a4ea6SMark Johnston 			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
1956b50a4ea6SMark Johnston 				shortage -= min(vmd->vmd_free_count - ofree,
1957b50a4ea6SMark Johnston 				    (u_int)shortage);
195849a3710cSMark Johnston 			target_met = vm_pageout_scan_inactive(vmd, shortage,
1959be37ee79SMark Johnston 			    &addl_shortage);
196049a3710cSMark Johnston 		} else
196149a3710cSMark Johnston 			addl_shortage = 0;
196256ce0690SAlan Cox 
1963be37ee79SMark Johnston 		/*
1964be37ee79SMark Johnston 		 * Scan the active queue.  A positive value for shortage
1965be37ee79SMark Johnston 		 * indicates that we must aggressively deactivate pages to avoid
1966be37ee79SMark Johnston 		 * a shortfall.
1967be37ee79SMark Johnston 		 */
19687bb4634eSMark Johnston 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
1969be37ee79SMark Johnston 		vm_pageout_scan_active(vmd, shortage);
1970449c2e92SKonstantin Belousov 	}
1971449c2e92SKonstantin Belousov }
1972449c2e92SKonstantin Belousov 
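/*
 * The scan target above comes from pidctrl_daemon(), a PID controller.
 * The sketch below is only the bare proportional term of that idea, for
 * illustration (not the kernel's pidctrl KPI): reclaim harder the further
 * the free count falls below the target.  The real controller also has
 * integral and derivative terms and bounds its output.
 */
#if 0
static int
scan_target(int free_target, int free_count)
{
	int error = free_target - free_count;

	return (error > 0 ? error : 0);	/* pages to reclaim this interval */
}
#endif
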
1973df8bae1dSRodney W. Grimes /*
19744d19f4adSSteven Hartland  *	vm_pageout_init initializes basic pageout daemon settings.
1975df8bae1dSRodney W. Grimes  */
19762b14f991SJulian Elischer static void
1977e2068d0bSJeff Roberson vm_pageout_init_domain(int domain)
1978df8bae1dSRodney W. Grimes {
1979e2068d0bSJeff Roberson 	struct vm_domain *vmd;
19805f8cd1c0SJeff Roberson 	struct sysctl_oid *oid;
1981e2068d0bSJeff Roberson 
1982e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
1983e2068d0bSJeff Roberson 	vmd->vmd_interrupt_free_min = 2;
1984f6b04d2bSDavid Greenman 
198545ae1d91SAlan Cox 	/*
198645ae1d91SAlan Cox 	 * v_free_reserved needs to include enough for the largest
198745ae1d91SAlan Cox 	 * swap pager structures plus enough for any pv_entry structs
198845ae1d91SAlan Cox 	 * when paging.
198945ae1d91SAlan Cox 	 */
1990e2068d0bSJeff Roberson 	if (vmd->vmd_page_count > 1024)
1991e2068d0bSJeff Roberson 		vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200;
19922feb50bfSAttilio Rao 	else
1993e2068d0bSJeff Roberson 		vmd->vmd_free_min = 4;
1994e2068d0bSJeff Roberson 	vmd->vmd_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1995e2068d0bSJeff Roberson 	    vmd->vmd_interrupt_free_min;
1996e2068d0bSJeff Roberson 	vmd->vmd_free_reserved = vm_pageout_page_count +
1997e2068d0bSJeff Roberson 	    vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768);
1998e2068d0bSJeff Roberson 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
1999e2068d0bSJeff Roberson 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2000e2068d0bSJeff Roberson 	vmd->vmd_free_min += vmd->vmd_free_reserved;
2001e2068d0bSJeff Roberson 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
2002e2068d0bSJeff Roberson 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2003e2068d0bSJeff Roberson 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2004e2068d0bSJeff Roberson 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
2005df8bae1dSRodney W. Grimes 
2006d9e23210SJeff Roberson 	/*
20075f8cd1c0SJeff Roberson 	 * Set the default wakeup threshold to be 10% below the paging
20085f8cd1c0SJeff Roberson 	 * target.  This keeps the steady state out of shortfall.
2009d9e23210SJeff Roberson 	 */
20105f8cd1c0SJeff Roberson 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2011e2068d0bSJeff Roberson 
2012e2068d0bSJeff Roberson 	/*
2013e2068d0bSJeff Roberson 	 * Target amount of memory to move out of the laundry queue during a
2014e2068d0bSJeff Roberson 	 * background laundering.  This is proportional to the amount of system
2015e2068d0bSJeff Roberson 	 * memory.
2016e2068d0bSJeff Roberson 	 */
2017e2068d0bSJeff Roberson 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2018e2068d0bSJeff Roberson 	    vmd->vmd_free_min) / 10;
20195f8cd1c0SJeff Roberson 
20205f8cd1c0SJeff Roberson 	/* Initialize the pageout daemon pid controller. */
20215f8cd1c0SJeff Roberson 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
20225f8cd1c0SJeff Roberson 	    vmd->vmd_free_target, PIDCTRL_BOUND,
20235f8cd1c0SJeff Roberson 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
20245f8cd1c0SJeff Roberson 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
20255f8cd1c0SJeff Roberson 	    "pidctrl", CTLFLAG_RD, NULL, "");
20265f8cd1c0SJeff Roberson 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2027e2068d0bSJeff Roberson }
2028e2068d0bSJeff Roberson 
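/*
 * Worked example of the sizing above for a hypothetical 1 GB domain
 * (262144 4 KB pages), assuming MAXBSIZE is 64 KB and
 * vm_pageout_page_count is 32:
 *
 *   vmd_free_min                  = 4 + (262144 - 1024) / 200 = 1309
 *   vmd_pageout_free_min          = 2 * 65536 / 4096 + 2      = 34
 *   vmd_free_reserved             = 32 + 34 + 262144 / 768    = 407
 *   vmd_free_target               = 4 * 1309 + 407            = 5643
 *   vmd_free_min (final)          = 1309 + 407                = 1716
 *   vmd_free_severe (final)       = 1309 / 2 + 407            = 1061
 *   vmd_inactive_target           = 3 * 5643 / 2              = 8464
 *   vmd_pageout_wakeup_thresh     = 5643 / 10 * 9             = 5076
 *   vmd_background_launder_target = (5643 - 1716) / 10        = 392
 *
 * (The inactive target is further capped at one third of the domain's
 * free page count.)  That is, roughly 22 MB free is the steady-state
 * target, and about 6.7 MB is the minimum before allocations stall.
 */
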
2029e2068d0bSJeff Roberson static void
2030e2068d0bSJeff Roberson vm_pageout_init(void)
2031e2068d0bSJeff Roberson {
2032e2068d0bSJeff Roberson 	u_int freecount;
2033e2068d0bSJeff Roberson 	int i;
2034e2068d0bSJeff Roberson 
2035e2068d0bSJeff Roberson 	/*
2036e2068d0bSJeff Roberson 	 * Initialize some paging parameters.
2037e2068d0bSJeff Roberson 	 */
2038e2068d0bSJeff Roberson 	if (vm_cnt.v_page_count < 2000)
2039e2068d0bSJeff Roberson 		vm_pageout_page_count = 8;
2040e2068d0bSJeff Roberson 
2041e2068d0bSJeff Roberson 	freecount = 0;
2042e2068d0bSJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
2043e2068d0bSJeff Roberson 		struct vm_domain *vmd;
2044e2068d0bSJeff Roberson 
2045e2068d0bSJeff Roberson 		vm_pageout_init_domain(i);
2046e2068d0bSJeff Roberson 		vmd = VM_DOMAIN(i);
2047e2068d0bSJeff Roberson 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2048e2068d0bSJeff Roberson 		vm_cnt.v_free_target += vmd->vmd_free_target;
2049e2068d0bSJeff Roberson 		vm_cnt.v_free_min += vmd->vmd_free_min;
2050e2068d0bSJeff Roberson 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2051e2068d0bSJeff Roberson 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2052e2068d0bSJeff Roberson 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2053e2068d0bSJeff Roberson 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
2054e2068d0bSJeff Roberson 		freecount += vmd->vmd_free_count;
2055e2068d0bSJeff Roberson 	}
2056d9e23210SJeff Roberson 
2057d9e23210SJeff Roberson 	/*
2058d9e23210SJeff Roberson 	 * Set interval in seconds for active scan.  We want to visit each
2059c9612b2dSJeff Roberson 	 * page at least once every ten minutes.  This is to prevent worst
2060c9612b2dSJeff Roberson 	 * case paging behaviors with stale active LRU.
2061d9e23210SJeff Roberson 	 */
2062d9e23210SJeff Roberson 	if (vm_pageout_update_period == 0)
2063c9612b2dSJeff Roberson 		vm_pageout_update_period = 600;
2064d9e23210SJeff Roberson 
2065df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
2066e2068d0bSJeff Roberson 		vm_page_max_wired = freecount / 3;
20674d19f4adSSteven Hartland }
20684d19f4adSSteven Hartland 
20694d19f4adSSteven Hartland /*
20704d19f4adSSteven Hartland  *     vm_pageout is the high level pageout daemon.
20714d19f4adSSteven Hartland  */
20724d19f4adSSteven Hartland static void
20734d19f4adSSteven Hartland vm_pageout(void)
20744d19f4adSSteven Hartland {
207544ec2b63SKonstantin Belousov 	int error;
207644ec2b63SKonstantin Belousov 	int i;
2077df8bae1dSRodney W. Grimes 
207824a1cce3SDavid Greenman 	swap_pager_swap_init();
20793b8cf4acSMark Johnston 	snprintf(curthread->td_name, sizeof(curthread->td_name), "dom0");
2080ebcddc72SAlan Cox 	error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
2081ebcddc72SAlan Cox 	    0, 0, "laundry: dom0");
2082ebcddc72SAlan Cox 	if (error != 0)
2083ebcddc72SAlan Cox 		panic("starting laundry for domain 0, error %d", error);
2084449c2e92SKonstantin Belousov 	for (i = 1; i < vm_ndomains; i++) {
2085449c2e92SKonstantin Belousov 		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
2086449c2e92SKonstantin Belousov 		    curproc, NULL, 0, 0, "dom%d", i);
2087449c2e92SKonstantin Belousov 		if (error != 0) {
2088449c2e92SKonstantin Belousov 			panic("starting pageout for domain %d, error %d\n",
2089449c2e92SKonstantin Belousov 			    i, error);
2090dc2efb27SJohn Dyson 		}
2091e2068d0bSJeff Roberson 		error = kthread_add(vm_pageout_laundry_worker,
2092e2068d0bSJeff Roberson 		    (void *)(uintptr_t)i, curproc, NULL, 0, 0,
2093e2068d0bSJeff Roberson 		    "laundry: dom%d", i);
2094e2068d0bSJeff Roberson 		if (error != 0)
2095e2068d0bSJeff Roberson 			panic("starting laundry for domain %d, error %d",
2096e2068d0bSJeff Roberson 			    i, error);
2097f919ebdeSDavid Greenman 	}
209844ec2b63SKonstantin Belousov 	error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
209944ec2b63SKonstantin Belousov 	    0, 0, "uma");
210044ec2b63SKonstantin Belousov 	if (error != 0)
210144ec2b63SKonstantin Belousov 		panic("starting uma_reclaim helper, error %d\n", error);
2102d395270dSDimitry Andric 	vm_pageout_worker((void *)(uintptr_t)0);
2103df8bae1dSRodney W. Grimes }
210426f9a767SRodney W. Grimes 
21056b4b77adSAlan Cox /*
2106280d15cdSMark Johnston  * Perform an advisory wakeup of the page daemon.
21076b4b77adSAlan Cox  */
2108e0c5a895SJohn Dyson void
2109e2068d0bSJeff Roberson pagedaemon_wakeup(int domain)
2110e0c5a895SJohn Dyson {
2111e2068d0bSJeff Roberson 	struct vm_domain *vmd;
2112a1c0a785SAlan Cox 
2113e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
211430fbfddaSJeff Roberson 	vm_domain_pageout_assert_unlocked(vmd);
211530fbfddaSJeff Roberson 	if (curproc == pageproc)
211630fbfddaSJeff Roberson 		return;
2117280d15cdSMark Johnston 
211830fbfddaSJeff Roberson 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
211930fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
212030fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2121e2068d0bSJeff Roberson 		wakeup(&vmd->vmd_pageout_wanted);
212230fbfddaSJeff Roberson 		vm_domain_pageout_unlock(vmd);
2123e0c5a895SJohn Dyson 	}
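
/*
 * A standalone sketch of the wakeup handshake between this function and
 * vm_pageout_worker() (userland code, not kernel code; a condition
 * variable stands in for the pageout lock and sleep channel).  The daemon
 * clears "wanted" before re-checking the limits, and a waker whose
 * increment observes 0 takes the lock and posts the wakeup, so a shortage
 * signaled after the daemon's check cannot be lost.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int wanted;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

static void
waker(void)
{
	/* Only the first waker after the daemon slept pays for the lock. */
	if (atomic_fetch_add(&wanted, 1) == 0) {
		pthread_mutex_lock(&lock);
		atomic_store(&wanted, 1);
		pthread_cond_signal(&cv);
		pthread_mutex_unlock(&lock);
	}
}

static void
daemon_iteration(bool shortage)
{
	pthread_mutex_lock(&lock);
	atomic_store(&wanted, 0);	/* clear before checking the limits */
	if (!shortage)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	atomic_store(&wanted, 1);	/* suppress spurious wakeups */
}
#endif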
2124e0c5a895SJohn Dyson }
2125