xref: /freebsd/sys/vm/vm_pageout.c (revision 30fbfdda6cb85c3d66ce6f10a30c0e87a510f1ab)
160727d8bSWarner Losh /*-
2796df753SPedro F. Giffuni  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3df57947fSPedro F. Giffuni  *
426f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
726f9a767SRodney W. Grimes  * All rights reserved.
826f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
926f9a767SRodney W. Grimes  * All rights reserved.
108dbca793STor Egge  * Copyright (c) 2005 Yahoo! Technologies Norway AS
118dbca793STor Egge  * All rights reserved.
12df8bae1dSRodney W. Grimes  *
13df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
14df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
15df8bae1dSRodney W. Grimes  *
16df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
17df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
18df8bae1dSRodney W. Grimes  * are met:
19df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
20df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
21df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
22df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
23df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
24df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
255929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
26df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
27df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
28df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
29df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
30df8bae1dSRodney W. Grimes  *    without specific prior written permission.
31df8bae1dSRodney W. Grimes  *
32df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
43df8bae1dSRodney W. Grimes  *
443c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  *
47df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48df8bae1dSRodney W. Grimes  * All rights reserved.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
53df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
54df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
55df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
56df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61df8bae1dSRodney W. Grimes  *
62df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
63df8bae1dSRodney W. Grimes  *
64df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65df8bae1dSRodney W. Grimes  *  School of Computer Science
66df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
67df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
68df8bae1dSRodney W. Grimes  *
69df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
70df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
71df8bae1dSRodney W. Grimes  */
72df8bae1dSRodney W. Grimes 
73df8bae1dSRodney W. Grimes /*
74df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
75df8bae1dSRodney W. Grimes  */
76df8bae1dSRodney W. Grimes 
77874651b1SDavid E. O'Brien #include <sys/cdefs.h>
78874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
79874651b1SDavid E. O'Brien 
80faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
817672ca05SMark Johnston 
82df8bae1dSRodney W. Grimes #include <sys/param.h>
8326f9a767SRodney W. Grimes #include <sys/systm.h>
84b5e8ce9fSBruce Evans #include <sys/kernel.h>
85855a310fSJeff Roberson #include <sys/eventhandler.h>
86fb919e4dSMark Murray #include <sys/lock.h>
87fb919e4dSMark Murray #include <sys/mutex.h>
8826f9a767SRodney W. Grimes #include <sys/proc.h>
899c8b8baaSPeter Wemm #include <sys/kthread.h>
900384fff8SJason Evans #include <sys/ktr.h>
9197824da3SAlan Cox #include <sys/mount.h>
92099e7e95SEdward Tomasz Napierala #include <sys/racct.h>
9326f9a767SRodney W. Grimes #include <sys/resourcevar.h>
94b43179fbSJeff Roberson #include <sys/sched.h>
9514a0d74eSSteven Hartland #include <sys/sdt.h>
96d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
97449c2e92SKonstantin Belousov #include <sys/smp.h>
98a6bf3a9eSRyan Stone #include <sys/time.h>
99f6b04d2bSDavid Greenman #include <sys/vnode.h>
100efeaf95aSDavid Greenman #include <sys/vmmeter.h>
10189f6b863SAttilio Rao #include <sys/rwlock.h>
1021005a129SJohn Baldwin #include <sys/sx.h>
10338efa82bSJohn Dyson #include <sys/sysctl.h>
104df8bae1dSRodney W. Grimes 
105df8bae1dSRodney W. Grimes #include <vm/vm.h>
106efeaf95aSDavid Greenman #include <vm/vm_param.h>
107efeaf95aSDavid Greenman #include <vm/vm_object.h>
108df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
109efeaf95aSDavid Greenman #include <vm/vm_map.h>
110df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
11124a1cce3SDavid Greenman #include <vm/vm_pager.h>
112449c2e92SKonstantin Belousov #include <vm/vm_phys.h>
113e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
11405f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
115efeaf95aSDavid Greenman #include <vm/vm_extern.h>
116670d17b5SJeff Roberson #include <vm/uma.h>
117df8bae1dSRodney W. Grimes 
1182b14f991SJulian Elischer /*
1192b14f991SJulian Elischer  * System initialization
1202b14f991SJulian Elischer  */
1212b14f991SJulian Elischer 
1222b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
12311caded3SAlfred Perlstein static void vm_pageout(void);
1244d19f4adSSteven Hartland static void vm_pageout_init(void);
125ebcddc72SAlan Cox static int vm_pageout_clean(vm_page_t m, int *numpagedout);
12634d8b7eaSJeff Roberson static int vm_pageout_cluster(vm_page_t m);
1275f8cd1c0SJeff Roberson static bool vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage);
12876386c7eSKonstantin Belousov static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
12976386c7eSKonstantin Belousov     int starting_page_shortage);
13045ae1d91SAlan Cox 
1314d19f4adSSteven Hartland SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
1324d19f4adSSteven Hartland     NULL);
1334d19f4adSSteven Hartland 
1342b14f991SJulian Elischer struct proc *pageproc;
1352b14f991SJulian Elischer 
1362b14f991SJulian Elischer static struct kproc_desc page_kp = {
1372b14f991SJulian Elischer 	"pagedaemon",
1382b14f991SJulian Elischer 	vm_pageout,
1392b14f991SJulian Elischer 	&pageproc
1402b14f991SJulian Elischer };
1414d19f4adSSteven Hartland SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
142237fdd78SRobert Watson     &page_kp);
1432b14f991SJulian Elischer 
14414a0d74eSSteven Hartland SDT_PROVIDER_DEFINE(vm);
14514a0d74eSSteven Hartland SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
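/*
 * Illustrative only: SDT probe names map double underscores to dashes, so
 * the probe defined above should be observable from userland with, e.g.,
 * "dtrace -n 'vm:::vm-lowmem-scan { trace(timestamp); }'".
 */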
14614a0d74eSSteven Hartland 
147ebcddc72SAlan Cox /* Pagedaemon activity rates, in subdivisions of one second. */
148ebcddc72SAlan Cox #define	VM_LAUNDER_RATE		10
1495f8cd1c0SJeff Roberson #define	VM_INACT_SCAN_RATE	10
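/*
 * For example, VM_LAUNDER_RATE == 10 means that the laundry thread paces
 * itself with pause("laundp", hz / VM_LAUNDER_RATE), i.e., it wakes up
 * roughly ten times per second while it has laundering work to do.
 */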
1502b14f991SJulian Elischer 
15176386c7eSKonstantin Belousov static int vm_pageout_oom_seq = 12;
152ebcddc72SAlan Cox 
153d9e23210SJeff Roberson static int vm_pageout_update_period;
1544a365329SAndrey Zonov static int disable_swap_pageouts;
155c9612b2dSJeff Roberson static int lowmem_period = 10;
156a6bf3a9eSRyan Stone static time_t lowmem_uptime;
157b1fd102eSMark Johnston static int swapdev_enabled;
15870111b90SJohn Dyson 
1598311a2b8SWill Andrews static int vm_panic_on_oom = 0;
1608311a2b8SWill Andrews 
1618311a2b8SWill Andrews SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
1628311a2b8SWill Andrews 	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
1638311a2b8SWill Andrews 	"panic on out of memory instead of killing the largest process");
1648311a2b8SWill Andrews 
165d9e23210SJeff Roberson SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
166e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
167d9e23210SJeff Roberson 	"Maximum active LRU update period");
16853636869SAndrey Zonov 
169e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
170c9612b2dSJeff Roberson 	"Low memory callback period");
171c9612b2dSJeff Roberson 
172ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
173e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
17412ac6a1dSJohn Dyson 
17523b59018SMatthew Dillon static int pageout_lock_miss;
17623b59018SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
17723b59018SMatthew Dillon 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
17823b59018SMatthew Dillon 
17976386c7eSKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
180e0b2fc3aSMark Johnston 	CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
18176386c7eSKonstantin Belousov 	"back-to-back calls to the OOM detector required to start OOM");
18276386c7eSKonstantin Belousov 
183ebcddc72SAlan Cox static int act_scan_laundry_weight = 3;
184e0b2fc3aSMark Johnston SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
185ebcddc72SAlan Cox     &act_scan_laundry_weight, 0,
186ebcddc72SAlan Cox     "weight given to clean vs. dirty pages in active queue scans");
187ebcddc72SAlan Cox 
188ebcddc72SAlan Cox static u_int vm_background_launder_rate = 4096;
189e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
190ebcddc72SAlan Cox     &vm_background_launder_rate, 0,
191ebcddc72SAlan Cox     "background laundering rate, in kilobytes per second");
192ebcddc72SAlan Cox 
193ebcddc72SAlan Cox static u_int vm_background_launder_max = 20 * 1024;
194e0b2fc3aSMark Johnston SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
195ebcddc72SAlan Cox     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
196ebcddc72SAlan Cox 
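/*
 * A note on tuning (illustrative values, not recommendations): the
 * CTLFLAG_RWTUN knobs above can be set as loader tunables at boot or
 * changed at runtime with sysctl(8), e.g.:
 *
 *	sysctl vm.background_launder_rate=8192
 *	sysctl vm.pageout_oom_seq=24
 */
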
197e2241590SAlan Cox int vm_pageout_page_count = 32;
198df8bae1dSRodney W. Grimes 
199c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
2005dfc2870SAlan Cox SYSCTL_INT(_vm, OID_AUTO, max_wired,
2015dfc2870SAlan Cox 	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
202df8bae1dSRodney W. Grimes 
203ebcddc72SAlan Cox static u_int isqrt(u_int num);
20485eeca35SAlan Cox static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
205ebcddc72SAlan Cox static int vm_pageout_launder(struct vm_domain *vmd, int launder,
206ebcddc72SAlan Cox     bool in_shortfall);
207ebcddc72SAlan Cox static void vm_pageout_laundry_worker(void *arg);
20885eeca35SAlan Cox static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
209cd41fc12SDavid Greenman 
210a8229fa3SAlan Cox /*
211a8229fa3SAlan Cox  * Initialize a dummy page for marking the caller's place in the specified
212a8229fa3SAlan Cox  * paging queue.  In principle, this function only needs to set the flag
213f0edf3f8SAlan Cox  * PG_MARKER.  Nonetheless, it write-busies the marker and initializes the
214c7aebda8SAttilio Rao  * hold count to one as safety precautions.
215a8229fa3SAlan Cox  */
2168c616246SKonstantin Belousov static void
2178c616246SKonstantin Belousov vm_pageout_init_marker(vm_page_t marker, u_short queue)
2188c616246SKonstantin Belousov {
2198c616246SKonstantin Belousov 
2208c616246SKonstantin Belousov 	bzero(marker, sizeof(*marker));
221a8229fa3SAlan Cox 	marker->flags = PG_MARKER;
222c7aebda8SAttilio Rao 	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
2238c616246SKonstantin Belousov 	marker->queue = queue;
224a8229fa3SAlan Cox 	marker->hold_count = 1;
2258c616246SKonstantin Belousov }
2268c616246SKonstantin Belousov 
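/*
 * A minimal sketch of the marker protocol (illustrative only; the real
 * users are vm_pageout_fallback_object_lock() and vm_pageout_page_lock()
 * below, and "m" and "pq" stand for a queued page and its locked page
 * queue).  The marker preserves the scan's place in the queue while the
 * page queue lock is dropped, since "m" itself may be requeued meanwhile:
 */
#if 0
	struct vm_page marker;
	vm_page_t next;

	vm_pageout_init_marker(&marker, m->queue);
	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
	vm_pagequeue_unlock(pq);
	/* ... acquire a lock that must be taken before the queue lock ... */
	vm_pagequeue_lock(pq);
	/* Unlike "m", the marker is guaranteed to still be in the queue. */
	next = TAILQ_NEXT(&marker, plinks.q);
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
#endif
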
22726f9a767SRodney W. Grimes /*
2288dbca793STor Egge  * vm_pageout_fallback_object_lock:
2298dbca793STor Egge  *
23089f6b863SAttilio Rao  * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
2318dbca793STor Egge  * known to have failed, and the page must belong to a paging queue
23244be0a8eSMark Johnston  * (including the laundry queues).  To avoid lock order violation, unlock the page queue
2338dbca793STor Egge  * while locking the vm object.  Use marker page to detect page queue
2348dbca793STor Egge  * changes and maintain notion of next page on page queue.  Return
2358dbca793STor Egge  * TRUE if no changes were detected, FALSE otherwise.  vm object is
2368dbca793STor Egge  * locked on return.
2378dbca793STor Egge  *
2388dbca793STor Egge  * This function depends on both the lock portion of struct vm_object
2398dbca793STor Egge  * and normal struct vm_page being type stable.
2408dbca793STor Egge  */
24185eeca35SAlan Cox static boolean_t
2428dbca793STor Egge vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
2438dbca793STor Egge {
2448dbca793STor Egge 	struct vm_page marker;
2458d220203SAlan Cox 	struct vm_pagequeue *pq;
2468dbca793STor Egge 	boolean_t unchanged;
2478dbca793STor Egge 	u_short queue;
2488dbca793STor Egge 	vm_object_t object;
2498dbca793STor Egge 
2508dbca793STor Egge 	queue = m->queue;
2518c616246SKonstantin Belousov 	vm_pageout_init_marker(&marker, queue);
252449c2e92SKonstantin Belousov 	pq = vm_page_pagequeue(m);
2538dbca793STor Egge 	object = m->object;
2548dbca793STor Egge 
255c325e866SKonstantin Belousov 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
2568d220203SAlan Cox 	vm_pagequeue_unlock(pq);
2572965a453SKip Macy 	vm_page_unlock(m);
25889f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object);
2592965a453SKip Macy 	vm_page_lock(m);
2608d220203SAlan Cox 	vm_pagequeue_lock(pq);
2618dbca793STor Egge 
26269b8585eSKonstantin Belousov 	/*
26369b8585eSKonstantin Belousov 	 * The page's object might have changed, and/or the page might
26469b8585eSKonstantin Belousov 	 * have moved from its original position in the queue.  If the
26569b8585eSKonstantin Belousov 	 * page's object has changed, then the caller should abandon
26669b8585eSKonstantin Belousov 	 * processing the page because the wrong object lock was
26769b8585eSKonstantin Belousov 	 * acquired.  Use the marker's plinks.q, not the page's, to
26869b8585eSKonstantin Belousov 	 * determine if the page has been moved.  The state of the
26969b8585eSKonstantin Belousov 	 * page's plinks.q can be indeterminate; whereas, the marker's
27069b8585eSKonstantin Belousov 	 * plinks.q must be valid.
27169b8585eSKonstantin Belousov 	 */
272c325e866SKonstantin Belousov 	*next = TAILQ_NEXT(&marker, plinks.q);
27369b8585eSKonstantin Belousov 	unchanged = m->object == object &&
27469b8585eSKonstantin Belousov 	    m == TAILQ_PREV(&marker, pglist, plinks.q);
27569b8585eSKonstantin Belousov 	KASSERT(!unchanged || m->queue == queue,
27669b8585eSKonstantin Belousov 	    ("page %p queue %d %d", m, queue, m->queue));
277c325e866SKonstantin Belousov 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
2788dbca793STor Egge 	return (unchanged);
2798dbca793STor Egge }
2808dbca793STor Egge 
2818dbca793STor Egge /*
2828c616246SKonstantin Belousov  * Lock the page while holding the page queue lock.  Use marker page
2838c616246SKonstantin Belousov  * to detect page queue changes and maintain notion of next page on
2848c616246SKonstantin Belousov  * page queue.  Return TRUE if no changes were detected, FALSE
2858c616246SKonstantin Belousov  * otherwise.  The page is locked on return. The page queue lock might
2868c616246SKonstantin Belousov  * be dropped and reacquired.
2878c616246SKonstantin Belousov  *
2888c616246SKonstantin Belousov  * This function depends on normal struct vm_page being type stable.
2898c616246SKonstantin Belousov  */
29085eeca35SAlan Cox static boolean_t
2918c616246SKonstantin Belousov vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
2928c616246SKonstantin Belousov {
2938c616246SKonstantin Belousov 	struct vm_page marker;
2948d220203SAlan Cox 	struct vm_pagequeue *pq;
2958c616246SKonstantin Belousov 	boolean_t unchanged;
2968c616246SKonstantin Belousov 	u_short queue;
2978c616246SKonstantin Belousov 
2988c616246SKonstantin Belousov 	vm_page_lock_assert(m, MA_NOTOWNED);
2998c616246SKonstantin Belousov 	if (vm_page_trylock(m))
3008c616246SKonstantin Belousov 		return (TRUE);
3018c616246SKonstantin Belousov 
3028c616246SKonstantin Belousov 	queue = m->queue;
3038c616246SKonstantin Belousov 	vm_pageout_init_marker(&marker, queue);
304449c2e92SKonstantin Belousov 	pq = vm_page_pagequeue(m);
3058c616246SKonstantin Belousov 
306c325e866SKonstantin Belousov 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
3078d220203SAlan Cox 	vm_pagequeue_unlock(pq);
3088c616246SKonstantin Belousov 	vm_page_lock(m);
3098d220203SAlan Cox 	vm_pagequeue_lock(pq);
3108c616246SKonstantin Belousov 
3118c616246SKonstantin Belousov 	/* Page queue might have changed. */
312c325e866SKonstantin Belousov 	*next = TAILQ_NEXT(&marker, plinks.q);
31369b8585eSKonstantin Belousov 	unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q);
31469b8585eSKonstantin Belousov 	KASSERT(!unchanged || m->queue == queue,
31569b8585eSKonstantin Belousov 	    ("page %p queue %d %d", m, queue, m->queue));
316c325e866SKonstantin Belousov 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
3178c616246SKonstantin Belousov 	return (unchanged);
3188c616246SKonstantin Belousov }
3198c616246SKonstantin Belousov 
3208c616246SKonstantin Belousov /*
321248fe642SAlan Cox  * Scan for pages at adjacent offsets within the given page's object that are
322248fe642SAlan Cox  * eligible for laundering, form a cluster of these pages and the given page,
323248fe642SAlan Cox  * and launder that cluster.
32426f9a767SRodney W. Grimes  */
3253af76890SPoul-Henning Kamp static int
32634d8b7eaSJeff Roberson vm_pageout_cluster(vm_page_t m)
32724a1cce3SDavid Greenman {
32854d92145SMatthew Dillon 	vm_object_t object;
329248fe642SAlan Cox 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
330248fe642SAlan Cox 	vm_pindex_t pindex;
331248fe642SAlan Cox 	int ib, is, page_base, pageout_count;
33226f9a767SRodney W. Grimes 
333248fe642SAlan Cox 	vm_page_assert_locked(m);
33417f6a17bSAlan Cox 	object = m->object;
33589f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
336248fe642SAlan Cox 	pindex = m->pindex;
3370cddd8f0SMatthew Dillon 
338c7aebda8SAttilio Rao 	vm_page_assert_unbusied(m);
3391d3a1bcfSMark Johnston 	KASSERT(!vm_page_held(m), ("page %p is held", m));
340aed9aaaaSMark Johnston 
341aed9aaaaSMark Johnston 	pmap_remove_write(m);
34217f6a17bSAlan Cox 	vm_page_unlock(m);
3430d94caffSDavid Greenman 
34491b4f427SAlan Cox 	mc[vm_pageout_page_count] = pb = ps = m;
34526f9a767SRodney W. Grimes 	pageout_count = 1;
346f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
34790ecac61SMatthew Dillon 	ib = 1;
34890ecac61SMatthew Dillon 	is = 1;
34990ecac61SMatthew Dillon 
35024a1cce3SDavid Greenman 	/*
351248fe642SAlan Cox 	 * We can cluster only if the page is dirty, not busy, not held, and
352ebcddc72SAlan Cox 	 * in the laundry queue.
35390ecac61SMatthew Dillon 	 *
35490ecac61SMatthew Dillon 	 * During heavy mmap/modification loads the pageout
35590ecac61SMatthew Dillon 	 * daemon can really fragment the underlying file
356248fe642SAlan Cox 	 * due to flushing pages out of order and not trying to
357248fe642SAlan Cox 	 * align the clusters (which leaves sporadic out-of-order
35890ecac61SMatthew Dillon 	 * holes).  To solve this problem we do the reverse scan
35990ecac61SMatthew Dillon 	 * first and attempt to align our cluster, then do a
36090ecac61SMatthew Dillon 	 * forward scan if room remains.
36124a1cce3SDavid Greenman 	 */
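	/*
	 * Worked example (assuming vm_pageout_page_count == 32): for a page
	 * at pindex 70, the reverse scan gathers pages 69, 68, ..., 64 and
	 * stops there, because after gathering pindex 64 we have ib == 7 and
	 * (70 - (7 - 1)) % 32 == 0, i.e., pindex 64 is an alignment
	 * boundary.  The forward scan then fills any remaining cluster
	 * slots from pindex 71 upward.
	 */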
36290ecac61SMatthew Dillon more:
363248fe642SAlan Cox 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
36490ecac61SMatthew Dillon 		if (ib > pindex) {
36590ecac61SMatthew Dillon 			ib = 0;
36690ecac61SMatthew Dillon 			break;
367f6b04d2bSDavid Greenman 		}
368c7aebda8SAttilio Rao 		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
36990ecac61SMatthew Dillon 			ib = 0;
37090ecac61SMatthew Dillon 			break;
371f6b04d2bSDavid Greenman 		}
37224a1cce3SDavid Greenman 		vm_page_test_dirty(p);
373eb5d3969SAlan Cox 		if (p->dirty == 0) {
374eb5d3969SAlan Cox 			ib = 0;
375eb5d3969SAlan Cox 			break;
376eb5d3969SAlan Cox 		}
377eb5d3969SAlan Cox 		vm_page_lock(p);
3781d3a1bcfSMark Johnston 		if (!vm_page_in_laundry(p) || vm_page_held(p)) {
3792965a453SKip Macy 			vm_page_unlock(p);
38090ecac61SMatthew Dillon 			ib = 0;
38124a1cce3SDavid Greenman 			break;
382f6b04d2bSDavid Greenman 		}
383aed9aaaaSMark Johnston 		pmap_remove_write(p);
3842965a453SKip Macy 		vm_page_unlock(p);
38591b4f427SAlan Cox 		mc[--page_base] = pb = p;
38690ecac61SMatthew Dillon 		++pageout_count;
38790ecac61SMatthew Dillon 		++ib;
388248fe642SAlan Cox 
38924a1cce3SDavid Greenman 		/*
390248fe642SAlan Cox 		 * We are at an alignment boundary.  Stop here, and switch
391248fe642SAlan Cox 		 * directions.  Do not clear ib.
39224a1cce3SDavid Greenman 		 */
39390ecac61SMatthew Dillon 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
39490ecac61SMatthew Dillon 			break;
39524a1cce3SDavid Greenman 	}
39690ecac61SMatthew Dillon 	while (pageout_count < vm_pageout_page_count &&
39790ecac61SMatthew Dillon 	    pindex + is < object->size) {
398c7aebda8SAttilio Rao 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
39990ecac61SMatthew Dillon 			break;
40024a1cce3SDavid Greenman 		vm_page_test_dirty(p);
401eb5d3969SAlan Cox 		if (p->dirty == 0)
402eb5d3969SAlan Cox 			break;
403eb5d3969SAlan Cox 		vm_page_lock(p);
4041d3a1bcfSMark Johnston 		if (!vm_page_in_laundry(p) || vm_page_held(p)) {
4052965a453SKip Macy 			vm_page_unlock(p);
40624a1cce3SDavid Greenman 			break;
40724a1cce3SDavid Greenman 		}
408aed9aaaaSMark Johnston 		pmap_remove_write(p);
4092965a453SKip Macy 		vm_page_unlock(p);
41091b4f427SAlan Cox 		mc[page_base + pageout_count] = ps = p;
41190ecac61SMatthew Dillon 		++pageout_count;
41290ecac61SMatthew Dillon 		++is;
41324a1cce3SDavid Greenman 	}
41490ecac61SMatthew Dillon 
41590ecac61SMatthew Dillon 	/*
41690ecac61SMatthew Dillon 	 * If we exhausted our forward scan, continue with the reverse scan
417248fe642SAlan Cox 	 * when possible, even past an alignment boundary.  This catches
418248fe642SAlan Cox 	 * boundary conditions.
41990ecac61SMatthew Dillon 	 */
420248fe642SAlan Cox 	if (ib != 0 && pageout_count < vm_pageout_page_count)
42190ecac61SMatthew Dillon 		goto more;
422f6b04d2bSDavid Greenman 
42399e6e193SMark Johnston 	return (vm_pageout_flush(&mc[page_base], pageout_count,
42499e6e193SMark Johnston 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
425aef922f5SJohn Dyson }
426aef922f5SJohn Dyson 
4271c7c3c6aSMatthew Dillon /*
4281c7c3c6aSMatthew Dillon  * vm_pageout_flush() - launder the given pages
4291c7c3c6aSMatthew Dillon  *
4301c7c3c6aSMatthew Dillon  *	The given pages are laundered.  Note that we setup for the start of
4311c7c3c6aSMatthew Dillon  *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
4321c7c3c6aSMatthew Dillon  *	reference count all in here rather than in the parent.  If we want
4331c7c3c6aSMatthew Dillon  *	the parent to do more sophisticated things we may have to change
4341c7c3c6aSMatthew Dillon  *	the ordering.
4351e8a675cSKonstantin Belousov  *
4361e8a675cSKonstantin Belousov  *	Returned runlen is the count of pages between mreq and first
4371e8a675cSKonstantin Belousov  *	page after mreq with status VM_PAGER_AGAIN.
438126d6082SKonstantin Belousov  *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
438126d6082SKonstantin Belousov  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
439126d6082SKonstantin Belousov  *	VM_PAGER_FAIL for any page in the runlen set.
441aef922f5SJohn Dyson int
442126d6082SKonstantin Belousov vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
443126d6082SKonstantin Belousov     boolean_t *eio)
444aef922f5SJohn Dyson {
4452e3b314dSAlan Cox 	vm_object_t object = mc[0]->object;
446aef922f5SJohn Dyson 	int pageout_status[count];
44795461b45SJohn Dyson 	int numpagedout = 0;
4481e8a675cSKonstantin Belousov 	int i, runlen;
449aef922f5SJohn Dyson 
45089f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
4517bec141bSKip Macy 
4521c7c3c6aSMatthew Dillon 	/*
453aed9aaaaSMark Johnston 	 * Initiate I/O.  Mark the pages busy and verify that they're valid
454aed9aaaaSMark Johnston 	 * and read-only.
4551c7c3c6aSMatthew Dillon 	 *
4561c7c3c6aSMatthew Dillon 	 * We do not have to fixup the clean/dirty bits here... we can
4571c7c3c6aSMatthew Dillon 	 * allow the pager to do it after the I/O completes.
45802fa91d3SMatthew Dillon 	 *
45902fa91d3SMatthew Dillon 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
46002fa91d3SMatthew Dillon 	 * edge case with file fragments.
4611c7c3c6aSMatthew Dillon 	 */
4628f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
4637a935082SAlan Cox 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
4647a935082SAlan Cox 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
4657a935082SAlan Cox 			mc[i], i, count));
466aed9aaaaSMark Johnston 		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
467aed9aaaaSMark Johnston 		    ("vm_pageout_flush: writeable page %p", mc[i]));
468c7aebda8SAttilio Rao 		vm_page_sbusy(mc[i]);
4692965a453SKip Macy 	}
470d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
471aef922f5SJohn Dyson 
472d076fbeaSAlan Cox 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
47326f9a767SRodney W. Grimes 
4741e8a675cSKonstantin Belousov 	runlen = count - mreq;
475126d6082SKonstantin Belousov 	if (eio != NULL)
476126d6082SKonstantin Belousov 		*eio = FALSE;
477aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
478aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
47924a1cce3SDavid Greenman 
4804cd45723SAlan Cox 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
4816031c68dSAlan Cox 		    !pmap_page_is_write_mapped(mt),
4829ea8d1a6SAlan Cox 		    ("vm_pageout_flush: page %p is not write protected", mt));
48326f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
48426f9a767SRodney W. Grimes 		case VM_PAGER_OK:
485ebcddc72SAlan Cox 			vm_page_lock(mt);
486ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
487ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
488ebcddc72SAlan Cox 			vm_page_unlock(mt);
489ebcddc72SAlan Cox 			/* FALLTHROUGH */
49026f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
49195461b45SJohn Dyson 			numpagedout++;
49226f9a767SRodney W. Grimes 			break;
49326f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
49426f9a767SRodney W. Grimes 			/*
495ebcddc72SAlan Cox 			 * The page is outside the object's range.  We pretend
496ebcddc72SAlan Cox 			 * that the page out worked and clean the page, so the
497ebcddc72SAlan Cox 			 * changes will be lost if the page is reclaimed by
498ebcddc72SAlan Cox 			 * the page daemon.
49926f9a767SRodney W. Grimes 			 */
50090ecac61SMatthew Dillon 			vm_page_undirty(mt);
501ebcddc72SAlan Cox 			vm_page_lock(mt);
502ebcddc72SAlan Cox 			if (vm_page_in_laundry(mt))
503ebcddc72SAlan Cox 				vm_page_deactivate_noreuse(mt);
504ebcddc72SAlan Cox 			vm_page_unlock(mt);
50526f9a767SRodney W. Grimes 			break;
50626f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
50726f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
50826f9a767SRodney W. Grimes 			/*
509b1fd102eSMark Johnston 			 * If the page couldn't be paged out to swap because the
510b1fd102eSMark Johnston 			 * pager wasn't able to find space, place the page in
511b1fd102eSMark Johnston 			 * the PQ_UNSWAPPABLE holding queue.  This is an
512b1fd102eSMark Johnston 			 * optimization that prevents the page daemon from
513b1fd102eSMark Johnston 			 * wasting CPU cycles on pages that cannot be reclaimed
514b1fd102eSMark Johnston 			 * becase no swap device is configured.
515b1fd102eSMark Johnston 			 * because no swap device is configured.
516b1fd102eSMark Johnston 			 * Otherwise, reactivate the page so that it doesn't
517b1fd102eSMark Johnston 			 * clog the laundry and inactive queues.  (We will try
518b1fd102eSMark Johnston 			 * paging it out again later.)
51926f9a767SRodney W. Grimes 			 */
5203c4a2440SAlan Cox 			vm_page_lock(mt);
521b1fd102eSMark Johnston 			if (object->type == OBJT_SWAP &&
522b1fd102eSMark Johnston 			    pageout_status[i] == VM_PAGER_FAIL) {
523b1fd102eSMark Johnston 				vm_page_unswappable(mt);
524b1fd102eSMark Johnston 				numpagedout++;
525b1fd102eSMark Johnston 			} else
52624a1cce3SDavid Greenman 				vm_page_activate(mt);
5273c4a2440SAlan Cox 			vm_page_unlock(mt);
528126d6082SKonstantin Belousov 			if (eio != NULL && i >= mreq && i - mreq < runlen)
529126d6082SKonstantin Belousov 				*eio = TRUE;
53026f9a767SRodney W. Grimes 			break;
53126f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
5321e8a675cSKonstantin Belousov 			if (i >= mreq && i - mreq < runlen)
5331e8a675cSKonstantin Belousov 				runlen = i - mreq;
53426f9a767SRodney W. Grimes 			break;
53526f9a767SRodney W. Grimes 		}
53626f9a767SRodney W. Grimes 
53726f9a767SRodney W. Grimes 		/*
5380d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
5390d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
5400d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
5410d94caffSDavid Greenman 		 * collapse.
54226f9a767SRodney W. Grimes 		 */
54326f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
544f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
545c7aebda8SAttilio Rao 			vm_page_sunbusy(mt);
5463c4a2440SAlan Cox 		}
5473c4a2440SAlan Cox 	}
5481e8a675cSKonstantin Belousov 	if (prunlen != NULL)
5491e8a675cSKonstantin Belousov 		*prunlen = runlen;
5503c4a2440SAlan Cox 	return (numpagedout);
55126f9a767SRodney W. Grimes }
55226f9a767SRodney W. Grimes 
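/*
 * A hedged usage sketch for vm_pageout_flush() (illustrative only; the
 * names "mc", "count", and "mreq" are placeholders).  The caller holds the
 * object write lock and passes a run of dirty, fully valid pages that it
 * has already write-protected with pmap_remove_write():
 */
#if 0
	boolean_t eio;
	int numpagedout, runlen;

	numpagedout = vm_pageout_flush(mc, count, VM_PAGER_PUT_NOREUSE,
	    mreq, &runlen, &eio);
	/*
	 * "runlen" now counts the pages from mreq up to, but not including,
	 * the first page with status VM_PAGER_AGAIN; "eio" reports whether
	 * a hard pager error occurred within that run.
	 */
#endif
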
553b1fd102eSMark Johnston static void
554b1fd102eSMark Johnston vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
555b1fd102eSMark Johnston {
556b1fd102eSMark Johnston 
557b1fd102eSMark Johnston 	atomic_store_rel_int(&swapdev_enabled, 1);
558b1fd102eSMark Johnston }
559b1fd102eSMark Johnston 
560b1fd102eSMark Johnston static void
561b1fd102eSMark Johnston vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
562b1fd102eSMark Johnston {
563b1fd102eSMark Johnston 
564b1fd102eSMark Johnston 	if (swap_pager_nswapdev() == 1)
565b1fd102eSMark Johnston 		atomic_store_rel_int(&swapdev_enabled, 0);
566b1fd102eSMark Johnston }
567b1fd102eSMark Johnston 
5681c7c3c6aSMatthew Dillon /*
56934d8b7eaSJeff Roberson  * Attempt to acquire all of the necessary locks to launder a page and
57034d8b7eaSJeff Roberson  * then call through the clustering layer to PUTPAGES.  Wait a short
57134d8b7eaSJeff Roberson  * time for a vnode lock.
57234d8b7eaSJeff Roberson  *
57334d8b7eaSJeff Roberson  * Requires the page and object lock on entry, releases both before return.
57434d8b7eaSJeff Roberson  * Returns 0 on success and an errno otherwise.
57534d8b7eaSJeff Roberson  */
57634d8b7eaSJeff Roberson static int
577ebcddc72SAlan Cox vm_pageout_clean(vm_page_t m, int *numpagedout)
57834d8b7eaSJeff Roberson {
57934d8b7eaSJeff Roberson 	struct vnode *vp;
58034d8b7eaSJeff Roberson 	struct mount *mp;
58134d8b7eaSJeff Roberson 	vm_object_t object;
58234d8b7eaSJeff Roberson 	vm_pindex_t pindex;
58334d8b7eaSJeff Roberson 	int error, lockmode;
58434d8b7eaSJeff Roberson 
58534d8b7eaSJeff Roberson 	vm_page_assert_locked(m);
58634d8b7eaSJeff Roberson 	object = m->object;
58734d8b7eaSJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
58834d8b7eaSJeff Roberson 	error = 0;
58934d8b7eaSJeff Roberson 	vp = NULL;
59034d8b7eaSJeff Roberson 	mp = NULL;
59134d8b7eaSJeff Roberson 
59234d8b7eaSJeff Roberson 	/*
59334d8b7eaSJeff Roberson 	 * The object is already known NOT to be dead.  It
59434d8b7eaSJeff Roberson 	 * is possible for the vget() to block the whole
59534d8b7eaSJeff Roberson 	 * pageout daemon, but the new low-memory handling
59634d8b7eaSJeff Roberson 	 * code should prevent it.
59734d8b7eaSJeff Roberson 	 *
59834d8b7eaSJeff Roberson 	 * We can't wait forever for the vnode lock, we might
59934d8b7eaSJeff Roberson 	 * deadlock due to a vn_read() getting stuck in
60034d8b7eaSJeff Roberson 	 * vm_wait while holding this vnode.  We skip the
60134d8b7eaSJeff Roberson 	 * vnode if we can't get it in a reasonable amount
60234d8b7eaSJeff Roberson 	 * of time.
60334d8b7eaSJeff Roberson 	 */
60434d8b7eaSJeff Roberson 	if (object->type == OBJT_VNODE) {
60534d8b7eaSJeff Roberson 		vm_page_unlock(m);
60634d8b7eaSJeff Roberson 		vp = object->handle;
60734d8b7eaSJeff Roberson 		if (vp->v_type == VREG &&
60834d8b7eaSJeff Roberson 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
60934d8b7eaSJeff Roberson 			mp = NULL;
61034d8b7eaSJeff Roberson 			error = EDEADLK;
61134d8b7eaSJeff Roberson 			goto unlock_all;
61234d8b7eaSJeff Roberson 		}
61334d8b7eaSJeff Roberson 		KASSERT(mp != NULL,
61434d8b7eaSJeff Roberson 		    ("vp %p with NULL v_mount", vp));
61534d8b7eaSJeff Roberson 		vm_object_reference_locked(object);
61634d8b7eaSJeff Roberson 		pindex = m->pindex;
61734d8b7eaSJeff Roberson 		VM_OBJECT_WUNLOCK(object);
61834d8b7eaSJeff Roberson 		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
61934d8b7eaSJeff Roberson 		    LK_SHARED : LK_EXCLUSIVE;
62034d8b7eaSJeff Roberson 		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
62134d8b7eaSJeff Roberson 			vp = NULL;
62234d8b7eaSJeff Roberson 			error = EDEADLK;
62334d8b7eaSJeff Roberson 			goto unlock_mp;
62434d8b7eaSJeff Roberson 		}
62534d8b7eaSJeff Roberson 		VM_OBJECT_WLOCK(object);
62657cd81a3SMark Johnston 
62757cd81a3SMark Johnston 		/*
62857cd81a3SMark Johnston 		 * Ensure that the object and vnode were not disassociated
62957cd81a3SMark Johnston 		 * while locks were dropped.
63057cd81a3SMark Johnston 		 */
63157cd81a3SMark Johnston 		if (vp->v_object != object) {
63257cd81a3SMark Johnston 			error = ENOENT;
63357cd81a3SMark Johnston 			goto unlock_all;
63457cd81a3SMark Johnston 		}
63534d8b7eaSJeff Roberson 		vm_page_lock(m);
63657cd81a3SMark Johnston 
63734d8b7eaSJeff Roberson 		/*
63834d8b7eaSJeff Roberson 		 * While the object and page were unlocked, the page
63934d8b7eaSJeff Roberson 		 * may have been:
64034d8b7eaSJeff Roberson 		 * (1) moved to a different queue,
64134d8b7eaSJeff Roberson 		 * (2) reallocated to a different object,
64234d8b7eaSJeff Roberson 		 * (3) reallocated to a different offset, or
64334d8b7eaSJeff Roberson 		 * (4) cleaned.
64434d8b7eaSJeff Roberson 		 */
645ebcddc72SAlan Cox 		if (!vm_page_in_laundry(m) || m->object != object ||
64634d8b7eaSJeff Roberson 		    m->pindex != pindex || m->dirty == 0) {
64734d8b7eaSJeff Roberson 			vm_page_unlock(m);
64834d8b7eaSJeff Roberson 			error = ENXIO;
64934d8b7eaSJeff Roberson 			goto unlock_all;
65034d8b7eaSJeff Roberson 		}
65134d8b7eaSJeff Roberson 
65234d8b7eaSJeff Roberson 		/*
6531d3a1bcfSMark Johnston 		 * The page may have been busied or referenced while the object
65434d8b7eaSJeff Roberson 		 * and page locks were released.
65534d8b7eaSJeff Roberson 		 */
6561d3a1bcfSMark Johnston 		if (vm_page_busied(m) || vm_page_held(m)) {
65734d8b7eaSJeff Roberson 			vm_page_unlock(m);
65834d8b7eaSJeff Roberson 			error = EBUSY;
65934d8b7eaSJeff Roberson 			goto unlock_all;
66034d8b7eaSJeff Roberson 		}
66134d8b7eaSJeff Roberson 	}
66234d8b7eaSJeff Roberson 
66334d8b7eaSJeff Roberson 	/*
66434d8b7eaSJeff Roberson 	 * If a page is dirty, then it is either being washed
66534d8b7eaSJeff Roberson 	 * (but not yet cleaned) or it is still in the
66634d8b7eaSJeff Roberson 	 * laundry.  If it is still in the laundry, then we
66734d8b7eaSJeff Roberson 	 * start the cleaning operation.
66834d8b7eaSJeff Roberson 	 */
669ebcddc72SAlan Cox 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
67034d8b7eaSJeff Roberson 		error = EIO;
67134d8b7eaSJeff Roberson 
67234d8b7eaSJeff Roberson unlock_all:
67334d8b7eaSJeff Roberson 	VM_OBJECT_WUNLOCK(object);
67434d8b7eaSJeff Roberson 
67534d8b7eaSJeff Roberson unlock_mp:
67634d8b7eaSJeff Roberson 	vm_page_lock_assert(m, MA_NOTOWNED);
67734d8b7eaSJeff Roberson 	if (mp != NULL) {
67834d8b7eaSJeff Roberson 		if (vp != NULL)
67934d8b7eaSJeff Roberson 			vput(vp);
68034d8b7eaSJeff Roberson 		vm_object_deallocate(object);
68134d8b7eaSJeff Roberson 		vn_finished_write(mp);
68234d8b7eaSJeff Roberson 	}
68334d8b7eaSJeff Roberson 
68434d8b7eaSJeff Roberson 	return (error);
68534d8b7eaSJeff Roberson }
68634d8b7eaSJeff Roberson 
68734d8b7eaSJeff Roberson /*
688ebcddc72SAlan Cox  * Attempt to launder the specified number of pages.
689ebcddc72SAlan Cox  *
690ebcddc72SAlan Cox  * Returns the number of pages successfully laundered.
691ebcddc72SAlan Cox  */
692ebcddc72SAlan Cox static int
693ebcddc72SAlan Cox vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
694ebcddc72SAlan Cox {
695ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
696ebcddc72SAlan Cox 	vm_object_t object;
697ebcddc72SAlan Cox 	vm_page_t m, next;
698ebcddc72SAlan Cox 	int act_delta, error, maxscan, numpagedout, starting_target;
699ebcddc72SAlan Cox 	int vnodes_skipped;
700ebcddc72SAlan Cox 	bool pageout_ok, queue_locked;
701ebcddc72SAlan Cox 
702ebcddc72SAlan Cox 	starting_target = launder;
703ebcddc72SAlan Cox 	vnodes_skipped = 0;
704ebcddc72SAlan Cox 
705ebcddc72SAlan Cox 	/*
706b1fd102eSMark Johnston 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
707ebcddc72SAlan Cox 	 * once the target number of dirty pages have been laundered, or once
708ebcddc72SAlan Cox 	 * we've reached the end of the queue.  A single iteration of this loop
709ebcddc72SAlan Cox 	 * may cause more than one page to be laundered because of clustering.
710ebcddc72SAlan Cox 	 *
711ebcddc72SAlan Cox 	 * maxscan ensures that we don't re-examine requeued pages.  Any
712ebcddc72SAlan Cox 	 * additional pages written as part of a cluster are subtracted from
713ebcddc72SAlan Cox 	 * maxscan since they must be taken from the laundry queue.
714b1fd102eSMark Johnston 	 *
715b1fd102eSMark Johnston 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
716b1fd102eSMark Johnston 	 * swap devices are configured.
717ebcddc72SAlan Cox 	 */
718b1fd102eSMark Johnston 	if (atomic_load_acq_int(&swapdev_enabled))
719b1fd102eSMark Johnston 		pq = &vmd->vmd_pagequeues[PQ_UNSWAPPABLE];
720b1fd102eSMark Johnston 	else
721ebcddc72SAlan Cox 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
722ebcddc72SAlan Cox 
723b1fd102eSMark Johnston scan:
724ebcddc72SAlan Cox 	vm_pagequeue_lock(pq);
725b1fd102eSMark Johnston 	maxscan = pq->pq_cnt;
726ebcddc72SAlan Cox 	queue_locked = true;
727ebcddc72SAlan Cox 	for (m = TAILQ_FIRST(&pq->pq_pl);
728ebcddc72SAlan Cox 	    m != NULL && maxscan-- > 0 && launder > 0;
729ebcddc72SAlan Cox 	    m = next) {
730ebcddc72SAlan Cox 		vm_pagequeue_assert_locked(pq);
731ebcddc72SAlan Cox 		KASSERT(queue_locked, ("unlocked laundry queue"));
732ebcddc72SAlan Cox 		KASSERT(vm_page_in_laundry(m),
733ebcddc72SAlan Cox 		    ("page %p has an inconsistent queue", m));
734ebcddc72SAlan Cox 		next = TAILQ_NEXT(m, plinks.q);
735ebcddc72SAlan Cox 		if ((m->flags & PG_MARKER) != 0)
736ebcddc72SAlan Cox 			continue;
737ebcddc72SAlan Cox 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
738ebcddc72SAlan Cox 		    ("PG_FICTITIOUS page %p cannot be in laundry queue", m));
739ebcddc72SAlan Cox 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
740ebcddc72SAlan Cox 		    ("VPO_UNMANAGED page %p cannot be in laundry queue", m));
741ebcddc72SAlan Cox 		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
742ebcddc72SAlan Cox 			vm_page_unlock(m);
743ebcddc72SAlan Cox 			continue;
744ebcddc72SAlan Cox 		}
7451d3a1bcfSMark Johnston 		if (m->wire_count != 0) {
7461d3a1bcfSMark Johnston 			vm_page_dequeue_locked(m);
7471d3a1bcfSMark Johnston 			vm_page_unlock(m);
7481d3a1bcfSMark Johnston 			continue;
7491d3a1bcfSMark Johnston 		}
750ebcddc72SAlan Cox 		object = m->object;
751ebcddc72SAlan Cox 		if ((!VM_OBJECT_TRYWLOCK(object) &&
752ebcddc72SAlan Cox 		    (!vm_pageout_fallback_object_lock(m, &next) ||
7531d3a1bcfSMark Johnston 		    vm_page_held(m))) || vm_page_busied(m)) {
754ebcddc72SAlan Cox 			VM_OBJECT_WUNLOCK(object);
7551d3a1bcfSMark Johnston 			if (m->wire_count != 0 && vm_page_pagequeue(m) == pq)
7561d3a1bcfSMark Johnston 				vm_page_dequeue_locked(m);
757ebcddc72SAlan Cox 			vm_page_unlock(m);
758ebcddc72SAlan Cox 			continue;
759ebcddc72SAlan Cox 		}
760ebcddc72SAlan Cox 
761ebcddc72SAlan Cox 		/*
762ebcddc72SAlan Cox 		 * Unlock the laundry queue, invalidating the 'next' pointer.
763ebcddc72SAlan Cox 		 * Use a marker to remember our place in the laundry queue.
764ebcddc72SAlan Cox 		 */
765ebcddc72SAlan Cox 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_laundry_marker,
766ebcddc72SAlan Cox 		    plinks.q);
767ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
768ebcddc72SAlan Cox 		queue_locked = false;
769ebcddc72SAlan Cox 
770ebcddc72SAlan Cox 		/*
771ebcddc72SAlan Cox 		 * Invalid pages can be easily freed.  They cannot be
772ebcddc72SAlan Cox 		 * mapped; vm_page_free() asserts this.
773ebcddc72SAlan Cox 		 */
774ebcddc72SAlan Cox 		if (m->valid == 0)
775ebcddc72SAlan Cox 			goto free_page;
776ebcddc72SAlan Cox 
777ebcddc72SAlan Cox 		/*
778ebcddc72SAlan Cox 		 * If the page has been referenced and the object is not dead,
779ebcddc72SAlan Cox 		 * reactivate or requeue the page depending on whether the
780ebcddc72SAlan Cox 		 * object is mapped.
781ebcddc72SAlan Cox 		 */
782ebcddc72SAlan Cox 		if ((m->aflags & PGA_REFERENCED) != 0) {
783ebcddc72SAlan Cox 			vm_page_aflag_clear(m, PGA_REFERENCED);
784ebcddc72SAlan Cox 			act_delta = 1;
785ebcddc72SAlan Cox 		} else
786ebcddc72SAlan Cox 			act_delta = 0;
787ebcddc72SAlan Cox 		if (object->ref_count != 0)
788ebcddc72SAlan Cox 			act_delta += pmap_ts_referenced(m);
789ebcddc72SAlan Cox 		else {
790ebcddc72SAlan Cox 			KASSERT(!pmap_page_is_mapped(m),
791ebcddc72SAlan Cox 			    ("page %p is mapped", m));
792ebcddc72SAlan Cox 		}
793ebcddc72SAlan Cox 		if (act_delta != 0) {
794ebcddc72SAlan Cox 			if (object->ref_count != 0) {
79583c9dea1SGleb Smirnoff 				VM_CNT_INC(v_reactivated);
796ebcddc72SAlan Cox 				vm_page_activate(m);
797ebcddc72SAlan Cox 
798ebcddc72SAlan Cox 				/*
799ebcddc72SAlan Cox 				 * Increase the activation count if the page
800ebcddc72SAlan Cox 				 * was referenced while in the laundry queue.
801ebcddc72SAlan Cox 				 * This makes it less likely that the page will
802ebcddc72SAlan Cox 				 * be returned prematurely to the inactive
803ebcddc72SAlan Cox 				 * queue.
804ebcddc72SAlan Cox 				 */
805ebcddc72SAlan Cox 				m->act_count += act_delta + ACT_ADVANCE;
806ebcddc72SAlan Cox 
807ebcddc72SAlan Cox 				/*
808ebcddc72SAlan Cox 				 * If this was a background laundering, count
809ebcddc72SAlan Cox 				 * activated pages towards our target.  The
810ebcddc72SAlan Cox 				 * purpose of background laundering is to ensure
811ebcddc72SAlan Cox 				 * that pages are eventually cycled through the
812ebcddc72SAlan Cox 				 * laundry queue, and an activation is a valid
813ebcddc72SAlan Cox 				 * way out.
814ebcddc72SAlan Cox 				 */
815ebcddc72SAlan Cox 				if (!in_shortfall)
816ebcddc72SAlan Cox 					launder--;
817ebcddc72SAlan Cox 				goto drop_page;
818ebcddc72SAlan Cox 			} else if ((object->flags & OBJ_DEAD) == 0)
819ebcddc72SAlan Cox 				goto requeue_page;
820ebcddc72SAlan Cox 		}
821ebcddc72SAlan Cox 
822ebcddc72SAlan Cox 		/*
823ebcddc72SAlan Cox 		 * If the page appears to be clean at the machine-independent
824ebcddc72SAlan Cox 		 * layer, then remove all of its mappings from the pmap in
825ebcddc72SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
826ebcddc72SAlan Cox 		 * mappings allow write access, then the page may still be
827ebcddc72SAlan Cox 		 * modified until the last of those mappings are removed.
828ebcddc72SAlan Cox 		 */
829ebcddc72SAlan Cox 		if (object->ref_count != 0) {
830ebcddc72SAlan Cox 			vm_page_test_dirty(m);
831ebcddc72SAlan Cox 			if (m->dirty == 0)
832ebcddc72SAlan Cox 				pmap_remove_all(m);
833ebcddc72SAlan Cox 		}
834ebcddc72SAlan Cox 
835ebcddc72SAlan Cox 		/*
836ebcddc72SAlan Cox 		 * Clean pages are freed, and dirty pages are paged out unless
837ebcddc72SAlan Cox 		 * they belong to a dead object.  Requeueing dirty pages from
838ebcddc72SAlan Cox 		 * dead objects is pointless, as they are being paged out and
839ebcddc72SAlan Cox 		 * freed by the thread that destroyed the object.
840ebcddc72SAlan Cox 		 */
841ebcddc72SAlan Cox 		if (m->dirty == 0) {
842ebcddc72SAlan Cox free_page:
843ebcddc72SAlan Cox 			vm_page_free(m);
84483c9dea1SGleb Smirnoff 			VM_CNT_INC(v_dfree);
845ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0) {
846ebcddc72SAlan Cox 			if (object->type != OBJT_SWAP &&
847ebcddc72SAlan Cox 			    object->type != OBJT_DEFAULT)
848ebcddc72SAlan Cox 				pageout_ok = true;
849ebcddc72SAlan Cox 			else if (disable_swap_pageouts)
850ebcddc72SAlan Cox 				pageout_ok = false;
851ebcddc72SAlan Cox 			else
852ebcddc72SAlan Cox 				pageout_ok = true;
853ebcddc72SAlan Cox 			if (!pageout_ok) {
854ebcddc72SAlan Cox requeue_page:
855ebcddc72SAlan Cox 				vm_pagequeue_lock(pq);
856ebcddc72SAlan Cox 				queue_locked = true;
857ebcddc72SAlan Cox 				vm_page_requeue_locked(m);
858ebcddc72SAlan Cox 				goto drop_page;
859ebcddc72SAlan Cox 			}
860ebcddc72SAlan Cox 
861ebcddc72SAlan Cox 			/*
862ebcddc72SAlan Cox 			 * Form a cluster with adjacent, dirty pages from the
863ebcddc72SAlan Cox 			 * same object, and page out that entire cluster.
864ebcddc72SAlan Cox 			 *
865ebcddc72SAlan Cox 			 * The adjacent, dirty pages must also be in the
866ebcddc72SAlan Cox 			 * laundry.  However, their mappings are not checked
867ebcddc72SAlan Cox 			 * for new references.  Consequently, a recently
868ebcddc72SAlan Cox 			 * referenced page may be paged out.  However, that
869ebcddc72SAlan Cox 			 * page will not be prematurely reclaimed.  After page
870ebcddc72SAlan Cox 			 * out, the page will be placed in the inactive queue,
871ebcddc72SAlan Cox 			 * where any new references will be detected and the
872ebcddc72SAlan Cox 			 * page reactivated.
873ebcddc72SAlan Cox 			 */
874ebcddc72SAlan Cox 			error = vm_pageout_clean(m, &numpagedout);
875ebcddc72SAlan Cox 			if (error == 0) {
876ebcddc72SAlan Cox 				launder -= numpagedout;
877ebcddc72SAlan Cox 				maxscan -= numpagedout - 1;
878ebcddc72SAlan Cox 			} else if (error == EDEADLK) {
879ebcddc72SAlan Cox 				pageout_lock_miss++;
880ebcddc72SAlan Cox 				vnodes_skipped++;
881ebcddc72SAlan Cox 			}
882ebcddc72SAlan Cox 			goto relock_queue;
883ebcddc72SAlan Cox 		}
884ebcddc72SAlan Cox drop_page:
885ebcddc72SAlan Cox 		vm_page_unlock(m);
886ebcddc72SAlan Cox 		VM_OBJECT_WUNLOCK(object);
887ebcddc72SAlan Cox relock_queue:
888ebcddc72SAlan Cox 		if (!queue_locked) {
889ebcddc72SAlan Cox 			vm_pagequeue_lock(pq);
890ebcddc72SAlan Cox 			queue_locked = true;
891ebcddc72SAlan Cox 		}
892ebcddc72SAlan Cox 		next = TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q);
893ebcddc72SAlan Cox 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q);
894ebcddc72SAlan Cox 	}
895ebcddc72SAlan Cox 	vm_pagequeue_unlock(pq);
896ebcddc72SAlan Cox 
897b1fd102eSMark Johnston 	if (launder > 0 && pq == &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]) {
898b1fd102eSMark Johnston 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
899b1fd102eSMark Johnston 		goto scan;
900b1fd102eSMark Johnston 	}
901b1fd102eSMark Johnston 
902ebcddc72SAlan Cox 	/*
903ebcddc72SAlan Cox 	 * Wake up the sync daemon if we skipped a vnode in a writeable object
904ebcddc72SAlan Cox 	 * and we didn't launder enough pages.
905ebcddc72SAlan Cox 	 */
906ebcddc72SAlan Cox 	if (vnodes_skipped > 0 && launder > 0)
907ebcddc72SAlan Cox 		(void)speedup_syncer();
908ebcddc72SAlan Cox 
909ebcddc72SAlan Cox 	return (starting_target - launder);
910ebcddc72SAlan Cox }
911ebcddc72SAlan Cox 
912ebcddc72SAlan Cox /*
913ebcddc72SAlan Cox  * Compute the integer square root.
914ebcddc72SAlan Cox  */
915ebcddc72SAlan Cox static u_int
916ebcddc72SAlan Cox isqrt(u_int num)
917ebcddc72SAlan Cox {
918ebcddc72SAlan Cox 	u_int bit, root, tmp;
919ebcddc72SAlan Cox 
920ebcddc72SAlan Cox 	bit = 1u << ((NBBY * sizeof(u_int)) - 2);
921ebcddc72SAlan Cox 	while (bit > num)
922ebcddc72SAlan Cox 		bit >>= 2;
923ebcddc72SAlan Cox 	root = 0;
924ebcddc72SAlan Cox 	while (bit != 0) {
925ebcddc72SAlan Cox 		tmp = root + bit;
926ebcddc72SAlan Cox 		root >>= 1;
927ebcddc72SAlan Cox 		if (num >= tmp) {
928ebcddc72SAlan Cox 			num -= tmp;
929ebcddc72SAlan Cox 			root += bit;
930ebcddc72SAlan Cox 		}
931ebcddc72SAlan Cox 		bit >>= 2;
932ebcddc72SAlan Cox 	}
933ebcddc72SAlan Cox 	return (root);
934ebcddc72SAlan Cox }
935ebcddc72SAlan Cox 
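/*
 * isqrt() computes floor(sqrt(num)) two bits at a time.  Example values
 * for a 32-bit u_int: isqrt(0) == 0, isqrt(15) == 3, isqrt(16) == 4, and
 * isqrt(UINT_MAX) == 65535.
 */
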
936ebcddc72SAlan Cox /*
937ebcddc72SAlan Cox  * Perform the work of the laundry thread: periodically wake up and determine
938ebcddc72SAlan Cox  * whether any pages need to be laundered.  If so, determine the number of pages
939ebcddc72SAlan Cox  * that need to be laundered, and launder them.
940ebcddc72SAlan Cox  */
941ebcddc72SAlan Cox static void
942ebcddc72SAlan Cox vm_pageout_laundry_worker(void *arg)
943ebcddc72SAlan Cox {
944e2068d0bSJeff Roberson 	struct vm_domain *vmd;
945ebcddc72SAlan Cox 	struct vm_pagequeue *pq;
946ebcddc72SAlan Cox 	uint64_t nclean, ndirty;
947cb35676eSMark Johnston 	u_int inactq_scans, last_launder;
948e2068d0bSJeff Roberson 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
949ebcddc72SAlan Cox 	bool in_shortfall;
950ebcddc72SAlan Cox 
951e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
952e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
953e2068d0bSJeff Roberson 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
954e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
955e2068d0bSJeff Roberson 	vm_pageout_init_marker(&vmd->vmd_laundry_marker, PQ_LAUNDRY);
956ebcddc72SAlan Cox 
957ebcddc72SAlan Cox 	shortfall = 0;
958ebcddc72SAlan Cox 	in_shortfall = false;
959ebcddc72SAlan Cox 	shortfall_cycle = 0;
960ebcddc72SAlan Cox 	target = 0;
961cb35676eSMark Johnston 	inactq_scans = 0;
962ebcddc72SAlan Cox 	last_launder = 0;
963ebcddc72SAlan Cox 
964ebcddc72SAlan Cox 	/*
965b1fd102eSMark Johnston 	 * Calls to these handlers are serialized by the swap syscall lock.
966b1fd102eSMark Johnston 	 */
967e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
968b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
969e2068d0bSJeff Roberson 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
970b1fd102eSMark Johnston 	    EVENTHANDLER_PRI_ANY);
971b1fd102eSMark Johnston 
972b1fd102eSMark Johnston 	/*
973ebcddc72SAlan Cox 	 * The pageout laundry worker is never done, so loop forever.
974ebcddc72SAlan Cox 	 */
975ebcddc72SAlan Cox 	for (;;) {
976ebcddc72SAlan Cox 		KASSERT(target >= 0, ("negative target %d", target));
977ebcddc72SAlan Cox 		KASSERT(shortfall_cycle >= 0,
978ebcddc72SAlan Cox 		    ("negative cycle %d", shortfall_cycle));
979ebcddc72SAlan Cox 		launder = 0;
980ebcddc72SAlan Cox 
981ebcddc72SAlan Cox 		/*
982ebcddc72SAlan Cox 		 * First determine whether we need to launder pages to meet a
983ebcddc72SAlan Cox 		 * shortage of free pages.
984ebcddc72SAlan Cox 		 */
985ebcddc72SAlan Cox 		if (shortfall > 0) {
986ebcddc72SAlan Cox 			in_shortfall = true;
987ebcddc72SAlan Cox 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
988ebcddc72SAlan Cox 			target = shortfall;
989ebcddc72SAlan Cox 		} else if (!in_shortfall)
990ebcddc72SAlan Cox 			goto trybackground;
991e2068d0bSJeff Roberson 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
992ebcddc72SAlan Cox 			/*
993ebcddc72SAlan Cox 			 * We recently entered shortfall and began laundering
994ebcddc72SAlan Cox 			 * pages.  If we have completed that laundering run
995ebcddc72SAlan Cox 			 * (and we are no longer in shortfall) or we have met
996ebcddc72SAlan Cox 			 * our laundry target through other activity, then we
997ebcddc72SAlan Cox 			 * can stop laundering pages.
998ebcddc72SAlan Cox 			 */
999ebcddc72SAlan Cox 			in_shortfall = false;
1000ebcddc72SAlan Cox 			target = 0;
1001ebcddc72SAlan Cox 			goto trybackground;
1002ebcddc72SAlan Cox 		}
1003cb35676eSMark Johnston 		last_launder = inactq_scans;
1004ebcddc72SAlan Cox 		launder = target / shortfall_cycle--;
1005ebcddc72SAlan Cox 		goto dolaundry;
1006ebcddc72SAlan Cox 
1007ebcddc72SAlan Cox 		/*
1008ebcddc72SAlan Cox 		 * There's no immediate need to launder any pages; see if we
1009ebcddc72SAlan Cox 		 * meet the conditions to perform background laundering:
1010ebcddc72SAlan Cox 		 *
1011ebcddc72SAlan Cox 		 * 1. The ratio of dirty to clean inactive pages exceeds the
1012ebcddc72SAlan Cox 		 *    background laundering threshold and the pagedaemon has
1013ebcddc72SAlan Cox 		 *    been woken up to reclaim pages since our last
1014ebcddc72SAlan Cox 		 *    laundering, or
1015ebcddc72SAlan Cox 		 * 2. we haven't yet reached the target of the current
1016ebcddc72SAlan Cox 		 *    background laundering run.
1017ebcddc72SAlan Cox 		 *
1018ebcddc72SAlan Cox 		 * The background laundering threshold is not a constant.
1019ebcddc72SAlan Cox 		 * Instead, it is a slowly growing function of the number of
1020cb35676eSMark Johnston 		 * page daemon scans since the last laundering.  Thus, as the
1021ebcddc72SAlan Cox 		 * ratio of dirty to clean inactive pages grows, the amount of
1022ebcddc72SAlan Cox 		 * memory pressure required to trigger laundering decreases.
1023ebcddc72SAlan Cox 		 */
1024ebcddc72SAlan Cox trybackground:
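		/*
		 * Worked example of the test below: with nclean == 100000
		 * and ndirty == 30000, background laundering starts once
		 * ndirty * isqrt(inactq_scans - last_launder) >= nclean,
		 * i.e., once isqrt() reaches 4, which happens after 16
		 * inactive-queue scans without a laundering run.
		 */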
1025e2068d0bSJeff Roberson 		nclean = vmd->vmd_free_count +
1026e2068d0bSJeff Roberson 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1027e2068d0bSJeff Roberson 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1028cb35676eSMark Johnston 		if (target == 0 && inactq_scans != last_launder &&
1029cb35676eSMark Johnston 		    ndirty * isqrt(inactq_scans - last_launder) >= nclean) {
1030e2068d0bSJeff Roberson 			target = vmd->vmd_background_launder_target;
1031ebcddc72SAlan Cox 		}
1032ebcddc72SAlan Cox 
1033ebcddc72SAlan Cox 		/*
1034ebcddc72SAlan Cox 		 * We have a non-zero background laundering target.  If we've
1035ebcddc72SAlan Cox 		 * laundered up to our maximum without observing a page daemon
1036cb35676eSMark Johnston 		 * request, just stop.  This is a safety belt that ensures we
1037ebcddc72SAlan Cox 		 * don't launder an excessive amount if memory pressure is low
1038ebcddc72SAlan Cox 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1039ebcddc72SAlan Cox 		 * proceed at the background laundering rate.
1040ebcddc72SAlan Cox 		 */
1041ebcddc72SAlan Cox 		if (target > 0) {
1042cb35676eSMark Johnston 			if (inactq_scans != last_launder) {
1043cb35676eSMark Johnston 				last_launder = inactq_scans;
1044ebcddc72SAlan Cox 				last_target = target;
1045ebcddc72SAlan Cox 			} else if (last_target - target >=
1046ebcddc72SAlan Cox 			    vm_background_launder_max * PAGE_SIZE / 1024) {
1047ebcddc72SAlan Cox 				target = 0;
1048ebcddc72SAlan Cox 			}
1049ebcddc72SAlan Cox 			launder = vm_background_launder_rate * 1024 / PAGE_SIZE;
1050ebcddc72SAlan Cox 			launder /= VM_LAUNDER_RATE;
1051ebcddc72SAlan Cox 			if (launder > target)
1052ebcddc72SAlan Cox 				launder = target;
1053ebcddc72SAlan Cox 		}
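		/*
		 * A worked example of the rate computation above (a sketch,
		 * assuming 4KB pages and that the tunables are expressed in
		 * kilobytes): a vm_background_launder_rate of 4096kB/s
		 * converts to 1024 pages per second, so each run handles
		 * roughly 1024 / VM_LAUNDER_RATE pages.
		 */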
1054ebcddc72SAlan Cox 
1055ebcddc72SAlan Cox dolaundry:
1056ebcddc72SAlan Cox 		if (launder > 0) {
1057ebcddc72SAlan Cox 			/*
1058ebcddc72SAlan Cox 			 * Because of I/O clustering, the number of laundered
1059ebcddc72SAlan Cox 			 * pages could exceed "target" by the maximum size of
1060ebcddc72SAlan Cox 			 * a cluster minus one.
1061ebcddc72SAlan Cox 			 */
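			/*
			 * For instance, if the pager clusters up to
			 * vm_pageout_page_count pages at a time (32 by
			 * default; an assumption not visible here), up to
			 * 31 extra pages may be laundered.
			 */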
1062e2068d0bSJeff Roberson 			target -= min(vm_pageout_launder(vmd, launder,
1063ebcddc72SAlan Cox 			    in_shortfall), target);
1064ebcddc72SAlan Cox 			pause("laundp", hz / VM_LAUNDER_RATE);
1065ebcddc72SAlan Cox 		}
1066ebcddc72SAlan Cox 
1067ebcddc72SAlan Cox 		/*
1068ebcddc72SAlan Cox 		 * If we're not currently laundering pages and the page daemon
1069ebcddc72SAlan Cox 		 * hasn't posted a new request, sleep until the page daemon
1070ebcddc72SAlan Cox 		 * kicks us.
1071ebcddc72SAlan Cox 		 */
1072ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1073e2068d0bSJeff Roberson 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1074e2068d0bSJeff Roberson 			(void)mtx_sleep(&vmd->vmd_laundry_request,
1075ebcddc72SAlan Cox 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1076ebcddc72SAlan Cox 
1077ebcddc72SAlan Cox 		/*
1078ebcddc72SAlan Cox 		 * If the pagedaemon has indicated that it's in shortfall, start
1079ebcddc72SAlan Cox 		 * a shortfall laundering unless we're already in the middle of
1080ebcddc72SAlan Cox 		 * one.  This may preempt a background laundering.
1081ebcddc72SAlan Cox 		 */
1082e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1083ebcddc72SAlan Cox 		    (!in_shortfall || shortfall_cycle == 0)) {
1084e2068d0bSJeff Roberson 			shortfall = vm_laundry_target(vmd) +
1085e2068d0bSJeff Roberson 			    vmd->vmd_pageout_deficit;
1086ebcddc72SAlan Cox 			target = 0;
1087ebcddc72SAlan Cox 		} else
1088ebcddc72SAlan Cox 			shortfall = 0;
1089ebcddc72SAlan Cox 
1090ebcddc72SAlan Cox 		if (target == 0)
1091e2068d0bSJeff Roberson 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1092e2068d0bSJeff Roberson 		inactq_scans = vmd->vmd_inactq_scans;
1093ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1094ebcddc72SAlan Cox 	}
1095ebcddc72SAlan Cox }
1096ebcddc72SAlan Cox 
1097ebcddc72SAlan Cox /*
1098df8bae1dSRodney W. Grimes  *	vm_pageout_scan does the dirty work for the pageout daemon.
1099d9e23210SJeff Roberson  *
1100ebcddc72SAlan Cox  *	pass == 0: Update active LRU/deactivate pages
1101ebcddc72SAlan Cox  *	pass >= 1: Free inactive pages
1102e57dd910SAlan Cox  *
1103e57dd910SAlan Cox  * Returns true if pass was zero or enough pages were freed by the inactive
1104e57dd910SAlan Cox  * queue scan to meet the target.
1105df8bae1dSRodney W. Grimes  */
1106e57dd910SAlan Cox static bool
11075f8cd1c0SJeff Roberson vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
1108df8bae1dSRodney W. Grimes {
1109502ba6e4SJohn Dyson 	vm_page_t m, next;
11108d220203SAlan Cox 	struct vm_pagequeue *pq;
1111df8bae1dSRodney W. Grimes 	vm_object_t object;
111222cf98d1SAlan Cox 	long min_scan;
1113ebcddc72SAlan Cox 	int act_delta, addl_page_shortage, deficit, inactq_shortage, maxscan;
1114ebcddc72SAlan Cox 	int page_shortage, scan_tick, scanned, starting_page_shortage;
1115ebcddc72SAlan Cox 	boolean_t queue_locked;
11160d94caffSDavid Greenman 
1117df8bae1dSRodney W. Grimes 	/*
1118d9e23210SJeff Roberson 	 * If we need to reclaim memory, ask kernel caches to return
1119c9612b2dSJeff Roberson 	 * some.  We rate limit to avoid thrashing.
1120d9e23210SJeff Roberson 	 */
1121e2068d0bSJeff Roberson 	if (vmd == VM_DOMAIN(0) && pass > 0 &&
1122a6bf3a9eSRyan Stone 	    (time_uptime - lowmem_uptime) >= lowmem_period) {
1123d9e23210SJeff Roberson 		/*
1124855a310fSJeff Roberson 		 * Decrease registered cache sizes.
1125855a310fSJeff Roberson 		 */
112614a0d74eSSteven Hartland 		SDT_PROBE0(vm, , , vm__lowmem_scan);
11279b43bc27SAndriy Gapon 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1128855a310fSJeff Roberson 		/*
1129d9e23210SJeff Roberson 		 * We call uma_reclaim() explicitly after the caches have
1130d9e23210SJeff Roberson 		 * been drained above.
1131855a310fSJeff Roberson 		 */
1132855a310fSJeff Roberson 		uma_reclaim();
1133a6bf3a9eSRyan Stone 		lowmem_uptime = time_uptime;
1134d9e23210SJeff Roberson 	}
11355985940eSJohn Dyson 
1136311e34e2SKonstantin Belousov 	/*
113796240c89SEitan Adler 	 * The addl_page_shortage is the number of temporarily
1138311e34e2SKonstantin Belousov 	 * stuck pages in the inactive queue.  In other words, the
1139449c2e92SKonstantin Belousov 	 * number of pages from the inactive count that should be
1140311e34e2SKonstantin Belousov 	 * discounted in setting the target for the active queue scan.
1141311e34e2SKonstantin Belousov 	 */
11429099545aSAlan Cox 	addl_page_shortage = 0;
11439099545aSAlan Cox 
11441c7c3c6aSMatthew Dillon 	/*
1145e57dd910SAlan Cox 	 * Calculate the number of pages that we want to free.  This number
1146e57dd910SAlan Cox 	 * can be negative if many pages are freed between the wakeup call to
1147e57dd910SAlan Cox 	 * the page daemon and this calculation.
11481c7c3c6aSMatthew Dillon 	 */
114960196cdaSAlan Cox 	if (pass > 0) {
1150e2068d0bSJeff Roberson 		deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
11515f8cd1c0SJeff Roberson 		page_shortage = shortage + deficit;
115260196cdaSAlan Cox 	} else
115360196cdaSAlan Cox 		page_shortage = deficit = 0;
115476386c7eSKonstantin Belousov 	starting_page_shortage = page_shortage;
11551c7c3c6aSMatthew Dillon 
1156936524aaSMatthew Dillon 	/*
1157f095d1bbSAlan Cox 	 * Start scanning the inactive queue for pages that we can free.  The
1158f095d1bbSAlan Cox 	 * scan will stop when we reach the target or we have scanned the
1159f095d1bbSAlan Cox 	 * entire queue.  (Note that m->act_count is not used to make
1160f095d1bbSAlan Cox 	 * decisions for the inactive queue, only for the active queue.)
11618d220203SAlan Cox 	 */
1162449c2e92SKonstantin Belousov 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1163449c2e92SKonstantin Belousov 	maxscan = pq->pq_cnt;
11648d220203SAlan Cox 	vm_pagequeue_lock(pq);
11653ac8f842SMark Johnston 	queue_locked = TRUE;
11668d220203SAlan Cox 	for (m = TAILQ_FIRST(&pq->pq_pl);
11671c7c3c6aSMatthew Dillon 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
1168e929c00dSKirk McKusick 	     m = next) {
11698d220203SAlan Cox 		vm_pagequeue_assert_locked(pq);
11703ac8f842SMark Johnston 		KASSERT(queue_locked, ("unlocked inactive queue"));
1171ebcddc72SAlan Cox 		KASSERT(vm_page_inactive(m), ("Inactive queue %p", m));
1172df8bae1dSRodney W. Grimes 
117383c9dea1SGleb Smirnoff 		VM_CNT_INC(v_pdpages);
1174c325e866SKonstantin Belousov 		next = TAILQ_NEXT(m, plinks.q);
1175df8bae1dSRodney W. Grimes 
1176936524aaSMatthew Dillon 		/*
1177936524aaSMatthew Dillon 		 * skip marker pages
1178936524aaSMatthew Dillon 		 */
1179936524aaSMatthew Dillon 		if (m->flags & PG_MARKER)
1180936524aaSMatthew Dillon 			continue;
1181936524aaSMatthew Dillon 
11827900f95dSKonstantin Belousov 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
11837900f95dSKonstantin Belousov 		    ("Fictitious page %p cannot be in inactive queue", m));
11847900f95dSKonstantin Belousov 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
11857900f95dSKonstantin Belousov 		    ("Unmanaged page %p cannot be in inactive queue", m));
11867900f95dSKonstantin Belousov 
11878c616246SKonstantin Belousov 		/*
1188311e34e2SKonstantin Belousov 		 * The page or object lock acquisitions fail if the
1189311e34e2SKonstantin Belousov 		 * page was removed from the queue or moved to a
1190311e34e2SKonstantin Belousov 		 * different position within the queue.  In either
1191311e34e2SKonstantin Belousov 		 * case, addl_page_shortage should not be incremented.
11928c616246SKonstantin Belousov 		 */
1193a3aeedabSAlan Cox 		if (!vm_pageout_page_lock(m, &next))
1194a3aeedabSAlan Cox 			goto unlock_page;
11951d3a1bcfSMark Johnston 		else if (m->wire_count != 0) {
11961d3a1bcfSMark Johnston 			/*
11971d3a1bcfSMark Johnston 			 * Wired pages may not be freed, and unwiring a queued
11981d3a1bcfSMark Johnston 			 * page will cause it to be requeued.  Thus, remove them
11991d3a1bcfSMark Johnston 			 * from the queue now to avoid unnecessary revisits.
12001d3a1bcfSMark Johnston 			 */
12011d3a1bcfSMark Johnston 			vm_page_dequeue_locked(m);
12021d3a1bcfSMark Johnston 			addl_page_shortage++;
12031d3a1bcfSMark Johnston 			goto unlock_page;
12041d3a1bcfSMark Johnston 		} else if (m->hold_count != 0) {
1205a3aeedabSAlan Cox 			/*
1206a3aeedabSAlan Cox 			 * Held pages are essentially stuck in the
1207a3aeedabSAlan Cox 			 * queue.  So, they ought to be discounted
1208a3aeedabSAlan Cox 			 * from the inactive count.  See the
1209e57dd910SAlan Cox 			 * calculation of inactq_shortage before the
1210a3aeedabSAlan Cox 			 * loop over the active queue below.
1211a3aeedabSAlan Cox 			 */
1212a3aeedabSAlan Cox 			addl_page_shortage++;
1213a3aeedabSAlan Cox 			goto unlock_page;
1214df8bae1dSRodney W. Grimes 		}
12159ee2165fSAlan Cox 		object = m->object;
1216a3aeedabSAlan Cox 		if (!VM_OBJECT_TRYWLOCK(object)) {
1217a3aeedabSAlan Cox 			if (!vm_pageout_fallback_object_lock(m, &next))
1218a3aeedabSAlan Cox 				goto unlock_object;
12191d3a1bcfSMark Johnston 			else if (m->wire_count != 0) {
12201d3a1bcfSMark Johnston 				vm_page_dequeue_locked(m);
12211d3a1bcfSMark Johnston 				addl_page_shortage++;
12221d3a1bcfSMark Johnston 				goto unlock_object;
12231d3a1bcfSMark Johnston 			} else if (m->hold_count != 0) {
1224b182ec9eSJohn Dyson 				addl_page_shortage++;
1225a3aeedabSAlan Cox 				goto unlock_object;
1226a3aeedabSAlan Cox 			}
1227a3aeedabSAlan Cox 		}
1228a3aeedabSAlan Cox 		if (vm_page_busied(m)) {
1229a3aeedabSAlan Cox 			/*
1230a3aeedabSAlan Cox 			 * Don't mess with busy pages.  Leave them at
1231a3aeedabSAlan Cox 			 * the front of the queue.  Most likely, they
1232a3aeedabSAlan Cox 			 * are being paged out and will leave the
1233a3aeedabSAlan Cox 			 * queue shortly after the scan finishes.  So,
1234a3aeedabSAlan Cox 			 * they ought to be discounted from the
1235a3aeedabSAlan Cox 			 * inactive count.
1236a3aeedabSAlan Cox 			 */
1237a3aeedabSAlan Cox 			addl_page_shortage++;
1238a3aeedabSAlan Cox unlock_object:
1239a3aeedabSAlan Cox 			VM_OBJECT_WUNLOCK(object);
1240a3aeedabSAlan Cox unlock_page:
1241a3aeedabSAlan Cox 			vm_page_unlock(m);
124226f9a767SRodney W. Grimes 			continue;
124326f9a767SRodney W. Grimes 		}
12441d3a1bcfSMark Johnston 		KASSERT(!vm_page_held(m), ("Held page %p", m));
1245bd7e5f99SJohn Dyson 
12467e006499SJohn Dyson 		/*
1247ebcddc72SAlan Cox 		 * Dequeue the inactive page and unlock the inactive page
1248ebcddc72SAlan Cox 		 * queue, invalidating the 'next' pointer.  Dequeueing the
1249ebcddc72SAlan Cox 		 * page here avoids a later reacquisition (and release) of
1250ebcddc72SAlan Cox 		 * the inactive page queue lock when vm_page_activate(),
1251ebcddc72SAlan Cox 		 * vm_page_free(), or vm_page_launder() is called.  Use a
1252ebcddc72SAlan Cox 		 * marker to remember our place in the inactive queue.
125348cc2fc7SKonstantin Belousov 		 */
1254c325e866SKonstantin Belousov 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
1255ebcddc72SAlan Cox 		vm_page_dequeue_locked(m);
12568d220203SAlan Cox 		vm_pagequeue_unlock(pq);
12573ac8f842SMark Johnston 		queue_locked = FALSE;
125848cc2fc7SKonstantin Belousov 
125948cc2fc7SKonstantin Belousov 		/*
12608748f58cSKonstantin Belousov 		 * Invalid pages can be easily freed. They cannot be
12618748f58cSKonstantin Belousov 		 * mapped; vm_page_free() asserts this.
1262776f729cSKonstantin Belousov 		 */
12638748f58cSKonstantin Belousov 		if (m->valid == 0)
12648748f58cSKonstantin Belousov 			goto free_page;
1265776f729cSKonstantin Belousov 
1266776f729cSKonstantin Belousov 		/*
1267960810ccSAlan Cox 		 * If the page has been referenced and the object is not dead,
1268960810ccSAlan Cox 		 * reactivate or requeue the page depending on whether the
1269960810ccSAlan Cox 		 * object is mapped.
12707e006499SJohn Dyson 		 */
1271bb7858eaSJeff Roberson 		if ((m->aflags & PGA_REFERENCED) != 0) {
1272bb7858eaSJeff Roberson 			vm_page_aflag_clear(m, PGA_REFERENCED);
1273bb7858eaSJeff Roberson 			act_delta = 1;
127486fa2471SAlan Cox 		} else
127586fa2471SAlan Cox 			act_delta = 0;
1276bb7858eaSJeff Roberson 		if (object->ref_count != 0) {
1277bb7858eaSJeff Roberson 			act_delta += pmap_ts_referenced(m);
1278bb7858eaSJeff Roberson 		} else {
1279bb7858eaSJeff Roberson 			KASSERT(!pmap_page_is_mapped(m),
1280bb7858eaSJeff Roberson 			    ("vm_pageout_scan: page %p is mapped", m));
12812fe6e4d7SDavid Greenman 		}
1282bb7858eaSJeff Roberson 		if (act_delta != 0) {
128386fa2471SAlan Cox 			if (object->ref_count != 0) {
128483c9dea1SGleb Smirnoff 				VM_CNT_INC(v_reactivated);
128526f9a767SRodney W. Grimes 				vm_page_activate(m);
1286960810ccSAlan Cox 
1287960810ccSAlan Cox 				/*
1288960810ccSAlan Cox 				 * Increase the activation count if the page
1289960810ccSAlan Cox 				 * was referenced while in the inactive queue.
1290960810ccSAlan Cox 				 * This makes it less likely that the page will
1291960810ccSAlan Cox 				 * be returned prematurely to the inactive
1292960810ccSAlan Cox 				 * queue.
1293960810ccSAlan Cox  				 */
1294bb7858eaSJeff Roberson 				m->act_count += act_delta + ACT_ADVANCE;
1295960810ccSAlan Cox 				goto drop_page;
1296ebcddc72SAlan Cox 			} else if ((object->flags & OBJ_DEAD) == 0) {
1297ebcddc72SAlan Cox 				vm_pagequeue_lock(pq);
1298ebcddc72SAlan Cox 				queue_locked = TRUE;
1299ebcddc72SAlan Cox 				m->queue = PQ_INACTIVE;
1300ebcddc72SAlan Cox 				TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
1301ebcddc72SAlan Cox 				vm_pagequeue_cnt_inc(pq);
1302ebcddc72SAlan Cox 				goto drop_page;
1303ebcddc72SAlan Cox 			}
1304960810ccSAlan Cox 		}
130567bf6868SJohn Dyson 
13067e006499SJohn Dyson 		/*
13079fc4739dSAlan Cox 		 * If the page appears to be clean at the machine-independent
13089fc4739dSAlan Cox 		 * layer, then remove all of its mappings from the pmap in
1309a766ffd0SAlan Cox 		 * anticipation of freeing it.  If, however, any of the page's
1310a766ffd0SAlan Cox 		 * mappings allow write access, then the page may still be
1311a766ffd0SAlan Cox 		 * modified until the last of those mappings are removed.
13127e006499SJohn Dyson 		 */
1313aa044135SAlan Cox 		if (object->ref_count != 0) {
13149fc4739dSAlan Cox 			vm_page_test_dirty(m);
1315aa044135SAlan Cox 			if (m->dirty == 0)
1316b78ddb0bSAlan Cox 				pmap_remove_all(m);
1317aa044135SAlan Cox 		}
1318dcbcd518SBruce Evans 
13196989c456SAlan Cox 		/*
1320ebcddc72SAlan Cox 		 * Clean pages can be freed, but dirty pages must be sent back
1321ebcddc72SAlan Cox 		 * to the laundry, unless they belong to a dead object.
1322ebcddc72SAlan Cox 		 * Requeueing dirty pages from dead objects is pointless, as
1323ebcddc72SAlan Cox 		 * they are being paged out and freed by the thread that
1324ebcddc72SAlan Cox 		 * destroyed the object.
13256989c456SAlan Cox 		 */
1326ebcddc72SAlan Cox 		if (m->dirty == 0) {
13278748f58cSKonstantin Belousov free_page:
132878afdce6SAlan Cox 			vm_page_free(m);
132983c9dea1SGleb Smirnoff 			VM_CNT_INC(v_dfree);
13301c7c3c6aSMatthew Dillon 			--page_shortage;
1331ebcddc72SAlan Cox 		} else if ((object->flags & OBJ_DEAD) == 0)
1332ebcddc72SAlan Cox 			vm_page_launder(m);
1333776f729cSKonstantin Belousov drop_page:
133448cc2fc7SKonstantin Belousov 		vm_page_unlock(m);
133589f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
13363ac8f842SMark Johnston 		if (!queue_locked) {
13378d220203SAlan Cox 			vm_pagequeue_lock(pq);
13383ac8f842SMark Johnston 			queue_locked = TRUE;
13396989c456SAlan Cox 		}
1340c325e866SKonstantin Belousov 		next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
1341c325e866SKonstantin Belousov 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
13420d94caffSDavid Greenman 	}
13438d220203SAlan Cox 	vm_pagequeue_unlock(pq);
134426f9a767SRodney W. Grimes 
1345ebcddc72SAlan Cox 	/*
1346ebcddc72SAlan Cox 	 * Wake up the laundry thread so that it can perform any needed
1347ebcddc72SAlan Cox 	 * laundering.  If we didn't meet our target, we're in shortfall and
1348b1fd102eSMark Johnston 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1349b1fd102eSMark Johnston 	 * swap devices are configured, the laundry thread has no work to do, so
1350b1fd102eSMark Johnston 	 * don't bother waking it up.
1351cb35676eSMark Johnston 	 *
1352cb35676eSMark Johnston 	 * The laundry thread uses the number of inactive queue scans elapsed
1353cb35676eSMark Johnston 	 * since the last laundering to determine whether to launder again, so
1354cb35676eSMark Johnston 	 * keep count.
1355ebcddc72SAlan Cox 	 */
1356cb35676eSMark Johnston 	if (starting_page_shortage > 0) {
1357e2068d0bSJeff Roberson 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1358ebcddc72SAlan Cox 		vm_pagequeue_lock(pq);
1359e2068d0bSJeff Roberson 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1360cb35676eSMark Johnston 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1361ebcddc72SAlan Cox 			if (page_shortage > 0) {
1362e2068d0bSJeff Roberson 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
136383c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdshortfalls);
1364e2068d0bSJeff Roberson 			} else if (vmd->vmd_laundry_request !=
1365e2068d0bSJeff Roberson 			    VM_LAUNDRY_SHORTFALL)
1366e2068d0bSJeff Roberson 				vmd->vmd_laundry_request =
1367e2068d0bSJeff Roberson 				    VM_LAUNDRY_BACKGROUND;
1368e2068d0bSJeff Roberson 			wakeup(&vmd->vmd_laundry_request);
1369b1fd102eSMark Johnston 		}
1370e2068d0bSJeff Roberson 		vmd->vmd_inactq_scans++;
1371ebcddc72SAlan Cox 		vm_pagequeue_unlock(pq);
1372ebcddc72SAlan Cox 	}
1373ebcddc72SAlan Cox 
13749452b5edSAlan Cox 	/*
1375f095d1bbSAlan Cox 	 * Wakeup the swapout daemon if we didn't free the targeted number of
1376f095d1bbSAlan Cox 	 * pages.
13779452b5edSAlan Cox 	 */
1378ac04195bSKonstantin Belousov 	if (page_shortage > 0)
1379ac04195bSKonstantin Belousov 		vm_swapout_run();
13809452b5edSAlan Cox 
13819452b5edSAlan Cox 	/*
138276386c7eSKonstantin Belousov 	 * If the inactive queue scan fails repeatedly to meet its
138376386c7eSKonstantin Belousov 	 * target, kill the largest process.
138476386c7eSKonstantin Belousov 	 */
138576386c7eSKonstantin Belousov 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
138676386c7eSKonstantin Belousov 
138776386c7eSKonstantin Belousov 	/*
1388936524aaSMatthew Dillon 	 * Compute the number of pages we want to try to move from the
1389ebcddc72SAlan Cox 	 * active queue to either the inactive or laundry queue.
1390ebcddc72SAlan Cox 	 *
1391ebcddc72SAlan Cox 	 * When scanning active pages, we make clean pages count more heavily
1392ebcddc72SAlan Cox 	 * towards the page shortage than dirty pages.  This is because dirty
1393ebcddc72SAlan Cox 	 * pages must be laundered before they can be reused and thus have less
1394ebcddc72SAlan Cox 	 * utility when attempting to quickly alleviate a shortage.  However,
1395ebcddc72SAlan Cox 	 * this weighting also causes the scan to deactivate dirty pages
1396ebcddc72SAlan Cox 	 * more aggressively, improving the effectiveness of clustering and
1397ebcddc72SAlan Cox 	 * ensuring that they can eventually be reused.
13981c7c3c6aSMatthew Dillon 	 */
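	/*
	 * In effect, after the scaling below, deactivating a clean page
	 * reduces the shortage by act_scan_laundry_weight while laundering
	 * a dirty page reduces it by only one; see the active queue loop
	 * at the end of this function.
	 */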
1399e2068d0bSJeff Roberson 	inactq_shortage = vmd->vmd_inactive_target - (pq->pq_cnt +
1400e2068d0bSJeff Roberson 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight) +
140159d3150bSMark Johnston 	    vm_paging_target(vmd) + deficit + addl_page_shortage;
140282e2d06aSMark Johnston 	inactq_shortage *= act_scan_laundry_weight;
14039099545aSAlan Cox 
1404114f62c6SJeff Roberson 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1405114f62c6SJeff Roberson 	vm_pagequeue_lock(pq);
14069099545aSAlan Cox 	maxscan = pq->pq_cnt;
14079099545aSAlan Cox 
1408d9e23210SJeff Roberson 	/*
1409d9e23210SJeff Roberson 	 * If we're just idle polling, attempt to visit every
1410d9e23210SJeff Roberson 	 * active page within 'update_period' seconds.
1411d9e23210SJeff Roberson 	 */
141222cf98d1SAlan Cox 	scan_tick = ticks;
141322cf98d1SAlan Cox 	if (vm_pageout_update_period != 0) {
141422cf98d1SAlan Cox 		min_scan = pq->pq_cnt;
141522cf98d1SAlan Cox 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
141622cf98d1SAlan Cox 		min_scan /= hz * vm_pageout_update_period;
141722cf98d1SAlan Cox 	} else
141822cf98d1SAlan Cox 		min_scan = 0;
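	/*
	 * A sketch, assuming a scan every hz / VM_INACT_SCAN_RATE ticks as
	 * in vm_pageout_worker(): with vm_pageout_update_period = 600,
	 * each pass visits about pq_cnt / (600 * VM_INACT_SCAN_RATE)
	 * pages, covering the whole queue once every ten minutes.
	 */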
1419e57dd910SAlan Cox 	if (min_scan > 0 || (inactq_shortage > 0 && maxscan > 0))
142022cf98d1SAlan Cox 		vmd->vmd_last_active_scan = scan_tick;
14211c7c3c6aSMatthew Dillon 
14221c7c3c6aSMatthew Dillon 	/*
142322cf98d1SAlan Cox 	 * Scan the active queue for pages that can be deactivated.  Update
142422cf98d1SAlan Cox 	 * the per-page activity counter and use it to identify deactivation
142579144408SAlan Cox 	 * candidates.  Held pages may be deactivated.
14261c7c3c6aSMatthew Dillon 	 */
142722cf98d1SAlan Cox 	for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
1428e57dd910SAlan Cox 	    min_scan || (inactq_shortage > 0 && scanned < maxscan)); m = next,
142922cf98d1SAlan Cox 	    scanned++) {
14309cf51988SAlan Cox 		KASSERT(m->queue == PQ_ACTIVE,
1431d3c09dd7SAlan Cox 		    ("vm_pageout_scan: page %p isn't active", m));
1432c325e866SKonstantin Belousov 		next = TAILQ_NEXT(m, plinks.q);
143322cf98d1SAlan Cox 		if ((m->flags & PG_MARKER) != 0)
14348dbca793STor Egge 			continue;
14357900f95dSKonstantin Belousov 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
14367900f95dSKonstantin Belousov 		    ("Fictitious page %p cannot be in active queue", m));
14377900f95dSKonstantin Belousov 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
14387900f95dSKonstantin Belousov 		    ("Unmanaged page %p cannot be in active queue", m));
14399ee2165fSAlan Cox 		if (!vm_pageout_page_lock(m, &next)) {
14408c616246SKonstantin Belousov 			vm_page_unlock(m);
14412965a453SKip Macy 			continue;
14422965a453SKip Macy 		}
1443b18bfc3dSJohn Dyson 
1444b18bfc3dSJohn Dyson 		/*
144579144408SAlan Cox 		 * The count for page daemon pages is updated after checking
144679144408SAlan Cox 		 * the page for eligibility.
1447b18bfc3dSJohn Dyson 		 */
144883c9dea1SGleb Smirnoff 		VM_CNT_INC(v_pdpages);
1449ef743ce6SJohn Dyson 
14507e006499SJohn Dyson 		/*
14511d3a1bcfSMark Johnston 		 * Wired pages are dequeued lazily.
14521d3a1bcfSMark Johnston 		 */
14531d3a1bcfSMark Johnston 		if (m->wire_count != 0) {
14541d3a1bcfSMark Johnston 			vm_page_dequeue_locked(m);
14551d3a1bcfSMark Johnston 			vm_page_unlock(m);
14561d3a1bcfSMark Johnston 			continue;
14571d3a1bcfSMark Johnston 		}
14581d3a1bcfSMark Johnston 
14591d3a1bcfSMark Johnston 		/*
14607e006499SJohn Dyson 		 * Check to see "how much" the page has been used.
14617e006499SJohn Dyson 		 */
146286fa2471SAlan Cox 		if ((m->aflags & PGA_REFERENCED) != 0) {
1463bb7858eaSJeff Roberson 			vm_page_aflag_clear(m, PGA_REFERENCED);
146486fa2471SAlan Cox 			act_delta = 1;
146586fa2471SAlan Cox 		} else
146686fa2471SAlan Cox 			act_delta = 0;
146786fa2471SAlan Cox 
1468274132acSJeff Roberson 		/*
146979144408SAlan Cox 		 * Perform an unsynchronized object ref count check.  While
147079144408SAlan Cox 		 * the page lock ensures that the page is not reallocated to
147179144408SAlan Cox 		 * another object, in particular, one with unmanaged mappings
147279144408SAlan Cox 		 * that cannot support pmap_ts_referenced(), two races are,
147379144408SAlan Cox 		 * nonetheless, possible:
147479144408SAlan Cox 		 * 1) The count was transitioning to zero, but we saw a non-
147579144408SAlan Cox 		 *    zero value.  pmap_ts_referenced() will return zero
147679144408SAlan Cox 		 *    because the page is not mapped.
147779144408SAlan Cox 		 * 2) The count was transitioning to one, but we saw zero.
147879144408SAlan Cox 		 *    This race delays the detection of a new reference.  At
147979144408SAlan Cox 		 *    worst, we will deactivate and reactivate the page.
1480274132acSJeff Roberson 		 */
1481274132acSJeff Roberson 		if (m->object->ref_count != 0)
1482bb7858eaSJeff Roberson 			act_delta += pmap_ts_referenced(m);
1483bb7858eaSJeff Roberson 
1484bb7858eaSJeff Roberson 		/*
1485bb7858eaSJeff Roberson 		 * Advance or decay the act_count based on recent usage.
1486bb7858eaSJeff Roberson 		 */
148786fa2471SAlan Cox 		if (act_delta != 0) {
1488bb7858eaSJeff Roberson 			m->act_count += ACT_ADVANCE + act_delta;
148938efa82bSJohn Dyson 			if (m->act_count > ACT_MAX)
149038efa82bSJohn Dyson 				m->act_count = ACT_MAX;
149186fa2471SAlan Cox 		} else
149238efa82bSJohn Dyson 			m->act_count -= min(m->act_count, ACT_DECLINE);
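		/*
		 * With the conventional values ACT_ADVANCE = 3, ACT_DECLINE
		 * = 1, and ACT_MAX = 64 (assumed here; they are defined in
		 * vm_page.h), a frequently referenced page saturates at the
		 * cap, while an idle page needs ACT_MAX consecutive
		 * unreferenced scans to decay from the cap to zero.
		 */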
1493bb7858eaSJeff Roberson 
1494bb7858eaSJeff Roberson 		/*
1495ebcddc72SAlan Cox 		 * Move this page to the tail of the active, inactive or laundry
1496bb7858eaSJeff Roberson 		 * queue depending on usage.
1497bb7858eaSJeff Roberson 		 */
149886fa2471SAlan Cox 		if (m->act_count == 0) {
14998d220203SAlan Cox 			/* Dequeue to avoid later lock recursion. */
15008d220203SAlan Cox 			vm_page_dequeue_locked(m);
1501ebcddc72SAlan Cox 
1502ebcddc72SAlan Cox 			/*
1503ebcddc72SAlan Cox 			 * When not short for inactive pages, let dirty pages go
1504ebcddc72SAlan Cox 			 * through the inactive queue before moving to the
1505ebcddc72SAlan Cox 			 * laundry queues.  This gives them some extra time to
1506ebcddc72SAlan Cox 			 * be reactivated, potentially avoiding an expensive
1507ebcddc72SAlan Cox 			 * pageout.  During a page shortage, the inactive queue
1508ebcddc72SAlan Cox 			 * is necessarily small, so we may move dirty pages
1509ebcddc72SAlan Cox 			 * directly to the laundry queue.
1510ebcddc72SAlan Cox 			 */
1511ebcddc72SAlan Cox 			if (inactq_shortage <= 0)
1512d4a272dbSJohn Dyson 				vm_page_deactivate(m);
1513ebcddc72SAlan Cox 			else {
1514ebcddc72SAlan Cox 				/*
1515ebcddc72SAlan Cox 				 * Calling vm_page_test_dirty() here would
1516ebcddc72SAlan Cox 				 * require acquisition of the object's write
1517ebcddc72SAlan Cox 				 * lock.  However, during a page shortage,
1518ebcddc72SAlan Cox 				 * directing dirty pages into the laundry
1519ebcddc72SAlan Cox 				 * queue is only an optimization and not a
1520ebcddc72SAlan Cox 				 * requirement.  Therefore, we simply rely on
1521ebcddc72SAlan Cox 				 * the opportunistic updates to the page's
1522ebcddc72SAlan Cox 				 * dirty field by the pmap.
1523ebcddc72SAlan Cox 				 */
1524ebcddc72SAlan Cox 				if (m->dirty == 0) {
1525ebcddc72SAlan Cox 					vm_page_deactivate(m);
1526ebcddc72SAlan Cox 					inactq_shortage -=
1527ebcddc72SAlan Cox 					    act_scan_laundry_weight;
1528ebcddc72SAlan Cox 				} else {
1529ebcddc72SAlan Cox 					vm_page_launder(m);
1530e57dd910SAlan Cox 					inactq_shortage--;
1531ebcddc72SAlan Cox 				}
1532ebcddc72SAlan Cox 			}
15338d220203SAlan Cox 		} else
15348d220203SAlan Cox 			vm_page_requeue_locked(m);
15352965a453SKip Macy 		vm_page_unlock(m);
153626f9a767SRodney W. Grimes 	}
15378d220203SAlan Cox 	vm_pagequeue_unlock(pq);
1538ac04195bSKonstantin Belousov 	if (pass > 0)
1539ac04195bSKonstantin Belousov 		vm_swapout_run_idle();
1540e57dd910SAlan Cox 	return (page_shortage <= 0);
15412025d69bSKonstantin Belousov }
15422025d69bSKonstantin Belousov 
1543449c2e92SKonstantin Belousov static int vm_pageout_oom_vote;
1544449c2e92SKonstantin Belousov 
1545449c2e92SKonstantin Belousov /*
1546449c2e92SKonstantin Belousov  * The pagedaemon threads randomly select one to perform the
1547449c2e92SKonstantin Belousov  * OOM.  Trying to kill processes before all pagedaemons have
1548449c2e92SKonstantin Belousov  * failed to reach the free target is premature.
1549449c2e92SKonstantin Belousov  */
1550449c2e92SKonstantin Belousov static void
155176386c7eSKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
155276386c7eSKonstantin Belousov     int starting_page_shortage)
1553449c2e92SKonstantin Belousov {
1554449c2e92SKonstantin Belousov 	int old_vote;
1555449c2e92SKonstantin Belousov 
155676386c7eSKonstantin Belousov 	if (starting_page_shortage <= 0 || starting_page_shortage !=
155776386c7eSKonstantin Belousov 	    page_shortage)
155876386c7eSKonstantin Belousov 		vmd->vmd_oom_seq = 0;
155976386c7eSKonstantin Belousov 	else
156076386c7eSKonstantin Belousov 		vmd->vmd_oom_seq++;
156176386c7eSKonstantin Belousov 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1562449c2e92SKonstantin Belousov 		if (vmd->vmd_oom) {
1563449c2e92SKonstantin Belousov 			vmd->vmd_oom = FALSE;
1564449c2e92SKonstantin Belousov 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1565449c2e92SKonstantin Belousov 		}
1566449c2e92SKonstantin Belousov 		return;
1567449c2e92SKonstantin Belousov 	}
1568449c2e92SKonstantin Belousov 
156976386c7eSKonstantin Belousov 	/*
157076386c7eSKonstantin Belousov 	 * Reset the counter so that a new sequence of failing scans is
157176386c7eSKonstantin Belousov 	 * required before the next OOM kill can be triggered.
157276386c7eSKonstantin Belousov 	 */
157376386c7eSKonstantin Belousov 	vmd->vmd_oom_seq = 0;
157476386c7eSKonstantin Belousov 
1575449c2e92SKonstantin Belousov 	if (vmd->vmd_oom)
1576449c2e92SKonstantin Belousov 		return;
1577449c2e92SKonstantin Belousov 
1578449c2e92SKonstantin Belousov 	vmd->vmd_oom = TRUE;
1579449c2e92SKonstantin Belousov 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1580449c2e92SKonstantin Belousov 	if (old_vote != vm_ndomains - 1)
1581449c2e92SKonstantin Belousov 		return;
1582449c2e92SKonstantin Belousov 
1583449c2e92SKonstantin Belousov 	/*
1584449c2e92SKonstantin Belousov 	 * The current pagedaemon thread is the last in the quorum to
1585449c2e92SKonstantin Belousov 	 * start OOM.  Initiate the selection and signaling of the
1586449c2e92SKonstantin Belousov 	 * victim.
1587449c2e92SKonstantin Belousov 	 */
1588449c2e92SKonstantin Belousov 	vm_pageout_oom(VM_OOM_MEM);
1589449c2e92SKonstantin Belousov 
1590449c2e92SKonstantin Belousov 	/*
1591449c2e92SKonstantin Belousov 	 * After one round of OOM terror, recall our vote.  On the
1592449c2e92SKonstantin Belousov 	 * next pass, current pagedaemon would vote again if the low
1593449c2e92SKonstantin Belousov 	 * memory condition is still there, due to vmd_oom being
1594449c2e92SKonstantin Belousov 	 * false.
1595449c2e92SKonstantin Belousov 	 */
1596449c2e92SKonstantin Belousov 	vmd->vmd_oom = FALSE;
1597449c2e92SKonstantin Belousov 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1598449c2e92SKonstantin Belousov }
15992025d69bSKonstantin Belousov 
16003949873fSKonstantin Belousov /*
16013949873fSKonstantin Belousov  * The OOM killer is the page daemon's action of last resort when
16023949873fSKonstantin Belousov  * memory allocation requests have been stalled for a prolonged period
16033949873fSKonstantin Belousov  * of time because it cannot reclaim memory.  This function computes
16043949873fSKonstantin Belousov  * the approximate number of physical pages that could be reclaimed if
16053949873fSKonstantin Belousov  * the specified address space is destroyed.
16063949873fSKonstantin Belousov  *
16073949873fSKonstantin Belousov  * Private, anonymous memory owned by the address space is the
16083949873fSKonstantin Belousov  * principal resource that we expect to recover after an OOM kill.
16093949873fSKonstantin Belousov  * Since the physical pages mapped by the address space's COW entries
16103949873fSKonstantin Belousov  * are typically shared pages, they are unlikely to be released and so
16113949873fSKonstantin Belousov  * they are not counted.
16123949873fSKonstantin Belousov  *
16133949873fSKonstantin Belousov  * To get to the point where the page daemon runs the OOM killer, its
16143949873fSKonstantin Belousov  * efforts to write-back vnode-backed pages may have stalled.  This
16153949873fSKonstantin Belousov  * could be caused by a memory allocation deadlock in the write path
16163949873fSKonstantin Belousov  * that might be resolved by an OOM kill.  Therefore, physical pages
16173949873fSKonstantin Belousov  * belonging to vnode-backed objects are counted, because they might
16183949873fSKonstantin Belousov  * be freed without being written out first if the address space holds
16193949873fSKonstantin Belousov  * the last reference to an unlinked vnode.
16203949873fSKonstantin Belousov  *
16213949873fSKonstantin Belousov  * Similarly, physical pages belonging to OBJT_PHYS objects are
16223949873fSKonstantin Belousov  * counted because the address space might hold the last reference to
16233949873fSKonstantin Belousov  * the object.
16243949873fSKonstantin Belousov  */
16253949873fSKonstantin Belousov static long
16263949873fSKonstantin Belousov vm_pageout_oom_pagecount(struct vmspace *vmspace)
16273949873fSKonstantin Belousov {
16283949873fSKonstantin Belousov 	vm_map_t map;
16293949873fSKonstantin Belousov 	vm_map_entry_t entry;
16303949873fSKonstantin Belousov 	vm_object_t obj;
16313949873fSKonstantin Belousov 	long res;
16323949873fSKonstantin Belousov 
16333949873fSKonstantin Belousov 	map = &vmspace->vm_map;
16343949873fSKonstantin Belousov 	KASSERT(!map->system_map, ("system map"));
16353949873fSKonstantin Belousov 	sx_assert(&map->lock, SA_LOCKED);
16363949873fSKonstantin Belousov 	res = 0;
16373949873fSKonstantin Belousov 	for (entry = map->header.next; entry != &map->header;
16383949873fSKonstantin Belousov 	    entry = entry->next) {
16393949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
16403949873fSKonstantin Belousov 			continue;
16413949873fSKonstantin Belousov 		obj = entry->object.vm_object;
16423949873fSKonstantin Belousov 		if (obj == NULL)
16433949873fSKonstantin Belousov 			continue;
16443949873fSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
16453949873fSKonstantin Belousov 		    obj->ref_count != 1)
16463949873fSKonstantin Belousov 			continue;
16473949873fSKonstantin Belousov 		switch (obj->type) {
16483949873fSKonstantin Belousov 		case OBJT_DEFAULT:
16493949873fSKonstantin Belousov 		case OBJT_SWAP:
16503949873fSKonstantin Belousov 		case OBJT_PHYS:
16513949873fSKonstantin Belousov 		case OBJT_VNODE:
16523949873fSKonstantin Belousov 			res += obj->resident_page_count;
16533949873fSKonstantin Belousov 			break;
16543949873fSKonstantin Belousov 		}
16553949873fSKonstantin Belousov 	}
16563949873fSKonstantin Belousov 	return (res);
16573949873fSKonstantin Belousov }
16583949873fSKonstantin Belousov 
16592025d69bSKonstantin Belousov void
16602025d69bSKonstantin Belousov vm_pageout_oom(int shortage)
16612025d69bSKonstantin Belousov {
16622025d69bSKonstantin Belousov 	struct proc *p, *bigproc;
16632025d69bSKonstantin Belousov 	vm_offset_t size, bigsize;
16642025d69bSKonstantin Belousov 	struct thread *td;
16656bed074cSKonstantin Belousov 	struct vmspace *vm;
16663e78e983SAlan Cox 	bool breakout;
16672025d69bSKonstantin Belousov 
16682025d69bSKonstantin Belousov 	/*
16691c58e4e5SJohn Baldwin 	 * We keep the process bigproc locked once we find it to keep anyone
16701c58e4e5SJohn Baldwin 	 * from messing with it; however, there is a possibility of
167128323addSBryan Drewery 	 * deadlock if process B is bigproc and one of its child processes
16721c58e4e5SJohn Baldwin 	 * attempts to propagate a signal to B while we are waiting for
16731c58e4e5SJohn Baldwin 	 * another process' lock while walking this list.  To avoid this,
16741c58e4e5SJohn Baldwin 	 * we don't block on the process lock but skip locked processes.
16755663e6deSDavid Greenman 	 */
16765663e6deSDavid Greenman 	bigproc = NULL;
16775663e6deSDavid Greenman 	bigsize = 0;
16781005a129SJohn Baldwin 	sx_slock(&allproc_lock);
1679e602ba25SJulian Elischer 	FOREACH_PROC_IN_SYSTEM(p) {
168071943c3dSKonstantin Belousov 		PROC_LOCK(p);
168171943c3dSKonstantin Belousov 
16821c58e4e5SJohn Baldwin 		/*
16833f1c4c4fSKonstantin Belousov 		 * If this is a system, protected or killed process, skip it.
16845663e6deSDavid Greenman 		 */
168571943c3dSKonstantin Belousov 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
168671943c3dSKonstantin Belousov 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
168771943c3dSKonstantin Belousov 		    p->p_pid == 1 || P_KILLED(p) ||
168871943c3dSKonstantin Belousov 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
16898606d880SJohn Baldwin 			PROC_UNLOCK(p);
16905663e6deSDavid Greenman 			continue;
16915663e6deSDavid Greenman 		}
16925663e6deSDavid Greenman 		/*
1693dcbcd518SBruce Evans 		 * If the process is in a non-running type state,
1694e602ba25SJulian Elischer 		 * don't touch it.  Check all the threads individually.
16955663e6deSDavid Greenman 		 */
16963e78e983SAlan Cox 		breakout = false;
1697e602ba25SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
1698982d11f8SJeff Roberson 			thread_lock(td);
169971fad9fdSJulian Elischer 			if (!TD_ON_RUNQ(td) &&
170071fad9fdSJulian Elischer 			    !TD_IS_RUNNING(td) &&
1701f497cda2SEdward Tomasz Napierala 			    !TD_IS_SLEEPING(td) &&
1702b98acc0aSKonstantin Belousov 			    !TD_IS_SUSPENDED(td) &&
1703b98acc0aSKonstantin Belousov 			    !TD_IS_SWAPPED(td)) {
1704982d11f8SJeff Roberson 				thread_unlock(td);
17053e78e983SAlan Cox 				breakout = true;
1706e602ba25SJulian Elischer 				break;
1707e602ba25SJulian Elischer 			}
1708982d11f8SJeff Roberson 			thread_unlock(td);
1709e602ba25SJulian Elischer 		}
1710e602ba25SJulian Elischer 		if (breakout) {
17111c58e4e5SJohn Baldwin 			PROC_UNLOCK(p);
17125663e6deSDavid Greenman 			continue;
17135663e6deSDavid Greenman 		}
17145663e6deSDavid Greenman 		/*
17155663e6deSDavid Greenman 		 * get the process size
17165663e6deSDavid Greenman 		 */
17176bed074cSKonstantin Belousov 		vm = vmspace_acquire_ref(p);
17186bed074cSKonstantin Belousov 		if (vm == NULL) {
17196bed074cSKonstantin Belousov 			PROC_UNLOCK(p);
17206bed074cSKonstantin Belousov 			continue;
17216bed074cSKonstantin Belousov 		}
172295e2409aSKonstantin Belousov 		_PHOLD_LITE(p);
172372d97679SDavid Schultz 		PROC_UNLOCK(p);
172495e2409aSKonstantin Belousov 		sx_sunlock(&allproc_lock);
172595e2409aSKonstantin Belousov 		if (!vm_map_trylock_read(&vm->vm_map)) {
172671943c3dSKonstantin Belousov 			vmspace_free(vm);
172795e2409aSKonstantin Belousov 			sx_slock(&allproc_lock);
172895e2409aSKonstantin Belousov 			PRELE(p);
172972d97679SDavid Schultz 			continue;
173072d97679SDavid Schultz 		}
17317981aa24SKonstantin Belousov 		size = vmspace_swap_count(vm);
17322025d69bSKonstantin Belousov 		if (shortage == VM_OOM_MEM)
17333949873fSKonstantin Belousov 			size += vm_pageout_oom_pagecount(vm);
17343949873fSKonstantin Belousov 		vm_map_unlock_read(&vm->vm_map);
17356bed074cSKonstantin Belousov 		vmspace_free(vm);
173695e2409aSKonstantin Belousov 		sx_slock(&allproc_lock);
17373949873fSKonstantin Belousov 
17385663e6deSDavid Greenman 		/*
17393949873fSKonstantin Belousov 		 * If this process is bigger than the biggest one,
17405663e6deSDavid Greenman 		 * remember it.
17415663e6deSDavid Greenman 		 */
17425663e6deSDavid Greenman 		if (size > bigsize) {
17431c58e4e5SJohn Baldwin 			if (bigproc != NULL)
174471943c3dSKonstantin Belousov 				PRELE(bigproc);
17455663e6deSDavid Greenman 			bigproc = p;
17465663e6deSDavid Greenman 			bigsize = size;
174771943c3dSKonstantin Belousov 		} else {
174871943c3dSKonstantin Belousov 			PRELE(p);
174971943c3dSKonstantin Belousov 		}
17505663e6deSDavid Greenman 	}
17511005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
17525663e6deSDavid Greenman 	if (bigproc != NULL) {
17538311a2b8SWill Andrews 		if (vm_panic_on_oom != 0)
17548311a2b8SWill Andrews 			panic("out of swap space");
175571943c3dSKonstantin Belousov 		PROC_LOCK(bigproc);
1756729b1e51SDavid Greenman 		killproc(bigproc, "out of swap space");
1757fa885116SJulian Elischer 		sched_nice(bigproc, PRIO_MIN);
175871943c3dSKonstantin Belousov 		_PRELE(bigproc);
17591c58e4e5SJohn Baldwin 		PROC_UNLOCK(bigproc);
17605663e6deSDavid Greenman 	}
17615663e6deSDavid Greenman }
176226f9a767SRodney W. Grimes 
1763449c2e92SKonstantin Belousov static void
1764449c2e92SKonstantin Belousov vm_pageout_worker(void *arg)
1765449c2e92SKonstantin Belousov {
1766e2068d0bSJeff Roberson 	struct vm_domain *vmd;
17675f8cd1c0SJeff Roberson 	int domain, pass, shortage;
1768e57dd910SAlan Cox 	bool target_met;
1769449c2e92SKonstantin Belousov 
1770e2068d0bSJeff Roberson 	domain = (uintptr_t)arg;
1771e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
177270cf3cedSAlan Cox 	pass = 0;
17735f8cd1c0SJeff Roberson 	shortage = 0;
1774e57dd910SAlan Cox 	target_met = true;
1775449c2e92SKonstantin Belousov 
1776449c2e92SKonstantin Belousov 	/*
1777949c9186SKonstantin Belousov 	 * XXXKIB It could be useful to bind pageout daemon threads to
1778949c9186SKonstantin Belousov 	 * the cores belonging to the domain, from which vm_page_array
1779949c9186SKonstantin Belousov 	 * is allocated.
1780449c2e92SKonstantin Belousov 	 */
1781449c2e92SKonstantin Belousov 
1782e2068d0bSJeff Roberson 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1783e2068d0bSJeff Roberson 	vmd->vmd_last_active_scan = ticks;
1784e2068d0bSJeff Roberson 	vm_pageout_init_marker(&vmd->vmd_marker, PQ_INACTIVE);
1785e2068d0bSJeff Roberson 	vm_pageout_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE);
1786e2068d0bSJeff Roberson 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
1787e2068d0bSJeff Roberson 	    &vmd->vmd_inacthead, plinks.q);
1788449c2e92SKonstantin Belousov 
1789449c2e92SKonstantin Belousov 	/*
1790449c2e92SKonstantin Belousov 	 * The pageout daemon worker is never done, so loop forever.
1791449c2e92SKonstantin Belousov 	 */
1792449c2e92SKonstantin Belousov 	while (TRUE) {
1793*30fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
1794*30fbfddaSJeff Roberson 		/*
1795*30fbfddaSJeff Roberson 		 * We need to clear wanted before we check the limits.  This
1796*30fbfddaSJeff Roberson 		 * prevents races with wakers who will check wanted after they
1797*30fbfddaSJeff Roberson 		 * reach the limit.
1798*30fbfddaSJeff Roberson 		 */
1799*30fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
180056ce0690SAlan Cox 
180156ce0690SAlan Cox 		/*
18025f8cd1c0SJeff Roberson 		 * Might the page daemon need to run again?
1803449c2e92SKonstantin Belousov 		 */
18045f8cd1c0SJeff Roberson 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
180556ce0690SAlan Cox 			/*
18065f8cd1c0SJeff Roberson 			 * Yes, the scan failed to free enough pages.  If
18075f8cd1c0SJeff Roberson 			 * we have performed a level >= 1 (page reclamation)
18085f8cd1c0SJeff Roberson 			 * scan, then sleep a bit and try again.
180956ce0690SAlan Cox 			 */
1810*30fbfddaSJeff Roberson 			vm_domain_pageout_unlock(vmd);
18115f8cd1c0SJeff Roberson 			if (pass > 1)
18126eebec83SMark Johnston 				pause("pwait", hz / VM_INACT_SCAN_RATE);
1813449c2e92SKonstantin Belousov 		} else {
1814449c2e92SKonstantin Belousov 			/*
18155f8cd1c0SJeff Roberson 			 * No, sleep until the next wakeup or until pages
18165f8cd1c0SJeff Roberson 			 * need to have their reference stats updated.
1817449c2e92SKonstantin Belousov 			 */
18182c0f13aaSKonstantin Belousov 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
1819*30fbfddaSJeff Roberson 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
18205f8cd1c0SJeff Roberson 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
182183c9dea1SGleb Smirnoff 				VM_CNT_INC(v_pdwakeups);
182256ce0690SAlan Cox 		}
1823*30fbfddaSJeff Roberson 		/* Prevent spurious wakeups by ensuring that wanted is set. */
1824*30fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
1825*30fbfddaSJeff Roberson 
1826*30fbfddaSJeff Roberson 		/*
1827*30fbfddaSJeff Roberson 		 * Use the controller to calculate how many pages to free in
1828*30fbfddaSJeff Roberson 		 * this interval.
1829*30fbfddaSJeff Roberson 		 */
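		/*
		 * A positive controller output means pages must be freed;
		 * it promotes the scan from a pure active LRU update
		 * (pass 0) to inactive queue reclamation (pass >= 1).
		 */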
18305f8cd1c0SJeff Roberson 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
18315f8cd1c0SJeff Roberson 		if (shortage && pass == 0)
18325f8cd1c0SJeff Roberson 			pass = 1;
183356ce0690SAlan Cox 
18345f8cd1c0SJeff Roberson 		target_met = vm_pageout_scan(vmd, pass, shortage);
18355f8cd1c0SJeff Roberson 		/*
18365f8cd1c0SJeff Roberson 		 * If the target was not met we must increase the pass to
18375f8cd1c0SJeff Roberson 		 * more aggressively reclaim.
18385f8cd1c0SJeff Roberson 		 */
18395f8cd1c0SJeff Roberson 		if (!target_met)
18405f8cd1c0SJeff Roberson 			pass++;
1841449c2e92SKonstantin Belousov 	}
1842449c2e92SKonstantin Belousov }
1843449c2e92SKonstantin Belousov 
1844df8bae1dSRodney W. Grimes /*
18454d19f4adSSteven Hartland  *	vm_pageout_init initialises basic pageout daemon settings.
1846df8bae1dSRodney W. Grimes  */
18472b14f991SJulian Elischer static void
1848e2068d0bSJeff Roberson vm_pageout_init_domain(int domain)
1849df8bae1dSRodney W. Grimes {
1850e2068d0bSJeff Roberson 	struct vm_domain *vmd;
18515f8cd1c0SJeff Roberson 	struct sysctl_oid *oid;
1852e2068d0bSJeff Roberson 
1853e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
1854e2068d0bSJeff Roberson 	vmd->vmd_interrupt_free_min = 2;
1855f6b04d2bSDavid Greenman 
185645ae1d91SAlan Cox 	/*
185745ae1d91SAlan Cox 	 * v_free_reserved needs to include enough for the largest
185845ae1d91SAlan Cox 	 * swap pager structures plus enough for any pv_entry structs
185945ae1d91SAlan Cox 	 * when paging.
186045ae1d91SAlan Cox 	 */
1861e2068d0bSJeff Roberson 	if (vmd->vmd_page_count > 1024)
1862e2068d0bSJeff Roberson 		vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200;
18632feb50bfSAttilio Rao 	else
1864e2068d0bSJeff Roberson 		vmd->vmd_free_min = 4;
1865e2068d0bSJeff Roberson 	vmd->vmd_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1866e2068d0bSJeff Roberson 	    vmd->vmd_interrupt_free_min;
1867e2068d0bSJeff Roberson 	vmd->vmd_free_reserved = vm_pageout_page_count +
1868e2068d0bSJeff Roberson 	    vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768);
1869e2068d0bSJeff Roberson 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
1870e2068d0bSJeff Roberson 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
1871e2068d0bSJeff Roberson 	vmd->vmd_free_min += vmd->vmd_free_reserved;
1872e2068d0bSJeff Roberson 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
1873e2068d0bSJeff Roberson 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
1874e2068d0bSJeff Roberson 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
1875e2068d0bSJeff Roberson 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
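	/*
	 * A worked example (a sketch, assuming 4KB pages, MAXBSIZE = 64KB,
	 * and vm_pageout_page_count = 32): a 1GB domain (262144 pages)
	 * yields vmd_free_min = 1309 before the reserve is added,
	 * vmd_pageout_free_min = 34, vmd_free_reserved = 407, and
	 * vmd_free_target = 5643 pages, or roughly 22MB.
	 */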
1876df8bae1dSRodney W. Grimes 
1877d9e23210SJeff Roberson 	/*
18785f8cd1c0SJeff Roberson 	 * Set the default wakeup threshold to be 10% below the paging
18795f8cd1c0SJeff Roberson 	 * target.  This keeps the steady state out of shortfall.
1880d9e23210SJeff Roberson 	 */
18815f8cd1c0SJeff Roberson 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
1882e2068d0bSJeff Roberson 
1883e2068d0bSJeff Roberson 	/*
1884e2068d0bSJeff Roberson 	 * Target amount of memory to move out of the laundry queue during a
1885e2068d0bSJeff Roberson 	 * background laundering.  This is proportional to the amount of system
1886e2068d0bSJeff Roberson 	 * memory.
1887e2068d0bSJeff Roberson 	 */
1888e2068d0bSJeff Roberson 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
1889e2068d0bSJeff Roberson 	    vmd->vmd_free_min) / 10;
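	/*
	 * Continuing the example above, the background laundering target
	 * would be (5643 - 1716) / 10, i.e., about 392 pages (~1.5MB).
	 */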
18905f8cd1c0SJeff Roberson 
18915f8cd1c0SJeff Roberson 	/* Initialize the pageout daemon pid controller. */
18925f8cd1c0SJeff Roberson 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
18935f8cd1c0SJeff Roberson 	    vmd->vmd_free_target, PIDCTRL_BOUND,
18945f8cd1c0SJeff Roberson 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
18955f8cd1c0SJeff Roberson 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
18965f8cd1c0SJeff Roberson 	    "pidctrl", CTLFLAG_RD, NULL, "");
18975f8cd1c0SJeff Roberson 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
1898e2068d0bSJeff Roberson }
1899e2068d0bSJeff Roberson 
1900e2068d0bSJeff Roberson static void
1901e2068d0bSJeff Roberson vm_pageout_init(void)
1902e2068d0bSJeff Roberson {
1903e2068d0bSJeff Roberson 	u_int freecount;
1904e2068d0bSJeff Roberson 	int i;
1905e2068d0bSJeff Roberson 
1906e2068d0bSJeff Roberson 	/*
1907e2068d0bSJeff Roberson 	 * Initialize some paging parameters.
1908e2068d0bSJeff Roberson 	 */
1909e2068d0bSJeff Roberson 	if (vm_cnt.v_page_count < 2000)
1910e2068d0bSJeff Roberson 		vm_pageout_page_count = 8;
1911e2068d0bSJeff Roberson 
1912e2068d0bSJeff Roberson 	freecount = 0;
1913e2068d0bSJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
1914e2068d0bSJeff Roberson 		struct vm_domain *vmd;
1915e2068d0bSJeff Roberson 
1916e2068d0bSJeff Roberson 		vm_pageout_init_domain(i);
1917e2068d0bSJeff Roberson 		vmd = VM_DOMAIN(i);
1918e2068d0bSJeff Roberson 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
1919e2068d0bSJeff Roberson 		vm_cnt.v_free_target += vmd->vmd_free_target;
1920e2068d0bSJeff Roberson 		vm_cnt.v_free_min += vmd->vmd_free_min;
1921e2068d0bSJeff Roberson 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
1922e2068d0bSJeff Roberson 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
1923e2068d0bSJeff Roberson 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
1924e2068d0bSJeff Roberson 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
1925e2068d0bSJeff Roberson 		freecount += vmd->vmd_free_count;
1926e2068d0bSJeff Roberson 	}
1927d9e23210SJeff Roberson 
1928d9e23210SJeff Roberson 	/*
1929d9e23210SJeff Roberson 	 * Set the interval, in seconds, for the active scan.  We want to
1930c9612b2dSJeff Roberson 	 * visit each page at least once every ten minutes, to prevent
1931c9612b2dSJeff Roberson 	 * worst-case paging behavior with a stale active LRU.
1932d9e23210SJeff Roberson 	 */
1933d9e23210SJeff Roberson 	if (vm_pageout_update_period == 0)
1934c9612b2dSJeff Roberson 		vm_pageout_update_period = 600;
1935d9e23210SJeff Roberson 
1936df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
1937e2068d0bSJeff Roberson 		vm_page_max_wired = freecount / 3;
19384d19f4adSSteven Hartland }
19394d19f4adSSteven Hartland 
19404d19f4adSSteven Hartland /*
19414d19f4adSSteven Hartland  *     vm_pageout is the high level pageout daemon.
19424d19f4adSSteven Hartland  */
19434d19f4adSSteven Hartland static void
19444d19f4adSSteven Hartland vm_pageout(void)
19454d19f4adSSteven Hartland {
194644ec2b63SKonstantin Belousov 	int error;
194744ec2b63SKonstantin Belousov 	int i;
1948df8bae1dSRodney W. Grimes 
194924a1cce3SDavid Greenman 	swap_pager_swap_init();
19503b8cf4acSMark Johnston 	snprintf(curthread->td_name, sizeof(curthread->td_name), "dom0");
1951ebcddc72SAlan Cox 	error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
1952ebcddc72SAlan Cox 	    0, 0, "laundry: dom0");
1953ebcddc72SAlan Cox 	if (error != 0)
1954ebcddc72SAlan Cox 		panic("starting laundry for domain 0, error %d", error);
1955449c2e92SKonstantin Belousov 	for (i = 1; i < vm_ndomains; i++) {
1956449c2e92SKonstantin Belousov 		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
1957449c2e92SKonstantin Belousov 		    curproc, NULL, 0, 0, "dom%d", i);
1958449c2e92SKonstantin Belousov 		if (error != 0) {
1959449c2e92SKonstantin Belousov 			panic("starting pageout for domain %d, error %d\n",
1960449c2e92SKonstantin Belousov 			    i, error);
1961dc2efb27SJohn Dyson 		}
1962e2068d0bSJeff Roberson 		error = kthread_add(vm_pageout_laundry_worker,
1963e2068d0bSJeff Roberson 		    (void *)(uintptr_t)i, curproc, NULL, 0, 0,
1964e2068d0bSJeff Roberson 		    "laundry: dom%d", i);
1965e2068d0bSJeff Roberson 		if (error != 0)
1966e2068d0bSJeff Roberson 			panic("starting laundry for domain %d, error %d",
1967e2068d0bSJeff Roberson 			    i, error);
1968f919ebdeSDavid Greenman 	}
196944ec2b63SKonstantin Belousov 	error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
197044ec2b63SKonstantin Belousov 	    0, 0, "uma");
197144ec2b63SKonstantin Belousov 	if (error != 0)
197244ec2b63SKonstantin Belousov 		panic("starting uma_reclaim helper, error %d\n", error);
1973d395270dSDimitry Andric 	vm_pageout_worker((void *)(uintptr_t)0);
1974df8bae1dSRodney W. Grimes }
197526f9a767SRodney W. Grimes 
19766b4b77adSAlan Cox /*
1977280d15cdSMark Johnston  * Perform an advisory wakeup of the page daemon.
19786b4b77adSAlan Cox  */
1979e0c5a895SJohn Dyson void
1980e2068d0bSJeff Roberson pagedaemon_wakeup(int domain)
1981e0c5a895SJohn Dyson {
1982e2068d0bSJeff Roberson 	struct vm_domain *vmd;
1983a1c0a785SAlan Cox 
1984e2068d0bSJeff Roberson 	vmd = VM_DOMAIN(domain);
1985*30fbfddaSJeff Roberson 	vm_domain_pageout_assert_unlocked(vmd);
1986*30fbfddaSJeff Roberson 	if (curproc == pageproc)
1987*30fbfddaSJeff Roberson 		return;
1988280d15cdSMark Johnston 
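	/*
	 * Only the first waker after the page daemon has cleared
	 * vmd_pageout_wanted needs to take the pageout lock and deliver
	 * the wakeup; concurrent callers see a non-zero count and return.
	 */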
1989*30fbfddaSJeff Roberson 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
1990*30fbfddaSJeff Roberson 		vm_domain_pageout_lock(vmd);
1991*30fbfddaSJeff Roberson 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
1992e2068d0bSJeff Roberson 		wakeup(&vmd->vmd_pageout_wanted);
1993*30fbfddaSJeff Roberson 		vm_domain_pageout_unlock(vmd);
1994e0c5a895SJohn Dyson 	}
1995e0c5a895SJohn Dyson }
1996