xref: /freebsd/sys/vm/vm_pageout.c (revision 936524aa02cdcfc3c7e153dd3147b4e5a013c62d)
1df8bae1dSRodney W. Grimes /*
226f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
326f9a767SRodney W. Grimes  * All rights reserved.
426f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
726f9a767SRodney W. Grimes  * All rights reserved.
8df8bae1dSRodney W. Grimes  *
9df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
10df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
215929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
403c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  *
43df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44df8bae1dSRodney W. Grimes  * All rights reserved.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
49df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
50df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
51df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
52df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61df8bae1dSRodney W. Grimes  *  School of Computer Science
62df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
63df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
66df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
6726f9a767SRodney W. Grimes  *
68c3aac50fSPeter Wemm  * $FreeBSD$
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
76df8bae1dSRodney W. Grimes #include <sys/param.h>
7726f9a767SRodney W. Grimes #include <sys/systm.h>
78b5e8ce9fSBruce Evans #include <sys/kernel.h>
7926f9a767SRodney W. Grimes #include <sys/proc.h>
809c8b8baaSPeter Wemm #include <sys/kthread.h>
810384fff8SJason Evans #include <sys/ktr.h>
8226f9a767SRodney W. Grimes #include <sys/resourcevar.h>
83d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
84f6b04d2bSDavid Greenman #include <sys/vnode.h>
85efeaf95aSDavid Greenman #include <sys/vmmeter.h>
8638efa82bSJohn Dyson #include <sys/sysctl.h>
87df8bae1dSRodney W. Grimes 
88df8bae1dSRodney W. Grimes #include <vm/vm.h>
89efeaf95aSDavid Greenman #include <vm/vm_param.h>
90996c772fSJohn Dyson #include <sys/lock.h>
91efeaf95aSDavid Greenman #include <vm/vm_object.h>
92df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
93efeaf95aSDavid Greenman #include <vm/vm_map.h>
94df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
9524a1cce3SDavid Greenman #include <vm/vm_pager.h>
9605f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
97efeaf95aSDavid Greenman #include <vm/vm_extern.h>
98df8bae1dSRodney W. Grimes 
990384fff8SJason Evans #include <machine/mutex.h>
1000384fff8SJason Evans 
1012b14f991SJulian Elischer /*
1022b14f991SJulian Elischer  * System initialization
1032b14f991SJulian Elischer  */
1042b14f991SJulian Elischer 
1052b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
1062b14f991SJulian Elischer static void vm_pageout __P((void));
1078f9110f6SJohn Dyson static int vm_pageout_clean __P((vm_page_t));
1083af76890SPoul-Henning Kamp static int vm_pageout_scan __P((void));
109f35329acSJohn Dyson static int vm_pageout_free_page_calc __P((vm_size_t count));
1102b14f991SJulian Elischer struct proc *pageproc;
1112b14f991SJulian Elischer 
1122b14f991SJulian Elischer static struct kproc_desc page_kp = {
1132b14f991SJulian Elischer 	"pagedaemon",
1142b14f991SJulian Elischer 	vm_pageout,
1152b14f991SJulian Elischer 	&pageproc
1162b14f991SJulian Elischer };
1179c8b8baaSPeter Wemm SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1182b14f991SJulian Elischer 
11938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1202b14f991SJulian Elischer /* the kernel process "vm_daemon"*/
1212b14f991SJulian Elischer static void vm_daemon __P((void));
122f708ef1bSPoul-Henning Kamp static struct	proc *vmproc;
1232b14f991SJulian Elischer 
1242b14f991SJulian Elischer static struct kproc_desc vm_kp = {
1252b14f991SJulian Elischer 	"vmdaemon",
1262b14f991SJulian Elischer 	vm_daemon,
1272b14f991SJulian Elischer 	&vmproc
1282b14f991SJulian Elischer };
1299c8b8baaSPeter Wemm SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
13038efa82bSJohn Dyson #endif
1312b14f991SJulian Elischer 
1322b14f991SJulian Elischer 
1332d8acc0fSJohn Dyson int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
1342d8acc0fSJohn Dyson int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
1352d8acc0fSJohn Dyson int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */
13626f9a767SRodney W. Grimes 
13738efa82bSJohn Dyson #if !defined(NO_SWAPPING)
138f708ef1bSPoul-Henning Kamp static int vm_pageout_req_swapout;	/* XXX */
139f708ef1bSPoul-Henning Kamp static int vm_daemon_needed;
14038efa82bSJohn Dyson #endif
1415663e6deSDavid Greenman extern int vm_swap_size;
142303b270bSEivind Eklund static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
143303b270bSEivind Eklund static int vm_pageout_full_stats_interval = 0;
144303b270bSEivind Eklund static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
145303b270bSEivind Eklund static int defer_swap_pageouts=0;
146303b270bSEivind Eklund static int disable_swap_pageouts=0;
14770111b90SJohn Dyson 
148303b270bSEivind Eklund static int max_page_launder=100;
149936524aaSMatthew Dillon static int vm_pageout_actcmp=0;
15038efa82bSJohn Dyson #if defined(NO_SWAPPING)
151303b270bSEivind Eklund static int vm_swap_enabled=0;
152303b270bSEivind Eklund static int vm_swap_idle_enabled=0;
15338efa82bSJohn Dyson #else
154303b270bSEivind Eklund static int vm_swap_enabled=1;
155303b270bSEivind Eklund static int vm_swap_idle_enabled=0;
15638efa82bSJohn Dyson #endif
15738efa82bSJohn Dyson 
15838efa82bSJohn Dyson SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
159b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");
16038efa82bSJohn Dyson 
161dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
162b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
163dc2efb27SJohn Dyson 
164dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
165b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
166dc2efb27SJohn Dyson 
167dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
168b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
169dc2efb27SJohn Dyson 
170dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
171b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
172dc2efb27SJohn Dyson 
17338efa82bSJohn Dyson #if defined(NO_SWAPPING)
174ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
175ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_enabled, 0, "");
176ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
177ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
17838efa82bSJohn Dyson #else
179ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
180b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
181ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
182b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
18338efa82bSJohn Dyson #endif
18426f9a767SRodney W. Grimes 
185ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
186b0359e2cSPeter Wemm 	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
18712ac6a1dSJohn Dyson 
188ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
189b0359e2cSPeter Wemm 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
19012ac6a1dSJohn Dyson 
191ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
192b0359e2cSPeter Wemm 	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
193936524aaSMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, vm_pageout_actcmp,
194936524aaSMatthew Dillon 	CTLFLAG_RD, &vm_pageout_actcmp, 0, "pagedaemon aggressiveness");
19570111b90SJohn Dyson 
19626f9a767SRodney W. Grimes 
197ffc82b0aSJohn Dyson #define VM_PAGEOUT_PAGE_COUNT 16
198bbc0ec52SDavid Greenman int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
199df8bae1dSRodney W. Grimes 
200c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
201df8bae1dSRodney W. Grimes 
20238efa82bSJohn Dyson #if !defined(NO_SWAPPING)
20338efa82bSJohn Dyson typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
20438efa82bSJohn Dyson static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
205cd41fc12SDavid Greenman static freeer_fcn_t vm_pageout_object_deactivate_pages;
206cd41fc12SDavid Greenman static void vm_req_vmdaemon __P((void));
20738efa82bSJohn Dyson #endif
208dc2efb27SJohn Dyson static void vm_pageout_page_stats(void);
209cd41fc12SDavid Greenman 
21026f9a767SRodney W. Grimes /*
21126f9a767SRodney W. Grimes  * vm_pageout_clean:
21224a1cce3SDavid Greenman  *
2130d94caffSDavid Greenman  * Clean the page and remove it from the laundry.
21426f9a767SRodney W. Grimes  *
2150d94caffSDavid Greenman  * We set the busy bit to cause potential page faults on this page to
2161c7c3c6aSMatthew Dillon  * block.  Note the careful timing, however: the busy bit isn't set until
2171c7c3c6aSMatthew Dillon  * late and we cannot do anything that will mess with the page.
21826f9a767SRodney W. Grimes  */
2191c7c3c6aSMatthew Dillon 
2203af76890SPoul-Henning Kamp static int
2218f9110f6SJohn Dyson vm_pageout_clean(m)
22224a1cce3SDavid Greenman 	vm_page_t m;
22324a1cce3SDavid Greenman {
22426f9a767SRodney W. Grimes 	register vm_object_t object;
225f35329acSJohn Dyson 	vm_page_t mc[2*vm_pageout_page_count];
22624a1cce3SDavid Greenman 	int pageout_count;
22790ecac61SMatthew Dillon 	int ib, is, page_base;
228a316d390SJohn Dyson 	vm_pindex_t pindex = m->pindex;
22926f9a767SRodney W. Grimes 
23026f9a767SRodney W. Grimes 	object = m->object;
23124a1cce3SDavid Greenman 
23226f9a767SRodney W. Grimes 	/*
2331c7c3c6aSMatthew Dillon 	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
2341c7c3c6aSMatthew Dillon 	 * with the new swapper, but we could have serious problems paging
2351c7c3c6aSMatthew Dillon 	 * out other object types if there is insufficient memory.
2361c7c3c6aSMatthew Dillon 	 *
2371c7c3c6aSMatthew Dillon 	 * Unfortunately, checking free memory here is far too late, so the
2381c7c3c6aSMatthew Dillon 	 * check has been moved up a procedural level.
2391c7c3c6aSMatthew Dillon 	 */
2401c7c3c6aSMatthew Dillon 
24124a1cce3SDavid Greenman 	/*
2428b03c8edSMatthew Dillon 	 * Don't mess with the page if it's busy, held, or special
24324a1cce3SDavid Greenman 	 */
2448f9110f6SJohn Dyson 	if ((m->hold_count != 0) ||
2458b03c8edSMatthew Dillon 	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
2460d94caffSDavid Greenman 		return 0;
2478b03c8edSMatthew Dillon 	}
2480d94caffSDavid Greenman 
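	/*
	 * Place the starting page at the midpoint of the mc[] array so that
	 * the reverse scan below can grow the cluster downward from
	 * page_base while the forward scan grows it upward.
	 */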
249f35329acSJohn Dyson 	mc[vm_pageout_page_count] = m;
25026f9a767SRodney W. Grimes 	pageout_count = 1;
251f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
25290ecac61SMatthew Dillon 	ib = 1;
25390ecac61SMatthew Dillon 	is = 1;
25490ecac61SMatthew Dillon 
25524a1cce3SDavid Greenman 	/*
25624a1cce3SDavid Greenman 	 * Scan object for clusterable pages.
25724a1cce3SDavid Greenman 	 *
25824a1cce3SDavid Greenman 	 * We can cluster ONLY if: ->> the page is NOT
25924a1cce3SDavid Greenman 	 * clean, wired, busy, held, or mapped into a
26024a1cce3SDavid Greenman 	 * buffer, and one of the following:
26124a1cce3SDavid Greenman 	 * 1) The page is inactive, or a seldom used
26224a1cce3SDavid Greenman 	 *    active page.
26324a1cce3SDavid Greenman 	 * -or-
26424a1cce3SDavid Greenman 	 * 2) we force the issue.
26590ecac61SMatthew Dillon 	 *
26690ecac61SMatthew Dillon 	 * During heavy mmap/modification loads the pageout
26790ecac61SMatthew Dillon 	 * daemon can really fragment the underlying file
26890ecac61SMatthew Dillon 	 * due to flushing pages out of order and not trying to
26990ecac61SMatthew Dillon 	 * align the clusters (which leaves sporadic out-of-order
27090ecac61SMatthew Dillon 	 * holes).  To solve this problem we do the reverse scan
27190ecac61SMatthew Dillon 	 * first and attempt to align our cluster, then do a
27290ecac61SMatthew Dillon 	 * forward scan if room remains.
27324a1cce3SDavid Greenman 	 */
27490ecac61SMatthew Dillon 
27590ecac61SMatthew Dillon more:
27690ecac61SMatthew Dillon 	while (ib && pageout_count < vm_pageout_page_count) {
27724a1cce3SDavid Greenman 		vm_page_t p;
278f6b04d2bSDavid Greenman 
27990ecac61SMatthew Dillon 		if (ib > pindex) {
28090ecac61SMatthew Dillon 			ib = 0;
28190ecac61SMatthew Dillon 			break;
282f6b04d2bSDavid Greenman 		}
28390ecac61SMatthew Dillon 
28490ecac61SMatthew Dillon 		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
28590ecac61SMatthew Dillon 			ib = 0;
28690ecac61SMatthew Dillon 			break;
28790ecac61SMatthew Dillon 		}
2885070c7f8SJohn Dyson 		if (((p->queue - p->pc) == PQ_CACHE) ||
2898b03c8edSMatthew Dillon 		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
29090ecac61SMatthew Dillon 			ib = 0;
29190ecac61SMatthew Dillon 			break;
292f6b04d2bSDavid Greenman 		}
29324a1cce3SDavid Greenman 		vm_page_test_dirty(p);
29490ecac61SMatthew Dillon 		if ((p->dirty & p->valid) == 0 ||
29590ecac61SMatthew Dillon 		    p->queue != PQ_INACTIVE ||
29690ecac61SMatthew Dillon 		    p->wire_count != 0 ||
29790ecac61SMatthew Dillon 		    p->hold_count != 0) {
29890ecac61SMatthew Dillon 			ib = 0;
29924a1cce3SDavid Greenman 			break;
300f6b04d2bSDavid Greenman 		}
30190ecac61SMatthew Dillon 		mc[--page_base] = p;
30290ecac61SMatthew Dillon 		++pageout_count;
30390ecac61SMatthew Dillon 		++ib;
30424a1cce3SDavid Greenman 		/*
30590ecac61SMatthew Dillon 		 * alignment boundary: stop here and switch directions.  Do
30690ecac61SMatthew Dillon 		 * not clear ib.
30724a1cce3SDavid Greenman 		 */
30890ecac61SMatthew Dillon 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
30990ecac61SMatthew Dillon 			break;
31024a1cce3SDavid Greenman 	}
31190ecac61SMatthew Dillon 
31290ecac61SMatthew Dillon 	while (pageout_count < vm_pageout_page_count &&
31390ecac61SMatthew Dillon 	    pindex + is < object->size) {
31490ecac61SMatthew Dillon 		vm_page_t p;
31590ecac61SMatthew Dillon 
31690ecac61SMatthew Dillon 		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
31790ecac61SMatthew Dillon 			break;
3185070c7f8SJohn Dyson 		if (((p->queue - p->pc) == PQ_CACHE) ||
3198b03c8edSMatthew Dillon 		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
32090ecac61SMatthew Dillon 			break;
32124a1cce3SDavid Greenman 		}
32224a1cce3SDavid Greenman 		vm_page_test_dirty(p);
32390ecac61SMatthew Dillon 		if ((p->dirty & p->valid) == 0 ||
32490ecac61SMatthew Dillon 		    p->queue != PQ_INACTIVE ||
32590ecac61SMatthew Dillon 		    p->wire_count != 0 ||
32690ecac61SMatthew Dillon 		    p->hold_count != 0) {
32724a1cce3SDavid Greenman 			break;
32824a1cce3SDavid Greenman 		}
32990ecac61SMatthew Dillon 		mc[page_base + pageout_count] = p;
33090ecac61SMatthew Dillon 		++pageout_count;
33190ecac61SMatthew Dillon 		++is;
33224a1cce3SDavid Greenman 	}
33390ecac61SMatthew Dillon 
33490ecac61SMatthew Dillon 	/*
33590ecac61SMatthew Dillon 	 * If we exhausted our forward scan, continue with the reverse scan
33690ecac61SMatthew Dillon 	 * when possible, even past a page boundary.  This catches boundary
33790ecac61SMatthew Dillon 	 * conditions.
33890ecac61SMatthew Dillon 	 */
33990ecac61SMatthew Dillon 	if (ib && pageout_count < vm_pageout_page_count)
34090ecac61SMatthew Dillon 		goto more;
341f6b04d2bSDavid Greenman 
34267bf6868SJohn Dyson 	/*
34367bf6868SJohn Dyson 	 * we allow reads during pageouts...
34467bf6868SJohn Dyson 	 */
3458f9110f6SJohn Dyson 	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
346aef922f5SJohn Dyson }
347aef922f5SJohn Dyson 
3481c7c3c6aSMatthew Dillon /*
3491c7c3c6aSMatthew Dillon  * vm_pageout_flush() - launder the given pages
3501c7c3c6aSMatthew Dillon  *
3511c7c3c6aSMatthew Dillon  *	The given pages are laundered.  Note that we set up for the start of
3521c7c3c6aSMatthew Dillon  *	I/O (i.e. busy the page), mark it read-only, and bump the object's
3531c7c3c6aSMatthew Dillon  *	paging-in-progress count, all in here rather than in the parent.  If we want
3541c7c3c6aSMatthew Dillon  *	the parent to do more sophisticated things we may have to change
3551c7c3c6aSMatthew Dillon  *	the ordering.
3561c7c3c6aSMatthew Dillon  */
3571c7c3c6aSMatthew Dillon 
358aef922f5SJohn Dyson int
3598f9110f6SJohn Dyson vm_pageout_flush(mc, count, flags)
360aef922f5SJohn Dyson 	vm_page_t *mc;
361aef922f5SJohn Dyson 	int count;
3628f9110f6SJohn Dyson 	int flags;
363aef922f5SJohn Dyson {
364aef922f5SJohn Dyson 	register vm_object_t object;
365aef922f5SJohn Dyson 	int pageout_status[count];
36695461b45SJohn Dyson 	int numpagedout = 0;
367aef922f5SJohn Dyson 	int i;
368aef922f5SJohn Dyson 
3691c7c3c6aSMatthew Dillon 	/*
3701c7c3c6aSMatthew Dillon 	 * Initiate I/O.  Bump the vm_page_t->busy counter and
3711c7c3c6aSMatthew Dillon 	 * mark the pages read-only.
3721c7c3c6aSMatthew Dillon 	 *
3731c7c3c6aSMatthew Dillon 	 * We do not have to fixup the clean/dirty bits here... we can
3741c7c3c6aSMatthew Dillon 	 * allow the pager to do it after the I/O completes.
3751c7c3c6aSMatthew Dillon 	 */
3761c7c3c6aSMatthew Dillon 
3778f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
378936524aaSMatthew Dillon 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL && mc[i]->dirty == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially dirty page", mc[i], i, count));
379e69763a3SDoug Rabson 		vm_page_io_start(mc[i]);
3808f9110f6SJohn Dyson 		vm_page_protect(mc[i], VM_PROT_READ);
3818f9110f6SJohn Dyson 	}
3828f9110f6SJohn Dyson 
383aef922f5SJohn Dyson 	object = mc[0]->object;
384d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
385aef922f5SJohn Dyson 
386aef922f5SJohn Dyson 	vm_pager_put_pages(object, mc, count,
3878f9110f6SJohn Dyson 	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
38826f9a767SRodney W. Grimes 	    pageout_status);
38926f9a767SRodney W. Grimes 
390aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
391aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
39224a1cce3SDavid Greenman 
39326f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
39426f9a767SRodney W. Grimes 		case VM_PAGER_OK:
39595461b45SJohn Dyson 			numpagedout++;
39626f9a767SRodney W. Grimes 			break;
39726f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
39895461b45SJohn Dyson 			numpagedout++;
39926f9a767SRodney W. Grimes 			break;
40026f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
40126f9a767SRodney W. Grimes 			/*
4020d94caffSDavid Greenman 			 * Page outside of range of object. Right now we
4030d94caffSDavid Greenman 			 * essentially lose the changes by pretending it
4040d94caffSDavid Greenman 			 * worked.
40526f9a767SRodney W. Grimes 			 */
4060385347cSPeter Wemm 			pmap_clear_modify(mt);
40790ecac61SMatthew Dillon 			vm_page_undirty(mt);
40826f9a767SRodney W. Grimes 			break;
40926f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
41026f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
41126f9a767SRodney W. Grimes 			/*
4120d94caffSDavid Greenman 			 * If page couldn't be paged out, then reactivate the
4130d94caffSDavid Greenman 			 * page so it doesn't clog the inactive list.  (We
4140d94caffSDavid Greenman 			 * will try to page it out again later).
41526f9a767SRodney W. Grimes 			 */
41624a1cce3SDavid Greenman 			vm_page_activate(mt);
41726f9a767SRodney W. Grimes 			break;
41826f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
41926f9a767SRodney W. Grimes 			break;
42026f9a767SRodney W. Grimes 		}
42126f9a767SRodney W. Grimes 
42226f9a767SRodney W. Grimes 		/*
4230d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
4240d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
4250d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
4260d94caffSDavid Greenman 		 * collapse.
42726f9a767SRodney W. Grimes 		 */
42826f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
429f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
430e69763a3SDoug Rabson 			vm_page_io_finish(mt);
431936524aaSMatthew Dillon 			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
432936524aaSMatthew Dillon 				vm_page_protect(mt, VM_PROT_READ);
43326f9a767SRodney W. Grimes 		}
43426f9a767SRodney W. Grimes 	}
43595461b45SJohn Dyson 	return numpagedout;
43626f9a767SRodney W. Grimes }
43726f9a767SRodney W. Grimes 
43838efa82bSJohn Dyson #if !defined(NO_SWAPPING)
43926f9a767SRodney W. Grimes /*
44026f9a767SRodney W. Grimes  *	vm_pageout_object_deactivate_pages
44126f9a767SRodney W. Grimes  *
44226f9a767SRodney W. Grimes  *	deactivate enough pages to satisfy the inactive target
44326f9a767SRodney W. Grimes  *	requirements or if vm_page_proc_limit is set, then
44426f9a767SRodney W. Grimes  *	deactivate all of the pages in the object and its
44524a1cce3SDavid Greenman  *	backing_objects.
44626f9a767SRodney W. Grimes  *
44726f9a767SRodney W. Grimes  *	The object and map must be locked.
44826f9a767SRodney W. Grimes  */
44938efa82bSJohn Dyson static void
45038efa82bSJohn Dyson vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
45126f9a767SRodney W. Grimes 	vm_map_t map;
45226f9a767SRodney W. Grimes 	vm_object_t object;
45338efa82bSJohn Dyson 	vm_pindex_t desired;
4540d94caffSDavid Greenman 	int map_remove_only;
45526f9a767SRodney W. Grimes {
45626f9a767SRodney W. Grimes 	register vm_page_t p, next;
45726f9a767SRodney W. Grimes 	int rcount;
45838efa82bSJohn Dyson 	int remove_mode;
4591eeaa1e3SJohn Dyson 	int s;
46026f9a767SRodney W. Grimes 
46124964514SPeter Wemm 	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
46238efa82bSJohn Dyson 		return;
4638f895206SDavid Greenman 
46438efa82bSJohn Dyson 	while (object) {
465b1028ad1SLuoqi Chen 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
46638efa82bSJohn Dyson 			return;
46724a1cce3SDavid Greenman 		if (object->paging_in_progress)
46838efa82bSJohn Dyson 			return;
46926f9a767SRodney W. Grimes 
47038efa82bSJohn Dyson 		remove_mode = map_remove_only;
47138efa82bSJohn Dyson 		if (object->shadow_count > 1)
47238efa82bSJohn Dyson 			remove_mode = 1;
47326f9a767SRodney W. Grimes 	/*
47426f9a767SRodney W. Grimes 	 * scan the object's entire memory queue
47526f9a767SRodney W. Grimes 	 */
47626f9a767SRodney W. Grimes 		rcount = object->resident_page_count;
477b18bfc3dSJohn Dyson 		p = TAILQ_FIRST(&object->memq);
47826f9a767SRodney W. Grimes 		while (p && (rcount-- > 0)) {
4797e006499SJohn Dyson 			int actcount;
480b1028ad1SLuoqi Chen 			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
48138efa82bSJohn Dyson 				return;
482b18bfc3dSJohn Dyson 			next = TAILQ_NEXT(p, listq);
483a58d1fa1SDavid Greenman 			cnt.v_pdpages++;
4840d94caffSDavid Greenman 			if (p->wire_count != 0 ||
4850d94caffSDavid Greenman 			    p->hold_count != 0 ||
4860d94caffSDavid Greenman 			    p->busy != 0 ||
4878b03c8edSMatthew Dillon 			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
4880385347cSPeter Wemm 			    !pmap_page_exists(vm_map_pmap(map), p)) {
4890d94caffSDavid Greenman 				p = next;
4900d94caffSDavid Greenman 				continue;
4910d94caffSDavid Greenman 			}
492ef743ce6SJohn Dyson 
4930385347cSPeter Wemm 			actcount = pmap_ts_referenced(p);
4947e006499SJohn Dyson 			if (actcount) {
495e69763a3SDoug Rabson 				vm_page_flag_set(p, PG_REFERENCED);
496c8c4b40cSJohn Dyson 			} else if (p->flags & PG_REFERENCED) {
4977e006499SJohn Dyson 				actcount = 1;
498ef743ce6SJohn Dyson 			}
499ef743ce6SJohn Dyson 
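			/*
			 * Referenced pages that are not yet on the active
			 * queue are activated.  Active pages that show no
			 * recent references have their activity count aged
			 * and, unless remove_mode is set, are deactivated
			 * once it drains (or immediately under the strict
			 * LRU algorithm).
			 */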
50038efa82bSJohn Dyson 			if ((p->queue != PQ_ACTIVE) &&
50138efa82bSJohn Dyson 				(p->flags & PG_REFERENCED)) {
502ef743ce6SJohn Dyson 				vm_page_activate(p);
5037e006499SJohn Dyson 				p->act_count += actcount;
504e69763a3SDoug Rabson 				vm_page_flag_clear(p, PG_REFERENCED);
505c8c4b40cSJohn Dyson 			} else if (p->queue == PQ_ACTIVE) {
506ef743ce6SJohn Dyson 				if ((p->flags & PG_REFERENCED) == 0) {
507c8c4b40cSJohn Dyson 					p->act_count -= min(p->act_count, ACT_DECLINE);
508c8c4b40cSJohn Dyson 					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
509b18bfc3dSJohn Dyson 						vm_page_protect(p, VM_PROT_NONE);
51026f9a767SRodney W. Grimes 						vm_page_deactivate(p);
51126f9a767SRodney W. Grimes 					} else {
512c8c4b40cSJohn Dyson 						s = splvm();
513be72f788SAlan Cox 						TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
514be72f788SAlan Cox 						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
515c8c4b40cSJohn Dyson 						splx(s);
516c8c4b40cSJohn Dyson 					}
517c8c4b40cSJohn Dyson 				} else {
518eaf13dd7SJohn Dyson 					vm_page_activate(p);
519e69763a3SDoug Rabson 					vm_page_flag_clear(p, PG_REFERENCED);
52038efa82bSJohn Dyson 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
52138efa82bSJohn Dyson 						p->act_count += ACT_ADVANCE;
5221eeaa1e3SJohn Dyson 					s = splvm();
523be72f788SAlan Cox 					TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
524be72f788SAlan Cox 					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
5251eeaa1e3SJohn Dyson 					splx(s);
52626f9a767SRodney W. Grimes 				}
527bd7e5f99SJohn Dyson 			} else if (p->queue == PQ_INACTIVE) {
528f919ebdeSDavid Greenman 				vm_page_protect(p, VM_PROT_NONE);
52926f9a767SRodney W. Grimes 			}
53026f9a767SRodney W. Grimes 			p = next;
53126f9a767SRodney W. Grimes 		}
53238efa82bSJohn Dyson 		object = object->backing_object;
53338efa82bSJohn Dyson 	}
53438efa82bSJohn Dyson 	return;
53526f9a767SRodney W. Grimes }
53626f9a767SRodney W. Grimes 
53726f9a767SRodney W. Grimes /*
53826f9a767SRodney W. Grimes  * deactivate some number of pages in a map; try to do it fairly, but
53926f9a767SRodney W. Grimes  * that is really hard to do.
54026f9a767SRodney W. Grimes  */
541cd41fc12SDavid Greenman static void
54238efa82bSJohn Dyson vm_pageout_map_deactivate_pages(map, desired)
54326f9a767SRodney W. Grimes 	vm_map_t map;
54438efa82bSJohn Dyson 	vm_pindex_t desired;
54526f9a767SRodney W. Grimes {
54626f9a767SRodney W. Grimes 	vm_map_entry_t tmpe;
54738efa82bSJohn Dyson 	vm_object_t obj, bigobj;
5480d94caffSDavid Greenman 
549996c772fSJohn Dyson 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
55026f9a767SRodney W. Grimes 		return;
55126f9a767SRodney W. Grimes 	}
55238efa82bSJohn Dyson 
55338efa82bSJohn Dyson 	bigobj = NULL;
55438efa82bSJohn Dyson 
55538efa82bSJohn Dyson 	/*
55638efa82bSJohn Dyson 	 * first, search out the biggest object, and try to free pages from
55738efa82bSJohn Dyson 	 * that.
55838efa82bSJohn Dyson 	 */
55926f9a767SRodney W. Grimes 	tmpe = map->header.next;
56038efa82bSJohn Dyson 	while (tmpe != &map->header) {
5619fdfe602SMatthew Dillon 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
56238efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
56338efa82bSJohn Dyson 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
56438efa82bSJohn Dyson 				((bigobj == NULL) ||
56538efa82bSJohn Dyson 				 (bigobj->resident_page_count < obj->resident_page_count))) {
56638efa82bSJohn Dyson 				bigobj = obj;
56738efa82bSJohn Dyson 			}
56838efa82bSJohn Dyson 		}
56938efa82bSJohn Dyson 		tmpe = tmpe->next;
57038efa82bSJohn Dyson 	}
57138efa82bSJohn Dyson 
57238efa82bSJohn Dyson 	if (bigobj)
57338efa82bSJohn Dyson 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
57438efa82bSJohn Dyson 
57538efa82bSJohn Dyson 	/*
57638efa82bSJohn Dyson 	 * Next, hunt around for other pages to deactivate.  We actually
57738efa82bSJohn Dyson 	 * do this search sort of wrong -- .text first is not the best idea.
57838efa82bSJohn Dyson 	 */
57938efa82bSJohn Dyson 	tmpe = map->header.next;
58038efa82bSJohn Dyson 	while (tmpe != &map->header) {
581b1028ad1SLuoqi Chen 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
58238efa82bSJohn Dyson 			break;
5839fdfe602SMatthew Dillon 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
58438efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
58501155bd7SDavid Greenman 			if (obj)
58638efa82bSJohn Dyson 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
58738efa82bSJohn Dyson 		}
58826f9a767SRodney W. Grimes 		tmpe = tmpe->next;
58926f9a767SRodney W. Grimes 	}
59038efa82bSJohn Dyson 
59138efa82bSJohn Dyson 	/*
59238efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out; this will free page
59338efa82bSJohn Dyson 	 * table pages.
59438efa82bSJohn Dyson 	 */
59538efa82bSJohn Dyson 	if (desired == 0)
59638efa82bSJohn Dyson 		pmap_remove(vm_map_pmap(map),
59738efa82bSJohn Dyson 			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
59838efa82bSJohn Dyson 	vm_map_unlock(map);
59926f9a767SRodney W. Grimes 	return;
60026f9a767SRodney W. Grimes }
60138efa82bSJohn Dyson #endif
602df8bae1dSRodney W. Grimes 
6031c7c3c6aSMatthew Dillon /*
6041c7c3c6aSMatthew Dillon  * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
6051c7c3c6aSMatthew Dillon  * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
6061c7c3c6aSMatthew Dillon  * which we know can be trivially freed.
6071c7c3c6aSMatthew Dillon  */
6081c7c3c6aSMatthew Dillon 
609925a3a41SJohn Dyson void
610925a3a41SJohn Dyson vm_pageout_page_free(vm_page_t m) {
6111c7c3c6aSMatthew Dillon 	vm_object_t object = m->object;
6121c7c3c6aSMatthew Dillon 	int type = object->type;
613925a3a41SJohn Dyson 
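	/*
	 * Hold a temporary reference on default and swap objects so the
	 * object cannot be torn down while we free what may be its last
	 * resident page; the reference is dropped again below.
	 */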
6141c7c3c6aSMatthew Dillon 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
6151c7c3c6aSMatthew Dillon 		vm_object_reference(object);
616e69763a3SDoug Rabson 	vm_page_busy(m);
617925a3a41SJohn Dyson 	vm_page_protect(m, VM_PROT_NONE);
618925a3a41SJohn Dyson 	vm_page_free(m);
6191c7c3c6aSMatthew Dillon 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
62047221757SJohn Dyson 		vm_object_deallocate(object);
621925a3a41SJohn Dyson }
622925a3a41SJohn Dyson 
623df8bae1dSRodney W. Grimes /*
624df8bae1dSRodney W. Grimes  *	vm_pageout_scan does the dirty work for the pageout daemon.
625df8bae1dSRodney W. Grimes  */
6263af76890SPoul-Henning Kamp static int
627df8bae1dSRodney W. Grimes vm_pageout_scan()
628df8bae1dSRodney W. Grimes {
629502ba6e4SJohn Dyson 	vm_page_t m, next;
630936524aaSMatthew Dillon 	struct vm_page marker;
6311c7c3c6aSMatthew Dillon 	int page_shortage, maxscan, pcount;
6321c7c3c6aSMatthew Dillon 	int addl_page_shortage, addl_page_shortage_init;
63370111b90SJohn Dyson 	int maxlaunder;
6345663e6deSDavid Greenman 	struct proc *p, *bigproc;
6355663e6deSDavid Greenman 	vm_offset_t size, bigsize;
636df8bae1dSRodney W. Grimes 	vm_object_t object;
63726f9a767SRodney W. Grimes 	int force_wakeup = 0;
6387e006499SJohn Dyson 	int actcount;
639f6b04d2bSDavid Greenman 	int vnodes_skipped = 0;
6401eeaa1e3SJohn Dyson 	int s;
6410d94caffSDavid Greenman 
642df8bae1dSRodney W. Grimes 	/*
6435985940eSJohn Dyson 	 * Do whatever cleanup that the pmap code can.
6445985940eSJohn Dyson 	 */
6455985940eSJohn Dyson 	pmap_collect();
6465985940eSJohn Dyson 
6471c7c3c6aSMatthew Dillon 	addl_page_shortage_init = vm_pageout_deficit;
64895461b45SJohn Dyson 	vm_pageout_deficit = 0;
649b182ec9eSJohn Dyson 
650ceb0cf87SJohn Dyson 	if (max_page_launder == 0)
651ceb0cf87SJohn Dyson 		max_page_launder = 1;
6521c7c3c6aSMatthew Dillon 
6531c7c3c6aSMatthew Dillon 	/*
6541c7c3c6aSMatthew Dillon 	 * Calculate the number of pages we want to either free or move
655936524aaSMatthew Dillon 	 * to the cache.  Be more aggressive if we aren't making our target.
6561c7c3c6aSMatthew Dillon 	 */
6571c7c3c6aSMatthew Dillon 
658936524aaSMatthew Dillon 	page_shortage = vm_paging_target() +
659936524aaSMatthew Dillon 		addl_page_shortage_init + vm_pageout_actcmp;
6601c7c3c6aSMatthew Dillon 
6611c7c3c6aSMatthew Dillon 	/*
662936524aaSMatthew Dillon 	 * Figure out how aggressively we should flush dirty pages.
6631c7c3c6aSMatthew Dillon 	 */
664936524aaSMatthew Dillon 	{
665936524aaSMatthew Dillon 		int factor = vm_pageout_actcmp;
6661c7c3c6aSMatthew Dillon 
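		/*
		 * The aggressiveness factor (vm_pageout_actcmp) raises both
		 * the nominal and the maximum launder count when earlier
		 * passes failed to meet the paging target.
		 */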
667936524aaSMatthew Dillon 		maxlaunder = cnt.v_inactive_target / 3 + factor;
668936524aaSMatthew Dillon 		if (maxlaunder > max_page_launder + factor)
669936524aaSMatthew Dillon 			maxlaunder = max_page_launder + factor;
6701c7c3c6aSMatthew Dillon 	}
6711c7c3c6aSMatthew Dillon 
6721c7c3c6aSMatthew Dillon 	/*
673936524aaSMatthew Dillon 	 * Initialize our marker
674936524aaSMatthew Dillon 	 */
675936524aaSMatthew Dillon 	bzero(&marker, sizeof(marker));
676936524aaSMatthew Dillon 	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
677936524aaSMatthew Dillon 	marker.queue = PQ_INACTIVE;
678936524aaSMatthew Dillon 	marker.wire_count = 1;
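	/*
	 * The marker is a dummy fictitious page.  It is inserted into the
	 * inactive queue immediately after a page being laundered so that
	 * the scan can resume at the correct place even if the queue is
	 * shuffled while vm_pageout_clean() is blocked.
	 */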
679936524aaSMatthew Dillon 
680936524aaSMatthew Dillon 	/*
6811c7c3c6aSMatthew Dillon 	 * Start scanning the inactive queue for pages we can move to the
6821c7c3c6aSMatthew Dillon 	 * cache or free.  The scan will stop when the target is reached or
683936524aaSMatthew Dillon 	 * we have scanned the entire inactive queue.  Note that m->act_count
684936524aaSMatthew Dillon 	 * is not used to form decisions for the inactive queue, only for the
685936524aaSMatthew Dillon 	 * active queue.
6861c7c3c6aSMatthew Dillon 	 */
68770111b90SJohn Dyson 
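	/*
	 * The scan restarts at rescan0 whenever the current page is found
	 * to have left the inactive queue while we were examining it.
	 */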
68867bf6868SJohn Dyson rescan0:
6891c7c3c6aSMatthew Dillon 	addl_page_shortage = addl_page_shortage_init;
690f6b04d2bSDavid Greenman 	maxscan = cnt.v_inactive_count;
691be72f788SAlan Cox 	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
6921c7c3c6aSMatthew Dillon 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
693e929c00dSKirk McKusick 	     m = next) {
694df8bae1dSRodney W. Grimes 
695a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
696b182ec9eSJohn Dyson 
697f35329acSJohn Dyson 		if (m->queue != PQ_INACTIVE) {
69867bf6868SJohn Dyson 			goto rescan0;
699f35329acSJohn Dyson 		}
700b182ec9eSJohn Dyson 
701b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
702df8bae1dSRodney W. Grimes 
703936524aaSMatthew Dillon 		/*
704936524aaSMatthew Dillon 		 * skip marker pages
705936524aaSMatthew Dillon 		 */
706936524aaSMatthew Dillon 		if (m->flags & PG_MARKER)
707936524aaSMatthew Dillon 			continue;
708936524aaSMatthew Dillon 
709b182ec9eSJohn Dyson 		if (m->hold_count) {
710f35329acSJohn Dyson 			s = splvm();
711be72f788SAlan Cox 			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
712be72f788SAlan Cox 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
713f35329acSJohn Dyson 			splx(s);
714b182ec9eSJohn Dyson 			addl_page_shortage++;
715b182ec9eSJohn Dyson 			continue;
716df8bae1dSRodney W. Grimes 		}
71726f9a767SRodney W. Grimes 		/*
718b18bfc3dSJohn Dyson 		 * Don't mess with busy pages; keep them at the front of the
719b18bfc3dSJohn Dyson 		 * queue, they are most likely being paged out.
72026f9a767SRodney W. Grimes 		 */
721bd7e5f99SJohn Dyson 		if (m->busy || (m->flags & PG_BUSY)) {
722b182ec9eSJohn Dyson 			addl_page_shortage++;
72326f9a767SRodney W. Grimes 			continue;
72426f9a767SRodney W. Grimes 		}
725bd7e5f99SJohn Dyson 
7267e006499SJohn Dyson 		/*
7271c7c3c6aSMatthew Dillon 		 * If the object is not being used, we ignore previous
7281c7c3c6aSMatthew Dillon 		 * references.
7297e006499SJohn Dyson 		 */
7300d94caffSDavid Greenman 		if (m->object->ref_count == 0) {
731e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
7320385347cSPeter Wemm 			pmap_clear_reference(m);
7337e006499SJohn Dyson 
7347e006499SJohn Dyson 		/*
7351c7c3c6aSMatthew Dillon 		 * Otherwise, if the page has been referenced while in the
7361c7c3c6aSMatthew Dillon 		 * inactive queue, we bump the "activation count" upwards,
7371c7c3c6aSMatthew Dillon 		 * making it less likely that the page will be added back to
7381c7c3c6aSMatthew Dillon 		 * the inactive queue prematurely again.  Here we check the
7391c7c3c6aSMatthew Dillon 		 * page tables (or emulated bits, if any), given the upper
7401c7c3c6aSMatthew Dillon 		 * level VM system not knowing anything about existing
7411c7c3c6aSMatthew Dillon 		 * references.
7427e006499SJohn Dyson 		 */
743ef743ce6SJohn Dyson 		} else if (((m->flags & PG_REFERENCED) == 0) &&
7440385347cSPeter Wemm 			(actcount = pmap_ts_referenced(m))) {
745ef743ce6SJohn Dyson 			vm_page_activate(m);
7467e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE);
747ef743ce6SJohn Dyson 			continue;
7482fe6e4d7SDavid Greenman 		}
749ef743ce6SJohn Dyson 
7507e006499SJohn Dyson 		/*
7511c7c3c6aSMatthew Dillon 		 * If the upper level VM system knows about any page
7521c7c3c6aSMatthew Dillon 		 * references, we activate the page.  We also set the
7531c7c3c6aSMatthew Dillon 		 * "activation count" higher than normal so that we are less
7541c7c3c6aSMatthew Dillon 		 * likely to place the page back onto the inactive queue again.
7557e006499SJohn Dyson 		 */
756bd7e5f99SJohn Dyson 		if ((m->flags & PG_REFERENCED) != 0) {
757e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
7580385347cSPeter Wemm 			actcount = pmap_ts_referenced(m);
75926f9a767SRodney W. Grimes 			vm_page_activate(m);
7607e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE + 1);
7610d94caffSDavid Greenman 			continue;
7620d94caffSDavid Greenman 		}
76367bf6868SJohn Dyson 
7647e006499SJohn Dyson 		/*
7651c7c3c6aSMatthew Dillon 		 * If the upper level VM system doesn't know anything about
7661c7c3c6aSMatthew Dillon 		 * the page being dirty, we have to check for it again.  As
7671c7c3c6aSMatthew Dillon 		 * far as the VM code knows, any partially dirty pages are
7681c7c3c6aSMatthew Dillon 		 * fully dirty.
7697e006499SJohn Dyson 		 */
770f6b04d2bSDavid Greenman 		if (m->dirty == 0) {
771bd7e5f99SJohn Dyson 			vm_page_test_dirty(m);
772427e99a0SAlexander Langer 		} else {
7737dbf82dcSMatthew Dillon 			vm_page_dirty(m);
77430dcfc09SJohn Dyson 		}
775ef743ce6SJohn Dyson 
7767e006499SJohn Dyson 		/*
7777e006499SJohn Dyson 		 * Invalid pages can be easily freed
7787e006499SJohn Dyson 		 */
7796d40c3d3SDavid Greenman 		if (m->valid == 0) {
780925a3a41SJohn Dyson 			vm_pageout_page_free(m);
78167bf6868SJohn Dyson 			cnt.v_dfree++;
7821c7c3c6aSMatthew Dillon 			--page_shortage;
7837e006499SJohn Dyson 
7847e006499SJohn Dyson 		/*
785936524aaSMatthew Dillon 		 * Clean pages can be placed onto the cache queue.  This
786936524aaSMatthew Dillon 		 * effectively frees them.
7877e006499SJohn Dyson 		 */
788bd7e5f99SJohn Dyson 		} else if (m->dirty == 0) {
789bd7e5f99SJohn Dyson 			vm_page_cache(m);
7901c7c3c6aSMatthew Dillon 			--page_shortage;
7917e006499SJohn Dyson 
7927e006499SJohn Dyson 		/*
7937e006499SJohn Dyson 		 * Dirty pages need to be paged out.  Note that we clean
7947e006499SJohn Dyson 		 * only a limited number of pages per pagedaemon pass.
7957e006499SJohn Dyson 		 */
7960d94caffSDavid Greenman 		} else if (maxlaunder > 0) {
79712ac6a1dSJohn Dyson 			int swap_pageouts_ok;
798f6b04d2bSDavid Greenman 			struct vnode *vp = NULL;
799f2a2857bSKirk McKusick 			struct mount *mp;
8000d94caffSDavid Greenman 
8010d94caffSDavid Greenman 			object = m->object;
8027e006499SJohn Dyson 
80312ac6a1dSJohn Dyson 			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
80412ac6a1dSJohn Dyson 				swap_pageouts_ok = 1;
80512ac6a1dSJohn Dyson 			} else {
80612ac6a1dSJohn Dyson 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
80712ac6a1dSJohn Dyson 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
80890ecac61SMatthew Dillon 				vm_page_count_min());
80912ac6a1dSJohn Dyson 
81012ac6a1dSJohn Dyson 			}
81170111b90SJohn Dyson 
81270111b90SJohn Dyson 			/*
8131c7c3c6aSMatthew Dillon 			 * We don't bother paging objects that are "dead".
8141c7c3c6aSMatthew Dillon 			 * Those objects are in a "rundown" state.
81570111b90SJohn Dyson 			 */
81670111b90SJohn Dyson 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
81712ac6a1dSJohn Dyson 				s = splvm();
818be72f788SAlan Cox 				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
819be72f788SAlan Cox 				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
82012ac6a1dSJohn Dyson 				splx(s);
82112ac6a1dSJohn Dyson 				continue;
82212ac6a1dSJohn Dyson 			}
82312ac6a1dSJohn Dyson 
8241c7c3c6aSMatthew Dillon 			/*
8251c7c3c6aSMatthew Dillon 			 * Presumably we have sufficient free memory to do
8261c7c3c6aSMatthew Dillon 			 * the more sophisticated checks and locking required
8271c7c3c6aSMatthew Dillon 			 * for vnodes.
8281c7c3c6aSMatthew Dillon 			 *
8291c7c3c6aSMatthew Dillon 			 * The object is already known NOT to be dead.  The
8301c7c3c6aSMatthew Dillon 			 * vget() may still block, though, because
8311c7c3c6aSMatthew Dillon 			 * VOP_ISLOCKED() doesn't check to see if an inode
8321c7c3c6aSMatthew Dillon 			 * (v_data) is associated with the vnode.  If it isn't,
8331c7c3c6aSMatthew Dillon 			 * vget() will load in it from disk.  Worse, vget()
8341c7c3c6aSMatthew Dillon 			 * vget() will load it in from disk.  Worse, vget()
8351c7c3c6aSMatthew Dillon 			 * process is in the process of bringing the inode in.
8361c7c3c6aSMatthew Dillon 			 * This is bad news for us either way.
8371c7c3c6aSMatthew Dillon 			 *
8381c7c3c6aSMatthew Dillon 			 * So for the moment we check v_data == NULL as a
8391c7c3c6aSMatthew Dillon 			 * workaround.  This means that vnodes which do not
8401c7c3c6aSMatthew Dillon 			 * use v_data in the way we expect probably will not
8411c7c3c6aSMatthew Dillon 			 * wind up being paged out by the pager and it will be
8421c7c3c6aSMatthew Dillon 			 * up to the syncer to get them.  That's better than
8431c7c3c6aSMatthew Dillon 			 * us blocking here.
8441c7c3c6aSMatthew Dillon 			 *
8451c7c3c6aSMatthew Dillon 			 * This whole code section is bogus - we need to fix
8461c7c3c6aSMatthew Dillon 			 * the vnode pager to handle vm_page_t's without us
8471c7c3c6aSMatthew Dillon 			 * having to do any sophisticated VOP tests.
8481c7c3c6aSMatthew Dillon 			 */
8491c7c3c6aSMatthew Dillon 
8501c7c3c6aSMatthew Dillon 			if (object->type == OBJT_VNODE) {
85124a1cce3SDavid Greenman 				vp = object->handle;
8521c7c3c6aSMatthew Dillon 
853f2a2857bSKirk McKusick 				mp = NULL;
854f2a2857bSKirk McKusick 				if (vp->v_type == VREG)
855f2a2857bSKirk McKusick 					vn_start_write(vp, &mp, V_NOWAIT);
8566bdfe06aSEivind Eklund 				if (VOP_ISLOCKED(vp, NULL) ||
8571c7c3c6aSMatthew Dillon 				    vp->v_data == NULL ||
85847221757SJohn Dyson 				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
859f2a2857bSKirk McKusick 					vn_finished_write(mp);
860b182ec9eSJohn Dyson 					if ((m->queue == PQ_INACTIVE) &&
861b182ec9eSJohn Dyson 						(m->hold_count == 0) &&
862b182ec9eSJohn Dyson 						(m->busy == 0) &&
863b182ec9eSJohn Dyson 						(m->flags & PG_BUSY) == 0) {
864f35329acSJohn Dyson 						s = splvm();
865be72f788SAlan Cox 						TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
866be72f788SAlan Cox 						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
867f35329acSJohn Dyson 						splx(s);
86885a376ebSJohn Dyson 					}
869aef922f5SJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
870925a3a41SJohn Dyson 						vnodes_skipped++;
871b182ec9eSJohn Dyson 					continue;
87285a376ebSJohn Dyson 				}
873b182ec9eSJohn Dyson 
874f35329acSJohn Dyson 				/*
875936524aaSMatthew Dillon 				 * The page might have been moved to another
876936524aaSMatthew Dillon 				 * queue during potential blocking in vget()
877936524aaSMatthew Dillon 				 * above.  The page might have been freed and
878936524aaSMatthew Dillon 				 * reused for another vnode.  The object might
879936524aaSMatthew Dillon 				 * have been reused for another vnode.
880f35329acSJohn Dyson 				 */
881936524aaSMatthew Dillon 				if (m->queue != PQ_INACTIVE ||
882936524aaSMatthew Dillon 				    m->object != object ||
883936524aaSMatthew Dillon 				    object->handle != vp) {
884b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
885925a3a41SJohn Dyson 						vnodes_skipped++;
886b182ec9eSJohn Dyson 					vput(vp);
887f2a2857bSKirk McKusick 					vn_finished_write(mp);
888b182ec9eSJohn Dyson 					continue;
889b182ec9eSJohn Dyson 				}
890b182ec9eSJohn Dyson 
891f35329acSJohn Dyson 				/*
892936524aaSMatthew Dillon 				 * The page may have been busied while we were
893936524aaSMatthew Dillon 				 * blocked in vget() above.  If so, we don't move
894936524aaSMatthew Dillon 				 * the page back onto the end of the queue;
895936524aaSMatthew Dillon 				 * statistics are more correct if we don't.
896f35329acSJohn Dyson 				 */
897b182ec9eSJohn Dyson 				if (m->busy || (m->flags & PG_BUSY)) {
898b182ec9eSJohn Dyson 					vput(vp);
899f2a2857bSKirk McKusick 					vn_finished_write(mp);
900b182ec9eSJohn Dyson 					continue;
901b182ec9eSJohn Dyson 				}
902b182ec9eSJohn Dyson 
903f35329acSJohn Dyson 				/*
904f35329acSJohn Dyson 				 * If the page has become held, then skip it
905f35329acSJohn Dyson 				 */
906b182ec9eSJohn Dyson 				if (m->hold_count) {
907f35329acSJohn Dyson 					s = splvm();
908be72f788SAlan Cox 					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
909be72f788SAlan Cox 					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
910f35329acSJohn Dyson 					splx(s);
911b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
912925a3a41SJohn Dyson 						vnodes_skipped++;
913b182ec9eSJohn Dyson 					vput(vp);
914f2a2857bSKirk McKusick 					vn_finished_write(mp);
915f6b04d2bSDavid Greenman 					continue;
916f6b04d2bSDavid Greenman 				}
917f6b04d2bSDavid Greenman 			}
918f6b04d2bSDavid Greenman 
9190d94caffSDavid Greenman 			/*
9200d94caffSDavid Greenman 			 * If a page is dirty, then it is either being washed
9210d94caffSDavid Greenman 			 * (but not yet cleaned) or it is still in the
9220d94caffSDavid Greenman 			 * laundry.  If it is still in the laundry, then we
923936524aaSMatthew Dillon 			 * start the cleaning operation.  maxlaunder nominally
924936524aaSMatthew Dillon 			 * counts I/O cost (seeks) rather than bytes.
925936524aaSMatthew Dillon 			 *
926936524aaSMatthew Dillon 			 * This operation may cluster, invalidating the 'next'
927936524aaSMatthew Dillon 			 * pointer.  To prevent an inordinate number of
928936524aaSMatthew Dillon 			 * restarts we use our marker to remember our place.
9290d94caffSDavid Greenman 			 */
930936524aaSMatthew Dillon 			s = splvm();
931936524aaSMatthew Dillon 			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
932936524aaSMatthew Dillon 			splx(s);
933936524aaSMatthew Dillon 			if (vm_pageout_clean(m) != 0)
934936524aaSMatthew Dillon 				--maxlaunder;
935936524aaSMatthew Dillon 			s = splvm();
936936524aaSMatthew Dillon 			next = TAILQ_NEXT(&marker, pageq);
937936524aaSMatthew Dillon 			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
938936524aaSMatthew Dillon 			splx(s);
939f2a2857bSKirk McKusick 			if (vp) {
940f6b04d2bSDavid Greenman 				vput(vp);
941f2a2857bSKirk McKusick 				vn_finished_write(mp);
942f2a2857bSKirk McKusick 			}
9430d94caffSDavid Greenman 		}
944df8bae1dSRodney W. Grimes 	}
94526f9a767SRodney W. Grimes 
946df8bae1dSRodney W. Grimes 	/*
947936524aaSMatthew Dillon 	 * If we were not able to meet our target, increase actcmp
948df8bae1dSRodney W. Grimes 	 */
9491c7c3c6aSMatthew Dillon 
950936524aaSMatthew Dillon 	if (vm_page_count_min()) {
951936524aaSMatthew Dillon 		if (vm_pageout_actcmp < ACT_MAX / 2)
952936524aaSMatthew Dillon 			vm_pageout_actcmp += ACT_ADVANCE;
953936524aaSMatthew Dillon 	} else {
954936524aaSMatthew Dillon 		if (vm_pageout_actcmp < ACT_DECLINE)
955936524aaSMatthew Dillon 			vm_pageout_actcmp = 0;
956936524aaSMatthew Dillon 		else
957936524aaSMatthew Dillon 			vm_pageout_actcmp -= ACT_DECLINE;
9581c7c3c6aSMatthew Dillon 	}
9591c7c3c6aSMatthew Dillon 
9601c7c3c6aSMatthew Dillon 	/*
961936524aaSMatthew Dillon 	 * Compute the number of pages we want to try to move from the
962936524aaSMatthew Dillon 	 * active queue to the inactive queue.
9631c7c3c6aSMatthew Dillon 	 */
9641c7c3c6aSMatthew Dillon 
965936524aaSMatthew Dillon 	page_shortage = vm_paging_target() +
966936524aaSMatthew Dillon 		cnt.v_inactive_target - cnt.v_inactive_count;
967b182ec9eSJohn Dyson 	page_shortage += addl_page_shortage;
968936524aaSMatthew Dillon 	page_shortage += vm_pageout_actcmp;
9691c7c3c6aSMatthew Dillon 
9701c7c3c6aSMatthew Dillon 	/*
971936524aaSMatthew Dillon 	 * Scan the active queue for things we can deactivate. We nominally
972936524aaSMatthew Dillon 	 * track the per-page activity counter and use it to locate
973936524aaSMatthew Dillon 	 * deactivation candidates.
9741c7c3c6aSMatthew Dillon 	 */
97526f9a767SRodney W. Grimes 
976b18bfc3dSJohn Dyson 	pcount = cnt.v_active_count;
977be72f788SAlan Cox 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
9781c7c3c6aSMatthew Dillon 
979b18bfc3dSJohn Dyson 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
980f35329acSJohn Dyson 
9817e006499SJohn Dyson 		/*
982956f3135SPhilippe Charnier 		 * This is a consistency check, and should likely be a panic
9837e006499SJohn Dyson 		 * or warning.
9847e006499SJohn Dyson 		 */
985f35329acSJohn Dyson 		if (m->queue != PQ_ACTIVE) {
98638efa82bSJohn Dyson 			break;
987f35329acSJohn Dyson 		}
988f35329acSJohn Dyson 
989b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
990df8bae1dSRodney W. Grimes 		/*
99126f9a767SRodney W. Grimes 		 * Don't deactivate pages that are busy.
992df8bae1dSRodney W. Grimes 		 */
993a647a309SDavid Greenman 		if ((m->busy != 0) ||
9940d94caffSDavid Greenman 		    (m->flags & PG_BUSY) ||
995f6b04d2bSDavid Greenman 		    (m->hold_count != 0)) {
996f35329acSJohn Dyson 			s = splvm();
997be72f788SAlan Cox 			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
998be72f788SAlan Cox 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
999f35329acSJohn Dyson 			splx(s);
100026f9a767SRodney W. Grimes 			m = next;
100126f9a767SRodney W. Grimes 			continue;
1002df8bae1dSRodney W. Grimes 		}
1003b18bfc3dSJohn Dyson 
1004b18bfc3dSJohn Dyson 		/*
1005b18bfc3dSJohn Dyson 		 * The count for pagedaemon pages is done after checking the
1006956f3135SPhilippe Charnier 		 * page for eligibility...
1007b18bfc3dSJohn Dyson 		 */
1008b18bfc3dSJohn Dyson 		cnt.v_pdpages++;
1009ef743ce6SJohn Dyson 
10107e006499SJohn Dyson 		/*
10117e006499SJohn Dyson 		 * Check to see "how much" the page has been used.
10127e006499SJohn Dyson 		 */
10137e006499SJohn Dyson 		actcount = 0;
1014ef743ce6SJohn Dyson 		if (m->object->ref_count != 0) {
1015ef743ce6SJohn Dyson 			if (m->flags & PG_REFERENCED) {
10167e006499SJohn Dyson 				actcount += 1;
10170d94caffSDavid Greenman 			}
10180385347cSPeter Wemm 			actcount += pmap_ts_referenced(m);
10197e006499SJohn Dyson 			if (actcount) {
10207e006499SJohn Dyson 				m->act_count += ACT_ADVANCE + actcount;
102138efa82bSJohn Dyson 				if (m->act_count > ACT_MAX)
102238efa82bSJohn Dyson 					m->act_count = ACT_MAX;
102338efa82bSJohn Dyson 			}
1024b18bfc3dSJohn Dyson 		}
1025ef743ce6SJohn Dyson 
10267e006499SJohn Dyson 		/*
10277e006499SJohn Dyson 		 * Since we have "tested" this bit, we need to clear it now.
10287e006499SJohn Dyson 		 */
1029e69763a3SDoug Rabson 		vm_page_flag_clear(m, PG_REFERENCED);
1030ef743ce6SJohn Dyson 
10317e006499SJohn Dyson 		/*
10327e006499SJohn Dyson 		 * Only if an object is currently being used, do we use the
10337e006499SJohn Dyson 		 * page activation count stats.
10347e006499SJohn Dyson 		 */
10357e006499SJohn Dyson 		if (actcount && (m->object->ref_count != 0)) {
1036f35329acSJohn Dyson 			s = splvm();
1037be72f788SAlan Cox 			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1038be72f788SAlan Cox 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1039f35329acSJohn Dyson 			splx(s);
104026f9a767SRodney W. Grimes 		} else {
104138efa82bSJohn Dyson 			m->act_count -= min(m->act_count, ACT_DECLINE);
104238efa82bSJohn Dyson 			if (vm_pageout_algorithm_lru ||
1043936524aaSMatthew Dillon 			    (m->object->ref_count == 0) ||
1044936524aaSMatthew Dillon 			    (m->act_count <= vm_pageout_actcmp)) {
1045925a3a41SJohn Dyson 				page_shortage--;
1046d4a272dbSJohn Dyson 				if (m->object->ref_count == 0) {
1047ef743ce6SJohn Dyson 					vm_page_protect(m, VM_PROT_NONE);
1048d4a272dbSJohn Dyson 					if (m->dirty == 0)
10490d94caffSDavid Greenman 						vm_page_cache(m);
1050d4a272dbSJohn Dyson 					else
1051d4a272dbSJohn Dyson 						vm_page_deactivate(m);
10520d94caffSDavid Greenman 				} else {
105326f9a767SRodney W. Grimes 					vm_page_deactivate(m);
1054df8bae1dSRodney W. Grimes 				}
105538efa82bSJohn Dyson 			} else {
105638efa82bSJohn Dyson 				s = splvm();
1057be72f788SAlan Cox 				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1058be72f788SAlan Cox 				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
105938efa82bSJohn Dyson 				splx(s);
106038efa82bSJohn Dyson 			}
1061df8bae1dSRodney W. Grimes 		}
106226f9a767SRodney W. Grimes 		m = next;
106326f9a767SRodney W. Grimes 	}
1064df8bae1dSRodney W. Grimes 
1065f35329acSJohn Dyson 	s = splvm();
10661c7c3c6aSMatthew Dillon 
1067df8bae1dSRodney W. Grimes 	/*
10680d94caffSDavid Greenman 	 * We try to maintain some *really* free pages; this allows interrupt
10691c7c3c6aSMatthew Dillon 	 * code to be guaranteed space.  Since both the cache and free queues
10701c7c3c6aSMatthew Dillon 	 * are considered basically 'free', moving pages from cache to free
10711c7c3c6aSMatthew Dillon 	 * does not affect other calculations.
1072df8bae1dSRodney W. Grimes 	 */
10731c7c3c6aSMatthew Dillon 
1074a1f6d91cSDavid Greenman 	while (cnt.v_free_count < cnt.v_free_reserved) {
10755070c7f8SJohn Dyson 		static int cache_rover = 0;
1076faa273d5SMatthew Dillon 		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
10770d94caffSDavid Greenman 		if (!m)
10780d94caffSDavid Greenman 			break;
10798b03c8edSMatthew Dillon 		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
10808b03c8edSMatthew Dillon 		    m->busy ||
10818b03c8edSMatthew Dillon 		    m->hold_count ||
10828b03c8edSMatthew Dillon 		    m->wire_count) {
1083d044d7bfSMatthew Dillon #ifdef INVARIANTS
1084d044d7bfSMatthew Dillon 			printf("Warning: busy page %p found in cache\n", m);
1085d044d7bfSMatthew Dillon #endif
1086aaba53daSMatthew Dillon 			vm_page_deactivate(m);
1087aaba53daSMatthew Dillon 			continue;
1088aaba53daSMatthew Dillon 		}
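		/*
		 * Advance the rover by a prime stride (PQ_PRIME2) so that,
		 * over time, frees are spread across all of the PQ_CACHE
		 * page-color queues rather than repeatedly draining the
		 * same color.
		 */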
10895070c7f8SJohn Dyson 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
1090925a3a41SJohn Dyson 		vm_pageout_page_free(m);
10910bb3a0d2SDavid Greenman 		cnt.v_dfree++;
109226f9a767SRodney W. Grimes 	}
1093f35329acSJohn Dyson 	splx(s);
10945663e6deSDavid Greenman 
1095ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING)
1096ceb0cf87SJohn Dyson 	/*
1097ceb0cf87SJohn Dyson 	 * Idle process swapout -- run once per second.
1098ceb0cf87SJohn Dyson 	 */
1099ceb0cf87SJohn Dyson 	if (vm_swap_idle_enabled) {
1100ceb0cf87SJohn Dyson 		static long lsec;
1101227ee8a1SPoul-Henning Kamp 		if (time_second != lsec) {
1102ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
1103ceb0cf87SJohn Dyson 			vm_req_vmdaemon();
1104227ee8a1SPoul-Henning Kamp 			lsec = time_second;
1105ceb0cf87SJohn Dyson 		}
1106ceb0cf87SJohn Dyson 	}
1107ceb0cf87SJohn Dyson #endif
1108ceb0cf87SJohn Dyson 
11095663e6deSDavid Greenman 	/*
1110f6b04d2bSDavid Greenman 	 * If we didn't get enough free pages and we have skipped a vnode
11114c1f8ee9SDavid Greenman 	 * in a writeable object, wake up the sync daemon.  Also kick off
11124c1f8ee9SDavid Greenman 	 * swapout if we are still short of the paging target.
1113f6b04d2bSDavid Greenman 	 */
111490ecac61SMatthew Dillon 	if (vm_paging_target() > 0) {
111590ecac61SMatthew Dillon 		if (vnodes_skipped && vm_page_count_min())
1116d50c1994SPeter Wemm 			(void) speedup_syncer();
111738efa82bSJohn Dyson #if !defined(NO_SWAPPING)
111890ecac61SMatthew Dillon 		if (vm_swap_enabled && vm_page_count_target()) {
11194c1f8ee9SDavid Greenman 			vm_req_vmdaemon();
1120ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
11214c1f8ee9SDavid Greenman 		}
11225afce282SDavid Greenman #endif
11234c1f8ee9SDavid Greenman 	}
11244c1f8ee9SDavid Greenman 
1125f6b04d2bSDavid Greenman 	/*
11260d94caffSDavid Greenman 	 * Make sure that we have swap space -- if we are low on both memory
11270d94caffSDavid Greenman 	 * and swap, then kill the biggest process.
11285663e6deSDavid Greenman 	 */
1129936524aaSMatthew Dillon 	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
11305663e6deSDavid Greenman 		bigproc = NULL;
11315663e6deSDavid Greenman 		bigsize = 0;
11321b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
11335663e6deSDavid Greenman 			/*
11345663e6deSDavid Greenman 			 * skip system processes, held processes, init, and low pids while swap remains
11355663e6deSDavid Greenman 			 */
1136c8da68e9SPeter Wemm 			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
1137c8da68e9SPeter Wemm 			    (p->p_pid == 1) ||
113879221631SDavid Greenman 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
11395663e6deSDavid Greenman 				continue;
11405663e6deSDavid Greenman 			}
11415663e6deSDavid Greenman 			/*
11425663e6deSDavid Greenman 			 * if the process is in a non-running state (neither
11435663e6deSDavid Greenman 			 * runnable nor sleeping), don't touch it.
11445663e6deSDavid Greenman 			 */
11455663e6deSDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
11465663e6deSDavid Greenman 				continue;
11475663e6deSDavid Greenman 			}
11485663e6deSDavid Greenman 			/*
11495663e6deSDavid Greenman 			 * get the process size
11505663e6deSDavid Greenman 			 */
1151b1028ad1SLuoqi Chen 			size = vmspace_resident_count(p->p_vmspace);
11525663e6deSDavid Greenman 			/*
11535663e6deSDavid Greenman 			 * if this process is bigger than the biggest one seen
11545663e6deSDavid Greenman 			 * so far, remember it.
11555663e6deSDavid Greenman 			 */
11565663e6deSDavid Greenman 			if (size > bigsize) {
11575663e6deSDavid Greenman 				bigproc = p;
11585663e6deSDavid Greenman 				bigsize = size;
11595663e6deSDavid Greenman 			}
11605663e6deSDavid Greenman 		}
11615663e6deSDavid Greenman 		if (bigproc != NULL) {
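			/*
			 * Kill the victim and boost its scheduling priority
			 * (zero estcpu, best nice value) so that it can exit
			 * and release its pages quickly, then wake up anyone
			 * waiting for free pages.
			 */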
1162729b1e51SDavid Greenman 			killproc(bigproc, "out of swap space");
11635663e6deSDavid Greenman 			bigproc->p_estcpu = 0;
11645663e6deSDavid Greenman 			bigproc->p_nice = PRIO_MIN;
11655663e6deSDavid Greenman 			resetpriority(bigproc);
116624a1cce3SDavid Greenman 			wakeup(&cnt.v_free_count);
11675663e6deSDavid Greenman 		}
11685663e6deSDavid Greenman 	}
116926f9a767SRodney W. Grimes 	return force_wakeup;
117026f9a767SRodney W. Grimes }
117126f9a767SRodney W. Grimes 
1172dc2efb27SJohn Dyson /*
1173dc2efb27SJohn Dyson  * This routine tries to maintain the pseudo-LRU active queue,
1174dc2efb27SJohn Dyson  * so that during long periods when there is no paging, some
1175956f3135SPhilippe Charnier  * statistics accumulation still occurs.  This code helps the
1176dc2efb27SJohn Dyson  * situation where paging just starts to occur.
1177dc2efb27SJohn Dyson  */
1178dc2efb27SJohn Dyson static void
1179dc2efb27SJohn Dyson vm_pageout_page_stats()
1180dc2efb27SJohn Dyson {
1181dc2efb27SJohn Dyson 	int s;
1182dc2efb27SJohn Dyson 	vm_page_t m,next;
1183dc2efb27SJohn Dyson 	int pcount,tpcount;		/* Number of pages to check */
1184dc2efb27SJohn Dyson 	static int fullintervalcount = 0;
1185bef608bdSJohn Dyson 	int page_shortage;
118625db2c54SMatthew Dillon 	int s0;
1187bef608bdSJohn Dyson 
118890ecac61SMatthew Dillon 	page_shortage =
118990ecac61SMatthew Dillon 	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1190bef608bdSJohn Dyson 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
119190ecac61SMatthew Dillon 
1192bef608bdSJohn Dyson 	if (page_shortage <= 0)
1193bef608bdSJohn Dyson 		return;
1194dc2efb27SJohn Dyson 
119525db2c54SMatthew Dillon 	s0 = splvm();
119625db2c54SMatthew Dillon 
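	/*
	 * On most passes only part of the active queue is scanned: the
	 * limit is vm_pageout_stats_max, prorated by the fraction of all
	 * pages that sit on the active queue.  Every
	 * vm_pageout_full_stats_interval seconds the entire active queue
	 * is scanned instead.
	 */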
1197dc2efb27SJohn Dyson 	pcount = cnt.v_active_count;
1198dc2efb27SJohn Dyson 	fullintervalcount += vm_pageout_stats_interval;
1199dc2efb27SJohn Dyson 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1200dc2efb27SJohn Dyson 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1201dc2efb27SJohn Dyson 		if (pcount > tpcount)
1202dc2efb27SJohn Dyson 			pcount = tpcount;
1203883f3caaSMatthew Dillon 	} else {
1204883f3caaSMatthew Dillon 		fullintervalcount = 0;
1205dc2efb27SJohn Dyson 	}
1206dc2efb27SJohn Dyson 
1207be72f788SAlan Cox 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1208dc2efb27SJohn Dyson 	while ((m != NULL) && (pcount-- > 0)) {
12097e006499SJohn Dyson 		int actcount;
1210dc2efb27SJohn Dyson 
1211dc2efb27SJohn Dyson 		if (m->queue != PQ_ACTIVE) {
1212dc2efb27SJohn Dyson 			break;
1213dc2efb27SJohn Dyson 		}
1214dc2efb27SJohn Dyson 
1215dc2efb27SJohn Dyson 		next = TAILQ_NEXT(m, pageq);
1216dc2efb27SJohn Dyson 		/*
1217dc2efb27SJohn Dyson 		 * Don't deactivate pages that are busy.
1218dc2efb27SJohn Dyson 		 */
1219dc2efb27SJohn Dyson 		if ((m->busy != 0) ||
1220dc2efb27SJohn Dyson 		    (m->flags & PG_BUSY) ||
1221dc2efb27SJohn Dyson 		    (m->hold_count != 0)) {
1222dc2efb27SJohn Dyson 			s = splvm();
1223be72f788SAlan Cox 			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1224be72f788SAlan Cox 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1225dc2efb27SJohn Dyson 			splx(s);
1226dc2efb27SJohn Dyson 			m = next;
1227dc2efb27SJohn Dyson 			continue;
1228dc2efb27SJohn Dyson 		}
1229dc2efb27SJohn Dyson 
12307e006499SJohn Dyson 		actcount = 0;
1231dc2efb27SJohn Dyson 		if (m->flags & PG_REFERENCED) {
1232e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
12337e006499SJohn Dyson 			actcount += 1;
1234dc2efb27SJohn Dyson 		}
1235dc2efb27SJohn Dyson 
12360385347cSPeter Wemm 		actcount += pmap_ts_referenced(m);
12377e006499SJohn Dyson 		if (actcount) {
12387e006499SJohn Dyson 			m->act_count += ACT_ADVANCE + actcount;
1239dc2efb27SJohn Dyson 			if (m->act_count > ACT_MAX)
1240dc2efb27SJohn Dyson 				m->act_count = ACT_MAX;
1241dc2efb27SJohn Dyson 			s = splvm();
1242be72f788SAlan Cox 			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1243be72f788SAlan Cox 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1244dc2efb27SJohn Dyson 			splx(s);
1245dc2efb27SJohn Dyson 		} else {
1246dc2efb27SJohn Dyson 			if (m->act_count == 0) {
12477e006499SJohn Dyson 				/*
12487e006499SJohn Dyson 				 * We turn off page access, so that we have more accurate
12497e006499SJohn Dyson 				 * RSS stats.  We don't do this in the normal page deactivation
12507e006499SJohn Dyson 				 * path when the system is under VM load, because the cost of
12517e006499SJohn Dyson 				 * the large number of page protect operations would outweigh
12527e006499SJohn Dyson 				 * the benefit.
12537e006499SJohn Dyson 				 */
1254dc2efb27SJohn Dyson 				vm_page_protect(m, VM_PROT_NONE);
1255dc2efb27SJohn Dyson 				vm_page_deactivate(m);
1256dc2efb27SJohn Dyson 			} else {
1257dc2efb27SJohn Dyson 				m->act_count -= min(m->act_count, ACT_DECLINE);
1258dc2efb27SJohn Dyson 				s = splvm();
1259be72f788SAlan Cox 				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1260be72f788SAlan Cox 				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1261dc2efb27SJohn Dyson 				splx(s);
1262dc2efb27SJohn Dyson 			}
1263dc2efb27SJohn Dyson 		}
1264dc2efb27SJohn Dyson 
1265dc2efb27SJohn Dyson 		m = next;
1266dc2efb27SJohn Dyson 	}
126725db2c54SMatthew Dillon 	splx(s0);
1268dc2efb27SJohn Dyson }
1269dc2efb27SJohn Dyson 
1270b182ec9eSJohn Dyson static int
1271b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1272b182ec9eSJohn Dyson vm_size_t count;
1273b182ec9eSJohn Dyson {
1274b182ec9eSJohn Dyson 	if (count < cnt.v_page_count)
1275b182ec9eSJohn Dyson 		return 0;
1276b182ec9eSJohn Dyson 	/*
1277b182ec9eSJohn Dyson 	 * free_reserved needs to include enough for the largest swap pager
1278b182ec9eSJohn Dyson 	 * structures plus enough for any pv_entry structs when paging.
1279b182ec9eSJohn Dyson 	 */
1280b182ec9eSJohn Dyson 	if (cnt.v_page_count > 1024)
1281b182ec9eSJohn Dyson 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1282b182ec9eSJohn Dyson 	else
1283b182ec9eSJohn Dyson 		cnt.v_free_min = 4;
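	/*
	 * For example (assuming 4K pages), a machine with 32768 pages
	 * (128MB) gets a raw v_free_min of 4 + (32768 - 1024) / 200 = 162
	 * pages (integer division); the reserve added below on top of that
	 * depends on vm_pageout_page_count and PQ_L2_SIZE, which are
	 * configuration dependent.
	 */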
1284f35329acSJohn Dyson 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1285f35329acSJohn Dyson 		cnt.v_interrupt_free_min;
1286f35329acSJohn Dyson 	cnt.v_free_reserved = vm_pageout_page_count +
1287a15403deSJohn Dyson 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
128890ecac61SMatthew Dillon 	cnt.v_free_severe = cnt.v_free_min / 2;
1289a2f4a846SJohn Dyson 	cnt.v_free_min += cnt.v_free_reserved;
129090ecac61SMatthew Dillon 	cnt.v_free_severe += cnt.v_free_reserved;
1291b182ec9eSJohn Dyson 	return 1;
1292b182ec9eSJohn Dyson }
1293b182ec9eSJohn Dyson 
1294b182ec9eSJohn Dyson 
1295df8bae1dSRodney W. Grimes /*
1296df8bae1dSRodney W. Grimes  *	vm_pageout is the high level pageout daemon.
1297df8bae1dSRodney W. Grimes  */
12982b14f991SJulian Elischer static void
129926f9a767SRodney W. Grimes vm_pageout()
1300df8bae1dSRodney W. Grimes {
13010384fff8SJason Evans 
13020384fff8SJason Evans 	mtx_enter(&Giant, MTX_DEF);
13030384fff8SJason Evans 
1304df8bae1dSRodney W. Grimes 	/*
1305df8bae1dSRodney W. Grimes 	 * Initialize some paging parameters.
1306df8bae1dSRodney W. Grimes 	 */
1307df8bae1dSRodney W. Grimes 
1308f6b04d2bSDavid Greenman 	cnt.v_interrupt_free_min = 2;
1309f35329acSJohn Dyson 	if (cnt.v_page_count < 2000)
1310f35329acSJohn Dyson 		vm_pageout_page_count = 8;
1311f6b04d2bSDavid Greenman 
1312b182ec9eSJohn Dyson 	vm_pageout_free_page_calc(cnt.v_page_count);
1313ed74321bSDavid Greenman 	/*
13140d94caffSDavid Greenman 	 * Scale v_free_target and the cache/inactive targets with the
13150d94caffSDavid Greenman 	 * amount of initially free memory in the machine.
1316ed74321bSDavid Greenman 	 */
1317a15403deSJohn Dyson 	if (cnt.v_free_count > 6144)
13180d94caffSDavid Greenman 		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
1319a15403deSJohn Dyson 	else
1320a15403deSJohn Dyson 		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
13216f2b142eSDavid Greenman 
1322a15403deSJohn Dyson 	if (cnt.v_free_count > 2048) {
1323a15403deSJohn Dyson 		cnt.v_cache_min = cnt.v_free_target;
1324a15403deSJohn Dyson 		cnt.v_cache_max = 2 * cnt.v_cache_min;
1325a15403deSJohn Dyson 		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
13260d94caffSDavid Greenman 	} else {
13270d94caffSDavid Greenman 		cnt.v_cache_min = 0;
13280d94caffSDavid Greenman 		cnt.v_cache_max = 0;
13296f2b142eSDavid Greenman 		cnt.v_inactive_target = cnt.v_free_count / 4;
13300d94caffSDavid Greenman 	}
1331e47ed70bSJohn Dyson 	if (cnt.v_inactive_target > cnt.v_free_count / 3)
1332e47ed70bSJohn Dyson 		cnt.v_inactive_target = cnt.v_free_count / 3;
1333df8bae1dSRodney W. Grimes 
1334df8bae1dSRodney W. Grimes 	/* XXX does not really belong here */
1335df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
1336df8bae1dSRodney W. Grimes 		vm_page_max_wired = cnt.v_free_count / 3;
1337df8bae1dSRodney W. Grimes 
1338dc2efb27SJohn Dyson 	if (vm_pageout_stats_max == 0)
1339dc2efb27SJohn Dyson 		vm_pageout_stats_max = cnt.v_free_target;
1340dc2efb27SJohn Dyson 
1341dc2efb27SJohn Dyson 	/*
1342dc2efb27SJohn Dyson 	 * Set interval in seconds for stats scan.
1343dc2efb27SJohn Dyson 	 */
1344dc2efb27SJohn Dyson 	if (vm_pageout_stats_interval == 0)
1345bef608bdSJohn Dyson 		vm_pageout_stats_interval = 5;
1346dc2efb27SJohn Dyson 	if (vm_pageout_full_stats_interval == 0)
1347dc2efb27SJohn Dyson 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1348dc2efb27SJohn Dyson 
1349dc2efb27SJohn Dyson 
1350dc2efb27SJohn Dyson 	/*
1351dc2efb27SJohn Dyson 	 * Set maximum free per pass
1352dc2efb27SJohn Dyson 	 */
1353dc2efb27SJohn Dyson 	if (vm_pageout_stats_free_max == 0)
1354bef608bdSJohn Dyson 		vm_pageout_stats_free_max = 5;
1355dc2efb27SJohn Dyson 
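	/*
	 * Cap the number of dirty pages the pageout scan will start
	 * laundering in a single pass; machines with more memory get a
	 * larger quota.
	 */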
1356ceb0cf87SJohn Dyson 	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);
135726f9a767SRodney W. Grimes 
1358e929c00dSKirk McKusick 	curproc->p_flag |= P_BUFEXHAUST;
135924a1cce3SDavid Greenman 	swap_pager_swap_init();
1360df8bae1dSRodney W. Grimes 	/*
13610d94caffSDavid Greenman 	 * The pageout daemon is never done, so loop forever.
1362df8bae1dSRodney W. Grimes 	 */
1363df8bae1dSRodney W. Grimes 	while (TRUE) {
1364dc2efb27SJohn Dyson 		int error;
1365b18bfc3dSJohn Dyson 		int s = splvm();
136690ecac61SMatthew Dillon 
1367936524aaSMatthew Dillon 		/*
1368936524aaSMatthew Dillon 		 * If we have enough free memory, wakeup waiters.  Do
1369936524aaSMatthew Dillon 		 * not clear vm_pages_needed until we reach our target,
1370936524aaSMatthew Dillon 		 * otherwise we may be woken up over and over again and
1371936524aaSMatthew Dillon 		 * waste a lot of cpu.
1372936524aaSMatthew Dillon 		 */
1373936524aaSMatthew Dillon 		if (vm_pages_needed && !vm_page_count_min()) {
1374936524aaSMatthew Dillon 			if (vm_paging_needed() <= 0)
1375936524aaSMatthew Dillon 				vm_pages_needed = 0;
1376936524aaSMatthew Dillon 			wakeup(&cnt.v_free_count);
1377936524aaSMatthew Dillon 		}
1378936524aaSMatthew Dillon 		if (vm_pages_needed) {
137990ecac61SMatthew Dillon 			/*
138090ecac61SMatthew Dillon 			 * Still not done, sleep a bit and go again
138190ecac61SMatthew Dillon 			 */
138290ecac61SMatthew Dillon 			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
138390ecac61SMatthew Dillon 		} else {
138490ecac61SMatthew Dillon 			/*
138590ecac61SMatthew Dillon 			 * Good enough, sleep & handle stats
138690ecac61SMatthew Dillon 			 */
1387dc2efb27SJohn Dyson 			error = tsleep(&vm_pages_needed,
1388dc2efb27SJohn Dyson 				PVM, "psleep", vm_pageout_stats_interval * hz);
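			/*
			 * The sleep timed out and nobody needs pages: relax
			 * the deactivation threshold a bit and run the page
			 * stats scan instead of a full pageout pass.
			 */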
1389dc2efb27SJohn Dyson 			if (error && !vm_pages_needed) {
1390936524aaSMatthew Dillon 				if (vm_pageout_actcmp > 0)
1391936524aaSMatthew Dillon 					--vm_pageout_actcmp;
1392dc2efb27SJohn Dyson 				splx(s);
1393dc2efb27SJohn Dyson 				vm_pageout_page_stats();
1394dc2efb27SJohn Dyson 				continue;
1395dc2efb27SJohn Dyson 			}
1396f919ebdeSDavid Greenman 		}
1397e47ed70bSJohn Dyson 
1398b18bfc3dSJohn Dyson 		if (vm_pages_needed)
1399b18bfc3dSJohn Dyson 			cnt.v_pdwakeups++;
1400f919ebdeSDavid Greenman 		splx(s);
14010d94caffSDavid Greenman 		vm_pageout_scan();
14022d8acc0fSJohn Dyson 		vm_pageout_deficit = 0;
1403df8bae1dSRodney W. Grimes 	}
1404df8bae1dSRodney W. Grimes }
140526f9a767SRodney W. Grimes 
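/*
 * Request that the pageout daemon run.  The check against pageproc keeps
 * the daemon from queueing a wakeup to itself.
 */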
1406e0c5a895SJohn Dyson void
1407e0c5a895SJohn Dyson pagedaemon_wakeup()
1408e0c5a895SJohn Dyson {
1409e0c5a895SJohn Dyson 	if (!vm_pages_needed && curproc != pageproc) {
1410e0c5a895SJohn Dyson 		vm_pages_needed++;
1411e0c5a895SJohn Dyson 		wakeup(&vm_pages_needed);
1412e0c5a895SJohn Dyson 	}
1413e0c5a895SJohn Dyson }
1414e0c5a895SJohn Dyson 
141538efa82bSJohn Dyson #if !defined(NO_SWAPPING)
14165afce282SDavid Greenman static void
14175afce282SDavid Greenman vm_req_vmdaemon()
14185afce282SDavid Greenman {
14195afce282SDavid Greenman 	static int lastrun = 0;
14205afce282SDavid Greenman 
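	/*
	 * Wake the vm daemon at most once per second; the second test
	 * handles the case where the ticks counter has wrapped.
	 */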
1421b18bfc3dSJohn Dyson 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
14225afce282SDavid Greenman 		wakeup(&vm_daemon_needed);
14235afce282SDavid Greenman 		lastrun = ticks;
14245afce282SDavid Greenman 	}
14255afce282SDavid Greenman }
14265afce282SDavid Greenman 
14272b14f991SJulian Elischer static void
14284f9fb771SBruce Evans vm_daemon()
14290d94caffSDavid Greenman {
14302fe6e4d7SDavid Greenman 	struct proc *p;
14310d94caffSDavid Greenman 
14320384fff8SJason Evans 	mtx_enter(&Giant, MTX_DEF);
14330384fff8SJason Evans 
14342fe6e4d7SDavid Greenman 	while (TRUE) {
1435e8f36785SJohn Dyson 		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
14364c1f8ee9SDavid Greenman 		if (vm_pageout_req_swapout) {
1437ceb0cf87SJohn Dyson 			swapout_procs(vm_pageout_req_swapout);
14384c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 0;
14394c1f8ee9SDavid Greenman 		}
14402fe6e4d7SDavid Greenman 		/*
14410d94caffSDavid Greenman 		 * scan the processes for exceeding their rlimits or if
14420d94caffSDavid Greenman 		 * process is swapped out -- deactivate pages
14432fe6e4d7SDavid Greenman 		 */
14442fe6e4d7SDavid Greenman 
14451b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1446fe2144fdSLuoqi Chen 			vm_pindex_t limit, size;
14472fe6e4d7SDavid Greenman 
14482fe6e4d7SDavid Greenman 			/*
14492fe6e4d7SDavid Greenman 			 * if this is a system process or a process that is
14502fe6e4d7SDavid Greenman 			 * exiting, skip it.
14512fe6e4d7SDavid Greenman 			 */
14522fe6e4d7SDavid Greenman 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
14532fe6e4d7SDavid Greenman 				continue;
14542fe6e4d7SDavid Greenman 			}
14552fe6e4d7SDavid Greenman 			/*
14562fe6e4d7SDavid Greenman 			 * if the process is in a non-running state (neither
14572fe6e4d7SDavid Greenman 			 * runnable nor sleeping), don't touch it.
14582fe6e4d7SDavid Greenman 			 */
14592fe6e4d7SDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
14602fe6e4d7SDavid Greenman 				continue;
14612fe6e4d7SDavid Greenman 			}
14622fe6e4d7SDavid Greenman 			/*
14632fe6e4d7SDavid Greenman 			 * get a limit
14642fe6e4d7SDavid Greenman 			 */
1465fe2144fdSLuoqi Chen 			limit = OFF_TO_IDX(
1466fe2144fdSLuoqi Chen 			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
1467fe2144fdSLuoqi Chen 				p->p_rlimit[RLIMIT_RSS].rlim_max));
14682fe6e4d7SDavid Greenman 
14692fe6e4d7SDavid Greenman 			/*
14700d94caffSDavid Greenman 			 * let processes that are swapped out really be
14710d94caffSDavid Greenman 			 * swapped out: set the limit to nothing, which will
14720d94caffSDavid Greenman 			 * force a swap-out.
14732fe6e4d7SDavid Greenman 			 */
14742fe6e4d7SDavid Greenman 			if ((p->p_flag & P_INMEM) == 0)
14750d94caffSDavid Greenman 				limit = 0;	/* XXX */
14762fe6e4d7SDavid Greenman 
1477fe2144fdSLuoqi Chen 			size = vmspace_resident_count(p->p_vmspace);
14782fe6e4d7SDavid Greenman 			if (limit >= 0 && size >= limit) {
1479fe2144fdSLuoqi Chen 				vm_pageout_map_deactivate_pages(
1480fe2144fdSLuoqi Chen 				    &p->p_vmspace->vm_map, limit);
14812fe6e4d7SDavid Greenman 			}
14822fe6e4d7SDavid Greenman 		}
148324a1cce3SDavid Greenman 	}
14842fe6e4d7SDavid Greenman }
148538efa82bSJohn Dyson #endif
1486