xref: /freebsd/sys/vm/vm_pageout.c (revision 9c8b8baa38c9a8135d7602f127cb0c735010837d)
1df8bae1dSRodney W. Grimes /*
226f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
326f9a767SRodney W. Grimes  * All rights reserved.
426f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
726f9a767SRodney W. Grimes  * All rights reserved.
8df8bae1dSRodney W. Grimes  *
9df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
10df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
21df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
403c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  *
43df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44df8bae1dSRodney W. Grimes  * All rights reserved.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
49df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
50df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
51df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
52df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61df8bae1dSRodney W. Grimes  *  School of Computer Science
62df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
63df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
66df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
6726f9a767SRodney W. Grimes  *
689c8b8baaSPeter Wemm  * $Id: vm_pageout.c,v 1.142 1999/06/26 14:56:58 peter Exp $
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
76df8bae1dSRodney W. Grimes #include <sys/param.h>
7726f9a767SRodney W. Grimes #include <sys/systm.h>
78b5e8ce9fSBruce Evans #include <sys/kernel.h>
7926f9a767SRodney W. Grimes #include <sys/proc.h>
809c8b8baaSPeter Wemm #include <sys/kthread.h>
8126f9a767SRodney W. Grimes #include <sys/resourcevar.h>
82d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
83f6b04d2bSDavid Greenman #include <sys/vnode.h>
84efeaf95aSDavid Greenman #include <sys/vmmeter.h>
8538efa82bSJohn Dyson #include <sys/sysctl.h>
86df8bae1dSRodney W. Grimes 
87df8bae1dSRodney W. Grimes #include <vm/vm.h>
88efeaf95aSDavid Greenman #include <vm/vm_param.h>
89efeaf95aSDavid Greenman #include <vm/vm_prot.h>
90996c772fSJohn Dyson #include <sys/lock.h>
91efeaf95aSDavid Greenman #include <vm/vm_object.h>
92df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
93efeaf95aSDavid Greenman #include <vm/vm_map.h>
94df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
9524a1cce3SDavid Greenman #include <vm/vm_pager.h>
9605f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
97efeaf95aSDavid Greenman #include <vm/vm_extern.h>
98df8bae1dSRodney W. Grimes 
992b14f991SJulian Elischer /*
1002b14f991SJulian Elischer  * System initialization
1012b14f991SJulian Elischer  */
1022b14f991SJulian Elischer 
1032b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
1042b14f991SJulian Elischer static void vm_pageout __P((void));
1058f9110f6SJohn Dyson static int vm_pageout_clean __P((vm_page_t));
1063af76890SPoul-Henning Kamp static int vm_pageout_scan __P((void));
107f35329acSJohn Dyson static int vm_pageout_free_page_calc __P((vm_size_t count));
1082b14f991SJulian Elischer struct proc *pageproc;
1092b14f991SJulian Elischer 
1102b14f991SJulian Elischer static struct kproc_desc page_kp = {
1112b14f991SJulian Elischer 	"pagedaemon",
1122b14f991SJulian Elischer 	vm_pageout,
1132b14f991SJulian Elischer 	&pageproc
1142b14f991SJulian Elischer };
1159c8b8baaSPeter Wemm SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1162b14f991SJulian Elischer 
11738efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1182b14f991SJulian Elischer /* the kernel process "vm_daemon"*/
1192b14f991SJulian Elischer static void vm_daemon __P((void));
120f708ef1bSPoul-Henning Kamp static struct	proc *vmproc;
1212b14f991SJulian Elischer 
1222b14f991SJulian Elischer static struct kproc_desc vm_kp = {
1232b14f991SJulian Elischer 	"vmdaemon",
1242b14f991SJulian Elischer 	vm_daemon,
1252b14f991SJulian Elischer 	&vmproc
1262b14f991SJulian Elischer };
1279c8b8baaSPeter Wemm SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
12838efa82bSJohn Dyson #endif
1292b14f991SJulian Elischer 
1302b14f991SJulian Elischer 
1312d8acc0fSJohn Dyson int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
1322d8acc0fSJohn Dyson int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
1332d8acc0fSJohn Dyson int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */
13426f9a767SRodney W. Grimes 
13526f9a767SRodney W. Grimes extern int npendingio;
13638efa82bSJohn Dyson #if !defined(NO_SWAPPING)
137f708ef1bSPoul-Henning Kamp static int vm_pageout_req_swapout;	/* XXX */
138f708ef1bSPoul-Henning Kamp static int vm_daemon_needed;
13938efa82bSJohn Dyson #endif
14026f9a767SRodney W. Grimes extern int nswiodone;
1415663e6deSDavid Greenman extern int vm_swap_size;
142303b270bSEivind Eklund static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
143303b270bSEivind Eklund static int vm_pageout_full_stats_interval = 0;
144303b270bSEivind Eklund static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
145303b270bSEivind Eklund static int defer_swap_pageouts=0;
146303b270bSEivind Eklund static int disable_swap_pageouts=0;
14770111b90SJohn Dyson 
148303b270bSEivind Eklund static int max_page_launder=100;
14938efa82bSJohn Dyson #if defined(NO_SWAPPING)
150303b270bSEivind Eklund static int vm_swap_enabled=0;
151303b270bSEivind Eklund static int vm_swap_idle_enabled=0;
15238efa82bSJohn Dyson #else
153303b270bSEivind Eklund static int vm_swap_enabled=1;
154303b270bSEivind Eklund static int vm_swap_idle_enabled=0;
15538efa82bSJohn Dyson #endif
15638efa82bSJohn Dyson 
15738efa82bSJohn Dyson SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
158b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");
15938efa82bSJohn Dyson 
160dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
161b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
162dc2efb27SJohn Dyson 
163dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
164b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
165dc2efb27SJohn Dyson 
166dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
167b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
168dc2efb27SJohn Dyson 
169dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
170b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
171dc2efb27SJohn Dyson 
17238efa82bSJohn Dyson #if defined(NO_SWAPPING)
173ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
174ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_enabled, 0, "");
175ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
176ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
17738efa82bSJohn Dyson #else
178ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
179b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
180ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
181b0359e2cSPeter Wemm 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
18238efa82bSJohn Dyson #endif
18326f9a767SRodney W. Grimes 
184ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
185b0359e2cSPeter Wemm 	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
18612ac6a1dSJohn Dyson 
187ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
188b0359e2cSPeter Wemm 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
18912ac6a1dSJohn Dyson 
190ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
191b0359e2cSPeter Wemm 	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
19270111b90SJohn Dyson 
19326f9a767SRodney W. Grimes 
194ffc82b0aSJohn Dyson #define VM_PAGEOUT_PAGE_COUNT 16
195bbc0ec52SDavid Greenman int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
196df8bae1dSRodney W. Grimes 
197c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
198df8bae1dSRodney W. Grimes 
19938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
20038efa82bSJohn Dyson typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
20138efa82bSJohn Dyson static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
202cd41fc12SDavid Greenman static freeer_fcn_t vm_pageout_object_deactivate_pages;
203cd41fc12SDavid Greenman static void vm_req_vmdaemon __P((void));
20438efa82bSJohn Dyson #endif
205dc2efb27SJohn Dyson static void vm_pageout_page_stats(void);
206cd41fc12SDavid Greenman 
20726f9a767SRodney W. Grimes /*
20826f9a767SRodney W. Grimes  * vm_pageout_clean:
20924a1cce3SDavid Greenman  *
2100d94caffSDavid Greenman  * Clean the page and remove it from the laundry.
21126f9a767SRodney W. Grimes  *
2120d94caffSDavid Greenman  * We set the busy bit to cause potential page faults on this page to
2131c7c3c6aSMatthew Dillon  * block.  Note the careful timing, however, the busy bit isn't set till
2141c7c3c6aSMatthew Dillon  * late and we cannot do anything that will mess with the page.
21526f9a767SRodney W. Grimes  */
2161c7c3c6aSMatthew Dillon 
/*
 * m: the candidate page to launder.  Returns the count reported by
 * vm_pageout_flush() for the cluster built around m, or 0 when the page
 * is busy/held and cannot be touched.  Dirty, inactive, unwired neighbors
 * from the same object are gathered (up to vm_pageout_page_count total)
 * before the flush, so one I/O covers a contiguous run of pages.
 */
2173af76890SPoul-Henning Kamp static int
2188f9110f6SJohn Dyson vm_pageout_clean(m)
21924a1cce3SDavid Greenman 	vm_page_t m;
22024a1cce3SDavid Greenman {
22126f9a767SRodney W. Grimes 	register vm_object_t object;
	/* m sits at slot vm_pageout_page_count; forward neighbors go above,
	 * backward neighbors below, so the cluster stays contiguous in mc[]. */
222f35329acSJohn Dyson 	vm_page_t mc[2*vm_pageout_page_count];
22324a1cce3SDavid Greenman 	int pageout_count;
22424a1cce3SDavid Greenman 	int i, forward_okay, backward_okay, page_base;
225a316d390SJohn Dyson 	vm_pindex_t pindex = m->pindex;
22626f9a767SRodney W. Grimes 
22726f9a767SRodney W. Grimes 	object = m->object;
22824a1cce3SDavid Greenman 
22926f9a767SRodney W. Grimes 	/*
2301c7c3c6aSMatthew Dillon 	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
2311c7c3c6aSMatthew Dillon 	 * with the new swapper, but we could have serious problems paging
2321c7c3c6aSMatthew Dillon 	 * out other object types if there is insufficient memory.
2331c7c3c6aSMatthew Dillon 	 *
2341c7c3c6aSMatthew Dillon 	 * Unfortunately, checking free memory here is far too late, so the
2351c7c3c6aSMatthew Dillon 	 * check has been moved up a procedural level.
2361c7c3c6aSMatthew Dillon 	 */
2371c7c3c6aSMatthew Dillon 
2381c7c3c6aSMatthew Dillon #if 0
2391c7c3c6aSMatthew Dillon 	/*
24024a1cce3SDavid Greenman 	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
24124a1cce3SDavid Greenman 	 * Try to avoid the deadlock.
24226f9a767SRodney W. Grimes 	 */
2438f9110f6SJohn Dyson 	if ((object->type == OBJT_DEFAULT) &&
24424a1cce3SDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
24526f9a767SRodney W. Grimes 		return 0;
2461c7c3c6aSMatthew Dillon #endif
24726f9a767SRodney W. Grimes 
24824a1cce3SDavid Greenman 	/*
24924a1cce3SDavid Greenman 	 * Don't mess with the page if it's busy.
25024a1cce3SDavid Greenman 	 */
2518f9110f6SJohn Dyson 	if ((m->hold_count != 0) ||
2520d94caffSDavid Greenman 	    ((m->busy != 0) || (m->flags & PG_BUSY)))
2530d94caffSDavid Greenman 		return 0;
2540d94caffSDavid Greenman 
2551c7c3c6aSMatthew Dillon #if 0
25624a1cce3SDavid Greenman 	/*
2571c7c3c6aSMatthew Dillon 	 * XXX REMOVED XXX.  vm_object_collapse() can block, which can
2581c7c3c6aSMatthew Dillon 	 * change the page state.  Calling vm_object_collapse() might also
2591c7c3c6aSMatthew Dillon 	 * destroy or rename the page because we have not busied it yet!!!
2601c7c3c6aSMatthew Dillon 	 * So this code segment is removed.
2611c7c3c6aSMatthew Dillon 	 */
2621c7c3c6aSMatthew Dillon 	/*
2631c7c3c6aSMatthew Dillon 	 * Try collapsing before it's too late.   XXX huh?  Why are we doing
2641c7c3c6aSMatthew Dillon 	 * this here?
26524a1cce3SDavid Greenman 	 */
2668f9110f6SJohn Dyson 	if (object->backing_object) {
26726f9a767SRodney W. Grimes 		vm_object_collapse(object);
26826f9a767SRodney W. Grimes 	}
2691c7c3c6aSMatthew Dillon #endif
2703c018e72SJohn Dyson 
271f35329acSJohn Dyson 	mc[vm_pageout_page_count] = m;
27226f9a767SRodney W. Grimes 	pageout_count = 1;
273f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
27424a1cce3SDavid Greenman 	forward_okay = TRUE;
275a316d390SJohn Dyson 	if (pindex != 0)
27624a1cce3SDavid Greenman 		backward_okay = TRUE;
27726f9a767SRodney W. Grimes 	else
27824a1cce3SDavid Greenman 		backward_okay = FALSE;
27924a1cce3SDavid Greenman 	/*
28024a1cce3SDavid Greenman 	 * Scan object for clusterable pages.
28124a1cce3SDavid Greenman 	 *
28224a1cce3SDavid Greenman 	 * We can cluster ONLY if: ->> the page is NOT
28324a1cce3SDavid Greenman 	 * clean, wired, busy, held, or mapped into a
28424a1cce3SDavid Greenman 	 * buffer, and one of the following:
28524a1cce3SDavid Greenman 	 * 1) The page is inactive, or a seldom used
28624a1cce3SDavid Greenman 	 *    active page.
28724a1cce3SDavid Greenman 	 * -or-
28824a1cce3SDavid Greenman 	 * 2) we force the issue.
28924a1cce3SDavid Greenman 	 */
29024a1cce3SDavid Greenman 	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
29124a1cce3SDavid Greenman 		vm_page_t p;
292f6b04d2bSDavid Greenman 
29324a1cce3SDavid Greenman 		/*
29424a1cce3SDavid Greenman 		 * See if forward page is clusterable.
29524a1cce3SDavid Greenman 		 */
29624a1cce3SDavid Greenman 		if (forward_okay) {
29724a1cce3SDavid Greenman 			/*
29824a1cce3SDavid Greenman 			 * Stop forward scan at end of object.
29924a1cce3SDavid Greenman 			 */
300a316d390SJohn Dyson 			if ((pindex + i) > object->size) {
30124a1cce3SDavid Greenman 				forward_okay = FALSE;
30224a1cce3SDavid Greenman 				goto do_backward;
303f6b04d2bSDavid Greenman 			}
304a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex + i);
30524a1cce3SDavid Greenman 			if (p) {
3065070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
3075070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
30824a1cce3SDavid Greenman 					forward_okay = FALSE;
30924a1cce3SDavid Greenman 					goto do_backward;
310f6b04d2bSDavid Greenman 				}
31124a1cce3SDavid Greenman 				vm_page_test_dirty(p);
31224a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
3138f9110f6SJohn Dyson 				    (p->queue == PQ_INACTIVE) &&
31424a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
31524a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
316f35329acSJohn Dyson 					mc[vm_pageout_page_count + i] = p;
31724a1cce3SDavid Greenman 					pageout_count++;
31824a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
31924a1cce3SDavid Greenman 						break;
32024a1cce3SDavid Greenman 				} else {
32124a1cce3SDavid Greenman 					forward_okay = FALSE;
322f6b04d2bSDavid Greenman 				}
32324a1cce3SDavid Greenman 			} else {
32424a1cce3SDavid Greenman 				forward_okay = FALSE;
32524a1cce3SDavid Greenman 			}
32624a1cce3SDavid Greenman 		}
32724a1cce3SDavid Greenman do_backward:
32824a1cce3SDavid Greenman 		/*
32924a1cce3SDavid Greenman 		 * See if backward page is clusterable.
33024a1cce3SDavid Greenman 		 */
33124a1cce3SDavid Greenman 		if (backward_okay) {
33224a1cce3SDavid Greenman 			/*
33324a1cce3SDavid Greenman 			 * Stop backward scan at beginning of object.
33424a1cce3SDavid Greenman 			 */
			/*
			 * NOTE(review): unlike the forward case there is no
			 * continue/goto here, so when (pindex - i) == 0 the
			 * page at index 0 is still examined this iteration
			 * (the scan only stops afterwards).  This appears
			 * deliberate -- it lets page 0 join the cluster --
			 * but confirm before restructuring.
			 */
335a316d390SJohn Dyson 			if ((pindex - i) == 0) {
33624a1cce3SDavid Greenman 				backward_okay = FALSE;
33724a1cce3SDavid Greenman 			}
338a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex - i);
33924a1cce3SDavid Greenman 			if (p) {
3405070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
3415070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
34224a1cce3SDavid Greenman 					backward_okay = FALSE;
34324a1cce3SDavid Greenman 					continue;
34424a1cce3SDavid Greenman 				}
34524a1cce3SDavid Greenman 				vm_page_test_dirty(p);
34624a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
3478f9110f6SJohn Dyson 				    (p->queue == PQ_INACTIVE) &&
34824a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
34924a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
350f35329acSJohn Dyson 					mc[vm_pageout_page_count - i] = p;
35124a1cce3SDavid Greenman 					pageout_count++;
35224a1cce3SDavid Greenman 					page_base--;
35324a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
35424a1cce3SDavid Greenman 						break;
35524a1cce3SDavid Greenman 				} else {
35624a1cce3SDavid Greenman 					backward_okay = FALSE;
35724a1cce3SDavid Greenman 				}
35824a1cce3SDavid Greenman 			} else {
35924a1cce3SDavid Greenman 				backward_okay = FALSE;
36024a1cce3SDavid Greenman 			}
361f6b04d2bSDavid Greenman 		}
362f6b04d2bSDavid Greenman 	}
363f6b04d2bSDavid Greenman 
36467bf6868SJohn Dyson 	/*
36567bf6868SJohn Dyson 	 * we allow reads during pageouts...
36667bf6868SJohn Dyson 	 */
3678f9110f6SJohn Dyson 	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
368aef922f5SJohn Dyson }
369aef922f5SJohn Dyson 
3701c7c3c6aSMatthew Dillon /*
3711c7c3c6aSMatthew Dillon  * vm_pageout_flush() - launder the given pages
3721c7c3c6aSMatthew Dillon  *
3731c7c3c6aSMatthew Dillon  *	The given pages are laundered.  Note that we setup for the start of
3741c7c3c6aSMatthew Dillon  *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
3751c7c3c6aSMatthew Dillon  *	reference count all in here rather then in the parent.  If we want
3761c7c3c6aSMatthew Dillon  *	the parent to do more sophisticated things we may have to change
3771c7c3c6aSMatthew Dillon  *	the ordering.
3781c7c3c6aSMatthew Dillon  */
3791c7c3c6aSMatthew Dillon 
/*
 * mc: array of pages to write, all belonging to the same object (the
 *     object is taken from mc[0]).  count: number of pages.  flags are
 *     passed through to vm_pager_put_pages() (OBJPC_SYNC is forced for
 *     kernel_object pages).  Returns the number of pages whose pageout
 *     completed (VM_PAGER_OK) or is still pending (VM_PAGER_PEND).
 */
380aef922f5SJohn Dyson int
3818f9110f6SJohn Dyson vm_pageout_flush(mc, count, flags)
382aef922f5SJohn Dyson 	vm_page_t *mc;
383aef922f5SJohn Dyson 	int count;
3848f9110f6SJohn Dyson 	int flags;
385aef922f5SJohn Dyson {
386aef922f5SJohn Dyson 	register vm_object_t object;
387aef922f5SJohn Dyson 	int pageout_status[count];
38895461b45SJohn Dyson 	int numpagedout = 0;
389aef922f5SJohn Dyson 	int i;
390aef922f5SJohn Dyson 
3911c7c3c6aSMatthew Dillon 	/*
3921c7c3c6aSMatthew Dillon 	 * Initiate I/O.  Bump the vm_page_t->busy counter and
3931c7c3c6aSMatthew Dillon 	 * mark the pages read-only.
3941c7c3c6aSMatthew Dillon 	 *
3951c7c3c6aSMatthew Dillon 	 * We do not have to fixup the clean/dirty bits here... we can
3961c7c3c6aSMatthew Dillon 	 * allow the pager to do it after the I/O completes.
3971c7c3c6aSMatthew Dillon 	 */
3981c7c3c6aSMatthew Dillon 
3998f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
400e69763a3SDoug Rabson 		vm_page_io_start(mc[i]);
4018f9110f6SJohn Dyson 		vm_page_protect(mc[i], VM_PROT_READ);
4028f9110f6SJohn Dyson 	}
4038f9110f6SJohn Dyson 
404aef922f5SJohn Dyson 	object = mc[0]->object;
	/* One paging-in-progress reference per page; dropped below for each
	 * page whose I/O completes synchronously. */
405d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
406aef922f5SJohn Dyson 
407aef922f5SJohn Dyson 	vm_pager_put_pages(object, mc, count,
4088f9110f6SJohn Dyson 	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
40926f9a767SRodney W. Grimes 	    pageout_status);
41026f9a767SRodney W. Grimes 
	/* Post-I/O accounting: inspect the pager's per-page status codes. */
411aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
412aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
41324a1cce3SDavid Greenman 
41426f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
41526f9a767SRodney W. Grimes 		case VM_PAGER_OK:
41695461b45SJohn Dyson 			numpagedout++;
41726f9a767SRodney W. Grimes 			break;
41826f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
41995461b45SJohn Dyson 			numpagedout++;
42026f9a767SRodney W. Grimes 			break;
42126f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
42226f9a767SRodney W. Grimes 			/*
4230d94caffSDavid Greenman 			 * Page outside of range of object. Right now we
4240d94caffSDavid Greenman 			 * essentially lose the changes by pretending it
4250d94caffSDavid Greenman 			 * worked.
42626f9a767SRodney W. Grimes 			 */
42767bf6868SJohn Dyson 			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
42824a1cce3SDavid Greenman 			mt->dirty = 0;
42926f9a767SRodney W. Grimes 			break;
43026f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
43126f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
43226f9a767SRodney W. Grimes 			/*
4330d94caffSDavid Greenman 			 * If page couldn't be paged out, then reactivate the
4340d94caffSDavid Greenman 			 * page so it doesn't clog the inactive list.  (We
4350d94caffSDavid Greenman 			 * will try paging out it again later).
43626f9a767SRodney W. Grimes 			 */
43724a1cce3SDavid Greenman 			vm_page_activate(mt);
43826f9a767SRodney W. Grimes 			break;
43926f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
44026f9a767SRodney W. Grimes 			break;
44126f9a767SRodney W. Grimes 		}
44226f9a767SRodney W. Grimes 
44326f9a767SRodney W. Grimes 		/*
4440d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
4450d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
4460d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
4470d94caffSDavid Greenman 		 * collapse.
44826f9a767SRodney W. Grimes 		 */
44926f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
450f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
451e69763a3SDoug Rabson 			vm_page_io_finish(mt);
45226f9a767SRodney W. Grimes 		}
45326f9a767SRodney W. Grimes 	}
45495461b45SJohn Dyson 	return numpagedout;
45526f9a767SRodney W. Grimes }
45626f9a767SRodney W. Grimes 
45738efa82bSJohn Dyson #if !defined(NO_SWAPPING)
45826f9a767SRodney W. Grimes /*
45926f9a767SRodney W. Grimes  *	vm_pageout_object_deactivate_pages
46026f9a767SRodney W. Grimes  *
46126f9a767SRodney W. Grimes  *	deactivate enough pages to satisfy the inactive target
46226f9a767SRodney W. Grimes  *	requirements or if vm_page_proc_limit is set, then
46326f9a767SRodney W. Grimes  *	deactivate all of the pages in the object and its
46424a1cce3SDavid Greenman  *	backing_objects.
46526f9a767SRodney W. Grimes  *
46626f9a767SRodney W. Grimes  *	The object and map must be locked.
46726f9a767SRodney W. Grimes  */
/*
 * map/object: the address space and object chain to shrink.
 * desired: target resident page count for the map's pmap; the walk stops
 *          as soon as pmap_resident_count() drops to or below it.
 * map_remove_only: when nonzero, active referenced pages are requeued
 *          rather than deactivated (also forced when the object is
 *          shadowed more than once).
 * Iterates the object and then its backing_object chain; device-pager
 * objects and objects with paging in progress are left alone.
 */
46838efa82bSJohn Dyson static void
46938efa82bSJohn Dyson vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
47026f9a767SRodney W. Grimes 	vm_map_t map;
47126f9a767SRodney W. Grimes 	vm_object_t object;
47238efa82bSJohn Dyson 	vm_pindex_t desired;
4730d94caffSDavid Greenman 	int map_remove_only;
47426f9a767SRodney W. Grimes {
47526f9a767SRodney W. Grimes 	register vm_page_t p, next;
47626f9a767SRodney W. Grimes 	int rcount;
47738efa82bSJohn Dyson 	int remove_mode;
4781eeaa1e3SJohn Dyson 	int s;
47926f9a767SRodney W. Grimes 
48024a1cce3SDavid Greenman 	if (object->type == OBJT_DEVICE)
48138efa82bSJohn Dyson 		return;
4828f895206SDavid Greenman 
48338efa82bSJohn Dyson 	while (object) {
484b1028ad1SLuoqi Chen 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
48538efa82bSJohn Dyson 			return;
48624a1cce3SDavid Greenman 		if (object->paging_in_progress)
48738efa82bSJohn Dyson 			return;
48826f9a767SRodney W. Grimes 
48938efa82bSJohn Dyson 		remove_mode = map_remove_only;
49038efa82bSJohn Dyson 		if (object->shadow_count > 1)
49138efa82bSJohn Dyson 			remove_mode = 1;
49226f9a767SRodney W. Grimes 	/*
49326f9a767SRodney W. Grimes 	 * scan the objects entire memory queue
49426f9a767SRodney W. Grimes 	 */
49526f9a767SRodney W. Grimes 		rcount = object->resident_page_count;
496b18bfc3dSJohn Dyson 		p = TAILQ_FIRST(&object->memq);
49726f9a767SRodney W. Grimes 		while (p && (rcount-- > 0)) {
4987e006499SJohn Dyson 			int actcount;
499b1028ad1SLuoqi Chen 			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
50038efa82bSJohn Dyson 				return;
			/* Grab the successor first: p may be moved between
			 * page queues below, but listq (the object's memq
			 * linkage) is unaffected by queue changes. */
501b18bfc3dSJohn Dyson 			next = TAILQ_NEXT(p, listq);
502a58d1fa1SDavid Greenman 			cnt.v_pdpages++;
5030d94caffSDavid Greenman 			if (p->wire_count != 0 ||
5040d94caffSDavid Greenman 			    p->hold_count != 0 ||
5050d94caffSDavid Greenman 			    p->busy != 0 ||
506bd7e5f99SJohn Dyson 			    (p->flags & PG_BUSY) ||
5070d94caffSDavid Greenman 			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
5080d94caffSDavid Greenman 				p = next;
5090d94caffSDavid Greenman 				continue;
5100d94caffSDavid Greenman 			}
511ef743ce6SJohn Dyson 
			/* Harvest hardware reference bits into actcount /
			 * PG_REFERENCED before deciding the page's fate. */
5127e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
5137e006499SJohn Dyson 			if (actcount) {
514e69763a3SDoug Rabson 				vm_page_flag_set(p, PG_REFERENCED);
515c8c4b40cSJohn Dyson 			} else if (p->flags & PG_REFERENCED) {
5167e006499SJohn Dyson 				actcount = 1;
517ef743ce6SJohn Dyson 			}
518ef743ce6SJohn Dyson 
51938efa82bSJohn Dyson 			if ((p->queue != PQ_ACTIVE) &&
52038efa82bSJohn Dyson 				(p->flags & PG_REFERENCED)) {
521ef743ce6SJohn Dyson 				vm_page_activate(p);
5227e006499SJohn Dyson 				p->act_count += actcount;
523e69763a3SDoug Rabson 				vm_page_flag_clear(p, PG_REFERENCED);
524c8c4b40cSJohn Dyson 			} else if (p->queue == PQ_ACTIVE) {
525ef743ce6SJohn Dyson 				if ((p->flags & PG_REFERENCED) == 0) {
526c8c4b40cSJohn Dyson 					p->act_count -= min(p->act_count, ACT_DECLINE);
527c8c4b40cSJohn Dyson 					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
528b18bfc3dSJohn Dyson 						vm_page_protect(p, VM_PROT_NONE);
52926f9a767SRodney W. Grimes 						vm_page_deactivate(p);
53026f9a767SRodney W. Grimes 					} else {
						/* Not deactivated: rotate to
						 * the tail of the active
						 * queue under splvm(). */
531c8c4b40cSJohn Dyson 						s = splvm();
532c8c4b40cSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
533c8c4b40cSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
534c8c4b40cSJohn Dyson 						splx(s);
535c8c4b40cSJohn Dyson 					}
536c8c4b40cSJohn Dyson 				} else {
537eaf13dd7SJohn Dyson 					vm_page_activate(p);
538e69763a3SDoug Rabson 					vm_page_flag_clear(p, PG_REFERENCED);
53938efa82bSJohn Dyson 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
54038efa82bSJohn Dyson 						p->act_count += ACT_ADVANCE;
5411eeaa1e3SJohn Dyson 					s = splvm();
54226f9a767SRodney W. Grimes 					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
54326f9a767SRodney W. Grimes 					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
5441eeaa1e3SJohn Dyson 					splx(s);
54526f9a767SRodney W. Grimes 				}
546bd7e5f99SJohn Dyson 			} else if (p->queue == PQ_INACTIVE) {
547f919ebdeSDavid Greenman 				vm_page_protect(p, VM_PROT_NONE);
54826f9a767SRodney W. Grimes 			}
54926f9a767SRodney W. Grimes 			p = next;
55026f9a767SRodney W. Grimes 		}
55138efa82bSJohn Dyson 		object = object->backing_object;
55238efa82bSJohn Dyson 	}
55338efa82bSJohn Dyson 	return;
55426f9a767SRodney W. Grimes }
55526f9a767SRodney W. Grimes 
55626f9a767SRodney W. Grimes /*
55726f9a767SRodney W. Grimes  * deactivate some number of pages in a map, try to do it fairly, but
55826f9a767SRodney W. Grimes  * that is really hard to do.
55926f9a767SRodney W. Grimes  */
/*
 * map: the address space to shrink toward `desired` resident pages.
 * Two-phase scan: (1) find the single largest lightly-shadowed object
 * and deactivate from it first, (2) walk the remaining entries in map
 * order.  A desired count of 0 additionally tears down all user
 * mappings (the process is being fully swapped out), freeing page
 * table pages.  The map lock is taken with LK_NOWAIT, so the whole
 * pass is skipped if the lock is contended.
 */
560cd41fc12SDavid Greenman static void
56138efa82bSJohn Dyson vm_pageout_map_deactivate_pages(map, desired)
56226f9a767SRodney W. Grimes 	vm_map_t map;
56338efa82bSJohn Dyson 	vm_pindex_t desired;
56426f9a767SRodney W. Grimes {
56526f9a767SRodney W. Grimes 	vm_map_entry_t tmpe;
56638efa82bSJohn Dyson 	vm_object_t obj, bigobj;
5670d94caffSDavid Greenman 
568996c772fSJohn Dyson 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
56926f9a767SRodney W. Grimes 		return;
57026f9a767SRodney W. Grimes 	}
57138efa82bSJohn Dyson 
57238efa82bSJohn Dyson 	bigobj = NULL;
57338efa82bSJohn Dyson 
57438efa82bSJohn Dyson 	/*
57538efa82bSJohn Dyson 	 * first, search out the biggest object, and try to free pages from
57638efa82bSJohn Dyson 	 * that.
57738efa82bSJohn Dyson 	 */
57826f9a767SRodney W. Grimes 	tmpe = map->header.next;
57938efa82bSJohn Dyson 	while (tmpe != &map->header) {
5809fdfe602SMatthew Dillon 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
58138efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
58238efa82bSJohn Dyson 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
58338efa82bSJohn Dyson 				((bigobj == NULL) ||
58438efa82bSJohn Dyson 				 (bigobj->resident_page_count < obj->resident_page_count))) {
58538efa82bSJohn Dyson 				bigobj = obj;
58638efa82bSJohn Dyson 			}
58738efa82bSJohn Dyson 		}
58838efa82bSJohn Dyson 		tmpe = tmpe->next;
58938efa82bSJohn Dyson 	}
59038efa82bSJohn Dyson 
59138efa82bSJohn Dyson 	if (bigobj)
59238efa82bSJohn Dyson 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
59338efa82bSJohn Dyson 
59438efa82bSJohn Dyson 	/*
59538efa82bSJohn Dyson 	 * Next, hunt around for other pages to deactivate.  We actually
59638efa82bSJohn Dyson 	 * do this search sort of wrong -- .text first is not the best idea.
59738efa82bSJohn Dyson 	 */
59838efa82bSJohn Dyson 	tmpe = map->header.next;
59938efa82bSJohn Dyson 	while (tmpe != &map->header) {
600b1028ad1SLuoqi Chen 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
60138efa82bSJohn Dyson 			break;
6029fdfe602SMatthew Dillon 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
60338efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
60401155bd7SDavid Greenman 			if (obj)
60538efa82bSJohn Dyson 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
60638efa82bSJohn Dyson 		}
60726f9a767SRodney W. Grimes 		tmpe = tmpe->next;
60826f9a767SRodney W. Grimes 	};
60938efa82bSJohn Dyson 
61038efa82bSJohn Dyson 	/*
61138efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out, this will free page
61238efa82bSJohn Dyson 	 * table pages.
61338efa82bSJohn Dyson 	 */
61438efa82bSJohn Dyson 	if (desired == 0)
61538efa82bSJohn Dyson 		pmap_remove(vm_map_pmap(map),
61638efa82bSJohn Dyson 			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
61738efa82bSJohn Dyson 	vm_map_unlock(map);
61826f9a767SRodney W. Grimes 	return;
61926f9a767SRodney W. Grimes }
62038efa82bSJohn Dyson #endif
621df8bae1dSRodney W. Grimes 
6221c7c3c6aSMatthew Dillon /*
6231c7c3c6aSMatthew Dillon  * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
6241c7c3c6aSMatthew Dillon  * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
6251c7c3c6aSMatthew Dillon  * which we know can be trivially freed.
6261c7c3c6aSMatthew Dillon  */
6271c7c3c6aSMatthew Dillon 
628925a3a41SJohn Dyson void
629925a3a41SJohn Dyson vm_pageout_page_free(vm_page_t m) {
6301c7c3c6aSMatthew Dillon 	vm_object_t object = m->object;
6311c7c3c6aSMatthew Dillon 	int type = object->type;
632925a3a41SJohn Dyson 
6331c7c3c6aSMatthew Dillon 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
6341c7c3c6aSMatthew Dillon 		vm_object_reference(object);
635e69763a3SDoug Rabson 	vm_page_busy(m);
636925a3a41SJohn Dyson 	vm_page_protect(m, VM_PROT_NONE);
637925a3a41SJohn Dyson 	vm_page_free(m);
6381c7c3c6aSMatthew Dillon 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
63947221757SJohn Dyson 		vm_object_deallocate(object);
640925a3a41SJohn Dyson }
641925a3a41SJohn Dyson 
642df8bae1dSRodney W. Grimes /*
643df8bae1dSRodney W. Grimes  *	vm_pageout_scan does the dirty work for the pageout daemon.
644df8bae1dSRodney W. Grimes  */
6453af76890SPoul-Henning Kamp static int
646df8bae1dSRodney W. Grimes vm_pageout_scan()
647df8bae1dSRodney W. Grimes {
648502ba6e4SJohn Dyson 	vm_page_t m, next;
6491c7c3c6aSMatthew Dillon 	int page_shortage, maxscan, pcount;
6501c7c3c6aSMatthew Dillon 	int addl_page_shortage, addl_page_shortage_init;
65170111b90SJohn Dyson 	int maxlaunder;
6521c7c3c6aSMatthew Dillon 	int launder_loop = 0;
6535663e6deSDavid Greenman 	struct proc *p, *bigproc;
6545663e6deSDavid Greenman 	vm_offset_t size, bigsize;
655df8bae1dSRodney W. Grimes 	vm_object_t object;
65626f9a767SRodney W. Grimes 	int force_wakeup = 0;
6577e006499SJohn Dyson 	int actcount;
658f6b04d2bSDavid Greenman 	int vnodes_skipped = 0;
6591eeaa1e3SJohn Dyson 	int s;
6600d94caffSDavid Greenman 
661df8bae1dSRodney W. Grimes 	/*
6625985940eSJohn Dyson 	 * Do whatever cleanup that the pmap code can.
6635985940eSJohn Dyson 	 */
6645985940eSJohn Dyson 	pmap_collect();
6655985940eSJohn Dyson 
6661c7c3c6aSMatthew Dillon 	addl_page_shortage_init = vm_pageout_deficit;
66795461b45SJohn Dyson 	vm_pageout_deficit = 0;
668b182ec9eSJohn Dyson 
669ceb0cf87SJohn Dyson 	if (max_page_launder == 0)
670ceb0cf87SJohn Dyson 		max_page_launder = 1;
6711c7c3c6aSMatthew Dillon 
6721c7c3c6aSMatthew Dillon 	/*
6731c7c3c6aSMatthew Dillon 	 * Calculate the number of pages we want to either free or move
6741c7c3c6aSMatthew Dillon 	 * to the cache.
6751c7c3c6aSMatthew Dillon 	 */
6761c7c3c6aSMatthew Dillon 
6771c7c3c6aSMatthew Dillon 	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
6781c7c3c6aSMatthew Dillon 	    (cnt.v_free_count + cnt.v_cache_count);
6791c7c3c6aSMatthew Dillon 	page_shortage += addl_page_shortage_init;
6801c7c3c6aSMatthew Dillon 
6811c7c3c6aSMatthew Dillon 	/*
6821c7c3c6aSMatthew Dillon 	 * Figure out what to do with dirty pages when they are encountered.
6831c7c3c6aSMatthew Dillon 	 * Assume that 1/3 of the pages on the inactive list are clean.  If
6841c7c3c6aSMatthew Dillon 	 * we think we can reach our target, disable laundering (do not
6851c7c3c6aSMatthew Dillon 	 * clean any dirty pages).  If we miss the target we will loop back
6861c7c3c6aSMatthew Dillon 	 * up and do a laundering run.
6871c7c3c6aSMatthew Dillon 	 */
6881c7c3c6aSMatthew Dillon 
6891c7c3c6aSMatthew Dillon 	if (cnt.v_inactive_count / 3 > page_shortage) {
6901c7c3c6aSMatthew Dillon 		maxlaunder = 0;
6911c7c3c6aSMatthew Dillon 		launder_loop = 0;
6921c7c3c6aSMatthew Dillon 	} else {
6931c7c3c6aSMatthew Dillon 		maxlaunder =
6941c7c3c6aSMatthew Dillon 		    (cnt.v_inactive_target > max_page_launder) ?
695ceb0cf87SJohn Dyson 		    max_page_launder : cnt.v_inactive_target;
6961c7c3c6aSMatthew Dillon 		launder_loop = 1;
6971c7c3c6aSMatthew Dillon 	}
6981c7c3c6aSMatthew Dillon 
6991c7c3c6aSMatthew Dillon 	/*
7001c7c3c6aSMatthew Dillon 	 * Start scanning the inactive queue for pages we can move to the
7011c7c3c6aSMatthew Dillon 	 * cache or free.  The scan will stop when the target is reached or
7021c7c3c6aSMatthew Dillon 	 * we have scanned the entire inactive queue.
7031c7c3c6aSMatthew Dillon 	 */
70470111b90SJohn Dyson 
70567bf6868SJohn Dyson rescan0:
7061c7c3c6aSMatthew Dillon 	addl_page_shortage = addl_page_shortage_init;
707f6b04d2bSDavid Greenman 	maxscan = cnt.v_inactive_count;
7081c7c3c6aSMatthew Dillon 	for (
7091c7c3c6aSMatthew Dillon 	    m = TAILQ_FIRST(&vm_page_queue_inactive);
7101c7c3c6aSMatthew Dillon 	    m != NULL && maxscan-- > 0 && page_shortage > 0;
7111c7c3c6aSMatthew Dillon 	    m = next
7121c7c3c6aSMatthew Dillon 	) {
713df8bae1dSRodney W. Grimes 
714a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
715b182ec9eSJohn Dyson 
716f35329acSJohn Dyson 		if (m->queue != PQ_INACTIVE) {
71767bf6868SJohn Dyson 			goto rescan0;
718f35329acSJohn Dyson 		}
719b182ec9eSJohn Dyson 
720b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
721df8bae1dSRodney W. Grimes 
722b182ec9eSJohn Dyson 		if (m->hold_count) {
723f35329acSJohn Dyson 			s = splvm();
724b182ec9eSJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
725b182ec9eSJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
726f35329acSJohn Dyson 			splx(s);
727b182ec9eSJohn Dyson 			addl_page_shortage++;
728b182ec9eSJohn Dyson 			continue;
729df8bae1dSRodney W. Grimes 		}
73026f9a767SRodney W. Grimes 		/*
731b18bfc3dSJohn Dyson 		 * Dont mess with busy pages, keep in the front of the
732b18bfc3dSJohn Dyson 		 * queue, most likely are being paged out.
73326f9a767SRodney W. Grimes 		 */
734bd7e5f99SJohn Dyson 		if (m->busy || (m->flags & PG_BUSY)) {
735b182ec9eSJohn Dyson 			addl_page_shortage++;
73626f9a767SRodney W. Grimes 			continue;
73726f9a767SRodney W. Grimes 		}
738bd7e5f99SJohn Dyson 
7397e006499SJohn Dyson 		/*
7401c7c3c6aSMatthew Dillon 		 * If the object is not being used, we ignore previous
7411c7c3c6aSMatthew Dillon 		 * references.
7427e006499SJohn Dyson 		 */
7430d94caffSDavid Greenman 		if (m->object->ref_count == 0) {
744e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
74567bf6868SJohn Dyson 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
7467e006499SJohn Dyson 
7477e006499SJohn Dyson 		/*
7481c7c3c6aSMatthew Dillon 		 * Otherwise, if the page has been referenced while in the
7491c7c3c6aSMatthew Dillon 		 * inactive queue, we bump the "activation count" upwards,
7501c7c3c6aSMatthew Dillon 		 * making it less likely that the page will be added back to
7511c7c3c6aSMatthew Dillon 		 * the inactive queue prematurely again.  Here we check the
7521c7c3c6aSMatthew Dillon 		 * page tables (or emulated bits, if any), given the upper
7531c7c3c6aSMatthew Dillon 		 * level VM system not knowing anything about existing
7541c7c3c6aSMatthew Dillon 		 * references.
7557e006499SJohn Dyson 		 */
756ef743ce6SJohn Dyson 		} else if (((m->flags & PG_REFERENCED) == 0) &&
7577e006499SJohn Dyson 			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
758ef743ce6SJohn Dyson 			vm_page_activate(m);
7597e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE);
760ef743ce6SJohn Dyson 			continue;
7612fe6e4d7SDavid Greenman 		}
762ef743ce6SJohn Dyson 
7637e006499SJohn Dyson 		/*
7641c7c3c6aSMatthew Dillon 		 * If the upper level VM system knows about any page
7651c7c3c6aSMatthew Dillon 		 * references, we activate the page.  We also set the
7661c7c3c6aSMatthew Dillon 		 * "activation count" higher than normal so that we will less
7671c7c3c6aSMatthew Dillon 		 * likely place pages back onto the inactive queue again.
7687e006499SJohn Dyson 		 */
769bd7e5f99SJohn Dyson 		if ((m->flags & PG_REFERENCED) != 0) {
770e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
7717e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
77226f9a767SRodney W. Grimes 			vm_page_activate(m);
7737e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE + 1);
7740d94caffSDavid Greenman 			continue;
7750d94caffSDavid Greenman 		}
77667bf6868SJohn Dyson 
7777e006499SJohn Dyson 		/*
7781c7c3c6aSMatthew Dillon 		 * If the upper level VM system doesn't know anything about
7791c7c3c6aSMatthew Dillon 		 * the page being dirty, we have to check for it again.  As
7801c7c3c6aSMatthew Dillon 		 * far as the VM code knows, any partially dirty pages are
7811c7c3c6aSMatthew Dillon 		 * fully dirty.
7827e006499SJohn Dyson 		 */
783f6b04d2bSDavid Greenman 		if (m->dirty == 0) {
784bd7e5f99SJohn Dyson 			vm_page_test_dirty(m);
785427e99a0SAlexander Langer 		} else {
7867dbf82dcSMatthew Dillon 			vm_page_dirty(m);
78730dcfc09SJohn Dyson 		}
788ef743ce6SJohn Dyson 
7897e006499SJohn Dyson 		/*
7907e006499SJohn Dyson 		 * Invalid pages can be easily freed
7917e006499SJohn Dyson 		 */
7926d40c3d3SDavid Greenman 		if (m->valid == 0) {
793925a3a41SJohn Dyson 			vm_pageout_page_free(m);
79467bf6868SJohn Dyson 			cnt.v_dfree++;
7951c7c3c6aSMatthew Dillon 			--page_shortage;
7967e006499SJohn Dyson 
7977e006499SJohn Dyson 		/*
7987e006499SJohn Dyson 		 * Clean pages can be placed onto the cache queue.
7997e006499SJohn Dyson 		 */
800bd7e5f99SJohn Dyson 		} else if (m->dirty == 0) {
801bd7e5f99SJohn Dyson 			vm_page_cache(m);
8021c7c3c6aSMatthew Dillon 			--page_shortage;
8037e006499SJohn Dyson 
8047e006499SJohn Dyson 		/*
8057e006499SJohn Dyson 		 * Dirty pages need to be paged out.  Note that we clean
8067e006499SJohn Dyson 		 * only a limited number of pages per pagedaemon pass.
8077e006499SJohn Dyson 		 */
8080d94caffSDavid Greenman 		} else if (maxlaunder > 0) {
8090d94caffSDavid Greenman 			int written;
81012ac6a1dSJohn Dyson 			int swap_pageouts_ok;
811f6b04d2bSDavid Greenman 			struct vnode *vp = NULL;
8120d94caffSDavid Greenman 
8130d94caffSDavid Greenman 			object = m->object;
8147e006499SJohn Dyson 
81512ac6a1dSJohn Dyson 			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
81612ac6a1dSJohn Dyson 				swap_pageouts_ok = 1;
81712ac6a1dSJohn Dyson 			} else {
81812ac6a1dSJohn Dyson 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
81912ac6a1dSJohn Dyson 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
82012ac6a1dSJohn Dyson 					(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
82112ac6a1dSJohn Dyson 
82212ac6a1dSJohn Dyson 			}
82370111b90SJohn Dyson 
82470111b90SJohn Dyson 			/*
8251c7c3c6aSMatthew Dillon 			 * We don't bother paging objects that are "dead".
8261c7c3c6aSMatthew Dillon 			 * Those objects are in a "rundown" state.
82770111b90SJohn Dyson 			 */
82870111b90SJohn Dyson 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
82912ac6a1dSJohn Dyson 				s = splvm();
83012ac6a1dSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
83112ac6a1dSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
83212ac6a1dSJohn Dyson 				splx(s);
83312ac6a1dSJohn Dyson 				continue;
83412ac6a1dSJohn Dyson 			}
83512ac6a1dSJohn Dyson 
8361c7c3c6aSMatthew Dillon 			/*
8371c7c3c6aSMatthew Dillon 			 * For now we protect against potential memory
8381c7c3c6aSMatthew Dillon 			 * deadlocks by requiring significant memory to be
8391c7c3c6aSMatthew Dillon 			 * free if the object is not OBJT_DEFAULT or OBJT_SWAP.
8401c7c3c6aSMatthew Dillon 			 * We do not 'trust' any other object type to operate
8411c7c3c6aSMatthew Dillon 			 * with low memory, not even OBJT_DEVICE.  The VM
8421c7c3c6aSMatthew Dillon 			 * allocator will special case allocations done by
8431c7c3c6aSMatthew Dillon 			 * the pageout daemon so the check below actually
8441c7c3c6aSMatthew Dillon 			 * does have some hysteresis in it.  It isn't the best
8451c7c3c6aSMatthew Dillon 			 * solution, though.
8461c7c3c6aSMatthew Dillon 			 */
8471c7c3c6aSMatthew Dillon 
8481c7c3c6aSMatthew Dillon 			if (
8491c7c3c6aSMatthew Dillon 			    object->type != OBJT_DEFAULT &&
8501c7c3c6aSMatthew Dillon 			    object->type != OBJT_SWAP &&
8511c7c3c6aSMatthew Dillon 			    cnt.v_free_count < cnt.v_free_reserved
8521c7c3c6aSMatthew Dillon 			) {
8531c7c3c6aSMatthew Dillon 				s = splvm();
8541c7c3c6aSMatthew Dillon 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
8551c7c3c6aSMatthew Dillon 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
8561c7c3c6aSMatthew Dillon 				splx(s);
8571c7c3c6aSMatthew Dillon 				continue;
8581c7c3c6aSMatthew Dillon 			}
8591c7c3c6aSMatthew Dillon 
8601c7c3c6aSMatthew Dillon 			/*
8611c7c3c6aSMatthew Dillon 			 * Presumably we have sufficient free memory to do
8621c7c3c6aSMatthew Dillon 			 * the more sophisticated checks and locking required
8631c7c3c6aSMatthew Dillon 			 * for vnodes.
8641c7c3c6aSMatthew Dillon 			 *
8651c7c3c6aSMatthew Dillon 			 * The object is already known NOT to be dead.  The
8661c7c3c6aSMatthew Dillon 			 * vget() may still block, though, because
8671c7c3c6aSMatthew Dillon 			 * VOP_ISLOCKED() doesn't check to see if an inode
8681c7c3c6aSMatthew Dillon 			 * (v_data) is associated with the vnode.  If it isn't,
8691c7c3c6aSMatthew Dillon 			 * vget() will load in it from disk.  Worse, vget()
8701c7c3c6aSMatthew Dillon 			 * may actually get stuck waiting on "inode" if another
8711c7c3c6aSMatthew Dillon 			 * process is in the process of bringing the inode in.
8721c7c3c6aSMatthew Dillon 			 * This is bad news for us either way.
8731c7c3c6aSMatthew Dillon 			 *
8741c7c3c6aSMatthew Dillon 			 * So for the moment we check v_data == NULL as a
8751c7c3c6aSMatthew Dillon 			 * workaround.  This means that vnodes which do not
8761c7c3c6aSMatthew Dillon 			 * use v_data in the way we expect probably will not
8771c7c3c6aSMatthew Dillon 			 * wind up being paged out by the pager and it will be
8781c7c3c6aSMatthew Dillon 			 * up to the syncer to get them.  That's better then
8791c7c3c6aSMatthew Dillon 			 * us blocking here.
8801c7c3c6aSMatthew Dillon 			 *
8811c7c3c6aSMatthew Dillon 			 * This whole code section is bogus - we need to fix
8821c7c3c6aSMatthew Dillon 			 * the vnode pager to handle vm_page_t's without us
8831c7c3c6aSMatthew Dillon 			 * having to do any sophisticated VOP tests.
8841c7c3c6aSMatthew Dillon 			 */
8851c7c3c6aSMatthew Dillon 
8861c7c3c6aSMatthew Dillon 			if (object->type == OBJT_VNODE) {
88724a1cce3SDavid Greenman 				vp = object->handle;
8881c7c3c6aSMatthew Dillon 
889996c772fSJohn Dyson 				if (VOP_ISLOCKED(vp) ||
8901c7c3c6aSMatthew Dillon 				    vp->v_data == NULL ||
89147221757SJohn Dyson 				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
892b182ec9eSJohn Dyson 					if ((m->queue == PQ_INACTIVE) &&
893b182ec9eSJohn Dyson 						(m->hold_count == 0) &&
894b182ec9eSJohn Dyson 						(m->busy == 0) &&
895b182ec9eSJohn Dyson 						(m->flags & PG_BUSY) == 0) {
896f35329acSJohn Dyson 						s = splvm();
89785a376ebSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
89885a376ebSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
899f35329acSJohn Dyson 						splx(s);
90085a376ebSJohn Dyson 					}
901aef922f5SJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
902925a3a41SJohn Dyson 						vnodes_skipped++;
903b182ec9eSJohn Dyson 					continue;
90485a376ebSJohn Dyson 				}
905b182ec9eSJohn Dyson 
906f35329acSJohn Dyson 				/*
907f35329acSJohn Dyson 				 * The page might have been moved to another queue
908f35329acSJohn Dyson 				 * during potential blocking in vget() above.
909f35329acSJohn Dyson 				 */
910b182ec9eSJohn Dyson 				if (m->queue != PQ_INACTIVE) {
911b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
912925a3a41SJohn Dyson 						vnodes_skipped++;
913b182ec9eSJohn Dyson 					vput(vp);
914b182ec9eSJohn Dyson 					continue;
915b182ec9eSJohn Dyson 				}
916b182ec9eSJohn Dyson 
917f35329acSJohn Dyson 				/*
918f35329acSJohn Dyson 				 * The page may have been busied during the blocking in
919f35329acSJohn Dyson 				 * vput();  We don't move the page back onto the end of
920f35329acSJohn Dyson 				 * the queue so that statistics are more correct if we don't.
921f35329acSJohn Dyson 				 */
922b182ec9eSJohn Dyson 				if (m->busy || (m->flags & PG_BUSY)) {
923b182ec9eSJohn Dyson 					vput(vp);
924b182ec9eSJohn Dyson 					continue;
925b182ec9eSJohn Dyson 				}
926b182ec9eSJohn Dyson 
927f35329acSJohn Dyson 				/*
928f35329acSJohn Dyson 				 * If the page has become held, then skip it
929f35329acSJohn Dyson 				 */
930b182ec9eSJohn Dyson 				if (m->hold_count) {
931f35329acSJohn Dyson 					s = splvm();
932b182ec9eSJohn Dyson 					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
933b182ec9eSJohn Dyson 					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
934f35329acSJohn Dyson 					splx(s);
935b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
936925a3a41SJohn Dyson 						vnodes_skipped++;
937b182ec9eSJohn Dyson 					vput(vp);
938f6b04d2bSDavid Greenman 					continue;
939f6b04d2bSDavid Greenman 				}
940f6b04d2bSDavid Greenman 			}
941f6b04d2bSDavid Greenman 
9420d94caffSDavid Greenman 			/*
9430d94caffSDavid Greenman 			 * If a page is dirty, then it is either being washed
9440d94caffSDavid Greenman 			 * (but not yet cleaned) or it is still in the
9450d94caffSDavid Greenman 			 * laundry.  If it is still in the laundry, then we
9460d94caffSDavid Greenman 			 * start the cleaning operation.
9470d94caffSDavid Greenman 			 */
9488f9110f6SJohn Dyson 			written = vm_pageout_clean(m);
949f6b04d2bSDavid Greenman 			if (vp)
950f6b04d2bSDavid Greenman 				vput(vp);
951f6b04d2bSDavid Greenman 
9520d94caffSDavid Greenman 			maxlaunder -= written;
9530d94caffSDavid Greenman 		}
954df8bae1dSRodney W. Grimes 	}
95526f9a767SRodney W. Grimes 
956df8bae1dSRodney W. Grimes 	/*
9571c7c3c6aSMatthew Dillon 	 * If we still have a page shortage and we didn't launder anything,
9581c7c3c6aSMatthew Dillon 	 * run the inactive scan again and launder something this time.
959df8bae1dSRodney W. Grimes 	 */
9601c7c3c6aSMatthew Dillon 
9611c7c3c6aSMatthew Dillon 	if (launder_loop == 0 && page_shortage > 0) {
9621c7c3c6aSMatthew Dillon 		launder_loop = 1;
9631c7c3c6aSMatthew Dillon 		maxlaunder =
9641c7c3c6aSMatthew Dillon 		    (cnt.v_inactive_target > max_page_launder) ?
9651c7c3c6aSMatthew Dillon 		    max_page_launder : cnt.v_inactive_target;
9661c7c3c6aSMatthew Dillon 		goto rescan0;
9671c7c3c6aSMatthew Dillon 	}
9681c7c3c6aSMatthew Dillon 
9691c7c3c6aSMatthew Dillon 	/*
9701c7c3c6aSMatthew Dillon 	 * Compute the page shortage from the point of view of having to
9711c7c3c6aSMatthew Dillon 	 * move pages from the active queue to the inactive queue.
9721c7c3c6aSMatthew Dillon 	 */
9731c7c3c6aSMatthew Dillon 
974b182ec9eSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
9750d94caffSDavid Greenman 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
976b182ec9eSJohn Dyson 	page_shortage += addl_page_shortage;
9771c7c3c6aSMatthew Dillon 
9781c7c3c6aSMatthew Dillon 	/*
9791c7c3c6aSMatthew Dillon 	 * Scan the active queue for things we can deactivate
9801c7c3c6aSMatthew Dillon 	 */
98126f9a767SRodney W. Grimes 
982b18bfc3dSJohn Dyson 	pcount = cnt.v_active_count;
983b18bfc3dSJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
9841c7c3c6aSMatthew Dillon 
985b18bfc3dSJohn Dyson 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
986f35329acSJohn Dyson 
9877e006499SJohn Dyson 		/*
9887e006499SJohn Dyson 		 * This is a consistancy check, and should likely be a panic
9897e006499SJohn Dyson 		 * or warning.
9907e006499SJohn Dyson 		 */
991f35329acSJohn Dyson 		if (m->queue != PQ_ACTIVE) {
99238efa82bSJohn Dyson 			break;
993f35329acSJohn Dyson 		}
994f35329acSJohn Dyson 
995b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
996df8bae1dSRodney W. Grimes 		/*
99726f9a767SRodney W. Grimes 		 * Don't deactivate pages that are busy.
998df8bae1dSRodney W. Grimes 		 */
999a647a309SDavid Greenman 		if ((m->busy != 0) ||
10000d94caffSDavid Greenman 		    (m->flags & PG_BUSY) ||
1001f6b04d2bSDavid Greenman 		    (m->hold_count != 0)) {
1002f35329acSJohn Dyson 			s = splvm();
10036d40c3d3SDavid Greenman 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
10046d40c3d3SDavid Greenman 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1005f35329acSJohn Dyson 			splx(s);
100626f9a767SRodney W. Grimes 			m = next;
100726f9a767SRodney W. Grimes 			continue;
1008df8bae1dSRodney W. Grimes 		}
1009b18bfc3dSJohn Dyson 
1010b18bfc3dSJohn Dyson 		/*
1011b18bfc3dSJohn Dyson 		 * The count for pagedaemon pages is done after checking the
1012b18bfc3dSJohn Dyson 		 * page for eligbility...
1013b18bfc3dSJohn Dyson 		 */
1014b18bfc3dSJohn Dyson 		cnt.v_pdpages++;
1015ef743ce6SJohn Dyson 
10167e006499SJohn Dyson 		/*
10177e006499SJohn Dyson 		 * Check to see "how much" the page has been used.
10187e006499SJohn Dyson 		 */
10197e006499SJohn Dyson 		actcount = 0;
1020ef743ce6SJohn Dyson 		if (m->object->ref_count != 0) {
1021ef743ce6SJohn Dyson 			if (m->flags & PG_REFERENCED) {
10227e006499SJohn Dyson 				actcount += 1;
10230d94caffSDavid Greenman 			}
10247e006499SJohn Dyson 			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
10257e006499SJohn Dyson 			if (actcount) {
10267e006499SJohn Dyson 				m->act_count += ACT_ADVANCE + actcount;
102738efa82bSJohn Dyson 				if (m->act_count > ACT_MAX)
102838efa82bSJohn Dyson 					m->act_count = ACT_MAX;
102938efa82bSJohn Dyson 			}
1030b18bfc3dSJohn Dyson 		}
1031ef743ce6SJohn Dyson 
10327e006499SJohn Dyson 		/*
10337e006499SJohn Dyson 		 * Since we have "tested" this bit, we need to clear it now.
10347e006499SJohn Dyson 		 */
1035e69763a3SDoug Rabson 		vm_page_flag_clear(m, PG_REFERENCED);
1036ef743ce6SJohn Dyson 
10377e006499SJohn Dyson 		/*
10387e006499SJohn Dyson 		 * Only if an object is currently being used, do we use the
10397e006499SJohn Dyson 		 * page activation count stats.
10407e006499SJohn Dyson 		 */
10417e006499SJohn Dyson 		if (actcount && (m->object->ref_count != 0)) {
1042f35329acSJohn Dyson 			s = splvm();
104326f9a767SRodney W. Grimes 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
104426f9a767SRodney W. Grimes 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1045f35329acSJohn Dyson 			splx(s);
104626f9a767SRodney W. Grimes 		} else {
104738efa82bSJohn Dyson 			m->act_count -= min(m->act_count, ACT_DECLINE);
104838efa82bSJohn Dyson 			if (vm_pageout_algorithm_lru ||
104938efa82bSJohn Dyson 				(m->object->ref_count == 0) || (m->act_count == 0)) {
1050925a3a41SJohn Dyson 				page_shortage--;
1051d4a272dbSJohn Dyson 				if (m->object->ref_count == 0) {
1052ef743ce6SJohn Dyson 					vm_page_protect(m, VM_PROT_NONE);
1053d4a272dbSJohn Dyson 					if (m->dirty == 0)
10540d94caffSDavid Greenman 						vm_page_cache(m);
1055d4a272dbSJohn Dyson 					else
1056d4a272dbSJohn Dyson 						vm_page_deactivate(m);
10570d94caffSDavid Greenman 				} else {
105826f9a767SRodney W. Grimes 					vm_page_deactivate(m);
1059df8bae1dSRodney W. Grimes 				}
106038efa82bSJohn Dyson 			} else {
106138efa82bSJohn Dyson 				s = splvm();
106238efa82bSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
106338efa82bSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
106438efa82bSJohn Dyson 				splx(s);
106538efa82bSJohn Dyson 			}
1066df8bae1dSRodney W. Grimes 		}
106726f9a767SRodney W. Grimes 		m = next;
106826f9a767SRodney W. Grimes 	}
1069df8bae1dSRodney W. Grimes 
1070f35329acSJohn Dyson 	s = splvm();
10711c7c3c6aSMatthew Dillon 
1072df8bae1dSRodney W. Grimes 	/*
10730d94caffSDavid Greenman 	 * We try to maintain some *really* free pages, this allows interrupt
10741c7c3c6aSMatthew Dillon 	 * code to be guaranteed space.  Since both cache and free queues
10751c7c3c6aSMatthew Dillon 	 * are considered basically 'free', moving pages from cache to free
10761c7c3c6aSMatthew Dillon 	 * does not effect other calculations.
1077df8bae1dSRodney W. Grimes 	 */
10781c7c3c6aSMatthew Dillon 
1079a1f6d91cSDavid Greenman 	while (cnt.v_free_count < cnt.v_free_reserved) {
10805070c7f8SJohn Dyson 		static int cache_rover = 0;
1081faa273d5SMatthew Dillon 		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
10820d94caffSDavid Greenman 		if (!m)
10830d94caffSDavid Greenman 			break;
1084aaba53daSMatthew Dillon 		if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {
1085d044d7bfSMatthew Dillon #ifdef INVARIANTS
1086d044d7bfSMatthew Dillon 			printf("Warning: busy page %p found in cache\n", m);
1087d044d7bfSMatthew Dillon #endif
1088aaba53daSMatthew Dillon 			vm_page_deactivate(m);
1089aaba53daSMatthew Dillon 			continue;
1090aaba53daSMatthew Dillon 		}
10915070c7f8SJohn Dyson 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
1092925a3a41SJohn Dyson 		vm_pageout_page_free(m);
10930bb3a0d2SDavid Greenman 		cnt.v_dfree++;
109426f9a767SRodney W. Grimes 	}
1095f35329acSJohn Dyson 	splx(s);
10965663e6deSDavid Greenman 
1097ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING)
1098ceb0cf87SJohn Dyson 	/*
1099ceb0cf87SJohn Dyson 	 * Idle process swapout -- run once per second.
1100ceb0cf87SJohn Dyson 	 */
1101ceb0cf87SJohn Dyson 	if (vm_swap_idle_enabled) {
1102ceb0cf87SJohn Dyson 		static long lsec;
1103227ee8a1SPoul-Henning Kamp 		if (time_second != lsec) {
1104ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
1105ceb0cf87SJohn Dyson 			vm_req_vmdaemon();
1106227ee8a1SPoul-Henning Kamp 			lsec = time_second;
1107ceb0cf87SJohn Dyson 		}
1108ceb0cf87SJohn Dyson 	}
1109ceb0cf87SJohn Dyson #endif
1110ceb0cf87SJohn Dyson 
11115663e6deSDavid Greenman 	/*
1112f6b04d2bSDavid Greenman 	 * If we didn't get enough free pages, and we have skipped a vnode
11134c1f8ee9SDavid Greenman 	 * in a writeable object, wakeup the sync daemon.  And kick swapout
11144c1f8ee9SDavid Greenman 	 * if we did not get enough free pages.
1115f6b04d2bSDavid Greenman 	 */
1116bd7e5f99SJohn Dyson 	if ((cnt.v_cache_count + cnt.v_free_count) <
1117bd7e5f99SJohn Dyson 		(cnt.v_free_target + cnt.v_cache_min) ) {
1118f6b04d2bSDavid Greenman 		if (vnodes_skipped &&
1119f6b04d2bSDavid Greenman 		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
1120d50c1994SPeter Wemm 			(void) speedup_syncer();
1121f6b04d2bSDavid Greenman 		}
112238efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1123ceb0cf87SJohn Dyson 		if (vm_swap_enabled &&
112438efa82bSJohn Dyson 			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
11254c1f8ee9SDavid Greenman 			vm_req_vmdaemon();
1126ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
11274c1f8ee9SDavid Greenman 		}
11285afce282SDavid Greenman #endif
11294c1f8ee9SDavid Greenman 	}
11304c1f8ee9SDavid Greenman 
1131f6b04d2bSDavid Greenman 	/*
11320d94caffSDavid Greenman 	 * make sure that we have swap space -- if we are low on memory and
11330d94caffSDavid Greenman 	 * swap -- then kill the biggest process.
11345663e6deSDavid Greenman 	 */
11355663e6deSDavid Greenman 	if ((vm_swap_size == 0 || swap_pager_full) &&
11360d94caffSDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
11375663e6deSDavid Greenman 		bigproc = NULL;
11385663e6deSDavid Greenman 		bigsize = 0;
11391b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
11405663e6deSDavid Greenman 			/*
11415663e6deSDavid Greenman 			 * if this is a system process, skip it
11425663e6deSDavid Greenman 			 */
1143c8da68e9SPeter Wemm 			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
1144c8da68e9SPeter Wemm 			    (p->p_pid == 1) ||
114579221631SDavid Greenman 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
11465663e6deSDavid Greenman 				continue;
11475663e6deSDavid Greenman 			}
11485663e6deSDavid Greenman 			/*
11495663e6deSDavid Greenman 			 * if the process is in a non-running type state,
11505663e6deSDavid Greenman 			 * don't touch it.
11515663e6deSDavid Greenman 			 */
11525663e6deSDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
11535663e6deSDavid Greenman 				continue;
11545663e6deSDavid Greenman 			}
11555663e6deSDavid Greenman 			/*
11565663e6deSDavid Greenman 			 * get the process size
11575663e6deSDavid Greenman 			 */
1158b1028ad1SLuoqi Chen 			size = vmspace_resident_count(p->p_vmspace);
11595663e6deSDavid Greenman 			/*
11605663e6deSDavid Greenman 			 * if the this process is bigger than the biggest one
11615663e6deSDavid Greenman 			 * remember it.
11625663e6deSDavid Greenman 			 */
11635663e6deSDavid Greenman 			if (size > bigsize) {
11645663e6deSDavid Greenman 				bigproc = p;
11655663e6deSDavid Greenman 				bigsize = size;
11665663e6deSDavid Greenman 			}
11675663e6deSDavid Greenman 		}
11685663e6deSDavid Greenman 		if (bigproc != NULL) {
1169729b1e51SDavid Greenman 			killproc(bigproc, "out of swap space");
11705663e6deSDavid Greenman 			bigproc->p_estcpu = 0;
11715663e6deSDavid Greenman 			bigproc->p_nice = PRIO_MIN;
11725663e6deSDavid Greenman 			resetpriority(bigproc);
117324a1cce3SDavid Greenman 			wakeup(&cnt.v_free_count);
11745663e6deSDavid Greenman 		}
11755663e6deSDavid Greenman 	}
117626f9a767SRodney W. Grimes 	return force_wakeup;
117726f9a767SRodney W. Grimes }
117826f9a767SRodney W. Grimes 
1179dc2efb27SJohn Dyson /*
1180dc2efb27SJohn Dyson  * This routine tries to maintain the pseudo LRU active queue,
1181dc2efb27SJohn Dyson  * so that during long periods of time where there is no paging,
1182dc2efb27SJohn Dyson  * that some statistic accumlation still occurs.  This code
1183dc2efb27SJohn Dyson  * helps the situation where paging just starts to occur.
1184dc2efb27SJohn Dyson  */
1185dc2efb27SJohn Dyson static void
1186dc2efb27SJohn Dyson vm_pageout_page_stats()
1187dc2efb27SJohn Dyson {
1188dc2efb27SJohn Dyson 	int s;
1189dc2efb27SJohn Dyson 	vm_page_t m,next;
1190dc2efb27SJohn Dyson 	int pcount,tpcount;		/* Number of pages to check */
1191dc2efb27SJohn Dyson 	static int fullintervalcount = 0;
1192bef608bdSJohn Dyson 	int page_shortage;
1193bef608bdSJohn Dyson 
1194bef608bdSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1195bef608bdSJohn Dyson 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1196bef608bdSJohn Dyson 	if (page_shortage <= 0)
1197bef608bdSJohn Dyson 		return;
1198dc2efb27SJohn Dyson 
1199dc2efb27SJohn Dyson 	pcount = cnt.v_active_count;
1200dc2efb27SJohn Dyson 	fullintervalcount += vm_pageout_stats_interval;
1201dc2efb27SJohn Dyson 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1202dc2efb27SJohn Dyson 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1203dc2efb27SJohn Dyson 		if (pcount > tpcount)
1204dc2efb27SJohn Dyson 			pcount = tpcount;
1205dc2efb27SJohn Dyson 	}
1206dc2efb27SJohn Dyson 
1207dc2efb27SJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
1208dc2efb27SJohn Dyson 	while ((m != NULL) && (pcount-- > 0)) {
12097e006499SJohn Dyson 		int actcount;
1210dc2efb27SJohn Dyson 
1211dc2efb27SJohn Dyson 		if (m->queue != PQ_ACTIVE) {
1212dc2efb27SJohn Dyson 			break;
1213dc2efb27SJohn Dyson 		}
1214dc2efb27SJohn Dyson 
1215dc2efb27SJohn Dyson 		next = TAILQ_NEXT(m, pageq);
1216dc2efb27SJohn Dyson 		/*
1217dc2efb27SJohn Dyson 		 * Don't deactivate pages that are busy.
1218dc2efb27SJohn Dyson 		 */
1219dc2efb27SJohn Dyson 		if ((m->busy != 0) ||
1220dc2efb27SJohn Dyson 		    (m->flags & PG_BUSY) ||
1221dc2efb27SJohn Dyson 		    (m->hold_count != 0)) {
1222dc2efb27SJohn Dyson 			s = splvm();
1223dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1224dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1225dc2efb27SJohn Dyson 			splx(s);
1226dc2efb27SJohn Dyson 			m = next;
1227dc2efb27SJohn Dyson 			continue;
1228dc2efb27SJohn Dyson 		}
1229dc2efb27SJohn Dyson 
12307e006499SJohn Dyson 		actcount = 0;
1231dc2efb27SJohn Dyson 		if (m->flags & PG_REFERENCED) {
1232e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
12337e006499SJohn Dyson 			actcount += 1;
1234dc2efb27SJohn Dyson 		}
1235dc2efb27SJohn Dyson 
12367e006499SJohn Dyson 		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
12377e006499SJohn Dyson 		if (actcount) {
12387e006499SJohn Dyson 			m->act_count += ACT_ADVANCE + actcount;
1239dc2efb27SJohn Dyson 			if (m->act_count > ACT_MAX)
1240dc2efb27SJohn Dyson 				m->act_count = ACT_MAX;
1241dc2efb27SJohn Dyson 			s = splvm();
1242dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1243dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1244dc2efb27SJohn Dyson 			splx(s);
1245dc2efb27SJohn Dyson 		} else {
1246dc2efb27SJohn Dyson 			if (m->act_count == 0) {
12477e006499SJohn Dyson 				/*
12487e006499SJohn Dyson 				 * We turn off page access, so that we have more accurate
12497e006499SJohn Dyson 				 * RSS stats.  We don't do this in the normal page deactivation
12507e006499SJohn Dyson 				 * when the system is loaded VM wise, because the cost of
12517e006499SJohn Dyson 				 * the large number of page protect operations would be higher
12527e006499SJohn Dyson 				 * than the value of doing the operation.
12537e006499SJohn Dyson 				 */
1254dc2efb27SJohn Dyson 				vm_page_protect(m, VM_PROT_NONE);
1255dc2efb27SJohn Dyson 				vm_page_deactivate(m);
1256dc2efb27SJohn Dyson 			} else {
1257dc2efb27SJohn Dyson 				m->act_count -= min(m->act_count, ACT_DECLINE);
1258dc2efb27SJohn Dyson 				s = splvm();
1259dc2efb27SJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1260dc2efb27SJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1261dc2efb27SJohn Dyson 				splx(s);
1262dc2efb27SJohn Dyson 			}
1263dc2efb27SJohn Dyson 		}
1264dc2efb27SJohn Dyson 
1265dc2efb27SJohn Dyson 		m = next;
1266dc2efb27SJohn Dyson 	}
1267dc2efb27SJohn Dyson }
1268dc2efb27SJohn Dyson 
1269b182ec9eSJohn Dyson static int
1270b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1271b182ec9eSJohn Dyson vm_size_t count;
1272b182ec9eSJohn Dyson {
1273b182ec9eSJohn Dyson 	if (count < cnt.v_page_count)
1274b182ec9eSJohn Dyson 		 return 0;
1275b182ec9eSJohn Dyson 	/*
1276b182ec9eSJohn Dyson 	 * free_reserved needs to include enough for the largest swap pager
1277b182ec9eSJohn Dyson 	 * structures plus enough for any pv_entry structs when paging.
1278b182ec9eSJohn Dyson 	 */
1279b182ec9eSJohn Dyson 	if (cnt.v_page_count > 1024)
1280b182ec9eSJohn Dyson 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1281b182ec9eSJohn Dyson 	else
1282b182ec9eSJohn Dyson 		cnt.v_free_min = 4;
1283f35329acSJohn Dyson 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1284f35329acSJohn Dyson 		cnt.v_interrupt_free_min;
1285f35329acSJohn Dyson 	cnt.v_free_reserved = vm_pageout_page_count +
1286a15403deSJohn Dyson 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1287a2f4a846SJohn Dyson 	cnt.v_free_min += cnt.v_free_reserved;
1288b182ec9eSJohn Dyson 	return 1;
1289b182ec9eSJohn Dyson }
1290b182ec9eSJohn Dyson 
1291b182ec9eSJohn Dyson 
1292df8bae1dSRodney W. Grimes /*
1293df8bae1dSRodney W. Grimes  *	vm_pageout is the high level pageout daemon.
1294df8bae1dSRodney W. Grimes  */
12952b14f991SJulian Elischer static void
129626f9a767SRodney W. Grimes vm_pageout()
1297df8bae1dSRodney W. Grimes {
1298df8bae1dSRodney W. Grimes 	/*
1299df8bae1dSRodney W. Grimes 	 * Initialize some paging parameters.
1300df8bae1dSRodney W. Grimes 	 */
1301df8bae1dSRodney W. Grimes 
1302f6b04d2bSDavid Greenman 	cnt.v_interrupt_free_min = 2;
1303f35329acSJohn Dyson 	if (cnt.v_page_count < 2000)
1304f35329acSJohn Dyson 		vm_pageout_page_count = 8;
1305f6b04d2bSDavid Greenman 
1306b182ec9eSJohn Dyson 	vm_pageout_free_page_calc(cnt.v_page_count);
1307ed74321bSDavid Greenman 	/*
13080d94caffSDavid Greenman 	 * free_reserved needs to include enough for the largest swap pager
13090d94caffSDavid Greenman 	 * structures plus enough for any pv_entry structs when paging.
1310ed74321bSDavid Greenman 	 */
1311a15403deSJohn Dyson 	if (cnt.v_free_count > 6144)
13120d94caffSDavid Greenman 		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
1313a15403deSJohn Dyson 	else
1314a15403deSJohn Dyson 		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
13156f2b142eSDavid Greenman 
1316a15403deSJohn Dyson 	if (cnt.v_free_count > 2048) {
1317a15403deSJohn Dyson 		cnt.v_cache_min = cnt.v_free_target;
1318a15403deSJohn Dyson 		cnt.v_cache_max = 2 * cnt.v_cache_min;
1319a15403deSJohn Dyson 		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
13200d94caffSDavid Greenman 	} else {
13210d94caffSDavid Greenman 		cnt.v_cache_min = 0;
13220d94caffSDavid Greenman 		cnt.v_cache_max = 0;
13236f2b142eSDavid Greenman 		cnt.v_inactive_target = cnt.v_free_count / 4;
13240d94caffSDavid Greenman 	}
1325e47ed70bSJohn Dyson 	if (cnt.v_inactive_target > cnt.v_free_count / 3)
1326e47ed70bSJohn Dyson 		cnt.v_inactive_target = cnt.v_free_count / 3;
1327df8bae1dSRodney W. Grimes 
1328df8bae1dSRodney W. Grimes 	/* XXX does not really belong here */
1329df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
1330df8bae1dSRodney W. Grimes 		vm_page_max_wired = cnt.v_free_count / 3;
1331df8bae1dSRodney W. Grimes 
1332dc2efb27SJohn Dyson 	if (vm_pageout_stats_max == 0)
1333dc2efb27SJohn Dyson 		vm_pageout_stats_max = cnt.v_free_target;
1334dc2efb27SJohn Dyson 
1335dc2efb27SJohn Dyson 	/*
1336dc2efb27SJohn Dyson 	 * Set interval in seconds for stats scan.
1337dc2efb27SJohn Dyson 	 */
1338dc2efb27SJohn Dyson 	if (vm_pageout_stats_interval == 0)
1339bef608bdSJohn Dyson 		vm_pageout_stats_interval = 5;
1340dc2efb27SJohn Dyson 	if (vm_pageout_full_stats_interval == 0)
1341dc2efb27SJohn Dyson 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1342dc2efb27SJohn Dyson 
1343dc2efb27SJohn Dyson 
1344dc2efb27SJohn Dyson 	/*
1345dc2efb27SJohn Dyson 	 * Set maximum free per pass
1346dc2efb27SJohn Dyson 	 */
1347dc2efb27SJohn Dyson 	if (vm_pageout_stats_free_max == 0)
1348bef608bdSJohn Dyson 		vm_pageout_stats_free_max = 5;
1349dc2efb27SJohn Dyson 
1350ceb0cf87SJohn Dyson 	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);
135126f9a767SRodney W. Grimes 
135224a1cce3SDavid Greenman 	swap_pager_swap_init();
1353df8bae1dSRodney W. Grimes 	/*
13540d94caffSDavid Greenman 	 * The pageout daemon is never done, so loop forever.
1355df8bae1dSRodney W. Grimes 	 */
1356df8bae1dSRodney W. Grimes 	while (TRUE) {
1357dc2efb27SJohn Dyson 		int error;
1358b18bfc3dSJohn Dyson 		int s = splvm();
1359f919ebdeSDavid Greenman 		if (!vm_pages_needed ||
1360545901f7SJohn Dyson 			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
1361f919ebdeSDavid Greenman 			vm_pages_needed = 0;
1362dc2efb27SJohn Dyson 			error = tsleep(&vm_pages_needed,
1363dc2efb27SJohn Dyson 				PVM, "psleep", vm_pageout_stats_interval * hz);
1364dc2efb27SJohn Dyson 			if (error && !vm_pages_needed) {
1365dc2efb27SJohn Dyson 				splx(s);
1366dc2efb27SJohn Dyson 				vm_pageout_page_stats();
1367dc2efb27SJohn Dyson 				continue;
1368dc2efb27SJohn Dyson 			}
1369dc2efb27SJohn Dyson 		} else if (vm_pages_needed) {
1370e47ed70bSJohn Dyson 			vm_pages_needed = 0;
1371e47ed70bSJohn Dyson 			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
1372f919ebdeSDavid Greenman 		}
1373e47ed70bSJohn Dyson 
1374b18bfc3dSJohn Dyson 		if (vm_pages_needed)
1375b18bfc3dSJohn Dyson 			cnt.v_pdwakeups++;
1376f919ebdeSDavid Greenman 		vm_pages_needed = 0;
1377f919ebdeSDavid Greenman 		splx(s);
13780d94caffSDavid Greenman 		vm_pageout_scan();
13792d8acc0fSJohn Dyson 		vm_pageout_deficit = 0;
138024a1cce3SDavid Greenman 		wakeup(&cnt.v_free_count);
1381df8bae1dSRodney W. Grimes 	}
1382df8bae1dSRodney W. Grimes }
138326f9a767SRodney W. Grimes 
1384e0c5a895SJohn Dyson void
1385e0c5a895SJohn Dyson pagedaemon_wakeup()
1386e0c5a895SJohn Dyson {
1387e0c5a895SJohn Dyson 	if (!vm_pages_needed && curproc != pageproc) {
1388e0c5a895SJohn Dyson 		vm_pages_needed++;
1389e0c5a895SJohn Dyson 		wakeup(&vm_pages_needed);
1390e0c5a895SJohn Dyson 	}
1391e0c5a895SJohn Dyson }
1392e0c5a895SJohn Dyson 
139338efa82bSJohn Dyson #if !defined(NO_SWAPPING)
13945afce282SDavid Greenman static void
13955afce282SDavid Greenman vm_req_vmdaemon()
13965afce282SDavid Greenman {
13975afce282SDavid Greenman 	static int lastrun = 0;
13985afce282SDavid Greenman 
1399b18bfc3dSJohn Dyson 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
14005afce282SDavid Greenman 		wakeup(&vm_daemon_needed);
14015afce282SDavid Greenman 		lastrun = ticks;
14025afce282SDavid Greenman 	}
14035afce282SDavid Greenman }
14045afce282SDavid Greenman 
14052b14f991SJulian Elischer static void
14064f9fb771SBruce Evans vm_daemon()
14070d94caffSDavid Greenman {
14082fe6e4d7SDavid Greenman 	struct proc *p;
14090d94caffSDavid Greenman 
14102fe6e4d7SDavid Greenman 	while (TRUE) {
1411e8f36785SJohn Dyson 		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
14124c1f8ee9SDavid Greenman 		if (vm_pageout_req_swapout) {
1413ceb0cf87SJohn Dyson 			swapout_procs(vm_pageout_req_swapout);
14144c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 0;
14154c1f8ee9SDavid Greenman 		}
14162fe6e4d7SDavid Greenman 		/*
14170d94caffSDavid Greenman 		 * scan the processes for exceeding their rlimits or if
14180d94caffSDavid Greenman 		 * process is swapped out -- deactivate pages
14192fe6e4d7SDavid Greenman 		 */
14202fe6e4d7SDavid Greenman 
14211b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1422fe2144fdSLuoqi Chen 			vm_pindex_t limit, size;
14232fe6e4d7SDavid Greenman 
14242fe6e4d7SDavid Greenman 			/*
14252fe6e4d7SDavid Greenman 			 * if this is a system process or if we have already
14262fe6e4d7SDavid Greenman 			 * looked at this process, skip it.
14272fe6e4d7SDavid Greenman 			 */
14282fe6e4d7SDavid Greenman 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
14292fe6e4d7SDavid Greenman 				continue;
14302fe6e4d7SDavid Greenman 			}
14312fe6e4d7SDavid Greenman 			/*
14322fe6e4d7SDavid Greenman 			 * if the process is in a non-running type state,
14332fe6e4d7SDavid Greenman 			 * don't touch it.
14342fe6e4d7SDavid Greenman 			 */
14352fe6e4d7SDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
14362fe6e4d7SDavid Greenman 				continue;
14372fe6e4d7SDavid Greenman 			}
14382fe6e4d7SDavid Greenman 			/*
14392fe6e4d7SDavid Greenman 			 * get a limit
14402fe6e4d7SDavid Greenman 			 */
1441fe2144fdSLuoqi Chen 			limit = OFF_TO_IDX(
1442fe2144fdSLuoqi Chen 			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
1443fe2144fdSLuoqi Chen 				p->p_rlimit[RLIMIT_RSS].rlim_max));
14442fe6e4d7SDavid Greenman 
14452fe6e4d7SDavid Greenman 			/*
14460d94caffSDavid Greenman 			 * let processes that are swapped out really be
14470d94caffSDavid Greenman 			 * swapped out set the limit to nothing (will force a
14480d94caffSDavid Greenman 			 * swap-out.)
14492fe6e4d7SDavid Greenman 			 */
14502fe6e4d7SDavid Greenman 			if ((p->p_flag & P_INMEM) == 0)
14510d94caffSDavid Greenman 				limit = 0;	/* XXX */
14522fe6e4d7SDavid Greenman 
1453fe2144fdSLuoqi Chen 			size = vmspace_resident_count(p->p_vmspace);
14542fe6e4d7SDavid Greenman 			if (limit >= 0 && size >= limit) {
1455fe2144fdSLuoqi Chen 				vm_pageout_map_deactivate_pages(
1456fe2144fdSLuoqi Chen 				    &p->p_vmspace->vm_map, limit);
14572fe6e4d7SDavid Greenman 			}
14582fe6e4d7SDavid Greenman 		}
145924a1cce3SDavid Greenman 	}
14602fe6e4d7SDavid Greenman }
146138efa82bSJohn Dyson #endif
1462