/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.129 1998/10/31 17:21:31 peter Exp $
 */

/*
 *	The proverbial page-out daemon.
 */
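
/*
 * In outline: each pass of vm_pageout_scan() below frees or caches clean
 * inactive pages, launders a bounded number of dirty ones (max_page_launder),
 * deactivates active pages to refill the inactive queue, keeps a small
 * reserve of truly free pages for interrupt-time allocations, and, as a
 * last resort when swap is exhausted, kills the biggest process.
 */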

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
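
/*
 * The SYSINIT_KT above runs kproc_start() at boot (SI_SUB_KTHREAD_PAGE),
 * forking the "pagedaemon" kernel process, which then enters vm_pageout().
 */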

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon __P((void));
static struct	proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

static int max_page_launder=100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
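
/*
 * All of the RW knobs above are runtime-tunable with sysctl(8); the OID
 * names follow from the _vm parent and the declarations above, e.g.:
 *
 *	sysctl vm.max_page_launder		# read the per-pass limit
 *	sysctl -w vm.max_page_launder=200	# permit more laundering per pass
 */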


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);
void pmap_collect(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page before then.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

#if 0
	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;
#endif

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

#if 0
	/*
	 * XXX REMOVED XXX.  vm_object_collapse() can block, which can
	 * change the page state.  Calling vm_object_collapse() might also
	 * destroy or rename the page because we have not busied it yet!!!
	 * So this code segment is removed.
	 */
	/*
	 * Try collapsing before it's too late.   XXX huh?  Why are we doing
	 * this here?
	 */
	if (object->backing_object) {
		vm_object_collapse(object);
	}
#endif

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
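
	/*
	 * Cluster-buffer layout: mc[] has 2*vm_pageout_page_count slots and
	 * the target page sits at the midpoint.  Forward neighbors fill the
	 * slots above it, backward neighbors fill the slots below it, and
	 * page_base tracks the lowest occupied slot, so the contiguous run
	 * mc[page_base .. page_base + pageout_count - 1] is what ultimately
	 * gets handed to vm_pageout_flush().
	 */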
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following holds:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 */

	for (i = 0; i < count; i++) {
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * the page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
		}
	}
	return numpagedout;
}
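
/*
 * Note that the value returned by vm_pageout_flush() counts both pages
 * written synchronously (VM_PAGER_OK) and pages whose I/O is still in
 * flight (VM_PAGER_PEND); PEND pages remain busy and keep their
 * paging-in-progress reference until the pager completes the I/O.
 */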

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

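			/*
			 * pmap_ts_referenced() reports how many of the
			 * page's mappings had their hardware reference bit
			 * set (clearing the bits as a side effect);
			 * PG_REFERENCED is the software-maintained summary
			 * of the same information.
			 */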
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
				(p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

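	/*
	 * Note the LK_NOWAIT try-lock: if the map is contended we simply
	 * skip it this pass rather than stall the pagedaemon waiting for it.
	 */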
	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
				((bigobj == NULL) ||
				 (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			break;
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	int launder_loop = 0;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */

	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage_init;
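
	/*
	 * Illustration (numbers made up): with v_free_target = 1024,
	 * v_cache_min = 256, v_free_count = 300 and v_cache_count = 200,
	 * page_shortage = (1024 + 256) - (300 + 200) = 780 pages that this
	 * pass should free or cache, plus any accumulated deficit.
	 */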

	/*
	 * Figure out what to do with dirty pages when they are encountered.
	 * Assume that 1/3 of the pages on the inactive list are clean.  If
	 * we think we can reach our target, disable laundering (do not
	 * clean any dirty pages).  If we miss the target we will loop back
	 * up and do a laundering run.
	 */

	if (cnt.v_inactive_count / 3 > page_shortage) {
		maxlaunder = 0;
		launder_loop = 0;
	} else {
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		launder_loop = 1;
	}

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (
	    m = TAILQ_FIRST(&vm_page_queue_inactive);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next
	) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			m->dirty = VM_PAGE_BITS_ALL;
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
					(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);

			}
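
			/*
			 * In other words: swap-backed pages are laundered
			 * unless deferred or disabled; when only deferred,
			 * they are still laundered once free + cache memory
			 * drops below v_free_min (the emergency case).
			 */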
82470111b90SJohn Dyson 
82570111b90SJohn Dyson 			/*
8261c7c3c6aSMatthew Dillon 			 * We don't bother paging objects that are "dead".
8271c7c3c6aSMatthew Dillon 			 * Those objects are in a "rundown" state.
82870111b90SJohn Dyson 			 */
82970111b90SJohn Dyson 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
83012ac6a1dSJohn Dyson 				s = splvm();
83112ac6a1dSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
83212ac6a1dSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
83312ac6a1dSJohn Dyson 				splx(s);
83412ac6a1dSJohn Dyson 				continue;
83512ac6a1dSJohn Dyson 			}
83612ac6a1dSJohn Dyson 
8371c7c3c6aSMatthew Dillon 			/*
8381c7c3c6aSMatthew Dillon 			 * For now we protect against potential memory
8391c7c3c6aSMatthew Dillon 			 * deadlocks by requiring significant memory to be
8401c7c3c6aSMatthew Dillon 			 * free if the object is not OBJT_DEFAULT or OBJT_SWAP.
8411c7c3c6aSMatthew Dillon 			 * We do not 'trust' any other object type to operate
8421c7c3c6aSMatthew Dillon 			 * with low memory, not even OBJT_DEVICE.  The VM
8431c7c3c6aSMatthew Dillon 			 * allocator will special case allocations done by
8441c7c3c6aSMatthew Dillon 			 * the pageout daemon so the check below actually
8451c7c3c6aSMatthew Dillon 			 * does have some hysteresis in it.  It isn't the best
8461c7c3c6aSMatthew Dillon 			 * solution, though.
8471c7c3c6aSMatthew Dillon 			 */
8481c7c3c6aSMatthew Dillon 
8491c7c3c6aSMatthew Dillon 			if (
8501c7c3c6aSMatthew Dillon 			    object->type != OBJT_DEFAULT &&
8511c7c3c6aSMatthew Dillon 			    object->type != OBJT_SWAP &&
8521c7c3c6aSMatthew Dillon 			    cnt.v_free_count < cnt.v_free_reserved
8531c7c3c6aSMatthew Dillon 			) {
8541c7c3c6aSMatthew Dillon 				s = splvm();
8551c7c3c6aSMatthew Dillon 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
8561c7c3c6aSMatthew Dillon 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
8571c7c3c6aSMatthew Dillon 				splx(s);
8581c7c3c6aSMatthew Dillon 				continue;
8591c7c3c6aSMatthew Dillon 			}
8601c7c3c6aSMatthew Dillon 
8611c7c3c6aSMatthew Dillon 			/*
8621c7c3c6aSMatthew Dillon 			 * Presumably we have sufficient free memory to do
8631c7c3c6aSMatthew Dillon 			 * the more sophisticated checks and locking required
8641c7c3c6aSMatthew Dillon 			 * for vnodes.
8651c7c3c6aSMatthew Dillon 			 *
8661c7c3c6aSMatthew Dillon 			 * The object is already known NOT to be dead.  The
8671c7c3c6aSMatthew Dillon 			 * vget() may still block, though, because
8681c7c3c6aSMatthew Dillon 			 * VOP_ISLOCKED() doesn't check to see if an inode
8691c7c3c6aSMatthew Dillon 			 * (v_data) is associated with the vnode.  If it isn't,
8701c7c3c6aSMatthew Dillon 			 * vget() will load in it from disk.  Worse, vget()
8711c7c3c6aSMatthew Dillon 			 * may actually get stuck waiting on "inode" if another
8721c7c3c6aSMatthew Dillon 			 * process is in the process of bringing the inode in.
8731c7c3c6aSMatthew Dillon 			 * This is bad news for us either way.
8741c7c3c6aSMatthew Dillon 			 *
8751c7c3c6aSMatthew Dillon 			 * So for the moment we check v_data == NULL as a
8761c7c3c6aSMatthew Dillon 			 * workaround.  This means that vnodes which do not
8771c7c3c6aSMatthew Dillon 			 * use v_data in the way we expect probably will not
8781c7c3c6aSMatthew Dillon 			 * wind up being paged out by the pager and it will be
8791c7c3c6aSMatthew Dillon 			 * up to the syncer to get them.  That's better then
8801c7c3c6aSMatthew Dillon 			 * us blocking here.
8811c7c3c6aSMatthew Dillon 			 *
8821c7c3c6aSMatthew Dillon 			 * This whole code section is bogus - we need to fix
8831c7c3c6aSMatthew Dillon 			 * the vnode pager to handle vm_page_t's without us
8841c7c3c6aSMatthew Dillon 			 * having to do any sophisticated VOP tests.
8851c7c3c6aSMatthew Dillon 			 */
8861c7c3c6aSMatthew Dillon 
8871c7c3c6aSMatthew Dillon 			if (object->type == OBJT_VNODE) {
88824a1cce3SDavid Greenman 				vp = object->handle;
8891c7c3c6aSMatthew Dillon 
890996c772fSJohn Dyson 				if (VOP_ISLOCKED(vp) ||
8911c7c3c6aSMatthew Dillon 				    vp->v_data == NULL ||
89247221757SJohn Dyson 				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
893b182ec9eSJohn Dyson 					if ((m->queue == PQ_INACTIVE) &&
894b182ec9eSJohn Dyson 						(m->hold_count == 0) &&
895b182ec9eSJohn Dyson 						(m->busy == 0) &&
896b182ec9eSJohn Dyson 						(m->flags & PG_BUSY) == 0) {
897f35329acSJohn Dyson 						s = splvm();
89885a376ebSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
89985a376ebSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
900f35329acSJohn Dyson 						splx(s);
90185a376ebSJohn Dyson 					}
902aef922f5SJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
903925a3a41SJohn Dyson 						vnodes_skipped++;
904b182ec9eSJohn Dyson 					continue;
90585a376ebSJohn Dyson 				}
906b182ec9eSJohn Dyson 
907f35329acSJohn Dyson 				/*
908f35329acSJohn Dyson 				 * The page might have been moved to another queue
909f35329acSJohn Dyson 				 * during potential blocking in vget() above.
910f35329acSJohn Dyson 				 */
911b182ec9eSJohn Dyson 				if (m->queue != PQ_INACTIVE) {
912b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
913925a3a41SJohn Dyson 						vnodes_skipped++;
914b182ec9eSJohn Dyson 					vput(vp);
915b182ec9eSJohn Dyson 					continue;
916b182ec9eSJohn Dyson 				}
917b182ec9eSJohn Dyson 
918f35329acSJohn Dyson 				/*
919f35329acSJohn Dyson 				 * The page may have been busied during the blocking in
920f35329acSJohn Dyson 				 * vput();  We don't move the page back onto the end of
921f35329acSJohn Dyson 				 * the queue so that statistics are more correct if we don't.
922f35329acSJohn Dyson 				 */
923b182ec9eSJohn Dyson 				if (m->busy || (m->flags & PG_BUSY)) {
924b182ec9eSJohn Dyson 					vput(vp);
925b182ec9eSJohn Dyson 					continue;
926b182ec9eSJohn Dyson 				}
927b182ec9eSJohn Dyson 
928f35329acSJohn Dyson 				/*
929f35329acSJohn Dyson 				 * If the page has become held, then skip it
930f35329acSJohn Dyson 				 */
931b182ec9eSJohn Dyson 				if (m->hold_count) {
932f35329acSJohn Dyson 					s = splvm();
933b182ec9eSJohn Dyson 					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
934b182ec9eSJohn Dyson 					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
935f35329acSJohn Dyson 					splx(s);
936b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
937925a3a41SJohn Dyson 						vnodes_skipped++;
938b182ec9eSJohn Dyson 					vput(vp);
939f6b04d2bSDavid Greenman 					continue;
940f6b04d2bSDavid Greenman 				}
941f6b04d2bSDavid Greenman 			}
942f6b04d2bSDavid Greenman 
9430d94caffSDavid Greenman 			/*
9440d94caffSDavid Greenman 			 * If a page is dirty, then it is either being washed
9450d94caffSDavid Greenman 			 * (but not yet cleaned) or it is still in the
9460d94caffSDavid Greenman 			 * laundry.  If it is still in the laundry, then we
9470d94caffSDavid Greenman 			 * start the cleaning operation.
9480d94caffSDavid Greenman 			 */
9498f9110f6SJohn Dyson 			written = vm_pageout_clean(m);
950f6b04d2bSDavid Greenman 			if (vp)
951f6b04d2bSDavid Greenman 				vput(vp);
952f6b04d2bSDavid Greenman 
9530d94caffSDavid Greenman 			maxlaunder -= written;
9540d94caffSDavid Greenman 		}
955df8bae1dSRodney W. Grimes 	}
95626f9a767SRodney W. Grimes 
957df8bae1dSRodney W. Grimes 	/*
9581c7c3c6aSMatthew Dillon 	 * If we still have a page shortage and we didn't launder anything,
9591c7c3c6aSMatthew Dillon 	 * run the inactive scan again and launder something this time.
960df8bae1dSRodney W. Grimes 	 */
9611c7c3c6aSMatthew Dillon 
9621c7c3c6aSMatthew Dillon 	if (launder_loop == 0 && page_shortage > 0) {
9631c7c3c6aSMatthew Dillon 		launder_loop = 1;
9641c7c3c6aSMatthew Dillon 		maxlaunder =
9651c7c3c6aSMatthew Dillon 		    (cnt.v_inactive_target > max_page_launder) ?
9661c7c3c6aSMatthew Dillon 		    max_page_launder : cnt.v_inactive_target;
9671c7c3c6aSMatthew Dillon 		goto rescan0;
9681c7c3c6aSMatthew Dillon 	}
9691c7c3c6aSMatthew Dillon 
9701c7c3c6aSMatthew Dillon 	/*
9711c7c3c6aSMatthew Dillon 	 * Compute the page shortage from the point of view of having to
9721c7c3c6aSMatthew Dillon 	 * move pages from the active queue to the inactive queue.
9731c7c3c6aSMatthew Dillon 	 */
9741c7c3c6aSMatthew Dillon 
975b182ec9eSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
9760d94caffSDavid Greenman 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
977b182ec9eSJohn Dyson 	page_shortage += addl_page_shortage;
9781c7c3c6aSMatthew Dillon 
9791c7c3c6aSMatthew Dillon 	/*
9801c7c3c6aSMatthew Dillon 	 * Scan the active queue for things we can deactivate
9811c7c3c6aSMatthew Dillon 	 */
98226f9a767SRodney W. Grimes 
983b18bfc3dSJohn Dyson 	pcount = cnt.v_active_count;
984b18bfc3dSJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
9851c7c3c6aSMatthew Dillon 
986b18bfc3dSJohn Dyson 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
987f35329acSJohn Dyson 
9887e006499SJohn Dyson 		/*
9897e006499SJohn Dyson 		 * This is a consistancy check, and should likely be a panic
9907e006499SJohn Dyson 		 * or warning.
9917e006499SJohn Dyson 		 */
992f35329acSJohn Dyson 		if (m->queue != PQ_ACTIVE) {
99338efa82bSJohn Dyson 			break;
994f35329acSJohn Dyson 		}
995f35329acSJohn Dyson 
996b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
997df8bae1dSRodney W. Grimes 		/*
99826f9a767SRodney W. Grimes 		 * Don't deactivate pages that are busy.
999df8bae1dSRodney W. Grimes 		 */
1000a647a309SDavid Greenman 		if ((m->busy != 0) ||
10010d94caffSDavid Greenman 		    (m->flags & PG_BUSY) ||
1002f6b04d2bSDavid Greenman 		    (m->hold_count != 0)) {
1003f35329acSJohn Dyson 			s = splvm();
10046d40c3d3SDavid Greenman 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
10056d40c3d3SDavid Greenman 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1006f35329acSJohn Dyson 			splx(s);
100726f9a767SRodney W. Grimes 			m = next;
100826f9a767SRodney W. Grimes 			continue;
1009df8bae1dSRodney W. Grimes 		}
1010b18bfc3dSJohn Dyson 
1011b18bfc3dSJohn Dyson 		/*
1012b18bfc3dSJohn Dyson 		 * The count for pagedaemon pages is done after checking the
1013b18bfc3dSJohn Dyson 		 * page for eligbility...
1014b18bfc3dSJohn Dyson 		 */
1015b18bfc3dSJohn Dyson 		cnt.v_pdpages++;
1016ef743ce6SJohn Dyson 
10177e006499SJohn Dyson 		/*
10187e006499SJohn Dyson 		 * Check to see "how much" the page has been used.
10197e006499SJohn Dyson 		 */
10207e006499SJohn Dyson 		actcount = 0;
1021ef743ce6SJohn Dyson 		if (m->object->ref_count != 0) {
1022ef743ce6SJohn Dyson 			if (m->flags & PG_REFERENCED) {
10237e006499SJohn Dyson 				actcount += 1;
10240d94caffSDavid Greenman 			}
10257e006499SJohn Dyson 			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
10267e006499SJohn Dyson 			if (actcount) {
10277e006499SJohn Dyson 				m->act_count += ACT_ADVANCE + actcount;
102838efa82bSJohn Dyson 				if (m->act_count > ACT_MAX)
102938efa82bSJohn Dyson 					m->act_count = ACT_MAX;
103038efa82bSJohn Dyson 			}
1031b18bfc3dSJohn Dyson 		}
1032ef743ce6SJohn Dyson 
10337e006499SJohn Dyson 		/*
10347e006499SJohn Dyson 		 * Since we have "tested" this bit, we need to clear it now.
10357e006499SJohn Dyson 		 */
1036e69763a3SDoug Rabson 		vm_page_flag_clear(m, PG_REFERENCED);
1037ef743ce6SJohn Dyson 
10387e006499SJohn Dyson 		/*
10397e006499SJohn Dyson 		 * Only if an object is currently being used, do we use the
10407e006499SJohn Dyson 		 * page activation count stats.
10417e006499SJohn Dyson 		 */
10427e006499SJohn Dyson 		if (actcount && (m->object->ref_count != 0)) {
1043f35329acSJohn Dyson 			s = splvm();
104426f9a767SRodney W. Grimes 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
104526f9a767SRodney W. Grimes 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1046f35329acSJohn Dyson 			splx(s);
104726f9a767SRodney W. Grimes 		} else {
104838efa82bSJohn Dyson 			m->act_count -= min(m->act_count, ACT_DECLINE);
104938efa82bSJohn Dyson 			if (vm_pageout_algorithm_lru ||
105038efa82bSJohn Dyson 				(m->object->ref_count == 0) || (m->act_count == 0)) {
1051925a3a41SJohn Dyson 				page_shortage--;
1052d4a272dbSJohn Dyson 				if (m->object->ref_count == 0) {
1053ef743ce6SJohn Dyson 					vm_page_protect(m, VM_PROT_NONE);
1054d4a272dbSJohn Dyson 					if (m->dirty == 0)
10550d94caffSDavid Greenman 						vm_page_cache(m);
1056d4a272dbSJohn Dyson 					else
1057d4a272dbSJohn Dyson 						vm_page_deactivate(m);
10580d94caffSDavid Greenman 				} else {
105926f9a767SRodney W. Grimes 					vm_page_deactivate(m);
1060df8bae1dSRodney W. Grimes 				}
106138efa82bSJohn Dyson 			} else {
106238efa82bSJohn Dyson 				s = splvm();
106338efa82bSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
106438efa82bSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
106538efa82bSJohn Dyson 				splx(s);
106638efa82bSJohn Dyson 			}
1067df8bae1dSRodney W. Grimes 		}
106826f9a767SRodney W. Grimes 		m = next;
106926f9a767SRodney W. Grimes 	}
1070df8bae1dSRodney W. Grimes 
1071f35329acSJohn Dyson 	s = splvm();
10721c7c3c6aSMatthew Dillon 
1073df8bae1dSRodney W. Grimes 	/*
10740d94caffSDavid Greenman 	 * We try to maintain some *really* free pages; this allows interrupt
10751c7c3c6aSMatthew Dillon 	 * code to be guaranteed space.  Since both the cache and free queues
10761c7c3c6aSMatthew Dillon 	 * are considered basically 'free', moving pages from cache to free
10771c7c3c6aSMatthew Dillon 	 * does not affect other calculations.
1078df8bae1dSRodney W. Grimes 	 */
10791c7c3c6aSMatthew Dillon 
1080a1f6d91cSDavid Greenman 	while (cnt.v_free_count < cnt.v_free_reserved) {
10815070c7f8SJohn Dyson 		static int cache_rover = 0;
10825070c7f8SJohn Dyson 		m = vm_page_list_find(PQ_CACHE, cache_rover);
10830d94caffSDavid Greenman 		if (!m)
10840d94caffSDavid Greenman 			break;
10855070c7f8SJohn Dyson 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
1086925a3a41SJohn Dyson 		vm_pageout_page_free(m);
10870bb3a0d2SDavid Greenman 		cnt.v_dfree++;
108826f9a767SRodney W. Grimes 	}
1089f35329acSJohn Dyson 	splx(s);
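	/*
	 * A note on the rover above: PQ_PRIME2 is odd, hence relatively
	 * prime to the power-of-two number of cache queues (PQ_L2_SIZE),
	 * so stepping cache_rover by PQ_PRIME2 (mod PQ_L2_SIZE) visits
	 * every cache queue color before repeating, spreading the frees
	 * evenly across the colors.
	 */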
10905663e6deSDavid Greenman 
1091ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING)
1092ceb0cf87SJohn Dyson 	/*
1093ceb0cf87SJohn Dyson 	 * Idle process swapout -- run once per second.
1094ceb0cf87SJohn Dyson 	 */
1095ceb0cf87SJohn Dyson 	if (vm_swap_idle_enabled) {
1096ceb0cf87SJohn Dyson 		static long lsec;
1097227ee8a1SPoul-Henning Kamp 		if (time_second != lsec) {
1098ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
1099ceb0cf87SJohn Dyson 			vm_req_vmdaemon();
1100227ee8a1SPoul-Henning Kamp 			lsec = time_second;
1101ceb0cf87SJohn Dyson 		}
1102ceb0cf87SJohn Dyson 	}
1103ceb0cf87SJohn Dyson #endif
1104ceb0cf87SJohn Dyson 
11055663e6deSDavid Greenman 	/*
1106f6b04d2bSDavid Greenman 	 * If we didn't get enough free pages and we have skipped a vnode
11074c1f8ee9SDavid Greenman 	 * in a writeable object, wake up the sync daemon.  Also kick off
11084c1f8ee9SDavid Greenman 	 * swapout if we did not get enough free pages.
1109f6b04d2bSDavid Greenman 	 */
1110bd7e5f99SJohn Dyson 	if ((cnt.v_cache_count + cnt.v_free_count) <
1111bd7e5f99SJohn Dyson 		(cnt.v_free_target + cnt.v_cache_min) ) {
1112f6b04d2bSDavid Greenman 		if (vnodes_skipped &&
1113f6b04d2bSDavid Greenman 		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
1114f6b04d2bSDavid Greenman 			if (!vfs_update_wakeup) {
1115f6b04d2bSDavid Greenman 				vfs_update_wakeup = 1;
111624a1cce3SDavid Greenman 				wakeup(&vfs_update_wakeup);
1117f6b04d2bSDavid Greenman 			}
1118f6b04d2bSDavid Greenman 		}
111938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1120ceb0cf87SJohn Dyson 		if (vm_swap_enabled &&
112138efa82bSJohn Dyson 			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
11224c1f8ee9SDavid Greenman 			vm_req_vmdaemon();
1123ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
11244c1f8ee9SDavid Greenman 		}
11255afce282SDavid Greenman #endif
11264c1f8ee9SDavid Greenman 	}
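	/*
	 * A worked example of the thresholds above, with hypothetical
	 * values: if v_free_target is 512 and v_cache_min is 1024, this
	 * block is entered whenever free + cache pages total fewer than
	 * 1536; the sync daemon is woken only if vnodes were skipped and
	 * the total is also below v_free_min, and swapout is requested
	 * only if the total is below v_free_target itself.
	 */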
11274c1f8ee9SDavid Greenman 
1128f6b04d2bSDavid Greenman 	/*
11290d94caffSDavid Greenman 	 * Make sure that we have swap space; if we are low on both memory
11300d94caffSDavid Greenman 	 * and swap, kill the biggest process.
11315663e6deSDavid Greenman 	 */
11325663e6deSDavid Greenman 	if ((vm_swap_size == 0 || swap_pager_full) &&
11330d94caffSDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
11345663e6deSDavid Greenman 		bigproc = NULL;
11355663e6deSDavid Greenman 		bigsize = 0;
11361b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
11375663e6deSDavid Greenman 			/*
11385663e6deSDavid Greenman 			 * if this is a system process, init, or a low-pid process, skip it
11395663e6deSDavid Greenman 			 */
114079221631SDavid Greenman 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
114179221631SDavid Greenman 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
11425663e6deSDavid Greenman 				continue;
11435663e6deSDavid Greenman 			}
11445663e6deSDavid Greenman 			/*
11455663e6deSDavid Greenman 			 * if the process is in a non-running type of state,
11465663e6deSDavid Greenman 			 * don't touch it.
11475663e6deSDavid Greenman 			 */
11485663e6deSDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
11495663e6deSDavid Greenman 				continue;
11505663e6deSDavid Greenman 			}
11515663e6deSDavid Greenman 			/*
11525663e6deSDavid Greenman 			 * get the process size
11535663e6deSDavid Greenman 			 */
11545663e6deSDavid Greenman 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
11555663e6deSDavid Greenman 			/*
11565663e6deSDavid Greenman 			 * if this process is bigger than the biggest one,
11575663e6deSDavid Greenman 			 * remember it.
11585663e6deSDavid Greenman 			 */
11595663e6deSDavid Greenman 			if (size > bigsize) {
11605663e6deSDavid Greenman 				bigproc = p;
11615663e6deSDavid Greenman 				bigsize = size;
11625663e6deSDavid Greenman 			}
11635663e6deSDavid Greenman 		}
11645663e6deSDavid Greenman 		if (bigproc != NULL) {
1165729b1e51SDavid Greenman 			killproc(bigproc, "out of swap space");
11665663e6deSDavid Greenman 			bigproc->p_estcpu = 0;
11675663e6deSDavid Greenman 			bigproc->p_nice = PRIO_MIN;
11685663e6deSDavid Greenman 			resetpriority(bigproc);
116924a1cce3SDavid Greenman 			wakeup(&cnt.v_free_count);
11705663e6deSDavid Greenman 		}
11715663e6deSDavid Greenman 	}
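	/*
	 * Resetting p_estcpu and giving the victim PRIO_MIN above is
	 * presumably intended to let it be scheduled as soon as possible,
	 * so that it exits quickly and releases its pages.
	 */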
117226f9a767SRodney W. Grimes 	return force_wakeup;
117326f9a767SRodney W. Grimes }
117426f9a767SRodney W. Grimes 
1175dc2efb27SJohn Dyson /*
1176dc2efb27SJohn Dyson  * This routine tries to maintain the pseudo-LRU active queue,
1177dc2efb27SJohn Dyson  * so that during long periods of time when there is no paging,
1178dc2efb27SJohn Dyson  * some statistics accumulation still occurs.  This code
1179dc2efb27SJohn Dyson  * helps the situation where paging just starts to occur.
1180dc2efb27SJohn Dyson  */
1181dc2efb27SJohn Dyson static void
1182dc2efb27SJohn Dyson vm_pageout_page_stats()
1183dc2efb27SJohn Dyson {
1184dc2efb27SJohn Dyson 	int s;
1185dc2efb27SJohn Dyson 	vm_page_t m,next;
1186dc2efb27SJohn Dyson 	int pcount,tpcount;		/* Number of pages to check */
1187dc2efb27SJohn Dyson 	static int fullintervalcount = 0;
1188bef608bdSJohn Dyson 	int page_shortage;
1189bef608bdSJohn Dyson 
1190bef608bdSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1191bef608bdSJohn Dyson 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1192bef608bdSJohn Dyson 	if (page_shortage <= 0)
1193bef608bdSJohn Dyson 		return;
1194dc2efb27SJohn Dyson 
1195dc2efb27SJohn Dyson 	pcount = cnt.v_active_count;
1196dc2efb27SJohn Dyson 	fullintervalcount += vm_pageout_stats_interval;
1197dc2efb27SJohn Dyson 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1198dc2efb27SJohn Dyson 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1199dc2efb27SJohn Dyson 		if (pcount > tpcount)
1200dc2efb27SJohn Dyson 			pcount = tpcount;
1201dc2efb27SJohn Dyson 	}
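	/*
	 * Worked example of the partial-scan sizing above, with
	 * hypothetical counts: if v_page_count is 16384, v_active_count
	 * is 8192, and vm_pageout_stats_max is 1024, then
	 * tpcount = (1024 * 8192) / 16384 == 512, so only 512 active
	 * pages are examined on this pass.
	 */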
1202dc2efb27SJohn Dyson 
1203dc2efb27SJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
1204dc2efb27SJohn Dyson 	while ((m != NULL) && (pcount-- > 0)) {
12057e006499SJohn Dyson 		int actcount;
1206dc2efb27SJohn Dyson 
1207dc2efb27SJohn Dyson 		if (m->queue != PQ_ACTIVE) {
1208dc2efb27SJohn Dyson 			break;
1209dc2efb27SJohn Dyson 		}
1210dc2efb27SJohn Dyson 
1211dc2efb27SJohn Dyson 		next = TAILQ_NEXT(m, pageq);
1212dc2efb27SJohn Dyson 		/*
1213dc2efb27SJohn Dyson 		 * Don't deactivate pages that are busy.
1214dc2efb27SJohn Dyson 		 */
1215dc2efb27SJohn Dyson 		if ((m->busy != 0) ||
1216dc2efb27SJohn Dyson 		    (m->flags & PG_BUSY) ||
1217dc2efb27SJohn Dyson 		    (m->hold_count != 0)) {
1218dc2efb27SJohn Dyson 			s = splvm();
1219dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1220dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1221dc2efb27SJohn Dyson 			splx(s);
1222dc2efb27SJohn Dyson 			m = next;
1223dc2efb27SJohn Dyson 			continue;
1224dc2efb27SJohn Dyson 		}
1225dc2efb27SJohn Dyson 
12267e006499SJohn Dyson 		actcount = 0;
1227dc2efb27SJohn Dyson 		if (m->flags & PG_REFERENCED) {
1228e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
12297e006499SJohn Dyson 			actcount += 1;
1230dc2efb27SJohn Dyson 		}
1231dc2efb27SJohn Dyson 
12327e006499SJohn Dyson 		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
12337e006499SJohn Dyson 		if (actcount) {
12347e006499SJohn Dyson 			m->act_count += ACT_ADVANCE + actcount;
1235dc2efb27SJohn Dyson 			if (m->act_count > ACT_MAX)
1236dc2efb27SJohn Dyson 				m->act_count = ACT_MAX;
1237dc2efb27SJohn Dyson 			s = splvm();
1238dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1239dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1240dc2efb27SJohn Dyson 			splx(s);
1241dc2efb27SJohn Dyson 		} else {
1242dc2efb27SJohn Dyson 			if (m->act_count == 0) {
12437e006499SJohn Dyson 				/*
12447e006499SJohn Dyson 				 * We turn off page access, so that we have more accurate
12457e006499SJohn Dyson 				 * RSS stats.  We don't do this in the normal page deactivation
12467e006499SJohn Dyson 				 * path when the system is loaded VM-wise, because the cost of
12477e006499SJohn Dyson 				 * the large number of page protect operations would be higher
12487e006499SJohn Dyson 				 * than the benefit of doing them.
12497e006499SJohn Dyson 				 */
1250dc2efb27SJohn Dyson 				vm_page_protect(m, VM_PROT_NONE);
1251dc2efb27SJohn Dyson 				vm_page_deactivate(m);
1252dc2efb27SJohn Dyson 			} else {
1253dc2efb27SJohn Dyson 				m->act_count -= min(m->act_count, ACT_DECLINE);
1254dc2efb27SJohn Dyson 				s = splvm();
1255dc2efb27SJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1256dc2efb27SJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1257dc2efb27SJohn Dyson 				splx(s);
1258dc2efb27SJohn Dyson 			}
1259dc2efb27SJohn Dyson 		}
1260dc2efb27SJohn Dyson 
1261dc2efb27SJohn Dyson 		m = next;
1262dc2efb27SJohn Dyson 	}
1263dc2efb27SJohn Dyson }
1264dc2efb27SJohn Dyson 
1265b182ec9eSJohn Dyson static int
1266b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1267b182ec9eSJohn Dyson vm_size_t count;
1268b182ec9eSJohn Dyson {
1269b182ec9eSJohn Dyson 	if (count < cnt.v_page_count)
1270b182ec9eSJohn Dyson 		 return 0;
1271b182ec9eSJohn Dyson 	/*
1272b182ec9eSJohn Dyson 	 * free_reserved needs to include enough for the largest swap pager
1273b182ec9eSJohn Dyson 	 * structures plus enough for any pv_entry structs when paging.
1274b182ec9eSJohn Dyson 	 */
1275b182ec9eSJohn Dyson 	if (cnt.v_page_count > 1024)
1276b182ec9eSJohn Dyson 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1277b182ec9eSJohn Dyson 	else
1278b182ec9eSJohn Dyson 		cnt.v_free_min = 4;
1279f35329acSJohn Dyson 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1280f35329acSJohn Dyson 		cnt.v_interrupt_free_min;
1281f35329acSJohn Dyson 	cnt.v_free_reserved = vm_pageout_page_count +
1282a15403deSJohn Dyson 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1283a2f4a846SJohn Dyson 	cnt.v_free_min += cnt.v_free_reserved;
1284b182ec9eSJohn Dyson 	return 1;
1285b182ec9eSJohn Dyson }
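/*
 * Worked example of the calculation above (illustrative only; it
 * assumes a 4K PAGE_SIZE and 64K MAXBSIZE, and vm_pageout_page_count
 * and PQ_L2_SIZE vary with configuration): for count == 8192 pages
 * (32MB), v_free_min starts at 4 + (8192 - 1024) / 200 == 39,
 * v_pageout_free_min becomes (2 * 65536) / 4096 + v_interrupt_free_min
 * (32 + 2 == 34 with the value set in vm_pageout() below), and
 * v_free_reserved adds 8192 / 768 == 10 pages to the
 * configuration-dependent terms before being folded into v_free_min.
 */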
1286b182ec9eSJohn Dyson 
1287b182ec9eSJohn Dyson 
1288df8bae1dSRodney W. Grimes /*
1289df8bae1dSRodney W. Grimes  *	vm_pageout is the high level pageout daemon.
1290df8bae1dSRodney W. Grimes  */
12912b14f991SJulian Elischer static void
129226f9a767SRodney W. Grimes vm_pageout()
1293df8bae1dSRodney W. Grimes {
1294df8bae1dSRodney W. Grimes 	/*
1295df8bae1dSRodney W. Grimes 	 * Initialize some paging parameters.
1296df8bae1dSRodney W. Grimes 	 */
1297df8bae1dSRodney W. Grimes 
1298f6b04d2bSDavid Greenman 	cnt.v_interrupt_free_min = 2;
1299f35329acSJohn Dyson 	if (cnt.v_page_count < 2000)
1300f35329acSJohn Dyson 		vm_pageout_page_count = 8;
1301f6b04d2bSDavid Greenman 
1302b182ec9eSJohn Dyson 	vm_pageout_free_page_calc(cnt.v_page_count);
1303ed74321bSDavid Greenman 	/*
13040d94caffSDavid Greenman 	 * v_free_target is scaled from v_free_min and v_free_reserved;
13050d94caffSDavid Greenman 	 * machines with more free memory at boot get a larger target.
1306ed74321bSDavid Greenman 	 */
1307a15403deSJohn Dyson 	if (cnt.v_free_count > 6144)
13080d94caffSDavid Greenman 		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
1309a15403deSJohn Dyson 	else
1310a15403deSJohn Dyson 		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
13116f2b142eSDavid Greenman 
1312a15403deSJohn Dyson 	if (cnt.v_free_count > 2048) {
1313a15403deSJohn Dyson 		cnt.v_cache_min = cnt.v_free_target;
1314a15403deSJohn Dyson 		cnt.v_cache_max = 2 * cnt.v_cache_min;
1315a15403deSJohn Dyson 		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
13160d94caffSDavid Greenman 	} else {
13170d94caffSDavid Greenman 		cnt.v_cache_min = 0;
13180d94caffSDavid Greenman 		cnt.v_cache_max = 0;
13196f2b142eSDavid Greenman 		cnt.v_inactive_target = cnt.v_free_count / 4;
13200d94caffSDavid Greenman 	}
1321e47ed70bSJohn Dyson 	if (cnt.v_inactive_target > cnt.v_free_count / 3)
1322e47ed70bSJohn Dyson 		cnt.v_inactive_target = cnt.v_free_count / 3;
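	/*
	 * Illustrative numbers for the sizing above (hypothetical; the
	 * real values depend on boot-time memory): with 8000 free pages,
	 * a v_free_min of 64, and a v_free_reserved of 64, v_free_target
	 * becomes 3 * 64 + 64 == 256, v_cache_min 256, v_cache_max 512,
	 * and v_inactive_target (3 * 256) / 2 == 384, which the clamp
	 * leaves alone since 8000 / 3 is larger.
	 */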
1323df8bae1dSRodney W. Grimes 
1324df8bae1dSRodney W. Grimes 	/* XXX does not really belong here */
1325df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
1326df8bae1dSRodney W. Grimes 		vm_page_max_wired = cnt.v_free_count / 3;
1327df8bae1dSRodney W. Grimes 
1328dc2efb27SJohn Dyson 	if (vm_pageout_stats_max == 0)
1329dc2efb27SJohn Dyson 		vm_pageout_stats_max = cnt.v_free_target;
1330dc2efb27SJohn Dyson 
1331dc2efb27SJohn Dyson 	/*
1332dc2efb27SJohn Dyson 	 * Set interval in seconds for stats scan.
1333dc2efb27SJohn Dyson 	 */
1334dc2efb27SJohn Dyson 	if (vm_pageout_stats_interval == 0)
1335bef608bdSJohn Dyson 		vm_pageout_stats_interval = 5;
1336dc2efb27SJohn Dyson 	if (vm_pageout_full_stats_interval == 0)
1337dc2efb27SJohn Dyson 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1338dc2efb27SJohn Dyson 
1339dc2efb27SJohn Dyson 
1340dc2efb27SJohn Dyson 	/*
1341dc2efb27SJohn Dyson 	 * Set the maximum number of pages to free per pass.
1342dc2efb27SJohn Dyson 	 */
1343dc2efb27SJohn Dyson 	if (vm_pageout_stats_free_max == 0)
1344bef608bdSJohn Dyson 		vm_pageout_stats_free_max = 5;
1345dc2efb27SJohn Dyson 
1346ceb0cf87SJohn Dyson 	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);
134726f9a767SRodney W. Grimes 
134824a1cce3SDavid Greenman 	swap_pager_swap_init();
1349df8bae1dSRodney W. Grimes 	/*
13500d94caffSDavid Greenman 	 * The pageout daemon is never done, so loop forever.
1351df8bae1dSRodney W. Grimes 	 */
1352df8bae1dSRodney W. Grimes 	while (TRUE) {
1353dc2efb27SJohn Dyson 		int error;
1354b18bfc3dSJohn Dyson 		int s = splvm();
1355f919ebdeSDavid Greenman 		if (!vm_pages_needed ||
1356545901f7SJohn Dyson 			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
1357f919ebdeSDavid Greenman 			vm_pages_needed = 0;
1358dc2efb27SJohn Dyson 			error = tsleep(&vm_pages_needed,
1359dc2efb27SJohn Dyson 				PVM, "psleep", vm_pageout_stats_interval * hz);
1360dc2efb27SJohn Dyson 			if (error && !vm_pages_needed) {
1361dc2efb27SJohn Dyson 				splx(s);
1362dc2efb27SJohn Dyson 				vm_pageout_page_stats();
1363dc2efb27SJohn Dyson 				continue;
1364dc2efb27SJohn Dyson 			}
1365dc2efb27SJohn Dyson 		} else if (vm_pages_needed) {
1366e47ed70bSJohn Dyson 			vm_pages_needed = 0;
1367e47ed70bSJohn Dyson 			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
1368f919ebdeSDavid Greenman 		}
1369e47ed70bSJohn Dyson 
1370b18bfc3dSJohn Dyson 		if (vm_pages_needed)
1371b18bfc3dSJohn Dyson 			cnt.v_pdwakeups++;
1372f919ebdeSDavid Greenman 		vm_pages_needed = 0;
1373f919ebdeSDavid Greenman 		splx(s);
13740d94caffSDavid Greenman 		vm_pageout_scan();
13752d8acc0fSJohn Dyson 		vm_pageout_deficit = 0;
137624a1cce3SDavid Greenman 		wakeup(&cnt.v_free_count);
1377df8bae1dSRodney W. Grimes 	}
1378df8bae1dSRodney W. Grimes }
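/*
 * In outline, each iteration of the loop above does one of two
 * things: when there is no memory pressure it sleeps for the stats
 * interval and, on timeout, runs vm_pageout_page_stats() to keep the
 * active queue statistics fresh; when woken for pages it pauses
 * briefly (hz/2) to batch requests, then runs a full
 * vm_pageout_scan() and wakes any threads sleeping on v_free_count.
 */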
137926f9a767SRodney W. Grimes 
1380e0c5a895SJohn Dyson void
1381e0c5a895SJohn Dyson pagedaemon_wakeup()
1382e0c5a895SJohn Dyson {
1383e0c5a895SJohn Dyson 	if (!vm_pages_needed && curproc != pageproc) {
1384e0c5a895SJohn Dyson 		vm_pages_needed++;
1385e0c5a895SJohn Dyson 		wakeup(&vm_pages_needed);
1386e0c5a895SJohn Dyson 	}
1387e0c5a895SJohn Dyson }
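/*
 * pagedaemon_wakeup() is the interface the rest of the VM system uses
 * (e.g. from the page allocation path when free memory runs low) to
 * nudge the pageout daemon; the curproc check keeps the daemon from
 * waking itself.
 */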
1388e0c5a895SJohn Dyson 
138938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
13905afce282SDavid Greenman static void
13915afce282SDavid Greenman vm_req_vmdaemon()
13925afce282SDavid Greenman {
13935afce282SDavid Greenman 	static int lastrun = 0;
13945afce282SDavid Greenman 
1395b18bfc3dSJohn Dyson 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
13965afce282SDavid Greenman 		wakeup(&vm_daemon_needed);
13975afce282SDavid Greenman 		lastrun = ticks;
13985afce282SDavid Greenman 	}
13995afce282SDavid Greenman }
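/*
 * The test above rate-limits wakeups of the vm daemon to roughly one
 * per second; the (ticks < lastrun) clause re-arms the check if the
 * ticks counter has wrapped around.
 */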
14005afce282SDavid Greenman 
14012b14f991SJulian Elischer static void
14024f9fb771SBruce Evans vm_daemon()
14030d94caffSDavid Greenman {
14042fe6e4d7SDavid Greenman 	struct proc *p;
14050d94caffSDavid Greenman 
14062fe6e4d7SDavid Greenman 	while (TRUE) {
1407e8f36785SJohn Dyson 		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
14084c1f8ee9SDavid Greenman 		if (vm_pageout_req_swapout) {
1409ceb0cf87SJohn Dyson 			swapout_procs(vm_pageout_req_swapout);
14104c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 0;
14114c1f8ee9SDavid Greenman 		}
14122fe6e4d7SDavid Greenman 		/*
14130d94caffSDavid Greenman 		 * Scan the processes for those exceeding their RSS rlimit or
14140d94caffSDavid Greenman 		 * those that are swapped out, and deactivate their pages.
14152fe6e4d7SDavid Greenman 		 */
14162fe6e4d7SDavid Greenman 
14171b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
14182fe6e4d7SDavid Greenman 			quad_t limit;
14192fe6e4d7SDavid Greenman 			vm_offset_t size;
14202fe6e4d7SDavid Greenman 
14212fe6e4d7SDavid Greenman 			/*
14222fe6e4d7SDavid Greenman 			 * if this is a system process or a process that is
14232fe6e4d7SDavid Greenman 			 * exiting, skip it.
14242fe6e4d7SDavid Greenman 			 */
14252fe6e4d7SDavid Greenman 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
14262fe6e4d7SDavid Greenman 				continue;
14272fe6e4d7SDavid Greenman 			}
14282fe6e4d7SDavid Greenman 			/*
14292fe6e4d7SDavid Greenman 			 * if the process is in a non-running type of state,
14302fe6e4d7SDavid Greenman 			 * don't touch it.
14312fe6e4d7SDavid Greenman 			 */
14322fe6e4d7SDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
14332fe6e4d7SDavid Greenman 				continue;
14342fe6e4d7SDavid Greenman 			}
14352fe6e4d7SDavid Greenman 			/*
14362fe6e4d7SDavid Greenman 			 * get a limit
14372fe6e4d7SDavid Greenman 			 */
14382fe6e4d7SDavid Greenman 			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
14392fe6e4d7SDavid Greenman 			    p->p_rlimit[RLIMIT_RSS].rlim_max);
14402fe6e4d7SDavid Greenman 
14412fe6e4d7SDavid Greenman 			/*
14420d94caffSDavid Greenman 			 * Let processes that are swapped out really be
14430d94caffSDavid Greenman 			 * swapped out: set the limit to nothing (this will
14440d94caffSDavid Greenman 			 * force a swap-out).
14452fe6e4d7SDavid Greenman 			 */
14462fe6e4d7SDavid Greenman 			if ((p->p_flag & P_INMEM) == 0)
14470d94caffSDavid Greenman 				limit = 0;	/* XXX */
14482fe6e4d7SDavid Greenman 
1449a91c5a7eSJohn Dyson 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
14502fe6e4d7SDavid Greenman 			if (limit >= 0 && size >= limit) {
14512fe6e4d7SDavid Greenman 				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
145238efa82bSJohn Dyson 				    (vm_pindex_t)(limit >> PAGE_SHIFT) );
14532fe6e4d7SDavid Greenman 			}
14542fe6e4d7SDavid Greenman 		}
145524a1cce3SDavid Greenman 	}
14562fe6e4d7SDavid Greenman }
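/*
 * Sketch of the rlimit enforcement above, with hypothetical numbers:
 * a process with an RSS limit of 16MB (4096 4K pages) whose resident
 * set has grown past that gets vm_pageout_map_deactivate_pages()
 * applied to its map with a 4096-page target (limit >> PAGE_SHIFT),
 * deactivating pages until the resident set fits again.
 */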
145738efa82bSJohn Dyson #endif
1458