xref: /freebsd/sys/vm/vm_pageout.c (revision 70111b90163c31900e33d610a0c83c44d0c3ce8f)
1df8bae1dSRodney W. Grimes /*
226f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
326f9a767SRodney W. Grimes  * All rights reserved.
426f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
726f9a767SRodney W. Grimes  * All rights reserved.
8df8bae1dSRodney W. Grimes  *
9df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
10df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
21df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
403c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  *
43df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44df8bae1dSRodney W. Grimes  * All rights reserved.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
49df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
50df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
51df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
52df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61df8bae1dSRodney W. Grimes  *  School of Computer Science
62df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
63df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
66df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
6726f9a767SRodney W. Grimes  *
6870111b90SJohn Dyson  * $Id: vm_pageout.c,v 1.101 1997/12/04 19:00:56 dyson Exp $
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75df8bae1dSRodney W. Grimes #include <sys/param.h>
7626f9a767SRodney W. Grimes #include <sys/systm.h>
77b5e8ce9fSBruce Evans #include <sys/kernel.h>
7826f9a767SRodney W. Grimes #include <sys/proc.h>
7926f9a767SRodney W. Grimes #include <sys/resourcevar.h>
80d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
81f6b04d2bSDavid Greenman #include <sys/vnode.h>
82efeaf95aSDavid Greenman #include <sys/vmmeter.h>
8338efa82bSJohn Dyson #include <sys/sysctl.h>
84df8bae1dSRodney W. Grimes 
85df8bae1dSRodney W. Grimes #include <vm/vm.h>
86efeaf95aSDavid Greenman #include <vm/vm_param.h>
87efeaf95aSDavid Greenman #include <vm/vm_prot.h>
88996c772fSJohn Dyson #include <sys/lock.h>
89efeaf95aSDavid Greenman #include <vm/vm_object.h>
90df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
91efeaf95aSDavid Greenman #include <vm/vm_map.h>
92df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
9324a1cce3SDavid Greenman #include <vm/vm_pager.h>
9405f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
95efeaf95aSDavid Greenman #include <vm/vm_extern.h>
96df8bae1dSRodney W. Grimes 
972b14f991SJulian Elischer /*
982b14f991SJulian Elischer  * System initialization
992b14f991SJulian Elischer  */
1002b14f991SJulian Elischer 
1012b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
1022b14f991SJulian Elischer static void vm_pageout __P((void));
1033af76890SPoul-Henning Kamp static int vm_pageout_clean __P((vm_page_t, int));
1043af76890SPoul-Henning Kamp static int vm_pageout_scan __P((void));
105f35329acSJohn Dyson static int vm_pageout_free_page_calc __P((vm_size_t count));
1062b14f991SJulian Elischer struct proc *pageproc;
1072b14f991SJulian Elischer 
1082b14f991SJulian Elischer static struct kproc_desc page_kp = {
1092b14f991SJulian Elischer 	"pagedaemon",
1102b14f991SJulian Elischer 	vm_pageout,
1112b14f991SJulian Elischer 	&pageproc
1122b14f991SJulian Elischer };
1134590fd3aSDavid Greenman SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1142b14f991SJulian Elischer 
11538efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1162b14f991SJulian Elischer /* the kernel process "vm_daemon"*/
1172b14f991SJulian Elischer static void vm_daemon __P((void));
118f708ef1bSPoul-Henning Kamp static struct	proc *vmproc;
1192b14f991SJulian Elischer 
1202b14f991SJulian Elischer static struct kproc_desc vm_kp = {
1212b14f991SJulian Elischer 	"vmdaemon",
1222b14f991SJulian Elischer 	vm_daemon,
1232b14f991SJulian Elischer 	&vmproc
1242b14f991SJulian Elischer };
1254590fd3aSDavid Greenman SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
12638efa82bSJohn Dyson #endif
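/*
 * Editor's note (added comment, not in the original source): each
 * kproc_desc above names a kernel process, its entry point and a
 * struct proc pointer to fill in; the SYSINIT_KT() entries arrange for
 * kproc_start() to create these threads during system initialization,
 * after which they run their entry functions forever.
 */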
1272b14f991SJulian Elischer 
1282b14f991SJulian Elischer 
129df8bae1dSRodney W. Grimes int vm_pages_needed;		/* Event on which pageout daemon sleeps */
13026f9a767SRodney W. Grimes 
131c3cb3e12SDavid Greenman int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
13226f9a767SRodney W. Grimes 
13326f9a767SRodney W. Grimes extern int npendingio;
13438efa82bSJohn Dyson #if !defined(NO_SWAPPING)
135f708ef1bSPoul-Henning Kamp static int vm_pageout_req_swapout;	/* XXX */
136f708ef1bSPoul-Henning Kamp static int vm_daemon_needed;
13738efa82bSJohn Dyson #endif
13826f9a767SRodney W. Grimes extern int nswiodone;
1395663e6deSDavid Greenman extern int vm_swap_size;
140f6b04d2bSDavid Greenman extern int vfs_update_wakeup;
141dc2efb27SJohn Dyson int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
142dc2efb27SJohn Dyson int vm_pageout_full_stats_interval = 0;
143dc2efb27SJohn Dyson int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
14412ac6a1dSJohn Dyson int defer_swap_pageouts=0;
14512ac6a1dSJohn Dyson int disable_swap_pageouts=0;
14670111b90SJohn Dyson 
14770111b90SJohn Dyson int vm_maxlaunder=100;
14838efa82bSJohn Dyson #if defined(NO_SWAPPING)
14938efa82bSJohn Dyson int vm_swapping_enabled=0;
15038efa82bSJohn Dyson #else
15138efa82bSJohn Dyson int vm_swapping_enabled=1;
15238efa82bSJohn Dyson #endif
15338efa82bSJohn Dyson 
15438efa82bSJohn Dyson SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
15538efa82bSJohn Dyson 	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");
15638efa82bSJohn Dyson 
157dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
158dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");
159dc2efb27SJohn Dyson 
160dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
161dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");
162dc2efb27SJohn Dyson 
163dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
164dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");
165dc2efb27SJohn Dyson 
166dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
167dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");
168dc2efb27SJohn Dyson 
16938efa82bSJohn Dyson #if defined(NO_SWAPPING)
17038efa82bSJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
17138efa82bSJohn Dyson 	CTLFLAG_RD, &vm_swapping_enabled, 0, "");
17238efa82bSJohn Dyson #else
17338efa82bSJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
17438efa82bSJohn Dyson 	CTLFLAG_RW, &vm_swapping_enabled, 0, "");
17538efa82bSJohn Dyson #endif
17626f9a767SRodney W. Grimes 
17712ac6a1dSJohn Dyson SYSCTL_INT(_vm, OID_AUTO, defer_swap_pageouts,
17812ac6a1dSJohn Dyson 	CTLFLAG_RW, &defer_swap_pageouts, 0, "");
17912ac6a1dSJohn Dyson 
18012ac6a1dSJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swap_pageouts,
18112ac6a1dSJohn Dyson 	CTLFLAG_RW, &disable_swap_pageouts, 0, "");
18212ac6a1dSJohn Dyson 
18370111b90SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, vm_maxlaunder,
18470111b90SJohn Dyson 	CTLFLAG_RW, &vm_maxlaunder, 0, "");
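/*
 * Editor's note (not in the original source): the SYSCTL_INT() entries
 * above export these tunables under the "vm" sysctl tree, so they can be
 * inspected or adjusted from userland with sysctl(8), e.g.:
 *
 *	sysctl vm.pageout_algorithm
 *	sysctl -w vm.defer_swap_pageouts=1
 *
 * The node names follow the third argument of each SYSCTL_INT() above.
 */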
18570111b90SJohn Dyson 
18626f9a767SRodney W. Grimes 
187a316d390SJohn Dyson #define VM_PAGEOUT_PAGE_COUNT 16
188bbc0ec52SDavid Greenman int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
189df8bae1dSRodney W. Grimes 
190c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
191df8bae1dSRodney W. Grimes 
19238efa82bSJohn Dyson #if !defined(NO_SWAPPING)
19338efa82bSJohn Dyson typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
19438efa82bSJohn Dyson static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
195cd41fc12SDavid Greenman static freeer_fcn_t vm_pageout_object_deactivate_pages;
196cd41fc12SDavid Greenman static void vm_req_vmdaemon __P((void));
19738efa82bSJohn Dyson #endif
198dc2efb27SJohn Dyson static void vm_pageout_page_stats(void);
1995985940eSJohn Dyson void pmap_collect(void);
200cd41fc12SDavid Greenman 
20126f9a767SRodney W. Grimes /*
20226f9a767SRodney W. Grimes  * vm_pageout_clean:
20324a1cce3SDavid Greenman  *
2040d94caffSDavid Greenman  * Clean the page and remove it from the laundry.
20526f9a767SRodney W. Grimes  *
2060d94caffSDavid Greenman  * We set the busy bit to cause potential page faults on this page to
20726f9a767SRodney W. Grimes  * block.
20826f9a767SRodney W. Grimes  *
2090d94caffSDavid Greenman  * And we set pageout-in-progress to keep the object from disappearing
2100d94caffSDavid Greenman  * during pageout.  This guarantees that the page won't move from the
2110d94caffSDavid Greenman  * inactive queue.  (However, any other page on the inactive queue may
2120d94caffSDavid Greenman  * move!)
21326f9a767SRodney W. Grimes  */
2143af76890SPoul-Henning Kamp static int
21524a1cce3SDavid Greenman vm_pageout_clean(m, sync)
21624a1cce3SDavid Greenman 	vm_page_t m;
21724a1cce3SDavid Greenman 	int sync;
21824a1cce3SDavid Greenman {
21926f9a767SRodney W. Grimes 	register vm_object_t object;
220f35329acSJohn Dyson 	vm_page_t mc[2*vm_pageout_page_count];
22124a1cce3SDavid Greenman 	int pageout_count;
22224a1cce3SDavid Greenman 	int i, forward_okay, backward_okay, page_base;
223a316d390SJohn Dyson 	vm_pindex_t pindex = m->pindex;
22426f9a767SRodney W. Grimes 
22526f9a767SRodney W. Grimes 	object = m->object;
22624a1cce3SDavid Greenman 
22726f9a767SRodney W. Grimes 	/*
22824a1cce3SDavid Greenman 	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
22924a1cce3SDavid Greenman 	 * Try to avoid the deadlock.
23026f9a767SRodney W. Grimes 	 */
23124a1cce3SDavid Greenman 	if ((sync != VM_PAGEOUT_FORCE) &&
232f35329acSJohn Dyson 	    (object->type == OBJT_DEFAULT) &&
23324a1cce3SDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
23426f9a767SRodney W. Grimes 		return 0;
23526f9a767SRodney W. Grimes 
23624a1cce3SDavid Greenman 	/*
23724a1cce3SDavid Greenman 	 * Don't mess with the page if it's busy.
23824a1cce3SDavid Greenman 	 */
239f6b04d2bSDavid Greenman 	if ((!sync && m->hold_count != 0) ||
2400d94caffSDavid Greenman 	    ((m->busy != 0) || (m->flags & PG_BUSY)))
2410d94caffSDavid Greenman 		return 0;
2420d94caffSDavid Greenman 
24324a1cce3SDavid Greenman 	/*
24424a1cce3SDavid Greenman 	 * Try collapsing before it's too late.
24524a1cce3SDavid Greenman 	 */
24624a1cce3SDavid Greenman 	if (!sync && object->backing_object) {
24726f9a767SRodney W. Grimes 		vm_object_collapse(object);
24826f9a767SRodney W. Grimes 	}
2493c018e72SJohn Dyson 
250f35329acSJohn Dyson 	mc[vm_pageout_page_count] = m;
25126f9a767SRodney W. Grimes 	pageout_count = 1;
252f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
25324a1cce3SDavid Greenman 	forward_okay = TRUE;
254a316d390SJohn Dyson 	if (pindex != 0)
25524a1cce3SDavid Greenman 		backward_okay = TRUE;
25626f9a767SRodney W. Grimes 	else
25724a1cce3SDavid Greenman 		backward_okay = FALSE;
25824a1cce3SDavid Greenman 	/*
25924a1cce3SDavid Greenman 	 * Scan object for clusterable pages.
26024a1cce3SDavid Greenman 	 *
26124a1cce3SDavid Greenman 	 * We can cluster ONLY if: ->> the page is NOT
26224a1cce3SDavid Greenman 	 * clean, wired, busy, held, or mapped into a
26324a1cce3SDavid Greenman 	 * buffer, and one of the following:
26424a1cce3SDavid Greenman 	 * 1) The page is inactive, or a seldom used
26524a1cce3SDavid Greenman 	 *    active page.
26624a1cce3SDavid Greenman 	 * -or-
26724a1cce3SDavid Greenman 	 * 2) we force the issue.
26824a1cce3SDavid Greenman 	 */
26924a1cce3SDavid Greenman 	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
27024a1cce3SDavid Greenman 		vm_page_t p;
271f6b04d2bSDavid Greenman 
27224a1cce3SDavid Greenman 		/*
27324a1cce3SDavid Greenman 		 * See if forward page is clusterable.
27424a1cce3SDavid Greenman 		 */
27524a1cce3SDavid Greenman 		if (forward_okay) {
27624a1cce3SDavid Greenman 			/*
27724a1cce3SDavid Greenman 			 * Stop forward scan at end of object.
27824a1cce3SDavid Greenman 			 */
279a316d390SJohn Dyson 			if ((pindex + i) > object->size) {
28024a1cce3SDavid Greenman 				forward_okay = FALSE;
28124a1cce3SDavid Greenman 				goto do_backward;
282f6b04d2bSDavid Greenman 			}
283a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex + i);
28424a1cce3SDavid Greenman 			if (p) {
2855070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
2865070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
28724a1cce3SDavid Greenman 					forward_okay = FALSE;
28824a1cce3SDavid Greenman 					goto do_backward;
289f6b04d2bSDavid Greenman 				}
29024a1cce3SDavid Greenman 				vm_page_test_dirty(p);
29124a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
292bd7e5f99SJohn Dyson 				    ((p->queue == PQ_INACTIVE) ||
29324a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
29424a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
29524a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
296f35329acSJohn Dyson 					mc[vm_pageout_page_count + i] = p;
29724a1cce3SDavid Greenman 					pageout_count++;
29824a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
29924a1cce3SDavid Greenman 						break;
30024a1cce3SDavid Greenman 				} else {
30124a1cce3SDavid Greenman 					forward_okay = FALSE;
302f6b04d2bSDavid Greenman 				}
30324a1cce3SDavid Greenman 			} else {
30424a1cce3SDavid Greenman 				forward_okay = FALSE;
30524a1cce3SDavid Greenman 			}
30624a1cce3SDavid Greenman 		}
30724a1cce3SDavid Greenman do_backward:
30824a1cce3SDavid Greenman 		/*
30924a1cce3SDavid Greenman 		 * See if backward page is clusterable.
31024a1cce3SDavid Greenman 		 */
31124a1cce3SDavid Greenman 		if (backward_okay) {
31224a1cce3SDavid Greenman 			/*
31324a1cce3SDavid Greenman 			 * Stop backward scan at beginning of object.
31424a1cce3SDavid Greenman 			 */
315a316d390SJohn Dyson 			if ((pindex - i) == 0) {
31624a1cce3SDavid Greenman 				backward_okay = FALSE;
31724a1cce3SDavid Greenman 			}
318a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex - i);
31924a1cce3SDavid Greenman 			if (p) {
3205070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
3215070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
32224a1cce3SDavid Greenman 					backward_okay = FALSE;
32324a1cce3SDavid Greenman 					continue;
32424a1cce3SDavid Greenman 				}
32524a1cce3SDavid Greenman 				vm_page_test_dirty(p);
32624a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
327bd7e5f99SJohn Dyson 				    ((p->queue == PQ_INACTIVE) ||
32824a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
32924a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
33024a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
331f35329acSJohn Dyson 					mc[vm_pageout_page_count - i] = p;
33224a1cce3SDavid Greenman 					pageout_count++;
33324a1cce3SDavid Greenman 					page_base--;
33424a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
33524a1cce3SDavid Greenman 						break;
33624a1cce3SDavid Greenman 				} else {
33724a1cce3SDavid Greenman 					backward_okay = FALSE;
33824a1cce3SDavid Greenman 				}
33924a1cce3SDavid Greenman 			} else {
34024a1cce3SDavid Greenman 				backward_okay = FALSE;
34124a1cce3SDavid Greenman 			}
342f6b04d2bSDavid Greenman 		}
343f6b04d2bSDavid Greenman 	}
344f6b04d2bSDavid Greenman 
34567bf6868SJohn Dyson 	/*
34667bf6868SJohn Dyson 	 * we allow reads during pageouts...
34767bf6868SJohn Dyson 	 */
34824a1cce3SDavid Greenman 	for (i = page_base; i < (page_base + pageout_count); i++) {
34924a1cce3SDavid Greenman 		mc[i]->flags |= PG_BUSY;
35067bf6868SJohn Dyson 		vm_page_protect(mc[i], VM_PROT_READ);
35126f9a767SRodney W. Grimes 	}
35226f9a767SRodney W. Grimes 
353aef922f5SJohn Dyson 	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
354aef922f5SJohn Dyson }
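/*
 * Editor's sketch (not part of the original source, not compiled): the
 * clustering test applied in both the forward and backward scans of
 * vm_pageout_clean() above, restated as a single predicate.  The helper
 * name "vm_pageout_page_clusterable" is hypothetical.
 */
#if 0
static __inline int
vm_pageout_page_clusterable(vm_page_t p, int sync)
{
	if (((p->queue - p->pc) == PQ_CACHE) ||
	    (p->flags & PG_BUSY) || p->busy)
		return 0;
	vm_page_test_dirty(p);
	return ((p->dirty & p->valid) != 0 &&
	    ((p->queue == PQ_INACTIVE) || (sync == VM_PAGEOUT_FORCE)) &&
	    (p->wire_count == 0) &&
	    (p->hold_count == 0));
}
#endif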
355aef922f5SJohn Dyson 
356aef922f5SJohn Dyson int
357aef922f5SJohn Dyson vm_pageout_flush(mc, count, sync)
358aef922f5SJohn Dyson 	vm_page_t *mc;
359aef922f5SJohn Dyson 	int count;
360aef922f5SJohn Dyson 	int sync;
361aef922f5SJohn Dyson {
362aef922f5SJohn Dyson 	register vm_object_t object;
363aef922f5SJohn Dyson 	int pageout_status[count];
364aef922f5SJohn Dyson 	int anyok = 0;
365aef922f5SJohn Dyson 	int i;
366aef922f5SJohn Dyson 
367aef922f5SJohn Dyson 	object = mc[0]->object;
368aef922f5SJohn Dyson 	object->paging_in_progress += count;
369aef922f5SJohn Dyson 
370aef922f5SJohn Dyson 	vm_pager_put_pages(object, mc, count,
37126f9a767SRodney W. Grimes 	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
37226f9a767SRodney W. Grimes 	    pageout_status);
37326f9a767SRodney W. Grimes 
374aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
375aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
37624a1cce3SDavid Greenman 
37726f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
37826f9a767SRodney W. Grimes 		case VM_PAGER_OK:
37926f9a767SRodney W. Grimes 			++anyok;
38026f9a767SRodney W. Grimes 			break;
38126f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
38226f9a767SRodney W. Grimes 			++anyok;
38326f9a767SRodney W. Grimes 			break;
38426f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
38526f9a767SRodney W. Grimes 			/*
3860d94caffSDavid Greenman 			 * Page outside of range of object. Right now we
3870d94caffSDavid Greenman 			 * essentially lose the changes by pretending it
3880d94caffSDavid Greenman 			 * worked.
38926f9a767SRodney W. Grimes 			 */
39067bf6868SJohn Dyson 			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
39124a1cce3SDavid Greenman 			mt->dirty = 0;
39226f9a767SRodney W. Grimes 			break;
39326f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
39426f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
39526f9a767SRodney W. Grimes 			/*
3960d94caffSDavid Greenman 			 * If the page couldn't be paged out, then reactivate the
3970d94caffSDavid Greenman 			 * page so it doesn't clog the inactive list.  (We
3980d94caffSDavid Greenman 			 * will try paging it out again later.)
39926f9a767SRodney W. Grimes 			 */
400bd7e5f99SJohn Dyson 			if (mt->queue == PQ_INACTIVE)
40124a1cce3SDavid Greenman 				vm_page_activate(mt);
40226f9a767SRodney W. Grimes 			break;
40326f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
40426f9a767SRodney W. Grimes 			break;
40526f9a767SRodney W. Grimes 		}
40626f9a767SRodney W. Grimes 
40726f9a767SRodney W. Grimes 
40826f9a767SRodney W. Grimes 		/*
4090d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
4100d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
4110d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
4120d94caffSDavid Greenman 		 * collapse.
41326f9a767SRodney W. Grimes 		 */
41426f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
415f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
41624a1cce3SDavid Greenman 			PAGE_WAKEUP(mt);
41726f9a767SRodney W. Grimes 		}
41826f9a767SRodney W. Grimes 	}
41926f9a767SRodney W. Grimes 	return anyok;
42026f9a767SRodney W. Grimes }
42126f9a767SRodney W. Grimes 
42238efa82bSJohn Dyson #if !defined(NO_SWAPPING)
42326f9a767SRodney W. Grimes /*
42426f9a767SRodney W. Grimes  *	vm_pageout_object_deactivate_pages
42526f9a767SRodney W. Grimes  *
42626f9a767SRodney W. Grimes  *	deactivate enough pages to satisfy the inactive target
42626f9a767SRodney W. Grimes  *	requirements, or, if vm_page_proc_limit is set, then
42826f9a767SRodney W. Grimes  *	deactivate all of the pages in the object and its
42924a1cce3SDavid Greenman  *	backing_objects.
43026f9a767SRodney W. Grimes  *
43126f9a767SRodney W. Grimes  *	The object and map must be locked.
43226f9a767SRodney W. Grimes  */
43338efa82bSJohn Dyson static void
43438efa82bSJohn Dyson vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
43526f9a767SRodney W. Grimes 	vm_map_t map;
43626f9a767SRodney W. Grimes 	vm_object_t object;
43738efa82bSJohn Dyson 	vm_pindex_t desired;
4380d94caffSDavid Greenman 	int map_remove_only;
43926f9a767SRodney W. Grimes {
44026f9a767SRodney W. Grimes 	register vm_page_t p, next;
44126f9a767SRodney W. Grimes 	int rcount;
44238efa82bSJohn Dyson 	int remove_mode;
4431eeaa1e3SJohn Dyson 	int s;
44426f9a767SRodney W. Grimes 
44524a1cce3SDavid Greenman 	if (object->type == OBJT_DEVICE)
44638efa82bSJohn Dyson 		return;
4478f895206SDavid Greenman 
44838efa82bSJohn Dyson 	while (object) {
44938efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
45038efa82bSJohn Dyson 			return;
45124a1cce3SDavid Greenman 		if (object->paging_in_progress)
45238efa82bSJohn Dyson 			return;
45326f9a767SRodney W. Grimes 
45438efa82bSJohn Dyson 		remove_mode = map_remove_only;
45538efa82bSJohn Dyson 		if (object->shadow_count > 1)
45638efa82bSJohn Dyson 			remove_mode = 1;
45726f9a767SRodney W. Grimes 	/*
45826f9a767SRodney W. Grimes 	 * scan the object's entire memory queue
45926f9a767SRodney W. Grimes 	 */
46026f9a767SRodney W. Grimes 		rcount = object->resident_page_count;
461b18bfc3dSJohn Dyson 		p = TAILQ_FIRST(&object->memq);
46226f9a767SRodney W. Grimes 		while (p && (rcount-- > 0)) {
4637e006499SJohn Dyson 			int actcount;
46438efa82bSJohn Dyson 			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
46538efa82bSJohn Dyson 				return;
466b18bfc3dSJohn Dyson 			next = TAILQ_NEXT(p, listq);
467a58d1fa1SDavid Greenman 			cnt.v_pdpages++;
4680d94caffSDavid Greenman 			if (p->wire_count != 0 ||
4690d94caffSDavid Greenman 			    p->hold_count != 0 ||
4700d94caffSDavid Greenman 			    p->busy != 0 ||
471bd7e5f99SJohn Dyson 			    (p->flags & PG_BUSY) ||
4720d94caffSDavid Greenman 			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
4730d94caffSDavid Greenman 				p = next;
4740d94caffSDavid Greenman 				continue;
4750d94caffSDavid Greenman 			}
476ef743ce6SJohn Dyson 
4777e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
4787e006499SJohn Dyson 			if (actcount) {
479ef743ce6SJohn Dyson 				p->flags |= PG_REFERENCED;
480c8c4b40cSJohn Dyson 			} else if (p->flags & PG_REFERENCED) {
4817e006499SJohn Dyson 				actcount = 1;
482ef743ce6SJohn Dyson 			}
483ef743ce6SJohn Dyson 
48438efa82bSJohn Dyson 			if ((p->queue != PQ_ACTIVE) &&
48538efa82bSJohn Dyson 				(p->flags & PG_REFERENCED)) {
486ef743ce6SJohn Dyson 				vm_page_activate(p);
4877e006499SJohn Dyson 				p->act_count += actcount;
488c8c4b40cSJohn Dyson 				p->flags &= ~PG_REFERENCED;
489c8c4b40cSJohn Dyson 			} else if (p->queue == PQ_ACTIVE) {
490ef743ce6SJohn Dyson 				if ((p->flags & PG_REFERENCED) == 0) {
491c8c4b40cSJohn Dyson 					p->act_count -= min(p->act_count, ACT_DECLINE);
492c8c4b40cSJohn Dyson 					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
493b18bfc3dSJohn Dyson 						vm_page_protect(p, VM_PROT_NONE);
49426f9a767SRodney W. Grimes 						vm_page_deactivate(p);
49526f9a767SRodney W. Grimes 					} else {
496c8c4b40cSJohn Dyson 						s = splvm();
497c8c4b40cSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
498c8c4b40cSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
499c8c4b40cSJohn Dyson 						splx(s);
500c8c4b40cSJohn Dyson 					}
501c8c4b40cSJohn Dyson 				} else {
502a647a309SDavid Greenman 					p->flags &= ~PG_REFERENCED;
50338efa82bSJohn Dyson 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
50438efa82bSJohn Dyson 						p->act_count += ACT_ADVANCE;
5051eeaa1e3SJohn Dyson 					s = splvm();
50626f9a767SRodney W. Grimes 					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
50726f9a767SRodney W. Grimes 					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
5081eeaa1e3SJohn Dyson 					splx(s);
50926f9a767SRodney W. Grimes 				}
510bd7e5f99SJohn Dyson 			} else if (p->queue == PQ_INACTIVE) {
511f919ebdeSDavid Greenman 				vm_page_protect(p, VM_PROT_NONE);
51226f9a767SRodney W. Grimes 			}
51326f9a767SRodney W. Grimes 			p = next;
51426f9a767SRodney W. Grimes 		}
51538efa82bSJohn Dyson 		object = object->backing_object;
51638efa82bSJohn Dyson 	}
51738efa82bSJohn Dyson 	return;
51826f9a767SRodney W. Grimes }
51926f9a767SRodney W. Grimes 
52026f9a767SRodney W. Grimes /*
52126f9a767SRodney W. Grimes  * deactivate some number of pages in a map, try to do it fairly, but
52226f9a767SRodney W. Grimes  * that is really hard to do.
52326f9a767SRodney W. Grimes  */
524cd41fc12SDavid Greenman static void
52538efa82bSJohn Dyson vm_pageout_map_deactivate_pages(map, desired)
52626f9a767SRodney W. Grimes 	vm_map_t map;
52738efa82bSJohn Dyson 	vm_pindex_t desired;
52826f9a767SRodney W. Grimes {
52926f9a767SRodney W. Grimes 	vm_map_entry_t tmpe;
53038efa82bSJohn Dyson 	vm_object_t obj, bigobj;
5310d94caffSDavid Greenman 
53226f9a767SRodney W. Grimes 	vm_map_reference(map);
533996c772fSJohn Dyson 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
53426f9a767SRodney W. Grimes 		vm_map_deallocate(map);
53526f9a767SRodney W. Grimes 		return;
53626f9a767SRodney W. Grimes 	}
53738efa82bSJohn Dyson 
53838efa82bSJohn Dyson 	bigobj = NULL;
53938efa82bSJohn Dyson 
54038efa82bSJohn Dyson 	/*
54138efa82bSJohn Dyson 	 * first, search out the biggest object, and try to free pages from
54238efa82bSJohn Dyson 	 * that.
54338efa82bSJohn Dyson 	 */
54426f9a767SRodney W. Grimes 	tmpe = map->header.next;
54538efa82bSJohn Dyson 	while (tmpe != &map->header) {
546afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
54738efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
54838efa82bSJohn Dyson 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
54938efa82bSJohn Dyson 				((bigobj == NULL) ||
55038efa82bSJohn Dyson 				 (bigobj->resident_page_count < obj->resident_page_count))) {
55138efa82bSJohn Dyson 				bigobj = obj;
55238efa82bSJohn Dyson 			}
55338efa82bSJohn Dyson 		}
55438efa82bSJohn Dyson 		tmpe = tmpe->next;
55538efa82bSJohn Dyson 	}
55638efa82bSJohn Dyson 
55738efa82bSJohn Dyson 	if (bigobj)
55838efa82bSJohn Dyson 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
55938efa82bSJohn Dyson 
56038efa82bSJohn Dyson 	/*
56138efa82bSJohn Dyson 	 * Next, hunt around for other pages to deactivate.  We actually
56238efa82bSJohn Dyson 	 * do this search sort of wrong -- .text first is not the best idea.
56338efa82bSJohn Dyson 	 */
56438efa82bSJohn Dyson 	tmpe = map->header.next;
56538efa82bSJohn Dyson 	while (tmpe != &map->header) {
56638efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
56738efa82bSJohn Dyson 			break;
568afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
56938efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
57001155bd7SDavid Greenman 			if (obj)
57138efa82bSJohn Dyson 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
57238efa82bSJohn Dyson 		}
57326f9a767SRodney W. Grimes 		tmpe = tmpe->next;
57426f9a767SRodney W. Grimes 	};
57538efa82bSJohn Dyson 
57638efa82bSJohn Dyson 	/*
57738efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out, this will free page
57838efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out; this will free page
57938efa82bSJohn Dyson 	 */
58038efa82bSJohn Dyson 	if (desired == 0)
58138efa82bSJohn Dyson 		pmap_remove(vm_map_pmap(map),
58238efa82bSJohn Dyson 			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
58338efa82bSJohn Dyson 	vm_map_unlock(map);
58426f9a767SRodney W. Grimes 	vm_map_deallocate(map);
58526f9a767SRodney W. Grimes 	return;
58626f9a767SRodney W. Grimes }
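/*
 * Editor's sketch (not part of the original source, not compiled): how a
 * caller such as the vm_daemon might use the routine above to push a
 * process back under a resident-set limit.  The function name and the
 * "limit" parameter (a page count) are hypothetical.
 */
#if 0
static void
example_enforce_rss_limit(struct proc *p, vm_pindex_t limit)
{
	/* Deactivate pages until the pmap's resident count drops to limit. */
	vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
}
#endif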
58738efa82bSJohn Dyson #endif
588df8bae1dSRodney W. Grimes 
589df8bae1dSRodney W. Grimes /*
590df8bae1dSRodney W. Grimes  *	vm_pageout_scan does the dirty work for the pageout daemon.
591df8bae1dSRodney W. Grimes  */
5923af76890SPoul-Henning Kamp static int
593df8bae1dSRodney W. Grimes vm_pageout_scan()
594df8bae1dSRodney W. Grimes {
595502ba6e4SJohn Dyson 	vm_page_t m, next;
59670111b90SJohn Dyson 	int page_shortage, addl_page_shortage, maxscan, pcount;
59770111b90SJohn Dyson 	int maxlaunder;
5984e39a515SPoul-Henning Kamp 	int pages_freed;
5995663e6deSDavid Greenman 	struct proc *p, *bigproc;
6005663e6deSDavid Greenman 	vm_offset_t size, bigsize;
601df8bae1dSRodney W. Grimes 	vm_object_t object;
60226f9a767SRodney W. Grimes 	int force_wakeup = 0;
6037e006499SJohn Dyson 	int actcount;
604f6b04d2bSDavid Greenman 	int vnodes_skipped = 0;
6051eeaa1e3SJohn Dyson 	int s;
6060d94caffSDavid Greenman 
607df8bae1dSRodney W. Grimes 	/*
6085985940eSJohn Dyson 	 * Do whatever cleanup that the pmap code can.
6095985940eSJohn Dyson 	 */
6105985940eSJohn Dyson 	pmap_collect();
6115985940eSJohn Dyson 
6125985940eSJohn Dyson 	/*
6130d94caffSDavid Greenman 	 * Start scanning the inactive queue for pages we can free. We keep
6140d94caffSDavid Greenman 	 * scanning until we have enough free pages or we have scanned through
6150d94caffSDavid Greenman 	 * the entire queue.  If we encounter dirty pages, we start cleaning
6160d94caffSDavid Greenman 	 * them.
617df8bae1dSRodney W. Grimes 	 */
618df8bae1dSRodney W. Grimes 
619b182ec9eSJohn Dyson 	pages_freed = 0;
620f35329acSJohn Dyson 	addl_page_shortage = 0;
621b182ec9eSJohn Dyson 
62270111b90SJohn Dyson 	if (vm_maxlaunder == 0)
62370111b90SJohn Dyson 		vm_maxlaunder = 1;
62470111b90SJohn Dyson 	maxlaunder = (cnt.v_inactive_target > vm_maxlaunder) ?
62570111b90SJohn Dyson 	    vm_maxlaunder : cnt.v_inactive_target;
62670111b90SJohn Dyson 
62767bf6868SJohn Dyson rescan0:
628f6b04d2bSDavid Greenman 	maxscan = cnt.v_inactive_count;
629b182ec9eSJohn Dyson 	for( m = TAILQ_FIRST(&vm_page_queue_inactive);
630b182ec9eSJohn Dyson 
631b182ec9eSJohn Dyson 		(m != NULL) && (maxscan-- > 0) &&
632b18bfc3dSJohn Dyson 			((cnt.v_cache_count + cnt.v_free_count) <
633b182ec9eSJohn Dyson 			(cnt.v_cache_min + cnt.v_free_target));
634b182ec9eSJohn Dyson 
635b182ec9eSJohn Dyson 		m = next) {
636df8bae1dSRodney W. Grimes 
637a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
638b182ec9eSJohn Dyson 
639f35329acSJohn Dyson 		if (m->queue != PQ_INACTIVE) {
64067bf6868SJohn Dyson 			goto rescan0;
641f35329acSJohn Dyson 		}
642b182ec9eSJohn Dyson 
643b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
644df8bae1dSRodney W. Grimes 
645b182ec9eSJohn Dyson 		if (m->hold_count) {
646f35329acSJohn Dyson 			s = splvm();
647b182ec9eSJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
648b182ec9eSJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
649f35329acSJohn Dyson 			splx(s);
650b182ec9eSJohn Dyson 			addl_page_shortage++;
651b182ec9eSJohn Dyson 			continue;
652df8bae1dSRodney W. Grimes 		}
65326f9a767SRodney W. Grimes 		/*
654b18bfc3dSJohn Dyson 		 * Don't mess with busy pages; keep them at the front of the
655b18bfc3dSJohn Dyson 		 * queue, as they are most likely being paged out.
65626f9a767SRodney W. Grimes 		 */
657bd7e5f99SJohn Dyson 		if (m->busy || (m->flags & PG_BUSY)) {
658b182ec9eSJohn Dyson 			addl_page_shortage++;
65926f9a767SRodney W. Grimes 			continue;
66026f9a767SRodney W. Grimes 		}
661bd7e5f99SJohn Dyson 
6627e006499SJohn Dyson 		/*
6637e006499SJohn Dyson 		 * If the object is not being used, we ignore previous references.
6647e006499SJohn Dyson 		 */
6650d94caffSDavid Greenman 		if (m->object->ref_count == 0) {
6660d94caffSDavid Greenman 			m->flags &= ~PG_REFERENCED;
66767bf6868SJohn Dyson 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
6687e006499SJohn Dyson 
6697e006499SJohn Dyson 		/*
6707e006499SJohn Dyson 		 * Otherwise, if the page has been referenced while in the inactive
6717e006499SJohn Dyson 		 * queue, we bump the "activation count" upwards, making it less
6727e006499SJohn Dyson 		 * likely that the page will be added back to the inactive queue
6737e006499SJohn Dyson 		 * prematurely again.  Here we check the page tables (or emulated
6747e006499SJohn Dyson 		 * bits, if any), since the upper level VM system does not know
6757e006499SJohn Dyson 		 * anything about existing references.
6767e006499SJohn Dyson 		 */
677ef743ce6SJohn Dyson 		} else if (((m->flags & PG_REFERENCED) == 0) &&
6787e006499SJohn Dyson 			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
679ef743ce6SJohn Dyson 			vm_page_activate(m);
6807e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE);
681ef743ce6SJohn Dyson 			continue;
6822fe6e4d7SDavid Greenman 		}
683ef743ce6SJohn Dyson 
6847e006499SJohn Dyson 		/*
6857e006499SJohn Dyson 		 * If the upper level VM system knows about any page references,
6867e006499SJohn Dyson 		 * we activate the page.  We also set the "activation count" higher
6877e006499SJohn Dyson 		 * than normal so that we are less likely to place the page back
6887e006499SJohn Dyson 		 * onto the inactive queue again.
6897e006499SJohn Dyson 		 */
690bd7e5f99SJohn Dyson 		if ((m->flags & PG_REFERENCED) != 0) {
691a647a309SDavid Greenman 			m->flags &= ~PG_REFERENCED;
6927e006499SJohn Dyson #if 0
69367bf6868SJohn Dyson 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
6947e006499SJohn Dyson #else
6957e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
6967e006499SJohn Dyson #endif
69726f9a767SRodney W. Grimes 			vm_page_activate(m);
6987e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE + 1);
6990d94caffSDavid Greenman 			continue;
7000d94caffSDavid Greenman 		}
70167bf6868SJohn Dyson 
7027e006499SJohn Dyson 		/*
7037e006499SJohn Dyson 		 * If the upper level VM system doesn't know anything about the
7047e006499SJohn Dyson 		 * page being dirty, we have to check for it again.  As far as the
7057e006499SJohn Dyson 		 * VM code knows, any partially dirty pages are fully dirty.
7067e006499SJohn Dyson 		 */
707f6b04d2bSDavid Greenman 		if (m->dirty == 0) {
708bd7e5f99SJohn Dyson 			vm_page_test_dirty(m);
70930dcfc09SJohn Dyson 		} else if (m->dirty != 0) {
710bd7e5f99SJohn Dyson 			m->dirty = VM_PAGE_BITS_ALL;
71130dcfc09SJohn Dyson 		}
712ef743ce6SJohn Dyson 
7137e006499SJohn Dyson 		/*
7147e006499SJohn Dyson 		 * Invalid pages can be easily freed
7157e006499SJohn Dyson 		 */
7166d40c3d3SDavid Greenman 		if (m->valid == 0) {
717bd7e5f99SJohn Dyson 			vm_page_protect(m, VM_PROT_NONE);
7186d40c3d3SDavid Greenman 			vm_page_free(m);
71967bf6868SJohn Dyson 			cnt.v_dfree++;
720f6b04d2bSDavid Greenman 			++pages_freed;
7217e006499SJohn Dyson 
7227e006499SJohn Dyson 		/*
7237e006499SJohn Dyson 		 * Clean pages can be placed onto the cache queue.
7247e006499SJohn Dyson 		 */
725bd7e5f99SJohn Dyson 		} else if (m->dirty == 0) {
726bd7e5f99SJohn Dyson 			vm_page_cache(m);
727bd7e5f99SJohn Dyson 			++pages_freed;
7287e006499SJohn Dyson 
7297e006499SJohn Dyson 		/*
7307e006499SJohn Dyson 		 * Dirty pages need to be paged out.  Note that we clean
7317e006499SJohn Dyson 		 * only a limited number of pages per pagedaemon pass.
7327e006499SJohn Dyson 		 */
7330d94caffSDavid Greenman 		} else if (maxlaunder > 0) {
7340d94caffSDavid Greenman 			int written;
73512ac6a1dSJohn Dyson 			int swap_pageouts_ok;
736f6b04d2bSDavid Greenman 			struct vnode *vp = NULL;
7370d94caffSDavid Greenman 
7380d94caffSDavid Greenman 			object = m->object;
7397e006499SJohn Dyson 
74012ac6a1dSJohn Dyson 			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
74112ac6a1dSJohn Dyson 				swap_pageouts_ok = 1;
74212ac6a1dSJohn Dyson 			} else {
74312ac6a1dSJohn Dyson 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
74412ac6a1dSJohn Dyson 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
74512ac6a1dSJohn Dyson 					(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
74612ac6a1dSJohn Dyson 
74712ac6a1dSJohn Dyson 			}
74870111b90SJohn Dyson 
74970111b90SJohn Dyson 			/*
75070111b90SJohn Dyson 			 * We don't bother paging objects that are "dead".  Those
75170111b90SJohn Dyson 			 * objects are in a "rundown" state.
75270111b90SJohn Dyson 			 */
75370111b90SJohn Dyson 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
75412ac6a1dSJohn Dyson 				s = splvm();
75512ac6a1dSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
75612ac6a1dSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
75712ac6a1dSJohn Dyson 				splx(s);
75812ac6a1dSJohn Dyson 				continue;
75912ac6a1dSJohn Dyson 			}
76012ac6a1dSJohn Dyson 
76124a1cce3SDavid Greenman 			if (object->type == OBJT_VNODE) {
76224a1cce3SDavid Greenman 				vp = object->handle;
763996c772fSJohn Dyson 				if (VOP_ISLOCKED(vp) ||
7642f558c3eSBruce Evans 				    vget(vp, LK_EXCLUSIVE, curproc)) {
765b182ec9eSJohn Dyson 					if ((m->queue == PQ_INACTIVE) &&
766b182ec9eSJohn Dyson 						(m->hold_count == 0) &&
767b182ec9eSJohn Dyson 						(m->busy == 0) &&
768b182ec9eSJohn Dyson 						(m->flags & PG_BUSY) == 0) {
769f35329acSJohn Dyson 						s = splvm();
77085a376ebSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
77185a376ebSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
772f35329acSJohn Dyson 						splx(s);
77385a376ebSJohn Dyson 					}
774aef922f5SJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
775f6b04d2bSDavid Greenman 						++vnodes_skipped;
776b182ec9eSJohn Dyson 					continue;
77785a376ebSJohn Dyson 				}
778b182ec9eSJohn Dyson 
779f35329acSJohn Dyson 				/*
780f35329acSJohn Dyson 				 * The page might have been moved to another queue
781f35329acSJohn Dyson 				 * during potential blocking in vget() above.
782f35329acSJohn Dyson 				 */
783b182ec9eSJohn Dyson 				if (m->queue != PQ_INACTIVE) {
784b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
785b182ec9eSJohn Dyson 						++vnodes_skipped;
786b182ec9eSJohn Dyson 					vput(vp);
787b182ec9eSJohn Dyson 					continue;
788b182ec9eSJohn Dyson 				}
789b182ec9eSJohn Dyson 
790f35329acSJohn Dyson 				/*
791f35329acSJohn Dyson 				 * The page may have been busied while we blocked in
792f35329acSJohn Dyson 				 * vget(); we don't move the page back onto the end of
793f35329acSJohn Dyson 				 * the queue, so that the statistics are more accurate.
794f35329acSJohn Dyson 				 */
795b182ec9eSJohn Dyson 				if (m->busy || (m->flags & PG_BUSY)) {
796b182ec9eSJohn Dyson 					vput(vp);
797b182ec9eSJohn Dyson 					continue;
798b182ec9eSJohn Dyson 				}
799b182ec9eSJohn Dyson 
800f35329acSJohn Dyson 				/*
801f35329acSJohn Dyson 				 * If the page has become held, then skip it
802f35329acSJohn Dyson 				 */
803b182ec9eSJohn Dyson 				if (m->hold_count) {
804f35329acSJohn Dyson 					s = splvm();
805b182ec9eSJohn Dyson 					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
806b182ec9eSJohn Dyson 					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
807f35329acSJohn Dyson 					splx(s);
808b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
809b182ec9eSJohn Dyson 						++vnodes_skipped;
810b182ec9eSJohn Dyson 					vput(vp);
811f6b04d2bSDavid Greenman 					continue;
812f6b04d2bSDavid Greenman 				}
813f6b04d2bSDavid Greenman 			}
814f6b04d2bSDavid Greenman 
8150d94caffSDavid Greenman 			/*
8160d94caffSDavid Greenman 			 * If a page is dirty, then it is either being washed
8170d94caffSDavid Greenman 			 * (but not yet cleaned) or it is still in the
8180d94caffSDavid Greenman 			 * laundry.  If it is still in the laundry, then we
8190d94caffSDavid Greenman 			 * start the cleaning operation.
8200d94caffSDavid Greenman 			 */
8210d94caffSDavid Greenman 			written = vm_pageout_clean(m, 0);
822f6b04d2bSDavid Greenman 			if (vp)
823f6b04d2bSDavid Greenman 				vput(vp);
824f6b04d2bSDavid Greenman 
8250d94caffSDavid Greenman 			maxlaunder -= written;
8260d94caffSDavid Greenman 		}
827df8bae1dSRodney W. Grimes 	}
82826f9a767SRodney W. Grimes 
829df8bae1dSRodney W. Grimes 	/*
8300d94caffSDavid Greenman 	 * Compute the page shortage.  If we are still very low on memory, be
8310d94caffSDavid Greenman 	 * sure that we will move a minimal number of pages from active to
8320d94caffSDavid Greenman 	 * inactive.
833df8bae1dSRodney W. Grimes 	 */
834b182ec9eSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
8350d94caffSDavid Greenman 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
83626f9a767SRodney W. Grimes 	if (page_shortage <= 0) {
83726f9a767SRodney W. Grimes 		if (pages_freed == 0) {
83817c4c408SDavid Greenman 			page_shortage = cnt.v_free_min - cnt.v_free_count;
839f6b04d2bSDavid Greenman 		} else {
840f6b04d2bSDavid Greenman 			page_shortage = 1;
84126f9a767SRodney W. Grimes 		}
842df8bae1dSRodney W. Grimes 	}
8437e006499SJohn Dyson 
8447e006499SJohn Dyson 	/*
8457e006499SJohn Dyson 	 * If the "inactive" loop finds that there is a shortage over and
8467e006499SJohn Dyson 	 * above the page statistics variables, then we need to accommodate
8477e006499SJohn Dyson 	 * that.  This avoids potential deadlocks due to pages being temporarily
8487e006499SJohn Dyson 	 * busy for I/O or other types of temporary wiring.
8497e006499SJohn Dyson 	 */
850b182ec9eSJohn Dyson 	if (addl_page_shortage) {
851b182ec9eSJohn Dyson 		if (page_shortage < 0)
852b182ec9eSJohn Dyson 			page_shortage = 0;
853b182ec9eSJohn Dyson 		page_shortage += addl_page_shortage;
854b182ec9eSJohn Dyson 	}
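	/*
	 * Editor's worked example (hypothetical numbers, not from the source):
	 * with v_inactive_target = 512, v_cache_min = 256, v_free_count = 100,
	 * v_inactive_count = 300 and v_cache_count = 200, the shortage is
	 * (512 + 256) - (100 + 300 + 200) = 168 pages, so roughly that many
	 * active pages will be deactivated by the loop below; pages skipped
	 * above because they were busy or held are added on top via
	 * addl_page_shortage.
	 */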
85526f9a767SRodney W. Grimes 
856b18bfc3dSJohn Dyson 	pcount = cnt.v_active_count;
857b18bfc3dSJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
858b18bfc3dSJohn Dyson 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
859f35329acSJohn Dyson 
8607e006499SJohn Dyson 		/*
8617e006499SJohn Dyson 		 * This is a consistency check, and should likely be a panic
8627e006499SJohn Dyson 		 * or warning.
8637e006499SJohn Dyson 		 */
864f35329acSJohn Dyson 		if (m->queue != PQ_ACTIVE) {
86538efa82bSJohn Dyson 			break;
866f35329acSJohn Dyson 		}
867f35329acSJohn Dyson 
868b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
869df8bae1dSRodney W. Grimes 		/*
87026f9a767SRodney W. Grimes 		 * Don't deactivate pages that are busy.
871df8bae1dSRodney W. Grimes 		 */
872a647a309SDavid Greenman 		if ((m->busy != 0) ||
8730d94caffSDavid Greenman 		    (m->flags & PG_BUSY) ||
874f6b04d2bSDavid Greenman 		    (m->hold_count != 0)) {
875f35329acSJohn Dyson 			s = splvm();
8766d40c3d3SDavid Greenman 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
8776d40c3d3SDavid Greenman 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
878f35329acSJohn Dyson 			splx(s);
87926f9a767SRodney W. Grimes 			m = next;
88026f9a767SRodney W. Grimes 			continue;
881df8bae1dSRodney W. Grimes 		}
882b18bfc3dSJohn Dyson 
883b18bfc3dSJohn Dyson 		/*
884b18bfc3dSJohn Dyson 		 * The count for pagedaemon pages is done after checking the
885b18bfc3dSJohn Dyson 		 * page for eligibility...
886b18bfc3dSJohn Dyson 		 */
887b18bfc3dSJohn Dyson 		cnt.v_pdpages++;
888ef743ce6SJohn Dyson 
8897e006499SJohn Dyson 		/*
8907e006499SJohn Dyson 		 * Check to see "how much" the page has been used.
8917e006499SJohn Dyson 		 */
8927e006499SJohn Dyson 		actcount = 0;
893ef743ce6SJohn Dyson 		if (m->object->ref_count != 0) {
894ef743ce6SJohn Dyson 			if (m->flags & PG_REFERENCED) {
8957e006499SJohn Dyson 				actcount += 1;
8960d94caffSDavid Greenman 			}
8977e006499SJohn Dyson 			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
8987e006499SJohn Dyson 			if (actcount) {
8997e006499SJohn Dyson 				m->act_count += ACT_ADVANCE + actcount;
90038efa82bSJohn Dyson 				if (m->act_count > ACT_MAX)
90138efa82bSJohn Dyson 					m->act_count = ACT_MAX;
90238efa82bSJohn Dyson 			}
903b18bfc3dSJohn Dyson 		}
904ef743ce6SJohn Dyson 
9057e006499SJohn Dyson 		/*
9067e006499SJohn Dyson 		 * Since we have "tested" this bit, we need to clear it now.
9077e006499SJohn Dyson 		 */
908b18bfc3dSJohn Dyson 		m->flags &= ~PG_REFERENCED;
909ef743ce6SJohn Dyson 
9107e006499SJohn Dyson 		/*
9117e006499SJohn Dyson 		 * Only if an object is currently being used do we use the
9127e006499SJohn Dyson 		 * page activation count stats.
9137e006499SJohn Dyson 		 */
9147e006499SJohn Dyson 		if (actcount && (m->object->ref_count != 0)) {
915f35329acSJohn Dyson 			s = splvm();
91626f9a767SRodney W. Grimes 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
91726f9a767SRodney W. Grimes 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
918f35329acSJohn Dyson 			splx(s);
91926f9a767SRodney W. Grimes 		} else {
92038efa82bSJohn Dyson 			m->act_count -= min(m->act_count, ACT_DECLINE);
92138efa82bSJohn Dyson 			if (vm_pageout_algorithm_lru ||
92238efa82bSJohn Dyson 				(m->object->ref_count == 0) || (m->act_count == 0)) {
9230d94caffSDavid Greenman 				--page_shortage;
924d4a272dbSJohn Dyson 				if (m->object->ref_count == 0) {
925ef743ce6SJohn Dyson 					vm_page_protect(m, VM_PROT_NONE);
926d4a272dbSJohn Dyson 					if (m->dirty == 0)
9270d94caffSDavid Greenman 						vm_page_cache(m);
928d4a272dbSJohn Dyson 					else
929d4a272dbSJohn Dyson 						vm_page_deactivate(m);
9300d94caffSDavid Greenman 				} else {
93126f9a767SRodney W. Grimes 					vm_page_deactivate(m);
932df8bae1dSRodney W. Grimes 				}
93338efa82bSJohn Dyson 			} else {
93438efa82bSJohn Dyson 				s = splvm();
93538efa82bSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
93638efa82bSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
93738efa82bSJohn Dyson 				splx(s);
93838efa82bSJohn Dyson 			}
939df8bae1dSRodney W. Grimes 		}
94026f9a767SRodney W. Grimes 		m = next;
94126f9a767SRodney W. Grimes 	}
942df8bae1dSRodney W. Grimes 
943f35329acSJohn Dyson 	s = splvm();
944df8bae1dSRodney W. Grimes 	/*
9450d94caffSDavid Greenman 	 * We try to maintain some *really* free pages; this allows interrupt
9460d94caffSDavid Greenman 	 * code to be guaranteed space.
947df8bae1dSRodney W. Grimes 	 */
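	/*
	 * Editor's note (added comment): PQ_PRIME2 is chosen so that it is
	 * relatively prime to the number of PQ_CACHE queues (PQ_L2_SIZE);
	 * advancing cache_rover by it and masking with PQ_L2_MASK therefore
	 * visits every cache-color queue before repeating, spreading these
	 * emergency frees across all page colors.
	 */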
948a1f6d91cSDavid Greenman 	while (cnt.v_free_count < cnt.v_free_reserved) {
9495070c7f8SJohn Dyson 		static int cache_rover = 0;
9505070c7f8SJohn Dyson 		m = vm_page_list_find(PQ_CACHE, cache_rover);
9510d94caffSDavid Greenman 		if (!m)
9520d94caffSDavid Greenman 			break;
9535070c7f8SJohn Dyson 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
9540d94caffSDavid Greenman 		vm_page_free(m);
9550bb3a0d2SDavid Greenman 		cnt.v_dfree++;
95626f9a767SRodney W. Grimes 	}
957f35329acSJohn Dyson 	splx(s);
9585663e6deSDavid Greenman 
9595663e6deSDavid Greenman 	/*
960f6b04d2bSDavid Greenman 	 * If we didn't get enough free pages, and we have skipped a vnode
9614c1f8ee9SDavid Greenman 	 * in a writeable object, wake up the sync daemon.  Also kick off swapout
9624c1f8ee9SDavid Greenman 	 * if we did not get enough free pages.
963f6b04d2bSDavid Greenman 	 */
964bd7e5f99SJohn Dyson 	if ((cnt.v_cache_count + cnt.v_free_count) <
965bd7e5f99SJohn Dyson 		(cnt.v_free_target + cnt.v_cache_min) ) {
966f6b04d2bSDavid Greenman 		if (vnodes_skipped &&
967f6b04d2bSDavid Greenman 		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
968f6b04d2bSDavid Greenman 			if (!vfs_update_wakeup) {
969f6b04d2bSDavid Greenman 				vfs_update_wakeup = 1;
97024a1cce3SDavid Greenman 				wakeup(&vfs_update_wakeup);
971f6b04d2bSDavid Greenman 			}
972f6b04d2bSDavid Greenman 		}
97338efa82bSJohn Dyson #if !defined(NO_SWAPPING)
97438efa82bSJohn Dyson 		if (vm_swapping_enabled &&
97538efa82bSJohn Dyson 			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
9764c1f8ee9SDavid Greenman 			vm_req_vmdaemon();
977b18bfc3dSJohn Dyson 			vm_pageout_req_swapout = 1;
9784c1f8ee9SDavid Greenman 		}
9795afce282SDavid Greenman #endif
9804c1f8ee9SDavid Greenman 	}
9814c1f8ee9SDavid Greenman 
982f6b04d2bSDavid Greenman 
983f6b04d2bSDavid Greenman 	/*
9840d94caffSDavid Greenman 	 * make sure that we have swap space -- if we are low on memory and
9850d94caffSDavid Greenman 	 * swap -- then kill the biggest process.
9865663e6deSDavid Greenman 	 */
9875663e6deSDavid Greenman 	if ((vm_swap_size == 0 || swap_pager_full) &&
9880d94caffSDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
9895663e6deSDavid Greenman 		bigproc = NULL;
9905663e6deSDavid Greenman 		bigsize = 0;
9911b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
9925663e6deSDavid Greenman 			/*
9935663e6deSDavid Greenman 			 * if this is a system process, skip it
9945663e6deSDavid Greenman 			 */
99579221631SDavid Greenman 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
99679221631SDavid Greenman 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
9975663e6deSDavid Greenman 				continue;
9985663e6deSDavid Greenman 			}
9995663e6deSDavid Greenman 			/*
10005663e6deSDavid Greenman 			 * if the process is in a non-running type state,
10015663e6deSDavid Greenman 			 * don't touch it.
10025663e6deSDavid Greenman 			 */
10035663e6deSDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
10045663e6deSDavid Greenman 				continue;
10055663e6deSDavid Greenman 			}
10065663e6deSDavid Greenman 			/*
10075663e6deSDavid Greenman 			 * get the process size
10085663e6deSDavid Greenman 			 */
10095663e6deSDavid Greenman 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
10105663e6deSDavid Greenman 			/*
10115663e6deSDavid Greenman 			 * if this process is bigger than the biggest one,
10125663e6deSDavid Greenman 			 * remember it.
10135663e6deSDavid Greenman 			 */
10145663e6deSDavid Greenman 			if (size > bigsize) {
10155663e6deSDavid Greenman 				bigproc = p;
10165663e6deSDavid Greenman 				bigsize = size;
10175663e6deSDavid Greenman 			}
10185663e6deSDavid Greenman 		}
10195663e6deSDavid Greenman 		if (bigproc != NULL) {
1020729b1e51SDavid Greenman 			killproc(bigproc, "out of swap space");
10215663e6deSDavid Greenman 			bigproc->p_estcpu = 0;
10225663e6deSDavid Greenman 			bigproc->p_nice = PRIO_MIN;
10235663e6deSDavid Greenman 			resetpriority(bigproc);
102424a1cce3SDavid Greenman 			wakeup(&cnt.v_free_count);
10255663e6deSDavid Greenman 		}
10265663e6deSDavid Greenman 	}
102726f9a767SRodney W. Grimes 	return force_wakeup;
102826f9a767SRodney W. Grimes }
102926f9a767SRodney W. Grimes 
1030dc2efb27SJohn Dyson /*
1031dc2efb27SJohn Dyson  * This routine tries to maintain the pseudo LRU active queue,
1032dc2efb27SJohn Dyson  * so that during long periods of time when there is no paging,
1033dc2efb27SJohn Dyson  * some statistics accumulation still occurs.  This code
1034dc2efb27SJohn Dyson  * helps the situation where paging just starts to occur.
1035dc2efb27SJohn Dyson  */
1036dc2efb27SJohn Dyson static void
1037dc2efb27SJohn Dyson vm_pageout_page_stats()
1038dc2efb27SJohn Dyson {
1039dc2efb27SJohn Dyson 	int s;
1040dc2efb27SJohn Dyson 	vm_page_t m,next;
1041dc2efb27SJohn Dyson 	int pcount,tpcount;		/* Number of pages to check */
1042dc2efb27SJohn Dyson 	static int fullintervalcount = 0;
1043dc2efb27SJohn Dyson 
1044dc2efb27SJohn Dyson 	pcount = cnt.v_active_count;
1045dc2efb27SJohn Dyson 	fullintervalcount += vm_pageout_stats_interval;
1046dc2efb27SJohn Dyson 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1047dc2efb27SJohn Dyson 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1048dc2efb27SJohn Dyson 		if (pcount > tpcount)
1049dc2efb27SJohn Dyson 			pcount = tpcount;
1050dc2efb27SJohn Dyson 	}
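	/*
	 * Editor's note (added comment): between full scans, only a fraction
	 * of the active queue is examined.  For example (hypothetical
	 * numbers), with vm_pageout_stats_max = 512, v_active_count = 4000
	 * and v_page_count = 8192, tpcount = (512 * 4000) / 8192 = 250, so
	 * at most 250 active pages are checked on this partial pass.
	 */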
1051dc2efb27SJohn Dyson 
1052dc2efb27SJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
1053dc2efb27SJohn Dyson 	while ((m != NULL) && (pcount-- > 0)) {
10547e006499SJohn Dyson 		int actcount;
1055dc2efb27SJohn Dyson 
1056dc2efb27SJohn Dyson 		if (m->queue != PQ_ACTIVE) {
1057dc2efb27SJohn Dyson 			break;
1058dc2efb27SJohn Dyson 		}
1059dc2efb27SJohn Dyson 
1060dc2efb27SJohn Dyson 		next = TAILQ_NEXT(m, pageq);
1061dc2efb27SJohn Dyson 		/*
1062dc2efb27SJohn Dyson 		 * Don't deactivate pages that are busy.
1063dc2efb27SJohn Dyson 		 */
1064dc2efb27SJohn Dyson 		if ((m->busy != 0) ||
1065dc2efb27SJohn Dyson 		    (m->flags & PG_BUSY) ||
1066dc2efb27SJohn Dyson 		    (m->hold_count != 0)) {
1067dc2efb27SJohn Dyson 			s = splvm();
1068dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1069dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1070dc2efb27SJohn Dyson 			splx(s);
1071dc2efb27SJohn Dyson 			m = next;
1072dc2efb27SJohn Dyson 			continue;
1073dc2efb27SJohn Dyson 		}
1074dc2efb27SJohn Dyson 
10757e006499SJohn Dyson 		actcount = 0;
1076dc2efb27SJohn Dyson 		if (m->flags & PG_REFERENCED) {
1077dc2efb27SJohn Dyson 			m->flags &= ~PG_REFERENCED;
10787e006499SJohn Dyson 			actcount += 1;
1079dc2efb27SJohn Dyson 		}
1080dc2efb27SJohn Dyson 
10817e006499SJohn Dyson 		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
10827e006499SJohn Dyson 		if (actcount) {
10837e006499SJohn Dyson 			m->act_count += ACT_ADVANCE + actcount;
1084dc2efb27SJohn Dyson 			if (m->act_count > ACT_MAX)
1085dc2efb27SJohn Dyson 				m->act_count = ACT_MAX;
1086dc2efb27SJohn Dyson 			s = splvm();
1087dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1088dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1089dc2efb27SJohn Dyson 			splx(s);
1090dc2efb27SJohn Dyson 		} else {
1091dc2efb27SJohn Dyson 			if (m->act_count == 0) {
10927e006499SJohn Dyson 				/*
10937e006499SJohn Dyson 				 * We turn off page access so that we get more accurate
10947e006499SJohn Dyson 				 * RSS stats.  We don't do this during normal page
10957e006499SJohn Dyson 				 * deactivation when the system is under VM load, because
10967e006499SJohn Dyson 				 * the cost of the large number of page protect operations
10977e006499SJohn Dyson 				 * would outweigh the benefit.
10987e006499SJohn Dyson 				 */
1099dc2efb27SJohn Dyson 				vm_page_protect(m, VM_PROT_NONE);
1100dc2efb27SJohn Dyson 				vm_page_deactivate(m);
1101dc2efb27SJohn Dyson 			} else {
1102dc2efb27SJohn Dyson 				m->act_count -= min(m->act_count, ACT_DECLINE);
1103dc2efb27SJohn Dyson 				s = splvm();
1104dc2efb27SJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1105dc2efb27SJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1106dc2efb27SJohn Dyson 				splx(s);
1107dc2efb27SJohn Dyson 			}
1108dc2efb27SJohn Dyson 		}
1109dc2efb27SJohn Dyson 
1110dc2efb27SJohn Dyson 		m = next;
1111dc2efb27SJohn Dyson 	}
1112dc2efb27SJohn Dyson }
1113dc2efb27SJohn Dyson 
1114dc2efb27SJohn Dyson 
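/*
 * Derive the free-page thresholds (v_free_min, v_pageout_free_min and
 * v_free_reserved) from the given page count.  Returns 0, changing
 * nothing, if asked to calculate for fewer pages than the system has.
 */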
1115b182ec9eSJohn Dyson static int
1116b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1117b182ec9eSJohn Dyson vm_size_t count;
1118b182ec9eSJohn Dyson {
1119b182ec9eSJohn Dyson 	if (count < cnt.v_page_count)
1120b182ec9eSJohn Dyson 		 return 0;
1121b182ec9eSJohn Dyson 	/*
1122b182ec9eSJohn Dyson 	 * free_reserved needs to include enough for the largest swap pager
1123b182ec9eSJohn Dyson 	 * structures plus enough for any pv_entry structs when paging.
1124b182ec9eSJohn Dyson 	 */
1125b182ec9eSJohn Dyson 	if (cnt.v_page_count > 1024)
1126b182ec9eSJohn Dyson 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1127b182ec9eSJohn Dyson 	else
1128b182ec9eSJohn Dyson 		cnt.v_free_min = 4;
1129f35329acSJohn Dyson 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1130f35329acSJohn Dyson 		cnt.v_interrupt_free_min;
1131f35329acSJohn Dyson 	cnt.v_free_reserved = vm_pageout_page_count +
1132a2f4a846SJohn Dyson 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1133a2f4a846SJohn Dyson 	cnt.v_free_min += cnt.v_free_reserved;
1134b182ec9eSJohn Dyson 	return 1;
1135b182ec9eSJohn Dyson }
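/*
 * Illustrative arithmetic only, assuming a hypothetical 16384-page machine
 * with 4K pages and a 64K MAXBSIZE: v_free_min starts at
 * 4 + (16384 - 1024) / 200 = 80 pages; v_pageout_free_min is
 * (2 * 65536) / 4096 + v_interrupt_free_min (2, as set below) = 34 pages;
 * v_free_reserved then adds vm_pageout_page_count, those 34 pages,
 * 16384 / 768 = 21 pages and PQ_L2_SIZE, after which the reserve is folded
 * back into v_free_min.
 */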
1136b182ec9eSJohn Dyson 
1137b182ec9eSJohn Dyson 
1138df8bae1dSRodney W. Grimes /*
1139df8bae1dSRodney W. Grimes  *	vm_pageout is the high level pageout daemon.
1140df8bae1dSRodney W. Grimes  */
11412b14f991SJulian Elischer static void
114226f9a767SRodney W. Grimes vm_pageout()
1143df8bae1dSRodney W. Grimes {
1144df8bae1dSRodney W. Grimes 	/*
1145df8bae1dSRodney W. Grimes 	 * Initialize some paging parameters.
1146df8bae1dSRodney W. Grimes 	 */
1147df8bae1dSRodney W. Grimes 
1148f6b04d2bSDavid Greenman 	cnt.v_interrupt_free_min = 2;
1149f35329acSJohn Dyson 	if (cnt.v_page_count < 2000)
1150f35329acSJohn Dyson 		vm_pageout_page_count = 8;
1151f6b04d2bSDavid Greenman 
1152b182ec9eSJohn Dyson 	vm_pageout_free_page_calc(cnt.v_page_count);
1153ed74321bSDavid Greenman 	/*
11540d94caffSDavid Greenman 	 * v_free_target is the free-page level the pageout daemon tries to
11550d94caffSDavid Greenman 	 * restore; keep it comfortably above the minimum plus the reserve.
1156ed74321bSDavid Greenman 	 */
11570d94caffSDavid Greenman 	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
11586f2b142eSDavid Greenman 
11596ac5bfdbSJohn Dyson 	if (cnt.v_free_count > 1024) {
11600d94caffSDavid Greenman 		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
11616f2b142eSDavid Greenman 		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
11626f2b142eSDavid Greenman 		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
11630d94caffSDavid Greenman 	} else {
11640d94caffSDavid Greenman 		cnt.v_cache_min = 0;
11650d94caffSDavid Greenman 		cnt.v_cache_max = 0;
11666f2b142eSDavid Greenman 		cnt.v_inactive_target = cnt.v_free_count / 4;
11670d94caffSDavid Greenman 	}
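	/*
	 * Illustrative numbers only: with a hypothetical 4096 free pages at
	 * this point, v_cache_max = (4096 - 1024) / 2 = 1536, v_cache_min =
	 * (4096 - 1024) / 8 = 384 and v_inactive_target = 2 * 384 + 192 = 960.
	 */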
1168df8bae1dSRodney W. Grimes 
1169df8bae1dSRodney W. Grimes 	/* XXX does not really belong here */
1170df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
1171df8bae1dSRodney W. Grimes 		vm_page_max_wired = cnt.v_free_count / 3;
1172df8bae1dSRodney W. Grimes 
1173dc2efb27SJohn Dyson 	if (vm_pageout_stats_max == 0)
1174dc2efb27SJohn Dyson 		vm_pageout_stats_max = cnt.v_free_target;
1175dc2efb27SJohn Dyson 
1176dc2efb27SJohn Dyson 	/*
1177dc2efb27SJohn Dyson 	 * Set interval in seconds for stats scan.
1178dc2efb27SJohn Dyson 	 */
1179dc2efb27SJohn Dyson 	if (vm_pageout_stats_interval == 0)
1180dc2efb27SJohn Dyson 		vm_pageout_stats_interval = 4;
1181dc2efb27SJohn Dyson 	if (vm_pageout_full_stats_interval == 0)
1182dc2efb27SJohn Dyson 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1183dc2efb27SJohn Dyson 
1184dc2efb27SJohn Dyson 
1185dc2efb27SJohn Dyson 	/*
1186dc2efb27SJohn Dyson 	 * Set maximum free per pass
1187dc2efb27SJohn Dyson 	 */
1188dc2efb27SJohn Dyson 	if (vm_pageout_stats_free_max == 0)
1189dc2efb27SJohn Dyson 		vm_pageout_stats_free_max = 25;
1190dc2efb27SJohn Dyson 
119170111b90SJohn Dyson 	vm_maxlaunder = (cnt.v_page_count > 1800 ? 32 : 16);
119226f9a767SRodney W. Grimes 
119324a1cce3SDavid Greenman 	swap_pager_swap_init();
1194df8bae1dSRodney W. Grimes 	/*
11950d94caffSDavid Greenman 	 * The pageout daemon is never done, so loop forever.
1196df8bae1dSRodney W. Grimes 	 */
1197df8bae1dSRodney W. Grimes 	while (TRUE) {
119885a376ebSJohn Dyson 		int inactive_target;
1199dc2efb27SJohn Dyson 		int error;
1200b18bfc3dSJohn Dyson 		int s = splvm();
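		/*
		 * If nothing is pending, or free plus cache pages are still
		 * above the minimum, sleep until woken or until the stats
		 * interval expires; a timeout with no pending request only
		 * triggers a statistics pass.  If a shortage is already
		 * pending, nap briefly before rescanning.
		 */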
1201f919ebdeSDavid Greenman 		if (!vm_pages_needed ||
1202545901f7SJohn Dyson 			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
1203f919ebdeSDavid Greenman 			vm_pages_needed = 0;
1204dc2efb27SJohn Dyson 			error = tsleep(&vm_pages_needed,
1205dc2efb27SJohn Dyson 				PVM, "psleep", vm_pageout_stats_interval * hz);
1206dc2efb27SJohn Dyson 			if (error && !vm_pages_needed) {
1207dc2efb27SJohn Dyson 				splx(s);
1208dc2efb27SJohn Dyson 				vm_pageout_page_stats();
1209dc2efb27SJohn Dyson 				continue;
1210dc2efb27SJohn Dyson 			}
1211dc2efb27SJohn Dyson 		} else if (vm_pages_needed) {
121238efa82bSJohn Dyson 			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
1213f919ebdeSDavid Greenman 		}
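		/*
		 * Recompute the inactive target each pass: a quarter of the
		 * unwired pages, but never less than twice v_free_min.
		 */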
1214b182ec9eSJohn Dyson 		inactive_target =
1215b182ec9eSJohn Dyson 			(cnt.v_page_count - cnt.v_wire_count) / 4;
1216b182ec9eSJohn Dyson 		if (inactive_target < 2*cnt.v_free_min)
1217b182ec9eSJohn Dyson 			inactive_target = 2*cnt.v_free_min;
1218b182ec9eSJohn Dyson 		cnt.v_inactive_target = inactive_target;
1219b18bfc3dSJohn Dyson 		if (vm_pages_needed)
1220b18bfc3dSJohn Dyson 			cnt.v_pdwakeups++;
1221f919ebdeSDavid Greenman 		vm_pages_needed = 0;
1222f919ebdeSDavid Greenman 		splx(s);
1223df8bae1dSRodney W. Grimes 		vm_pager_sync();
12240d94caffSDavid Greenman 		vm_pageout_scan();
122526f9a767SRodney W. Grimes 		vm_pager_sync();
122624a1cce3SDavid Greenman 		wakeup(&cnt.v_free_count);
1227df8bae1dSRodney W. Grimes 	}
1228df8bae1dSRodney W. Grimes }
122926f9a767SRodney W. Grimes 
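/*
 * Request a pageout daemon run.  Does nothing if a request is already
 * pending or if the caller is the pageout daemon itself.
 */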
1230e0c5a895SJohn Dyson void
1231e0c5a895SJohn Dyson pagedaemon_wakeup()
1232e0c5a895SJohn Dyson {
1233e0c5a895SJohn Dyson 	if (!vm_pages_needed && curproc != pageproc) {
1234e0c5a895SJohn Dyson 		vm_pages_needed++;
1235e0c5a895SJohn Dyson 		wakeup(&vm_pages_needed);
1236e0c5a895SJohn Dyson 	}
1237e0c5a895SJohn Dyson }
1238e0c5a895SJohn Dyson 
123938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
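/*
 * Wake the vm_daemon, rate-limited to roughly once per second and
 * tolerant of the tick counter wrapping.
 */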
12405afce282SDavid Greenman static void
12415afce282SDavid Greenman vm_req_vmdaemon()
12425afce282SDavid Greenman {
12435afce282SDavid Greenman 	static int lastrun = 0;
12445afce282SDavid Greenman 
1245b18bfc3dSJohn Dyson 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
12465afce282SDavid Greenman 		wakeup(&vm_daemon_needed);
12475afce282SDavid Greenman 		lastrun = ticks;
12485afce282SDavid Greenman 	}
12495afce282SDavid Greenman }
12505afce282SDavid Greenman 
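/*
 * The vm_daemon performs the heavier housekeeping: swapping processes out
 * when requested, deactivating pages of processes that exceed their RSS
 * limit or are swapped out, and discarding cached objects that no longer
 * have any resident pages.
 */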
12512b14f991SJulian Elischer static void
12524f9fb771SBruce Evans vm_daemon()
12530d94caffSDavid Greenman {
12542fe6e4d7SDavid Greenman 	vm_object_t object;
12552fe6e4d7SDavid Greenman 	struct proc *p;
12560d94caffSDavid Greenman 
12572fe6e4d7SDavid Greenman 	while (TRUE) {
125824a1cce3SDavid Greenman 		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
12594c1f8ee9SDavid Greenman 		if (vm_pageout_req_swapout) {
12606306c897SDavid Greenman 			swapout_procs();
12614c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 0;
12624c1f8ee9SDavid Greenman 		}
12632fe6e4d7SDavid Greenman 		/*
12640d94caffSDavid Greenman 		 * Scan the processes: deactivate pages of any process that
12650d94caffSDavid Greenman 		 * exceeds its RSS rlimit or is swapped out.
12662fe6e4d7SDavid Greenman 		 */
12672fe6e4d7SDavid Greenman 
12681b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
12692fe6e4d7SDavid Greenman 			quad_t limit;
12702fe6e4d7SDavid Greenman 			vm_offset_t size;
12712fe6e4d7SDavid Greenman 
12722fe6e4d7SDavid Greenman 			/*
12732fe6e4d7SDavid Greenman 			 * if this is a system process or a process that is
12742fe6e4d7SDavid Greenman 			 * exiting, skip it.
12752fe6e4d7SDavid Greenman 			 */
12762fe6e4d7SDavid Greenman 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
12772fe6e4d7SDavid Greenman 				continue;
12782fe6e4d7SDavid Greenman 			}
12792fe6e4d7SDavid Greenman 			/*
12802fe6e4d7SDavid Greenman 			 * if the process is in a non-running type state,
12812fe6e4d7SDavid Greenman 			 * don't touch it.
12822fe6e4d7SDavid Greenman 			 */
12832fe6e4d7SDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
12842fe6e4d7SDavid Greenman 				continue;
12852fe6e4d7SDavid Greenman 			}
12862fe6e4d7SDavid Greenman 			/*
12872fe6e4d7SDavid Greenman 			 * Compute the RSS limit: the smaller of the soft and hard limits.
12882fe6e4d7SDavid Greenman 			 */
12892fe6e4d7SDavid Greenman 			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
12902fe6e4d7SDavid Greenman 			    p->p_rlimit[RLIMIT_RSS].rlim_max);
12912fe6e4d7SDavid Greenman 
12922fe6e4d7SDavid Greenman 			/*
12930d94caffSDavid Greenman 			 * Let processes that are swapped out really be swapped
12940d94caffSDavid Greenman 			 * out: set the limit to nothing, which forces their
12950d94caffSDavid Greenman 			 * pages to be deactivated.
12962fe6e4d7SDavid Greenman 			 */
12972fe6e4d7SDavid Greenman 			if ((p->p_flag & P_INMEM) == 0)
12980d94caffSDavid Greenman 				limit = 0;	/* XXX */
12992fe6e4d7SDavid Greenman 
1300a91c5a7eSJohn Dyson 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
13012fe6e4d7SDavid Greenman 			if (limit >= 0 && size >= limit) {
13022fe6e4d7SDavid Greenman 				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
130338efa82bSJohn Dyson 				    (vm_pindex_t)(limit >> PAGE_SHIFT) );
13042fe6e4d7SDavid Greenman 			}
13052fe6e4d7SDavid Greenman 		}
13062fe6e4d7SDavid Greenman 
13070d94caffSDavid Greenman 		/*
13080d94caffSDavid Greenman 		 * Remove cached objects that no longer have any resident pages.
13090d94caffSDavid Greenman 		 */
13100d94caffSDavid Greenman restart:
1311b18bfc3dSJohn Dyson 		object = TAILQ_FIRST(&vm_object_cached_list);
13122fe6e4d7SDavid Greenman 		while (object) {
13132fe6e4d7SDavid Greenman 			 * If there are no resident pages, take a reference and
			 * let pager_cache() get rid of the object; restart the
			 * scan, since the cached list may have changed under us.
13142fe6e4d7SDavid Greenman 			 * if there are no resident pages -- get rid of the object
13152fe6e4d7SDavid Greenman 			 */
13162fe6e4d7SDavid Greenman 			if (object->resident_page_count == 0) {
131724a1cce3SDavid Greenman 				vm_object_reference(object);
13182fe6e4d7SDavid Greenman 				pager_cache(object, FALSE);
13192fe6e4d7SDavid Greenman 				goto restart;
13202fe6e4d7SDavid Greenman 			}
1321b18bfc3dSJohn Dyson 			object = TAILQ_NEXT(object, cached_list);
13222fe6e4d7SDavid Greenman 		}
132324a1cce3SDavid Greenman 	}
13242fe6e4d7SDavid Greenman }
132538efa82bSJohn Dyson #endif
1326