xref: /freebsd/sys/vm/vm_pageout.c (revision ceb0cf87e893efc7331317d58fa5ddeadd8fa2a0)
1df8bae1dSRodney W. Grimes /*
226f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
326f9a767SRodney W. Grimes  * All rights reserved.
426f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
726f9a767SRodney W. Grimes  * All rights reserved.
8df8bae1dSRodney W. Grimes  *
9df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
10df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
21df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
403c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  *
43df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44df8bae1dSRodney W. Grimes  * All rights reserved.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
49df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
50df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
51df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
52df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61df8bae1dSRodney W. Grimes  *  School of Computer Science
62df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
63df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
66df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
6726f9a767SRodney W. Grimes  *
68ceb0cf87SJohn Dyson  * $Id: vm_pageout.c,v 1.102 1997/12/05 05:41:06 dyson Exp $
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75df8bae1dSRodney W. Grimes #include <sys/param.h>
7626f9a767SRodney W. Grimes #include <sys/systm.h>
77b5e8ce9fSBruce Evans #include <sys/kernel.h>
7826f9a767SRodney W. Grimes #include <sys/proc.h>
7926f9a767SRodney W. Grimes #include <sys/resourcevar.h>
80d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
81f6b04d2bSDavid Greenman #include <sys/vnode.h>
82efeaf95aSDavid Greenman #include <sys/vmmeter.h>
8338efa82bSJohn Dyson #include <sys/sysctl.h>
84df8bae1dSRodney W. Grimes 
85df8bae1dSRodney W. Grimes #include <vm/vm.h>
86efeaf95aSDavid Greenman #include <vm/vm_param.h>
87efeaf95aSDavid Greenman #include <vm/vm_prot.h>
88996c772fSJohn Dyson #include <sys/lock.h>
89efeaf95aSDavid Greenman #include <vm/vm_object.h>
90df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
91efeaf95aSDavid Greenman #include <vm/vm_map.h>
92df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
9324a1cce3SDavid Greenman #include <vm/vm_pager.h>
9405f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
95efeaf95aSDavid Greenman #include <vm/vm_extern.h>
96df8bae1dSRodney W. Grimes 
972b14f991SJulian Elischer /*
982b14f991SJulian Elischer  * System initialization
992b14f991SJulian Elischer  */
1002b14f991SJulian Elischer 
1012b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
1022b14f991SJulian Elischer static void vm_pageout __P((void));
1033af76890SPoul-Henning Kamp static int vm_pageout_clean __P((vm_page_t, int));
1043af76890SPoul-Henning Kamp static int vm_pageout_scan __P((void));
105f35329acSJohn Dyson static int vm_pageout_free_page_calc __P((vm_size_t count));
1062b14f991SJulian Elischer struct proc *pageproc;
1072b14f991SJulian Elischer 
1082b14f991SJulian Elischer static struct kproc_desc page_kp = {
1092b14f991SJulian Elischer 	"pagedaemon",
1102b14f991SJulian Elischer 	vm_pageout,
1112b14f991SJulian Elischer 	&pageproc
1122b14f991SJulian Elischer };
1134590fd3aSDavid Greenman SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1142b14f991SJulian Elischer 
11538efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1162b14f991SJulian Elischer /* the kernel process "vm_daemon"*/
1172b14f991SJulian Elischer static void vm_daemon __P((void));
118f708ef1bSPoul-Henning Kamp static struct	proc *vmproc;
1192b14f991SJulian Elischer 
1202b14f991SJulian Elischer static struct kproc_desc vm_kp = {
1212b14f991SJulian Elischer 	"vmdaemon",
1222b14f991SJulian Elischer 	vm_daemon,
1232b14f991SJulian Elischer 	&vmproc
1242b14f991SJulian Elischer };
1254590fd3aSDavid Greenman SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
12638efa82bSJohn Dyson #endif
1272b14f991SJulian Elischer 
1282b14f991SJulian Elischer 
129df8bae1dSRodney W. Grimes int vm_pages_needed;		/* Event on which pageout daemon sleeps */
13026f9a767SRodney W. Grimes 
131c3cb3e12SDavid Greenman int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
13226f9a767SRodney W. Grimes 
13326f9a767SRodney W. Grimes extern int npendingio;
13438efa82bSJohn Dyson #if !defined(NO_SWAPPING)
135f708ef1bSPoul-Henning Kamp static int vm_pageout_req_swapout;	/* XXX */
136f708ef1bSPoul-Henning Kamp static int vm_daemon_needed;
13738efa82bSJohn Dyson #endif
13826f9a767SRodney W. Grimes extern int nswiodone;
1395663e6deSDavid Greenman extern int vm_swap_size;
140f6b04d2bSDavid Greenman extern int vfs_update_wakeup;
141dc2efb27SJohn Dyson int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
142dc2efb27SJohn Dyson int vm_pageout_full_stats_interval = 0;
143dc2efb27SJohn Dyson int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
14412ac6a1dSJohn Dyson int defer_swap_pageouts=0;
14512ac6a1dSJohn Dyson int disable_swap_pageouts=0;
14670111b90SJohn Dyson 
147ceb0cf87SJohn Dyson int max_page_launder=100;
14838efa82bSJohn Dyson #if defined(NO_SWAPPING)
149ceb0cf87SJohn Dyson int vm_swap_enabled=0;
150ceb0cf87SJohn Dyson int vm_swap_idle_enabled=0;
15138efa82bSJohn Dyson #else
152ceb0cf87SJohn Dyson int vm_swap_enabled=1;
153ceb0cf87SJohn Dyson int vm_swap_idle_enabled=0;
15438efa82bSJohn Dyson #endif
15538efa82bSJohn Dyson 
15638efa82bSJohn Dyson SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
15738efa82bSJohn Dyson 	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");
15838efa82bSJohn Dyson 
159dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
160dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");
161dc2efb27SJohn Dyson 
162dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
163dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");
164dc2efb27SJohn Dyson 
165dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
166dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");
167dc2efb27SJohn Dyson 
168dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
169dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");
170dc2efb27SJohn Dyson 
17138efa82bSJohn Dyson #if defined(NO_SWAPPING)
172ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
173ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_enabled, 0, "");
174ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
175ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
17638efa82bSJohn Dyson #else
177ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
178ceb0cf87SJohn Dyson 	CTLFLAG_RW, &vm_swap_enabled, 0, "");
179ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
180ceb0cf87SJohn Dyson 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "");
18138efa82bSJohn Dyson #endif
18226f9a767SRodney W. Grimes 
183ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
18412ac6a1dSJohn Dyson 	CTLFLAG_RW, &defer_swap_pageouts, 0, "");
18512ac6a1dSJohn Dyson 
186ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
18712ac6a1dSJohn Dyson 	CTLFLAG_RW, &disable_swap_pageouts, 0, "");
18812ac6a1dSJohn Dyson 
189ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
190ceb0cf87SJohn Dyson 	CTLFLAG_RW, &max_page_launder, 0, "");
19170111b90SJohn Dyson 
19226f9a767SRodney W. Grimes 
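/*
 * Maximum number of pages gathered into a single cluster for one
 * pageout I/O request (see vm_pageout_clean() below).
 */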
193a316d390SJohn Dyson #define VM_PAGEOUT_PAGE_COUNT 16
194bbc0ec52SDavid Greenman int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
195df8bae1dSRodney W. Grimes 
196c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
197df8bae1dSRodney W. Grimes 
19838efa82bSJohn Dyson #if !defined(NO_SWAPPING)
19938efa82bSJohn Dyson typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
20038efa82bSJohn Dyson static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
201cd41fc12SDavid Greenman static freeer_fcn_t vm_pageout_object_deactivate_pages;
202cd41fc12SDavid Greenman static void vm_req_vmdaemon __P((void));
20338efa82bSJohn Dyson #endif
204dc2efb27SJohn Dyson static void vm_pageout_page_stats(void);
2055985940eSJohn Dyson void pmap_collect(void);
206cd41fc12SDavid Greenman 
20726f9a767SRodney W. Grimes /*
20826f9a767SRodney W. Grimes  * vm_pageout_clean:
20924a1cce3SDavid Greenman  *
2100d94caffSDavid Greenman  * Clean the page and remove it from the laundry.
21126f9a767SRodney W. Grimes  *
2120d94caffSDavid Greenman  * We set the busy bit to cause potential page faults on this page to
21326f9a767SRodney W. Grimes  * block.
21426f9a767SRodney W. Grimes  *
2150d94caffSDavid Greenman  * And we set pageout-in-progress to keep the object from disappearing
2160d94caffSDavid Greenman  * during pageout.  This guarantees that the page won't move from the
2170d94caffSDavid Greenman  * inactive queue.  (However, any other page on the inactive queue may
2180d94caffSDavid Greenman  * move!)
21926f9a767SRodney W. Grimes  */
2203af76890SPoul-Henning Kamp static int
22124a1cce3SDavid Greenman vm_pageout_clean(m, sync)
22224a1cce3SDavid Greenman 	vm_page_t m;
22324a1cce3SDavid Greenman 	int sync;
22424a1cce3SDavid Greenman {
22526f9a767SRodney W. Grimes 	register vm_object_t object;
226f35329acSJohn Dyson 	vm_page_t mc[2*vm_pageout_page_count];
22724a1cce3SDavid Greenman 	int pageout_count;
22824a1cce3SDavid Greenman 	int i, forward_okay, backward_okay, page_base;
229a316d390SJohn Dyson 	vm_pindex_t pindex = m->pindex;
23026f9a767SRodney W. Grimes 
23126f9a767SRodney W. Grimes 	object = m->object;
23224a1cce3SDavid Greenman 
23326f9a767SRodney W. Grimes 	/*
23424a1cce3SDavid Greenman 	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
23524a1cce3SDavid Greenman 	 * Try to avoid the deadlock.
23626f9a767SRodney W. Grimes 	 */
23724a1cce3SDavid Greenman 	if ((sync != VM_PAGEOUT_FORCE) &&
238f35329acSJohn Dyson 	    (object->type == OBJT_DEFAULT) &&
23924a1cce3SDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
24026f9a767SRodney W. Grimes 		return 0;
24126f9a767SRodney W. Grimes 
24224a1cce3SDavid Greenman 	/*
24324a1cce3SDavid Greenman 	 * Don't mess with the page if it's busy.
24424a1cce3SDavid Greenman 	 */
245f6b04d2bSDavid Greenman 	if ((!sync && m->hold_count != 0) ||
2460d94caffSDavid Greenman 	    ((m->busy != 0) || (m->flags & PG_BUSY)))
2470d94caffSDavid Greenman 		return 0;
2480d94caffSDavid Greenman 
24924a1cce3SDavid Greenman 	/*
25024a1cce3SDavid Greenman 	 * Try collapsing before it's too late.
25124a1cce3SDavid Greenman 	 */
25224a1cce3SDavid Greenman 	if (!sync && object->backing_object) {
25326f9a767SRodney W. Grimes 		vm_object_collapse(object);
25426f9a767SRodney W. Grimes 	}
2553c018e72SJohn Dyson 
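	/*
	 * The candidate page is placed at the midpoint of the mc[] array so
	 * that the forward scan below fills the slots above it and the
	 * backward scan fills the slots below it; page_base tracks the
	 * lowest slot actually used.
	 */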
256f35329acSJohn Dyson 	mc[vm_pageout_page_count] = m;
25726f9a767SRodney W. Grimes 	pageout_count = 1;
258f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
25924a1cce3SDavid Greenman 	forward_okay = TRUE;
260a316d390SJohn Dyson 	if (pindex != 0)
26124a1cce3SDavid Greenman 		backward_okay = TRUE;
26226f9a767SRodney W. Grimes 	else
26324a1cce3SDavid Greenman 		backward_okay = FALSE;
26424a1cce3SDavid Greenman 	/*
26524a1cce3SDavid Greenman 	 * Scan object for clusterable pages.
26624a1cce3SDavid Greenman 	 *
26724a1cce3SDavid Greenman 	 * We can cluster ONLY if: ->> the page is NOT
26824a1cce3SDavid Greenman 	 * clean, wired, busy, held, or mapped into a
26924a1cce3SDavid Greenman 	 * buffer, and one of the following:
27024a1cce3SDavid Greenman 	 * 1) The page is inactive, or a seldom used
27124a1cce3SDavid Greenman 	 *    active page.
27224a1cce3SDavid Greenman 	 * -or-
27324a1cce3SDavid Greenman 	 * 2) we force the issue.
27424a1cce3SDavid Greenman 	 */
27524a1cce3SDavid Greenman 	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
27624a1cce3SDavid Greenman 		vm_page_t p;
277f6b04d2bSDavid Greenman 
27824a1cce3SDavid Greenman 		/*
27924a1cce3SDavid Greenman 		 * See if forward page is clusterable.
28024a1cce3SDavid Greenman 		 */
28124a1cce3SDavid Greenman 		if (forward_okay) {
28224a1cce3SDavid Greenman 			/*
28324a1cce3SDavid Greenman 			 * Stop forward scan at end of object.
28424a1cce3SDavid Greenman 			 */
285a316d390SJohn Dyson 			if ((pindex + i) > object->size) {
28624a1cce3SDavid Greenman 				forward_okay = FALSE;
28724a1cce3SDavid Greenman 				goto do_backward;
288f6b04d2bSDavid Greenman 			}
289a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex + i);
29024a1cce3SDavid Greenman 			if (p) {
2915070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
2925070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
29324a1cce3SDavid Greenman 					forward_okay = FALSE;
29424a1cce3SDavid Greenman 					goto do_backward;
295f6b04d2bSDavid Greenman 				}
29624a1cce3SDavid Greenman 				vm_page_test_dirty(p);
29724a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
298bd7e5f99SJohn Dyson 				    ((p->queue == PQ_INACTIVE) ||
29924a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
30024a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
30124a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
302f35329acSJohn Dyson 					mc[vm_pageout_page_count + i] = p;
30324a1cce3SDavid Greenman 					pageout_count++;
30424a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
30524a1cce3SDavid Greenman 						break;
30624a1cce3SDavid Greenman 				} else {
30724a1cce3SDavid Greenman 					forward_okay = FALSE;
308f6b04d2bSDavid Greenman 				}
30924a1cce3SDavid Greenman 			} else {
31024a1cce3SDavid Greenman 				forward_okay = FALSE;
31124a1cce3SDavid Greenman 			}
31224a1cce3SDavid Greenman 		}
31324a1cce3SDavid Greenman do_backward:
31424a1cce3SDavid Greenman 		/*
31524a1cce3SDavid Greenman 		 * See if backward page is clusterable.
31624a1cce3SDavid Greenman 		 */
31724a1cce3SDavid Greenman 		if (backward_okay) {
31824a1cce3SDavid Greenman 			/*
31924a1cce3SDavid Greenman 			 * Stop backward scan at beginning of object.
32024a1cce3SDavid Greenman 			 */
321a316d390SJohn Dyson 			if ((pindex - i) == 0) {
32224a1cce3SDavid Greenman 				backward_okay = FALSE;
32324a1cce3SDavid Greenman 			}
324a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex - i);
32524a1cce3SDavid Greenman 			if (p) {
3265070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
3275070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
32824a1cce3SDavid Greenman 					backward_okay = FALSE;
32924a1cce3SDavid Greenman 					continue;
33024a1cce3SDavid Greenman 				}
33124a1cce3SDavid Greenman 				vm_page_test_dirty(p);
33224a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
333bd7e5f99SJohn Dyson 				    ((p->queue == PQ_INACTIVE) ||
33424a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
33524a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
33624a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
337f35329acSJohn Dyson 					mc[vm_pageout_page_count - i] = p;
33824a1cce3SDavid Greenman 					pageout_count++;
33924a1cce3SDavid Greenman 					page_base--;
34024a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
34124a1cce3SDavid Greenman 						break;
34224a1cce3SDavid Greenman 				} else {
34324a1cce3SDavid Greenman 					backward_okay = FALSE;
34424a1cce3SDavid Greenman 				}
34524a1cce3SDavid Greenman 			} else {
34624a1cce3SDavid Greenman 				backward_okay = FALSE;
34724a1cce3SDavid Greenman 			}
348f6b04d2bSDavid Greenman 		}
349f6b04d2bSDavid Greenman 	}
350f6b04d2bSDavid Greenman 
35167bf6868SJohn Dyson 	/*
35267bf6868SJohn Dyson 	 * we allow reads during pageouts...
35367bf6868SJohn Dyson 	 */
35424a1cce3SDavid Greenman 	for (i = page_base; i < (page_base + pageout_count); i++) {
35524a1cce3SDavid Greenman 		mc[i]->flags |= PG_BUSY;
35667bf6868SJohn Dyson 		vm_page_protect(mc[i], VM_PROT_READ);
35726f9a767SRodney W. Grimes 	}
35826f9a767SRodney W. Grimes 
359aef922f5SJohn Dyson 	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
360aef922f5SJohn Dyson }
361aef922f5SJohn Dyson 
362aef922f5SJohn Dyson int
363aef922f5SJohn Dyson vm_pageout_flush(mc, count, sync)
364aef922f5SJohn Dyson 	vm_page_t *mc;
365aef922f5SJohn Dyson 	int count;
366aef922f5SJohn Dyson 	int sync;
367aef922f5SJohn Dyson {
368aef922f5SJohn Dyson 	register vm_object_t object;
369aef922f5SJohn Dyson 	int pageout_status[count];
370aef922f5SJohn Dyson 	int anyok = 0;
371aef922f5SJohn Dyson 	int i;
372aef922f5SJohn Dyson 
373aef922f5SJohn Dyson 	object = mc[0]->object;
374aef922f5SJohn Dyson 	object->paging_in_progress += count;
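	/*
	 * Each of these paging-in-progress references is dropped below via
	 * vm_object_pip_wakeup() for every page whose pageout did not return
	 * VM_PAGER_PEND; pending pages keep their reference until the pager
	 * completes the I/O.
	 */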
375aef922f5SJohn Dyson 
376aef922f5SJohn Dyson 	vm_pager_put_pages(object, mc, count,
37726f9a767SRodney W. Grimes 	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
37826f9a767SRodney W. Grimes 	    pageout_status);
37926f9a767SRodney W. Grimes 
380aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
381aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
38224a1cce3SDavid Greenman 
38326f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
38426f9a767SRodney W. Grimes 		case VM_PAGER_OK:
38526f9a767SRodney W. Grimes 			++anyok;
38626f9a767SRodney W. Grimes 			break;
38726f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
38826f9a767SRodney W. Grimes 			++anyok;
38926f9a767SRodney W. Grimes 			break;
39026f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
39126f9a767SRodney W. Grimes 			/*
3920d94caffSDavid Greenman 			 * Page outside of range of object. Right now we
3930d94caffSDavid Greenman 			 * essentially lose the changes by pretending it
3940d94caffSDavid Greenman 			 * worked.
39526f9a767SRodney W. Grimes 			 */
39667bf6868SJohn Dyson 			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
39724a1cce3SDavid Greenman 			mt->dirty = 0;
39826f9a767SRodney W. Grimes 			break;
39926f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
40026f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
40126f9a767SRodney W. Grimes 			/*
4020d94caffSDavid Greenman 			 * If page couldn't be paged out, then reactivate the
4030d94caffSDavid Greenman 			 * If the page couldn't be paged out, then reactivate the
4040d94caffSDavid Greenman 			 * page so it doesn't clog the inactive list.  (We
4050d94caffSDavid Greenman 			 * will try paging it out again later).
406bd7e5f99SJohn Dyson 			if (mt->queue == PQ_INACTIVE)
40724a1cce3SDavid Greenman 				vm_page_activate(mt);
40826f9a767SRodney W. Grimes 			break;
40926f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
41026f9a767SRodney W. Grimes 			break;
41126f9a767SRodney W. Grimes 		}
41226f9a767SRodney W. Grimes 
41326f9a767SRodney W. Grimes 
41426f9a767SRodney W. Grimes 		/*
4150d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
4160d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
4170d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
4180d94caffSDavid Greenman 		 * collapse.
41926f9a767SRodney W. Grimes 		 */
42026f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
421f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
42224a1cce3SDavid Greenman 			PAGE_WAKEUP(mt);
42326f9a767SRodney W. Grimes 		}
42426f9a767SRodney W. Grimes 	}
42526f9a767SRodney W. Grimes 	return anyok;
42626f9a767SRodney W. Grimes }
42726f9a767SRodney W. Grimes 
42838efa82bSJohn Dyson #if !defined(NO_SWAPPING)
42926f9a767SRodney W. Grimes /*
43026f9a767SRodney W. Grimes  *	vm_pageout_object_deactivate_pages
43126f9a767SRodney W. Grimes  *
43226f9a767SRodney W. Grimes  *	deactivate enough pages to satisfy the inactive target
43326f9a767SRodney W. Grimes  *	requirements, or if vm_page_proc_limit is set, then
43426f9a767SRodney W. Grimes  *	deactivate all of the pages in the object and its
43524a1cce3SDavid Greenman  *	backing_objects.
43626f9a767SRodney W. Grimes  *
43726f9a767SRodney W. Grimes  *	The object and map must be locked.
43826f9a767SRodney W. Grimes  */
43938efa82bSJohn Dyson static void
44038efa82bSJohn Dyson vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
44126f9a767SRodney W. Grimes 	vm_map_t map;
44226f9a767SRodney W. Grimes 	vm_object_t object;
44338efa82bSJohn Dyson 	vm_pindex_t desired;
4440d94caffSDavid Greenman 	int map_remove_only;
44526f9a767SRodney W. Grimes {
44626f9a767SRodney W. Grimes 	register vm_page_t p, next;
44726f9a767SRodney W. Grimes 	int rcount;
44838efa82bSJohn Dyson 	int remove_mode;
4491eeaa1e3SJohn Dyson 	int s;
45026f9a767SRodney W. Grimes 
45124a1cce3SDavid Greenman 	if (object->type == OBJT_DEVICE)
45238efa82bSJohn Dyson 		return;
4538f895206SDavid Greenman 
45438efa82bSJohn Dyson 	while (object) {
45538efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
45638efa82bSJohn Dyson 			return;
45724a1cce3SDavid Greenman 		if (object->paging_in_progress)
45838efa82bSJohn Dyson 			return;
45926f9a767SRodney W. Grimes 
46038efa82bSJohn Dyson 		remove_mode = map_remove_only;
46138efa82bSJohn Dyson 		if (object->shadow_count > 1)
46238efa82bSJohn Dyson 			remove_mode = 1;
46326f9a767SRodney W. Grimes 	/*
46426f9a767SRodney W. Grimes 	 * scan the object's entire memory queue
46526f9a767SRodney W. Grimes 	 */
46626f9a767SRodney W. Grimes 		rcount = object->resident_page_count;
467b18bfc3dSJohn Dyson 		p = TAILQ_FIRST(&object->memq);
46826f9a767SRodney W. Grimes 		while (p && (rcount-- > 0)) {
4697e006499SJohn Dyson 			int actcount;
47038efa82bSJohn Dyson 			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
47138efa82bSJohn Dyson 				return;
472b18bfc3dSJohn Dyson 			next = TAILQ_NEXT(p, listq);
473a58d1fa1SDavid Greenman 			cnt.v_pdpages++;
4740d94caffSDavid Greenman 			if (p->wire_count != 0 ||
4750d94caffSDavid Greenman 			    p->hold_count != 0 ||
4760d94caffSDavid Greenman 			    p->busy != 0 ||
477bd7e5f99SJohn Dyson 			    (p->flags & PG_BUSY) ||
4780d94caffSDavid Greenman 			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
4790d94caffSDavid Greenman 				p = next;
4800d94caffSDavid Greenman 				continue;
4810d94caffSDavid Greenman 			}
482ef743ce6SJohn Dyson 
4837e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
4847e006499SJohn Dyson 			if (actcount) {
485ef743ce6SJohn Dyson 				p->flags |= PG_REFERENCED;
486c8c4b40cSJohn Dyson 			} else if (p->flags & PG_REFERENCED) {
4877e006499SJohn Dyson 				actcount = 1;
488ef743ce6SJohn Dyson 			}
489ef743ce6SJohn Dyson 
49038efa82bSJohn Dyson 			if ((p->queue != PQ_ACTIVE) &&
49138efa82bSJohn Dyson 				(p->flags & PG_REFERENCED)) {
492ef743ce6SJohn Dyson 				vm_page_activate(p);
4937e006499SJohn Dyson 				p->act_count += actcount;
494c8c4b40cSJohn Dyson 				p->flags &= ~PG_REFERENCED;
495c8c4b40cSJohn Dyson 			} else if (p->queue == PQ_ACTIVE) {
496ef743ce6SJohn Dyson 				if ((p->flags & PG_REFERENCED) == 0) {
497c8c4b40cSJohn Dyson 					p->act_count -= min(p->act_count, ACT_DECLINE);
498c8c4b40cSJohn Dyson 					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
499b18bfc3dSJohn Dyson 						vm_page_protect(p, VM_PROT_NONE);
50026f9a767SRodney W. Grimes 						vm_page_deactivate(p);
50126f9a767SRodney W. Grimes 					} else {
502c8c4b40cSJohn Dyson 						s = splvm();
503c8c4b40cSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
504c8c4b40cSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
505c8c4b40cSJohn Dyson 						splx(s);
506c8c4b40cSJohn Dyson 					}
507c8c4b40cSJohn Dyson 				} else {
508a647a309SDavid Greenman 					p->flags &= ~PG_REFERENCED;
50938efa82bSJohn Dyson 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
51038efa82bSJohn Dyson 						p->act_count += ACT_ADVANCE;
5111eeaa1e3SJohn Dyson 					s = splvm();
51226f9a767SRodney W. Grimes 					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
51326f9a767SRodney W. Grimes 					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
5141eeaa1e3SJohn Dyson 					splx(s);
51526f9a767SRodney W. Grimes 				}
516bd7e5f99SJohn Dyson 			} else if (p->queue == PQ_INACTIVE) {
517f919ebdeSDavid Greenman 				vm_page_protect(p, VM_PROT_NONE);
51826f9a767SRodney W. Grimes 			}
51926f9a767SRodney W. Grimes 			p = next;
52026f9a767SRodney W. Grimes 		}
52138efa82bSJohn Dyson 		object = object->backing_object;
52238efa82bSJohn Dyson 	}
52338efa82bSJohn Dyson 	return;
52426f9a767SRodney W. Grimes }
52526f9a767SRodney W. Grimes 
52626f9a767SRodney W. Grimes /*
52726f9a767SRodney W. Grimes  * deactivate some number of pages in a map; try to do it fairly, but
52826f9a767SRodney W. Grimes  * that is really hard to do.
52926f9a767SRodney W. Grimes  */
530cd41fc12SDavid Greenman static void
53138efa82bSJohn Dyson vm_pageout_map_deactivate_pages(map, desired)
53226f9a767SRodney W. Grimes 	vm_map_t map;
53338efa82bSJohn Dyson 	vm_pindex_t desired;
53426f9a767SRodney W. Grimes {
53526f9a767SRodney W. Grimes 	vm_map_entry_t tmpe;
53638efa82bSJohn Dyson 	vm_object_t obj, bigobj;
5370d94caffSDavid Greenman 
53826f9a767SRodney W. Grimes 	vm_map_reference(map);
539996c772fSJohn Dyson 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
54026f9a767SRodney W. Grimes 		vm_map_deallocate(map);
54126f9a767SRodney W. Grimes 		return;
54226f9a767SRodney W. Grimes 	}
54338efa82bSJohn Dyson 
54438efa82bSJohn Dyson 	bigobj = NULL;
54538efa82bSJohn Dyson 
54638efa82bSJohn Dyson 	/*
54738efa82bSJohn Dyson 	 * first, search out the biggest object, and try to free pages from
54838efa82bSJohn Dyson 	 * that.
54938efa82bSJohn Dyson 	 */
55026f9a767SRodney W. Grimes 	tmpe = map->header.next;
55138efa82bSJohn Dyson 	while (tmpe != &map->header) {
552afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
55338efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
55438efa82bSJohn Dyson 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
55538efa82bSJohn Dyson 				((bigobj == NULL) ||
55638efa82bSJohn Dyson 				 (bigobj->resident_page_count < obj->resident_page_count))) {
55738efa82bSJohn Dyson 				bigobj = obj;
55838efa82bSJohn Dyson 			}
55938efa82bSJohn Dyson 		}
56038efa82bSJohn Dyson 		tmpe = tmpe->next;
56138efa82bSJohn Dyson 	}
56238efa82bSJohn Dyson 
56338efa82bSJohn Dyson 	if (bigobj)
56438efa82bSJohn Dyson 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
56538efa82bSJohn Dyson 
56638efa82bSJohn Dyson 	/*
56738efa82bSJohn Dyson 	 * Next, hunt around for other pages to deactivate.  We actually
56838efa82bSJohn Dyson 	 * do this search sort of wrong -- .text first is not the best idea.
56938efa82bSJohn Dyson 	 */
57038efa82bSJohn Dyson 	tmpe = map->header.next;
57138efa82bSJohn Dyson 	while (tmpe != &map->header) {
57238efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
57338efa82bSJohn Dyson 			break;
574afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
57538efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
57601155bd7SDavid Greenman 			if (obj)
57738efa82bSJohn Dyson 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
57838efa82bSJohn Dyson 		}
57926f9a767SRodney W. Grimes 		tmpe = tmpe->next;
58026f9a767SRodney W. Grimes 	};
58138efa82bSJohn Dyson 
58238efa82bSJohn Dyson 	/*
58338efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out; this will free page
58438efa82bSJohn Dyson 	 * table pages.
58538efa82bSJohn Dyson 	 */
58638efa82bSJohn Dyson 	if (desired == 0)
58738efa82bSJohn Dyson 		pmap_remove(vm_map_pmap(map),
58838efa82bSJohn Dyson 			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
58938efa82bSJohn Dyson 	vm_map_unlock(map);
59026f9a767SRodney W. Grimes 	vm_map_deallocate(map);
59126f9a767SRodney W. Grimes 	return;
59226f9a767SRodney W. Grimes }
59338efa82bSJohn Dyson #endif
594df8bae1dSRodney W. Grimes 
595df8bae1dSRodney W. Grimes /*
596df8bae1dSRodney W. Grimes  *	vm_pageout_scan does the dirty work for the pageout daemon.
597df8bae1dSRodney W. Grimes  */
5983af76890SPoul-Henning Kamp static int
599df8bae1dSRodney W. Grimes vm_pageout_scan()
600df8bae1dSRodney W. Grimes {
601502ba6e4SJohn Dyson 	vm_page_t m, next;
60270111b90SJohn Dyson 	int page_shortage, addl_page_shortage, maxscan, pcount;
60370111b90SJohn Dyson 	int maxlaunder;
6044e39a515SPoul-Henning Kamp 	int pages_freed;
6055663e6deSDavid Greenman 	struct proc *p, *bigproc;
6065663e6deSDavid Greenman 	vm_offset_t size, bigsize;
607df8bae1dSRodney W. Grimes 	vm_object_t object;
60826f9a767SRodney W. Grimes 	int force_wakeup = 0;
6097e006499SJohn Dyson 	int actcount;
610f6b04d2bSDavid Greenman 	int vnodes_skipped = 0;
6111eeaa1e3SJohn Dyson 	int s;
6120d94caffSDavid Greenman 
613df8bae1dSRodney W. Grimes 	/*
6145985940eSJohn Dyson 	 * Do whatever cleanup the pmap code can.
6155985940eSJohn Dyson 	 */
6165985940eSJohn Dyson 	pmap_collect();
6175985940eSJohn Dyson 
6185985940eSJohn Dyson 	/*
6190d94caffSDavid Greenman 	 * Start scanning the inactive queue for pages we can free. We keep
6200d94caffSDavid Greenman 	 * scanning until we have enough free pages or we have scanned through
6210d94caffSDavid Greenman 	 * the entire queue.  If we encounter dirty pages, we start cleaning
6220d94caffSDavid Greenman 	 * them.
623df8bae1dSRodney W. Grimes 	 */
624df8bae1dSRodney W. Grimes 
625b182ec9eSJohn Dyson 	pages_freed = 0;
626f35329acSJohn Dyson 	addl_page_shortage = 0;
627b182ec9eSJohn Dyson 
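	/*
	 * Limit the number of dirty pages we are willing to launder in this
	 * pass to min(max_page_launder, v_inactive_target); max_page_launder
	 * is tunable via the vm.max_page_launder sysctl declared above.
	 */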
628ceb0cf87SJohn Dyson 	if (max_page_launder == 0)
629ceb0cf87SJohn Dyson 		max_page_launder = 1;
630ceb0cf87SJohn Dyson 	maxlaunder = (cnt.v_inactive_target > max_page_launder) ?
631ceb0cf87SJohn Dyson 	    max_page_launder : cnt.v_inactive_target;
63270111b90SJohn Dyson 
63367bf6868SJohn Dyson rescan0:
634f6b04d2bSDavid Greenman 	maxscan = cnt.v_inactive_count;
635b182ec9eSJohn Dyson 	for( m = TAILQ_FIRST(&vm_page_queue_inactive);
636b182ec9eSJohn Dyson 
637b182ec9eSJohn Dyson 		(m != NULL) && (maxscan-- > 0) &&
638b18bfc3dSJohn Dyson 			((cnt.v_cache_count + cnt.v_free_count) <
639b182ec9eSJohn Dyson 			(cnt.v_cache_min + cnt.v_free_target));
640b182ec9eSJohn Dyson 
641b182ec9eSJohn Dyson 		m = next) {
642df8bae1dSRodney W. Grimes 
643a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
644b182ec9eSJohn Dyson 
645f35329acSJohn Dyson 		if (m->queue != PQ_INACTIVE) {
64667bf6868SJohn Dyson 			goto rescan0;
647f35329acSJohn Dyson 		}
648b182ec9eSJohn Dyson 
649b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
650df8bae1dSRodney W. Grimes 
651b182ec9eSJohn Dyson 		if (m->hold_count) {
652f35329acSJohn Dyson 			s = splvm();
653b182ec9eSJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
654b182ec9eSJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
655f35329acSJohn Dyson 			splx(s);
656b182ec9eSJohn Dyson 			addl_page_shortage++;
657b182ec9eSJohn Dyson 			continue;
658df8bae1dSRodney W. Grimes 		}
65926f9a767SRodney W. Grimes 		/*
660b18bfc3dSJohn Dyson 		 * Don't mess with busy pages; keep them at the front of the
661b18bfc3dSJohn Dyson 		 * queue, since they are most likely being paged out.
66226f9a767SRodney W. Grimes 		 */
663bd7e5f99SJohn Dyson 		if (m->busy || (m->flags & PG_BUSY)) {
664b182ec9eSJohn Dyson 			addl_page_shortage++;
66526f9a767SRodney W. Grimes 			continue;
66626f9a767SRodney W. Grimes 		}
667bd7e5f99SJohn Dyson 
6687e006499SJohn Dyson 		/*
6697e006499SJohn Dyson 		 * If the object is not being used, we ignore previous references.
6707e006499SJohn Dyson 		 */
6710d94caffSDavid Greenman 		if (m->object->ref_count == 0) {
6720d94caffSDavid Greenman 			m->flags &= ~PG_REFERENCED;
67367bf6868SJohn Dyson 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
6747e006499SJohn Dyson 
6757e006499SJohn Dyson 		/*
6767e006499SJohn Dyson 		 * Otherwise, if the page has been referenced while in the inactive
6777e006499SJohn Dyson 		 * queue, we bump the "activation count" upwards, making it less
6787e006499SJohn Dyson 		 * likely that the page will be added back to the inactive queue
6797e006499SJohn Dyson 		 * prematurely.  Here we check the page tables (or emulated
6807e006499SJohn Dyson 		 * bits, if any), since the upper level VM system does not know
6817e006499SJohn Dyson 		 * anything about existing references.
6827e006499SJohn Dyson 		 */
683ef743ce6SJohn Dyson 		} else if (((m->flags & PG_REFERENCED) == 0) &&
6847e006499SJohn Dyson 			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
685ef743ce6SJohn Dyson 			vm_page_activate(m);
6867e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE);
687ef743ce6SJohn Dyson 			continue;
6882fe6e4d7SDavid Greenman 		}
689ef743ce6SJohn Dyson 
6907e006499SJohn Dyson 		/*
6917e006499SJohn Dyson 		 * If the upper level VM system knows about any page references,
6927e006499SJohn Dyson 		 * we activate the page.  We also set the "activation count" higher
6937e006499SJohn Dyson 		 * than normal so that we are less likely to place pages back onto
6947e006499SJohn Dyson 		 * the inactive queue prematurely.
6957e006499SJohn Dyson 		 */
696bd7e5f99SJohn Dyson 		if ((m->flags & PG_REFERENCED) != 0) {
697a647a309SDavid Greenman 			m->flags &= ~PG_REFERENCED;
6987e006499SJohn Dyson #if 0
69967bf6868SJohn Dyson 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
7007e006499SJohn Dyson #else
7017e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
7027e006499SJohn Dyson #endif
70326f9a767SRodney W. Grimes 			vm_page_activate(m);
7047e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE + 1);
7050d94caffSDavid Greenman 			continue;
7060d94caffSDavid Greenman 		}
70767bf6868SJohn Dyson 
7087e006499SJohn Dyson 		/*
7097e006499SJohn Dyson 		 * If the upper level VM system doesn't know anything about the
7107e006499SJohn Dyson 		 * page being dirty, we have to check for it again.  As far as the
7117e006499SJohn Dyson 		 * VM code knows, any partially dirty pages are fully dirty.
7127e006499SJohn Dyson 		 */
713f6b04d2bSDavid Greenman 		if (m->dirty == 0) {
714bd7e5f99SJohn Dyson 			vm_page_test_dirty(m);
71530dcfc09SJohn Dyson 		} else if (m->dirty != 0) {
716bd7e5f99SJohn Dyson 			m->dirty = VM_PAGE_BITS_ALL;
71730dcfc09SJohn Dyson 		}
718ef743ce6SJohn Dyson 
7197e006499SJohn Dyson 		/*
7207e006499SJohn Dyson 		 * Invalid pages can be easily freed
7217e006499SJohn Dyson 		 */
7226d40c3d3SDavid Greenman 		if (m->valid == 0) {
723bd7e5f99SJohn Dyson 			vm_page_protect(m, VM_PROT_NONE);
7246d40c3d3SDavid Greenman 			vm_page_free(m);
72567bf6868SJohn Dyson 			cnt.v_dfree++;
726f6b04d2bSDavid Greenman 			++pages_freed;
7277e006499SJohn Dyson 
7287e006499SJohn Dyson 		/*
7297e006499SJohn Dyson 		 * Clean pages can be placed onto the cache queue.
7307e006499SJohn Dyson 		 */
731bd7e5f99SJohn Dyson 		} else if (m->dirty == 0) {
732bd7e5f99SJohn Dyson 			vm_page_cache(m);
733bd7e5f99SJohn Dyson 			++pages_freed;
7347e006499SJohn Dyson 
7357e006499SJohn Dyson 		/*
7367e006499SJohn Dyson 		 * Dirty pages need to be paged out.  Note that we clean
7377e006499SJohn Dyson 		 * only a limited number of pages per pagedaemon pass.
7387e006499SJohn Dyson 		 */
7390d94caffSDavid Greenman 		} else if (maxlaunder > 0) {
7400d94caffSDavid Greenman 			int written;
74112ac6a1dSJohn Dyson 			int swap_pageouts_ok;
742f6b04d2bSDavid Greenman 			struct vnode *vp = NULL;
7430d94caffSDavid Greenman 
7440d94caffSDavid Greenman 			object = m->object;
7457e006499SJohn Dyson 
74612ac6a1dSJohn Dyson 			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
74712ac6a1dSJohn Dyson 				swap_pageouts_ok = 1;
74812ac6a1dSJohn Dyson 			} else {
74912ac6a1dSJohn Dyson 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
75012ac6a1dSJohn Dyson 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
75112ac6a1dSJohn Dyson 					(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
75212ac6a1dSJohn Dyson 
75312ac6a1dSJohn Dyson 			}
75470111b90SJohn Dyson 
75570111b90SJohn Dyson 			/*
75670111b90SJohn Dyson 			 * We don't bother paging objects that are "dead".  Those
75770111b90SJohn Dyson 			 * objects are in a "rundown" state.
75870111b90SJohn Dyson 			 */
75970111b90SJohn Dyson 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
76012ac6a1dSJohn Dyson 				s = splvm();
76112ac6a1dSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
76212ac6a1dSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
76312ac6a1dSJohn Dyson 				splx(s);
76412ac6a1dSJohn Dyson 				continue;
76512ac6a1dSJohn Dyson 			}
76612ac6a1dSJohn Dyson 
76724a1cce3SDavid Greenman 			if (object->type == OBJT_VNODE) {
76824a1cce3SDavid Greenman 				vp = object->handle;
769996c772fSJohn Dyson 				if (VOP_ISLOCKED(vp) ||
7702f558c3eSBruce Evans 				    vget(vp, LK_EXCLUSIVE, curproc)) {
771b182ec9eSJohn Dyson 					if ((m->queue == PQ_INACTIVE) &&
772b182ec9eSJohn Dyson 						(m->hold_count == 0) &&
773b182ec9eSJohn Dyson 						(m->busy == 0) &&
774b182ec9eSJohn Dyson 						(m->flags & PG_BUSY) == 0) {
775f35329acSJohn Dyson 						s = splvm();
77685a376ebSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
77785a376ebSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
778f35329acSJohn Dyson 						splx(s);
77985a376ebSJohn Dyson 					}
780aef922f5SJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
781f6b04d2bSDavid Greenman 						++vnodes_skipped;
782b182ec9eSJohn Dyson 					continue;
78385a376ebSJohn Dyson 				}
784b182ec9eSJohn Dyson 
785f35329acSJohn Dyson 				/*
786f35329acSJohn Dyson 				 * The page might have been moved to another queue
787f35329acSJohn Dyson 				 * during potential blocking in vget() above.
788f35329acSJohn Dyson 				 */
789b182ec9eSJohn Dyson 				if (m->queue != PQ_INACTIVE) {
790b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
791b182ec9eSJohn Dyson 						++vnodes_skipped;
792b182ec9eSJohn Dyson 					vput(vp);
793b182ec9eSJohn Dyson 					continue;
794b182ec9eSJohn Dyson 				}
795b182ec9eSJohn Dyson 
796f35329acSJohn Dyson 				/*
797f35329acSJohn Dyson 				 * The page may have been busied during the blocking in
798f35329acSJohn Dyson 				 * vget() above.  We don't move the page back onto the end of
799f35329acSJohn Dyson 				 * the queue, so that the statistics remain more correct.
800f35329acSJohn Dyson 				 */
801b182ec9eSJohn Dyson 				if (m->busy || (m->flags & PG_BUSY)) {
802b182ec9eSJohn Dyson 					vput(vp);
803b182ec9eSJohn Dyson 					continue;
804b182ec9eSJohn Dyson 				}
805b182ec9eSJohn Dyson 
806f35329acSJohn Dyson 				/*
807f35329acSJohn Dyson 				 * If the page has become held, then skip it
808f35329acSJohn Dyson 				 */
809b182ec9eSJohn Dyson 				if (m->hold_count) {
810f35329acSJohn Dyson 					s = splvm();
811b182ec9eSJohn Dyson 					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
812b182ec9eSJohn Dyson 					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
813f35329acSJohn Dyson 					splx(s);
814b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
815b182ec9eSJohn Dyson 						++vnodes_skipped;
816b182ec9eSJohn Dyson 					vput(vp);
817f6b04d2bSDavid Greenman 					continue;
818f6b04d2bSDavid Greenman 				}
819f6b04d2bSDavid Greenman 			}
820f6b04d2bSDavid Greenman 
8210d94caffSDavid Greenman 			/*
8220d94caffSDavid Greenman 			 * If a page is dirty, then it is either being washed
8230d94caffSDavid Greenman 			 * (but not yet cleaned) or it is still in the
8240d94caffSDavid Greenman 			 * laundry.  If it is still in the laundry, then we
8250d94caffSDavid Greenman 			 * start the cleaning operation.
8260d94caffSDavid Greenman 			 */
8270d94caffSDavid Greenman 			written = vm_pageout_clean(m, 0);
828f6b04d2bSDavid Greenman 			if (vp)
829f6b04d2bSDavid Greenman 				vput(vp);
830f6b04d2bSDavid Greenman 
8310d94caffSDavid Greenman 			maxlaunder -= written;
8320d94caffSDavid Greenman 		}
833df8bae1dSRodney W. Grimes 	}
83426f9a767SRodney W. Grimes 
835df8bae1dSRodney W. Grimes 	/*
8360d94caffSDavid Greenman 	 * Compute the page shortage.  If we are still very low on memory, be
8370d94caffSDavid Greenman 	 * sure that we will move at least a minimal number of pages from
8380d94caffSDavid Greenman 	 * active to inactive.
839df8bae1dSRodney W. Grimes 	 */
840b182ec9eSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
8410d94caffSDavid Greenman 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
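	/*
	 * For example (illustrative numbers only): with an inactive target of
	 * 512 pages, a cache minimum of 256 pages, and 600 pages currently
	 * free + inactive + cached, the shortage would be 768 - 600 = 168
	 * pages to move from the active queue.
	 */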
84226f9a767SRodney W. Grimes 	if (page_shortage <= 0) {
84326f9a767SRodney W. Grimes 		if (pages_freed == 0) {
84417c4c408SDavid Greenman 			page_shortage = cnt.v_free_min - cnt.v_free_count;
845f6b04d2bSDavid Greenman 		} else {
846f6b04d2bSDavid Greenman 			page_shortage = 1;
84726f9a767SRodney W. Grimes 		}
848df8bae1dSRodney W. Grimes 	}
8497e006499SJohn Dyson 
8507e006499SJohn Dyson 	/*
8517e006499SJohn Dyson 	 * If the "inactive" loop finds that there is a shortage over and
8527e006499SJohn Dyson 	 * above the page statistics variables, then we need to accommodate
8537e006499SJohn Dyson 	 * that.  This avoids potential deadlocks due to pages being temporarily
8547e006499SJohn Dyson 	 * busy for I/O or other types of temporary wiring.
8557e006499SJohn Dyson 	 */
856b182ec9eSJohn Dyson 	if (addl_page_shortage) {
857b182ec9eSJohn Dyson 		if (page_shortage < 0)
858b182ec9eSJohn Dyson 			page_shortage = 0;
859b182ec9eSJohn Dyson 		page_shortage += addl_page_shortage;
860b182ec9eSJohn Dyson 	}
86126f9a767SRodney W. Grimes 
862b18bfc3dSJohn Dyson 	pcount = cnt.v_active_count;
863b18bfc3dSJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
864b18bfc3dSJohn Dyson 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
865f35329acSJohn Dyson 
8667e006499SJohn Dyson 		/*
8677e006499SJohn Dyson 		 * This is a consistency check, and should likely be a panic
8687e006499SJohn Dyson 		 * or warning.
8697e006499SJohn Dyson 		 */
870f35329acSJohn Dyson 		if (m->queue != PQ_ACTIVE) {
87138efa82bSJohn Dyson 			break;
872f35329acSJohn Dyson 		}
873f35329acSJohn Dyson 
874b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
875df8bae1dSRodney W. Grimes 		/*
87626f9a767SRodney W. Grimes 		 * Don't deactivate pages that are busy.
877df8bae1dSRodney W. Grimes 		 */
878a647a309SDavid Greenman 		if ((m->busy != 0) ||
8790d94caffSDavid Greenman 		    (m->flags & PG_BUSY) ||
880f6b04d2bSDavid Greenman 		    (m->hold_count != 0)) {
881f35329acSJohn Dyson 			s = splvm();
8826d40c3d3SDavid Greenman 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
8836d40c3d3SDavid Greenman 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
884f35329acSJohn Dyson 			splx(s);
88526f9a767SRodney W. Grimes 			m = next;
88626f9a767SRodney W. Grimes 			continue;
887df8bae1dSRodney W. Grimes 		}
888b18bfc3dSJohn Dyson 
889b18bfc3dSJohn Dyson 		/*
890b18bfc3dSJohn Dyson 		 * The count for pagedaemon pages is done after checking the
891b18bfc3dSJohn Dyson 		 * page for eligibility...
892b18bfc3dSJohn Dyson 		 */
893b18bfc3dSJohn Dyson 		cnt.v_pdpages++;
894ef743ce6SJohn Dyson 
8957e006499SJohn Dyson 		/*
8967e006499SJohn Dyson 		 * Check to see "how much" the page has been used.
8977e006499SJohn Dyson 		 */
8987e006499SJohn Dyson 		actcount = 0;
899ef743ce6SJohn Dyson 		if (m->object->ref_count != 0) {
900ef743ce6SJohn Dyson 			if (m->flags & PG_REFERENCED) {
9017e006499SJohn Dyson 				actcount += 1;
9020d94caffSDavid Greenman 			}
9037e006499SJohn Dyson 			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
9047e006499SJohn Dyson 			if (actcount) {
9057e006499SJohn Dyson 				m->act_count += ACT_ADVANCE + actcount;
90638efa82bSJohn Dyson 				if (m->act_count > ACT_MAX)
90738efa82bSJohn Dyson 					m->act_count = ACT_MAX;
90838efa82bSJohn Dyson 			}
909b18bfc3dSJohn Dyson 		}
910ef743ce6SJohn Dyson 
9117e006499SJohn Dyson 		/*
9127e006499SJohn Dyson 		 * Since we have "tested" this bit, we need to clear it now.
9137e006499SJohn Dyson 		 */
914b18bfc3dSJohn Dyson 		m->flags &= ~PG_REFERENCED;
915ef743ce6SJohn Dyson 
9167e006499SJohn Dyson 		/*
9177e006499SJohn Dyson 		 * Only if the object is currently being used do we use the
9187e006499SJohn Dyson 		 * page activation count stats.
9197e006499SJohn Dyson 		 */
9207e006499SJohn Dyson 		if (actcount && (m->object->ref_count != 0)) {
921f35329acSJohn Dyson 			s = splvm();
92226f9a767SRodney W. Grimes 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
92326f9a767SRodney W. Grimes 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
924f35329acSJohn Dyson 			splx(s);
92526f9a767SRodney W. Grimes 		} else {
92638efa82bSJohn Dyson 			m->act_count -= min(m->act_count, ACT_DECLINE);
92738efa82bSJohn Dyson 			if (vm_pageout_algorithm_lru ||
92838efa82bSJohn Dyson 				(m->object->ref_count == 0) || (m->act_count == 0)) {
9290d94caffSDavid Greenman 				--page_shortage;
930d4a272dbSJohn Dyson 				if (m->object->ref_count == 0) {
931ef743ce6SJohn Dyson 					vm_page_protect(m, VM_PROT_NONE);
932d4a272dbSJohn Dyson 					if (m->dirty == 0)
9330d94caffSDavid Greenman 						vm_page_cache(m);
934d4a272dbSJohn Dyson 					else
935d4a272dbSJohn Dyson 						vm_page_deactivate(m);
9360d94caffSDavid Greenman 				} else {
93726f9a767SRodney W. Grimes 					vm_page_deactivate(m);
938df8bae1dSRodney W. Grimes 				}
93938efa82bSJohn Dyson 			} else {
94038efa82bSJohn Dyson 				s = splvm();
94138efa82bSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
94238efa82bSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
94338efa82bSJohn Dyson 				splx(s);
94438efa82bSJohn Dyson 			}
945df8bae1dSRodney W. Grimes 		}
94626f9a767SRodney W. Grimes 		m = next;
94726f9a767SRodney W. Grimes 	}
948df8bae1dSRodney W. Grimes 
949f35329acSJohn Dyson 	s = splvm();
950df8bae1dSRodney W. Grimes 	/*
9510d94caffSDavid Greenman 	 * We try to maintain some *really* free pages; this allows interrupt
9520d94caffSDavid Greenman 	 * code to be guaranteed space.
953df8bae1dSRodney W. Grimes 	 */
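	/*
	 * cache_rover below is advanced by PQ_PRIME2 (masked by PQ_L2_MASK)
	 * on each free, so that successive frees start their search in
	 * different PQ_CACHE "color" sub-queues rather than repeatedly
	 * draining the same one.
	 */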
954a1f6d91cSDavid Greenman 	while (cnt.v_free_count < cnt.v_free_reserved) {
9555070c7f8SJohn Dyson 		static int cache_rover = 0;
9565070c7f8SJohn Dyson 		m = vm_page_list_find(PQ_CACHE, cache_rover);
9570d94caffSDavid Greenman 		if (!m)
9580d94caffSDavid Greenman 			break;
9595070c7f8SJohn Dyson 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
9600d94caffSDavid Greenman 		vm_page_free(m);
9610bb3a0d2SDavid Greenman 		cnt.v_dfree++;
96226f9a767SRodney W. Grimes 	}
963f35329acSJohn Dyson 	splx(s);
9645663e6deSDavid Greenman 
965ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING)
966ceb0cf87SJohn Dyson 	/*
967ceb0cf87SJohn Dyson 	 * Idle process swapout -- run once per second.
968ceb0cf87SJohn Dyson 	 */
969ceb0cf87SJohn Dyson 	if (vm_swap_idle_enabled) {
970ceb0cf87SJohn Dyson 		static long lsec;
971ceb0cf87SJohn Dyson 		if (time.tv_sec != lsec) {
972ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
973ceb0cf87SJohn Dyson 			vm_req_vmdaemon();
974ceb0cf87SJohn Dyson 			lsec = time.tv_sec;
975ceb0cf87SJohn Dyson 		}
976ceb0cf87SJohn Dyson 	}
977ceb0cf87SJohn Dyson #endif
978ceb0cf87SJohn Dyson 
9795663e6deSDavid Greenman 	/*
980f6b04d2bSDavid Greenman 	 * If we didn't get enough free pages, and we have skipped a vnode
9814c1f8ee9SDavid Greenman 	 * in a writeable object, wake up the sync daemon.  Also kick off
9824c1f8ee9SDavid Greenman 	 * swapout if we did not get enough free pages.
983f6b04d2bSDavid Greenman 	 */
984bd7e5f99SJohn Dyson 	if ((cnt.v_cache_count + cnt.v_free_count) <
985bd7e5f99SJohn Dyson 		(cnt.v_free_target + cnt.v_cache_min) ) {
986f6b04d2bSDavid Greenman 		if (vnodes_skipped &&
987f6b04d2bSDavid Greenman 		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
988f6b04d2bSDavid Greenman 			if (!vfs_update_wakeup) {
989f6b04d2bSDavid Greenman 				vfs_update_wakeup = 1;
99024a1cce3SDavid Greenman 				wakeup(&vfs_update_wakeup);
991f6b04d2bSDavid Greenman 			}
992f6b04d2bSDavid Greenman 		}
99338efa82bSJohn Dyson #if !defined(NO_SWAPPING)
994ceb0cf87SJohn Dyson 		if (vm_swap_enabled &&
99538efa82bSJohn Dyson 			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
9964c1f8ee9SDavid Greenman 			vm_req_vmdaemon();
997ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
9984c1f8ee9SDavid Greenman 		}
9995afce282SDavid Greenman #endif
10004c1f8ee9SDavid Greenman 	}
10014c1f8ee9SDavid Greenman 
1002f6b04d2bSDavid Greenman 
1003f6b04d2bSDavid Greenman 	/*
10040d94caffSDavid Greenman 	 * make sure that we have swap space -- if we are low on both memory
10050d94caffSDavid Greenman 	 * and swap, then kill the biggest process.
10065663e6deSDavid Greenman 	 */
10075663e6deSDavid Greenman 	if ((vm_swap_size == 0 || swap_pager_full) &&
10080d94caffSDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
10095663e6deSDavid Greenman 		bigproc = NULL;
10105663e6deSDavid Greenman 		bigsize = 0;
10111b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
10125663e6deSDavid Greenman 			/*
10135663e6deSDavid Greenman 			 * if this is a system process, skip it
10145663e6deSDavid Greenman 			 */
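			/*
			 * Low-numbered pids (< 48) are also skipped as long
			 * as some swap space remains, presumably because
			 * they are early-started system daemons.
			 */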
101579221631SDavid Greenman 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
101679221631SDavid Greenman 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
10175663e6deSDavid Greenman 				continue;
10185663e6deSDavid Greenman 			}
10195663e6deSDavid Greenman 			/*
10205663e6deSDavid Greenman 			 * if the process is in a non-running type state,
10215663e6deSDavid Greenman 			 * don't touch it.
10225663e6deSDavid Greenman 			 */
10235663e6deSDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
10245663e6deSDavid Greenman 				continue;
10255663e6deSDavid Greenman 			}
10265663e6deSDavid Greenman 			/*
10275663e6deSDavid Greenman 			 * get the process size
10285663e6deSDavid Greenman 			 */
10295663e6deSDavid Greenman 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
10305663e6deSDavid Greenman 			/*
10315663e6deSDavid Greenman 			 * if this process is bigger than the biggest one,
10325663e6deSDavid Greenman 			 * remember it.
10335663e6deSDavid Greenman 			 */
10345663e6deSDavid Greenman 			if (size > bigsize) {
10355663e6deSDavid Greenman 				bigproc = p;
10365663e6deSDavid Greenman 				bigsize = size;
10375663e6deSDavid Greenman 			}
10385663e6deSDavid Greenman 		}
10395663e6deSDavid Greenman 		if (bigproc != NULL) {
1040729b1e51SDavid Greenman 			killproc(bigproc, "out of swap space");
10415663e6deSDavid Greenman 			bigproc->p_estcpu = 0;
10425663e6deSDavid Greenman 			bigproc->p_nice = PRIO_MIN;
10435663e6deSDavid Greenman 			resetpriority(bigproc);
104424a1cce3SDavid Greenman 			wakeup(&cnt.v_free_count);
10455663e6deSDavid Greenman 		}
10465663e6deSDavid Greenman 	}
104726f9a767SRodney W. Grimes 	return force_wakeup;
104826f9a767SRodney W. Grimes }
104926f9a767SRodney W. Grimes 
1050dc2efb27SJohn Dyson /*
1051dc2efb27SJohn Dyson  * This routine tries to maintain the pseudo-LRU active queue,
1052dc2efb27SJohn Dyson  * so that during long periods of time when there is no paging,
1053dc2efb27SJohn Dyson  * some statistics accumulation still occurs.  This code
1054dc2efb27SJohn Dyson  * helps the situation where paging just starts to occur.
1055dc2efb27SJohn Dyson  */
1056dc2efb27SJohn Dyson static void
1057dc2efb27SJohn Dyson vm_pageout_page_stats()
1058dc2efb27SJohn Dyson {
1059dc2efb27SJohn Dyson 	int s;
1060dc2efb27SJohn Dyson 	vm_page_t m,next;
1061dc2efb27SJohn Dyson 	int pcount,tpcount;		/* Number of pages to check */
1062dc2efb27SJohn Dyson 	static int fullintervalcount = 0;
1063dc2efb27SJohn Dyson 
1064dc2efb27SJohn Dyson 	pcount = cnt.v_active_count;
1065dc2efb27SJohn Dyson 	fullintervalcount += vm_pageout_stats_interval;
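	/*
	 * Between full-stats intervals only a fraction of the active queue
	 * (scaled by vm_pageout_stats_max) is examined; once a full interval
	 * has elapsed, the entire active queue is scanned.
	 */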
1066dc2efb27SJohn Dyson 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1067dc2efb27SJohn Dyson 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1068dc2efb27SJohn Dyson 		if (pcount > tpcount)
1069dc2efb27SJohn Dyson 			pcount = tpcount;
1070dc2efb27SJohn Dyson 	}
1071dc2efb27SJohn Dyson 
1072dc2efb27SJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
1073dc2efb27SJohn Dyson 	while ((m != NULL) && (pcount-- > 0)) {
10747e006499SJohn Dyson 		int actcount;
1075dc2efb27SJohn Dyson 
1076dc2efb27SJohn Dyson 		if (m->queue != PQ_ACTIVE) {
1077dc2efb27SJohn Dyson 			break;
1078dc2efb27SJohn Dyson 		}
1079dc2efb27SJohn Dyson 
1080dc2efb27SJohn Dyson 		next = TAILQ_NEXT(m, pageq);
1081dc2efb27SJohn Dyson 		/*
1082dc2efb27SJohn Dyson 		 * Don't deactivate pages that are busy.
1083dc2efb27SJohn Dyson 		 */
1084dc2efb27SJohn Dyson 		if ((m->busy != 0) ||
1085dc2efb27SJohn Dyson 		    (m->flags & PG_BUSY) ||
1086dc2efb27SJohn Dyson 		    (m->hold_count != 0)) {
1087dc2efb27SJohn Dyson 			s = splvm();
1088dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1089dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1090dc2efb27SJohn Dyson 			splx(s);
1091dc2efb27SJohn Dyson 			m = next;
1092dc2efb27SJohn Dyson 			continue;
1093dc2efb27SJohn Dyson 		}
1094dc2efb27SJohn Dyson 
10957e006499SJohn Dyson 		actcount = 0;
1096dc2efb27SJohn Dyson 		if (m->flags & PG_REFERENCED) {
1097dc2efb27SJohn Dyson 			m->flags &= ~PG_REFERENCED;
10987e006499SJohn Dyson 			actcount += 1;
1099dc2efb27SJohn Dyson 		}
1100dc2efb27SJohn Dyson 
11017e006499SJohn Dyson 		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
11027e006499SJohn Dyson 		if (actcount) {
11037e006499SJohn Dyson 			m->act_count += ACT_ADVANCE + actcount;
1104dc2efb27SJohn Dyson 			if (m->act_count > ACT_MAX)
1105dc2efb27SJohn Dyson 				m->act_count = ACT_MAX;
1106dc2efb27SJohn Dyson 			s = splvm();
1107dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1108dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1109dc2efb27SJohn Dyson 			splx(s);
1110dc2efb27SJohn Dyson 		} else {
1111dc2efb27SJohn Dyson 			if (m->act_count == 0) {
11127e006499SJohn Dyson 				/*
11137e006499SJohn Dyson 				 * We turn off page access, so that we have more accurate
11147e006499SJohn Dyson 				 * RSS stats.  We don't do this in the normal page deactivation
11157e006499SJohn Dyson 				 * path when the system is under VM load, because the cost of
11167e006499SJohn Dyson 				 * the large number of page protect operations would be higher
11177e006499SJohn Dyson 				 * than the value of doing the operation.
11187e006499SJohn Dyson 				 */
1119dc2efb27SJohn Dyson 				vm_page_protect(m, VM_PROT_NONE);
1120dc2efb27SJohn Dyson 				vm_page_deactivate(m);
1121dc2efb27SJohn Dyson 			} else {
1122dc2efb27SJohn Dyson 				m->act_count -= min(m->act_count, ACT_DECLINE);
1123dc2efb27SJohn Dyson 				s = splvm();
1124dc2efb27SJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1125dc2efb27SJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1126dc2efb27SJohn Dyson 				splx(s);
1127dc2efb27SJohn Dyson 			}
1128dc2efb27SJohn Dyson 		}
1129dc2efb27SJohn Dyson 
1130dc2efb27SJohn Dyson 		m = next;
1131dc2efb27SJohn Dyson 	}
1132dc2efb27SJohn Dyson }
1133dc2efb27SJohn Dyson 
1134dc2efb27SJohn Dyson 
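/*
 * Compute the free-page thresholds (v_free_min, v_pageout_free_min and
 * v_free_reserved) from the supplied page count.  Returns 0 and does
 * nothing if the count is less than the number of pages in the system.
 */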
1135b182ec9eSJohn Dyson static int
1136b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1137b182ec9eSJohn Dyson vm_size_t count;
1138b182ec9eSJohn Dyson {
1139b182ec9eSJohn Dyson 	if (count < cnt.v_page_count)
1140b182ec9eSJohn Dyson 		 return 0;
1141b182ec9eSJohn Dyson 	/*
1142b182ec9eSJohn Dyson 	 * free_reserved needs to include enough for the largest swap pager
1143b182ec9eSJohn Dyson 	 * structures plus enough for any pv_entry structs when paging.
1144b182ec9eSJohn Dyson 	 */
1145b182ec9eSJohn Dyson 	if (cnt.v_page_count > 1024)
1146b182ec9eSJohn Dyson 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1147b182ec9eSJohn Dyson 	else
1148b182ec9eSJohn Dyson 		cnt.v_free_min = 4;
1149f35329acSJohn Dyson 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1150f35329acSJohn Dyson 		cnt.v_interrupt_free_min;
1151f35329acSJohn Dyson 	cnt.v_free_reserved = vm_pageout_page_count +
1152a2f4a846SJohn Dyson 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1153a2f4a846SJohn Dyson 	cnt.v_free_min += cnt.v_free_reserved;
1154b182ec9eSJohn Dyson 	return 1;
1155b182ec9eSJohn Dyson }
1156b182ec9eSJohn Dyson 
1157b182ec9eSJohn Dyson 
1158df8bae1dSRodney W. Grimes /*
1159df8bae1dSRodney W. Grimes  *	vm_pageout is the high level pageout daemon.
1160df8bae1dSRodney W. Grimes  */
11612b14f991SJulian Elischer static void
116226f9a767SRodney W. Grimes vm_pageout()
1163df8bae1dSRodney W. Grimes {
1164df8bae1dSRodney W. Grimes 	/*
1165df8bae1dSRodney W. Grimes 	 * Initialize some paging parameters.
1166df8bae1dSRodney W. Grimes 	 */
1167df8bae1dSRodney W. Grimes 
1168f6b04d2bSDavid Greenman 	cnt.v_interrupt_free_min = 2;
1169f35329acSJohn Dyson 	if (cnt.v_page_count < 2000)
1170f35329acSJohn Dyson 		vm_pageout_page_count = 8;
1171f6b04d2bSDavid Greenman 
1172b182ec9eSJohn Dyson 	vm_pageout_free_page_calc(cnt.v_page_count);
1173ed74321bSDavid Greenman 	/*
11740d94caffSDavid Greenman 	 * v_free_target is the number of free pages the pageout daemon
11750d94caffSDavid Greenman 	 * tries to maintain; scale it from v_free_min and v_free_reserved.
1176ed74321bSDavid Greenman 	 */
11770d94caffSDavid Greenman 	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
11786f2b142eSDavid Greenman 
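	/*
	 * Size the cache and inactive queue targets from the free page
	 * count at startup; on very small systems the cache targets are
	 * simply left at zero.
	 */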
11796ac5bfdbSJohn Dyson 	if (cnt.v_free_count > 1024) {
11800d94caffSDavid Greenman 		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
11816f2b142eSDavid Greenman 		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
11826f2b142eSDavid Greenman 		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
11830d94caffSDavid Greenman 	} else {
11840d94caffSDavid Greenman 		cnt.v_cache_min = 0;
11850d94caffSDavid Greenman 		cnt.v_cache_max = 0;
11866f2b142eSDavid Greenman 		cnt.v_inactive_target = cnt.v_free_count / 4;
11870d94caffSDavid Greenman 	}
1188df8bae1dSRodney W. Grimes 
1189df8bae1dSRodney W. Grimes 	/* XXX does not really belong here */
1190df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
1191df8bae1dSRodney W. Grimes 		vm_page_max_wired = cnt.v_free_count / 3;
1192df8bae1dSRodney W. Grimes 
1193dc2efb27SJohn Dyson 	if (vm_pageout_stats_max == 0)
1194dc2efb27SJohn Dyson 		vm_pageout_stats_max = cnt.v_free_target;
1195dc2efb27SJohn Dyson 
1196dc2efb27SJohn Dyson 	/*
1197dc2efb27SJohn Dyson 	 * Set interval in seconds for stats scan.
1198dc2efb27SJohn Dyson 	 */
1199dc2efb27SJohn Dyson 	if (vm_pageout_stats_interval == 0)
1200dc2efb27SJohn Dyson 		vm_pageout_stats_interval = 4;
1201dc2efb27SJohn Dyson 	if (vm_pageout_full_stats_interval == 0)
1202dc2efb27SJohn Dyson 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1203dc2efb27SJohn Dyson 
1204dc2efb27SJohn Dyson 
1205dc2efb27SJohn Dyson 	/*
1206dc2efb27SJohn Dyson 	 * Set maximum free per pass
1207dc2efb27SJohn Dyson 	 */
1208dc2efb27SJohn Dyson 	if (vm_pageout_stats_free_max == 0)
1209dc2efb27SJohn Dyson 		vm_pageout_stats_free_max = 25;
1210dc2efb27SJohn Dyson 
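	/*
	 * Scale max_page_launder, the per-pass laundering budget for the
	 * pageout scan, to the size of the system.
	 */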
1211ceb0cf87SJohn Dyson 	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);
121226f9a767SRodney W. Grimes 
121324a1cce3SDavid Greenman 	swap_pager_swap_init();
1214df8bae1dSRodney W. Grimes 	/*
12150d94caffSDavid Greenman 	 * The pageout daemon is never done, so loop forever.
1216df8bae1dSRodney W. Grimes 	 */
1217df8bae1dSRodney W. Grimes 	while (TRUE) {
121885a376ebSJohn Dyson 		int inactive_target;
1219dc2efb27SJohn Dyson 		int error;
1220b18bfc3dSJohn Dyson 		int s = splvm();
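		/*
		 * If there is no free-page shortage, sleep for the stats
		 * interval; if that sleep times out without the daemon
		 * being needed, just run the page-stats scan and go back
		 * to sleep.  If there is already a shortage, nap briefly
		 * and then rescan.
		 */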
1221f919ebdeSDavid Greenman 		if (!vm_pages_needed ||
1222545901f7SJohn Dyson 			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
1223f919ebdeSDavid Greenman 			vm_pages_needed = 0;
1224dc2efb27SJohn Dyson 			error = tsleep(&vm_pages_needed,
1225dc2efb27SJohn Dyson 				PVM, "psleep", vm_pageout_stats_interval * hz);
1226dc2efb27SJohn Dyson 			if (error && !vm_pages_needed) {
1227dc2efb27SJohn Dyson 				splx(s);
1228dc2efb27SJohn Dyson 				vm_pageout_page_stats();
1229dc2efb27SJohn Dyson 				continue;
1230dc2efb27SJohn Dyson 			}
1231dc2efb27SJohn Dyson 		} else if (vm_pages_needed) {
123238efa82bSJohn Dyson 			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
1233f919ebdeSDavid Greenman 		}
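		/*
		 * Retarget the inactive queue to one quarter of the unwired
		 * pages, but never less than twice v_free_min.
		 */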
1234b182ec9eSJohn Dyson 		inactive_target =
1235b182ec9eSJohn Dyson 			(cnt.v_page_count - cnt.v_wire_count) / 4;
1236b182ec9eSJohn Dyson 		if (inactive_target < 2*cnt.v_free_min)
1237b182ec9eSJohn Dyson 			inactive_target = 2*cnt.v_free_min;
1238b182ec9eSJohn Dyson 		cnt.v_inactive_target = inactive_target;
1239b18bfc3dSJohn Dyson 		if (vm_pages_needed)
1240b18bfc3dSJohn Dyson 			cnt.v_pdwakeups++;
1241f919ebdeSDavid Greenman 		vm_pages_needed = 0;
1242f919ebdeSDavid Greenman 		splx(s);
1243df8bae1dSRodney W. Grimes 		vm_pager_sync();
12440d94caffSDavid Greenman 		vm_pageout_scan();
124526f9a767SRodney W. Grimes 		vm_pager_sync();
124624a1cce3SDavid Greenman 		wakeup(&cnt.v_free_count);
1247df8bae1dSRodney W. Grimes 	}
1248df8bae1dSRodney W. Grimes }
124926f9a767SRodney W. Grimes 
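/*
 * Signal the pageout daemon that pages are needed.  The wakeup is skipped
 * if a request is already pending or if the caller is the daemon itself.
 */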
1250e0c5a895SJohn Dyson void
1251e0c5a895SJohn Dyson pagedaemon_wakeup()
1252e0c5a895SJohn Dyson {
1253e0c5a895SJohn Dyson 	if (!vm_pages_needed && curproc != pageproc) {
1254e0c5a895SJohn Dyson 		vm_pages_needed++;
1255e0c5a895SJohn Dyson 		wakeup(&vm_pages_needed);
1256e0c5a895SJohn Dyson 	}
1257e0c5a895SJohn Dyson }
1258e0c5a895SJohn Dyson 
125938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
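/*
 * Ask the vm daemon to run, at most about once per second; the
 * (ticks < lastrun) test handles tick-counter wraparound.
 */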
12605afce282SDavid Greenman static void
12615afce282SDavid Greenman vm_req_vmdaemon()
12625afce282SDavid Greenman {
12635afce282SDavid Greenman 	static int lastrun = 0;
12645afce282SDavid Greenman 
1265b18bfc3dSJohn Dyson 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
12665afce282SDavid Greenman 		wakeup(&vm_daemon_needed);
12675afce282SDavid Greenman 		lastrun = ticks;
12685afce282SDavid Greenman 	}
12695afce282SDavid Greenman }
12705afce282SDavid Greenman 
12712b14f991SJulian Elischer static void
12724f9fb771SBruce Evans vm_daemon()
12730d94caffSDavid Greenman {
12742fe6e4d7SDavid Greenman 	vm_object_t object;
12752fe6e4d7SDavid Greenman 	struct proc *p;
12760d94caffSDavid Greenman 
12772fe6e4d7SDavid Greenman 	while (TRUE) {
127824a1cce3SDavid Greenman 		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
12794c1f8ee9SDavid Greenman 		if (vm_pageout_req_swapout) {
1280ceb0cf87SJohn Dyson 			swapout_procs(vm_pageout_req_swapout);
12814c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 0;
12824c1f8ee9SDavid Greenman 		}
12832fe6e4d7SDavid Greenman 		/*
12840d94caffSDavid Greenman 		 * scan the processes; if one exceeds its RSS rlimit or is
12850d94caffSDavid Greenman 		 * swapped out, deactivate its pages
12862fe6e4d7SDavid Greenman 		 */
12872fe6e4d7SDavid Greenman 
12881b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
12892fe6e4d7SDavid Greenman 			quad_t limit;
12902fe6e4d7SDavid Greenman 			vm_offset_t size;
12912fe6e4d7SDavid Greenman 
12922fe6e4d7SDavid Greenman 			/*
12932fe6e4d7SDavid Greenman 			 * if this is a system process or a process that is
12942fe6e4d7SDavid Greenman 			 * exiting, skip it.
12952fe6e4d7SDavid Greenman 			 */
12962fe6e4d7SDavid Greenman 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
12972fe6e4d7SDavid Greenman 				continue;
12982fe6e4d7SDavid Greenman 			}
12992fe6e4d7SDavid Greenman 			/*
13002fe6e4d7SDavid Greenman 			 * if the process is in a non-running type state,
13012fe6e4d7SDavid Greenman 			 * don't touch it.
13022fe6e4d7SDavid Greenman 			 */
13032fe6e4d7SDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
13042fe6e4d7SDavid Greenman 				continue;
13052fe6e4d7SDavid Greenman 			}
13062fe6e4d7SDavid Greenman 			/*
13072fe6e4d7SDavid Greenman 			 * get the process's RSS limit
13082fe6e4d7SDavid Greenman 			 */
13092fe6e4d7SDavid Greenman 			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
13102fe6e4d7SDavid Greenman 			    p->p_rlimit[RLIMIT_RSS].rlim_max);
13112fe6e4d7SDavid Greenman 
13122fe6e4d7SDavid Greenman 			/*
13130d94caffSDavid Greenman 			 * let processes that are swapped out really be
13140d94caffSDavid Greenman 			 * swapped out; set the limit to nothing (this will
13150d94caffSDavid Greenman 			 * force a swap-out.)
13162fe6e4d7SDavid Greenman 			 */
13172fe6e4d7SDavid Greenman 			if ((p->p_flag & P_INMEM) == 0)
13180d94caffSDavid Greenman 				limit = 0;	/* XXX */
13192fe6e4d7SDavid Greenman 
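			/*
			 * If the resident set size exceeds the limit,
			 * deactivate pages in the process's map down toward
			 * the limit.
			 */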
1320a91c5a7eSJohn Dyson 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
13212fe6e4d7SDavid Greenman 			if (limit >= 0 && size >= limit) {
13222fe6e4d7SDavid Greenman 				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
132338efa82bSJohn Dyson 				    (vm_pindex_t)(limit >> PAGE_SHIFT) );
13242fe6e4d7SDavid Greenman 			}
13252fe6e4d7SDavid Greenman 		}
13262fe6e4d7SDavid Greenman 
13270d94caffSDavid Greenman 		/*
13280d94caffSDavid Greenman 		 * we remove cached objects that have no resident pages...
13290d94caffSDavid Greenman 		 */
13300d94caffSDavid Greenman restart:
1331b18bfc3dSJohn Dyson 		object = TAILQ_FIRST(&vm_object_cached_list);
13322fe6e4d7SDavid Greenman 		while (object) {
13332fe6e4d7SDavid Greenman 			/*
13342fe6e4d7SDavid Greenman 			 * if there are no resident pages -- get rid of the object
13352fe6e4d7SDavid Greenman 			 */
13362fe6e4d7SDavid Greenman 			if (object->resident_page_count == 0) {
133724a1cce3SDavid Greenman 				vm_object_reference(object);
13382fe6e4d7SDavid Greenman 				pager_cache(object, FALSE);
13392fe6e4d7SDavid Greenman 				goto restart;
13402fe6e4d7SDavid Greenman 			}
1341b18bfc3dSJohn Dyson 			object = TAILQ_NEXT(object, cached_list);
13422fe6e4d7SDavid Greenman 		}
134324a1cce3SDavid Greenman 	}
13442fe6e4d7SDavid Greenman }
134538efa82bSJohn Dyson #endif
1346