xref: /freebsd/sys/vm/vm_pageout.c (revision 7e006499864b37d13f9d69b8d87466e5092c84ed)
1df8bae1dSRodney W. Grimes /*
226f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
326f9a767SRodney W. Grimes  * All rights reserved.
426f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
726f9a767SRodney W. Grimes  * All rights reserved.
8df8bae1dSRodney W. Grimes  *
9df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
10df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
21df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
403c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  *
43df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44df8bae1dSRodney W. Grimes  * All rights reserved.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
49df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
50df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
51df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
52df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61df8bae1dSRodney W. Grimes  *  School of Computer Science
62df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
63df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
66df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
6726f9a767SRodney W. Grimes  *
687e006499SJohn Dyson  * $Id: vm_pageout.c,v 1.98 1997/09/01 03:17:26 bde Exp $
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75df8bae1dSRodney W. Grimes #include <sys/param.h>
7626f9a767SRodney W. Grimes #include <sys/systm.h>
77b5e8ce9fSBruce Evans #include <sys/kernel.h>
7826f9a767SRodney W. Grimes #include <sys/proc.h>
7926f9a767SRodney W. Grimes #include <sys/resourcevar.h>
80d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
81f6b04d2bSDavid Greenman #include <sys/vnode.h>
82efeaf95aSDavid Greenman #include <sys/vmmeter.h>
8338efa82bSJohn Dyson #include <sys/sysctl.h>
84df8bae1dSRodney W. Grimes 
85df8bae1dSRodney W. Grimes #include <vm/vm.h>
86efeaf95aSDavid Greenman #include <vm/vm_param.h>
87efeaf95aSDavid Greenman #include <vm/vm_prot.h>
88996c772fSJohn Dyson #include <sys/lock.h>
89efeaf95aSDavid Greenman #include <vm/vm_object.h>
90df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
91efeaf95aSDavid Greenman #include <vm/vm_map.h>
92df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
9324a1cce3SDavid Greenman #include <vm/vm_pager.h>
9405f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
95efeaf95aSDavid Greenman #include <vm/vm_extern.h>
96df8bae1dSRodney W. Grimes 
972b14f991SJulian Elischer /*
982b14f991SJulian Elischer  * System initialization
992b14f991SJulian Elischer  */
1002b14f991SJulian Elischer 
1012b14f991SJulian Elischer /* the kernel process "vm_pageout"*/
1022b14f991SJulian Elischer static void vm_pageout __P((void));
1033af76890SPoul-Henning Kamp static int vm_pageout_clean __P((vm_page_t, int));
1043af76890SPoul-Henning Kamp static int vm_pageout_scan __P((void));
105f35329acSJohn Dyson static int vm_pageout_free_page_calc __P((vm_size_t count));
/* Process structure of the pagedaemon thread; filled in by kproc_start. */
1062b14f991SJulian Elischer struct proc *pageproc;
1072b14f991SJulian Elischer 
/* Descriptor consumed by SYSINIT_KT below to launch the pagedaemon at boot. */
1082b14f991SJulian Elischer static struct kproc_desc page_kp = {
1092b14f991SJulian Elischer 	"pagedaemon",
1102b14f991SJulian Elischer 	vm_pageout,
1112b14f991SJulian Elischer 	&pageproc
1122b14f991SJulian Elischer };
1134590fd3aSDavid Greenman SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1142b14f991SJulian Elischer 
11538efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1162b14f991SJulian Elischer /* the kernel process "vm_daemon"*/
1172b14f991SJulian Elischer static void vm_daemon __P((void));
118f708ef1bSPoul-Henning Kamp static struct	proc *vmproc;
1192b14f991SJulian Elischer 
/* Descriptor for the vmdaemon (swap-out) thread; only built when swapping
 * support is compiled in. */
1202b14f991SJulian Elischer static struct kproc_desc vm_kp = {
1212b14f991SJulian Elischer 	"vmdaemon",
1222b14f991SJulian Elischer 	vm_daemon,
1232b14f991SJulian Elischer 	&vmproc
1242b14f991SJulian Elischer };
1254590fd3aSDavid Greenman SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
12638efa82bSJohn Dyson #endif
1272b14f991SJulian Elischer 
1282b14f991SJulian Elischer 
129df8bae1dSRodney W. Grimes int vm_pages_needed;		/* Event on which pageout daemon sleeps */
13026f9a767SRodney W. Grimes 
131c3cb3e12SDavid Greenman int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
13226f9a767SRodney W. Grimes 
13326f9a767SRodney W. Grimes extern int npendingio;
13438efa82bSJohn Dyson #if !defined(NO_SWAPPING)
135f708ef1bSPoul-Henning Kamp static int vm_pageout_req_swapout;	/* XXX */
136f708ef1bSPoul-Henning Kamp static int vm_daemon_needed;
13738efa82bSJohn Dyson #endif
13826f9a767SRodney W. Grimes extern int nswiodone;
1395663e6deSDavid Greenman extern int vm_swap_size;
140f6b04d2bSDavid Greenman extern int vfs_update_wakeup;
/* Tunables for the periodic page-statistics scan; all default to 0 here and
 * are presumably given real values during pageout initialization — the code
 * that sets them is outside this view. */
141dc2efb27SJohn Dyson int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
142dc2efb27SJohn Dyson int vm_pageout_full_stats_interval = 0;
143dc2efb27SJohn Dyson int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
/* Swapping is compiled out entirely with NO_SWAPPING; the flag is then
 * read-only (see the CTLFLAG_RD sysctl below). */
14438efa82bSJohn Dyson #if defined(NO_SWAPPING)
14538efa82bSJohn Dyson int vm_swapping_enabled=0;
14638efa82bSJohn Dyson #else
14738efa82bSJohn Dyson int vm_swapping_enabled=1;
14838efa82bSJohn Dyson #endif
14938efa82bSJohn Dyson 
/* Export the knobs above under the vm.* sysctl tree. */
15038efa82bSJohn Dyson SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
15138efa82bSJohn Dyson 	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");
15238efa82bSJohn Dyson 
153dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
154dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");
155dc2efb27SJohn Dyson 
156dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
157dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");
158dc2efb27SJohn Dyson 
159dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
160dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");
161dc2efb27SJohn Dyson 
162dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
163dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");
164dc2efb27SJohn Dyson 
16538efa82bSJohn Dyson #if defined(NO_SWAPPING)
16638efa82bSJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
16738efa82bSJohn Dyson 	CTLFLAG_RD, &vm_swapping_enabled, 0, "");
16838efa82bSJohn Dyson #else
16938efa82bSJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
17038efa82bSJohn Dyson 	CTLFLAG_RW, &vm_swapping_enabled, 0, "");
17138efa82bSJohn Dyson #endif
17226f9a767SRodney W. Grimes 
/* Per-pass laundering cap, scaled up on machines with more pages. */
1730d94caffSDavid Greenman #define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)
17426f9a767SRodney W. Grimes 
/* Maximum cluster size built by vm_pageout_clean (target page plus
 * neighbors); runtime-tunable via the variable below. */
175a316d390SJohn Dyson #define VM_PAGEOUT_PAGE_COUNT 16
176bbc0ec52SDavid Greenman int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
177df8bae1dSRodney W. Grimes 
178c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
179df8bae1dSRodney W. Grimes 
18038efa82bSJohn Dyson #if !defined(NO_SWAPPING)
18138efa82bSJohn Dyson typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
18238efa82bSJohn Dyson static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
183cd41fc12SDavid Greenman static freeer_fcn_t vm_pageout_object_deactivate_pages;
184cd41fc12SDavid Greenman static void vm_req_vmdaemon __P((void));
18538efa82bSJohn Dyson #endif
186dc2efb27SJohn Dyson static void vm_pageout_page_stats(void);
187cd41fc12SDavid Greenman 
18826f9a767SRodney W. Grimes /*
18926f9a767SRodney W. Grimes  * vm_pageout_clean:
19024a1cce3SDavid Greenman  *
1910d94caffSDavid Greenman  * Clean the page and remove it from the laundry.
19226f9a767SRodney W. Grimes  *
1930d94caffSDavid Greenman  * We set the busy bit to cause potential page faults on this page to
19426f9a767SRodney W. Grimes  * block.
19526f9a767SRodney W. Grimes  *
1960d94caffSDavid Greenman  * And we set pageout-in-progress to keep the object from disappearing
1970d94caffSDavid Greenman  * during pageout.  This guarantees that the page won't move from the
1980d94caffSDavid Greenman  * inactive queue.  (However, any other page on the inactive queue may
1990d94caffSDavid Greenman  * move!)
20026f9a767SRodney W. Grimes  */
2013af76890SPoul-Henning Kamp static int
20224a1cce3SDavid Greenman vm_pageout_clean(m, sync)
20324a1cce3SDavid Greenman 	vm_page_t m;
20424a1cce3SDavid Greenman 	int sync;
20524a1cce3SDavid Greenman {
20626f9a767SRodney W. Grimes 	register vm_object_t object;
	/*
	 * Cluster window.  The target page always sits at slot
	 * vm_pageout_page_count; clusterable pages that follow it in the
	 * object fill upward from there, pages that precede it fill
	 * downward.  page_base tracks the lowest slot in use, so the final
	 * cluster is mc[page_base .. page_base + pageout_count - 1].
	 */
207f35329acSJohn Dyson 	vm_page_t mc[2*vm_pageout_page_count];
20824a1cce3SDavid Greenman 	int pageout_count;
20924a1cce3SDavid Greenman 	int i, forward_okay, backward_okay, page_base;
210a316d390SJohn Dyson 	vm_pindex_t pindex = m->pindex;
21126f9a767SRodney W. Grimes 
21226f9a767SRodney W. Grimes 	object = m->object;
21324a1cce3SDavid Greenman 
21426f9a767SRodney W. Grimes 	/*
21524a1cce3SDavid Greenman 	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
21624a1cce3SDavid Greenman 	 * Try to avoid the deadlock.
21726f9a767SRodney W. Grimes 	 */
21824a1cce3SDavid Greenman 	if ((sync != VM_PAGEOUT_FORCE) &&
219f35329acSJohn Dyson 	    (object->type == OBJT_DEFAULT) &&
22024a1cce3SDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
22126f9a767SRodney W. Grimes 		return 0;
22226f9a767SRodney W. Grimes 
22324a1cce3SDavid Greenman 	/*
22424a1cce3SDavid Greenman 	 * Don't mess with the page if it's busy.
22524a1cce3SDavid Greenman 	 */
226f6b04d2bSDavid Greenman 	if ((!sync && m->hold_count != 0) ||
2270d94caffSDavid Greenman 	    ((m->busy != 0) || (m->flags & PG_BUSY)))
2280d94caffSDavid Greenman 		return 0;
2290d94caffSDavid Greenman 
23024a1cce3SDavid Greenman 	/*
23124a1cce3SDavid Greenman 	 * Try collapsing before it's too late.
23224a1cce3SDavid Greenman 	 */
23324a1cce3SDavid Greenman 	if (!sync && object->backing_object) {
23426f9a767SRodney W. Grimes 		vm_object_collapse(object);
23526f9a767SRodney W. Grimes 	}
2363c018e72SJohn Dyson 
	/* Seed the window with the target page itself. */
237f35329acSJohn Dyson 	mc[vm_pageout_page_count] = m;
23826f9a767SRodney W. Grimes 	pageout_count = 1;
239f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
24024a1cce3SDavid Greenman 	forward_okay = TRUE;
	/* A page at pindex 0 has no predecessors to cluster with. */
241a316d390SJohn Dyson 	if (pindex != 0)
24224a1cce3SDavid Greenman 		backward_okay = TRUE;
24326f9a767SRodney W. Grimes 	else
24424a1cce3SDavid Greenman 		backward_okay = FALSE;
24524a1cce3SDavid Greenman 	/*
24624a1cce3SDavid Greenman 	 * Scan object for clusterable pages.
24724a1cce3SDavid Greenman 	 *
24824a1cce3SDavid Greenman 	 * We can cluster ONLY if: ->> the page is NOT
24924a1cce3SDavid Greenman 	 * clean, wired, busy, held, or mapped into a
25024a1cce3SDavid Greenman 	 * buffer, and one of the following:
25124a1cce3SDavid Greenman 	 * 1) The page is inactive, or a seldom used
25224a1cce3SDavid Greenman 	 *    active page.
25324a1cce3SDavid Greenman 	 * -or-
25424a1cce3SDavid Greenman 	 * 2) we force the issue.
25524a1cce3SDavid Greenman 	 */
25624a1cce3SDavid Greenman 	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
25724a1cce3SDavid Greenman 		vm_page_t p;
258f6b04d2bSDavid Greenman 
25924a1cce3SDavid Greenman 		/*
26024a1cce3SDavid Greenman 		 * See if forward page is clusterable.
26124a1cce3SDavid Greenman 		 */
26224a1cce3SDavid Greenman 		if (forward_okay) {
26324a1cce3SDavid Greenman 			/*
26424a1cce3SDavid Greenman 			 * Stop forward scan at end of object.
26524a1cce3SDavid Greenman 			 */
266a316d390SJohn Dyson 			if ((pindex + i) > object->size) {
26724a1cce3SDavid Greenman 				forward_okay = FALSE;
26824a1cce3SDavid Greenman 				goto do_backward;
269f6b04d2bSDavid Greenman 			}
270a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex + i);
27124a1cce3SDavid Greenman 			if (p) {
				/* A cached or busy page ends the forward run;
				 * the cluster must stay contiguous. */
2725070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
2735070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
27424a1cce3SDavid Greenman 					forward_okay = FALSE;
27524a1cce3SDavid Greenman 					goto do_backward;
276f6b04d2bSDavid Greenman 				}
27724a1cce3SDavid Greenman 				vm_page_test_dirty(p);
				/* Accept only dirty, unwired, unheld pages
				 * that are inactive (or anything when
				 * VM_PAGEOUT_FORCE is requested). */
27824a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
279bd7e5f99SJohn Dyson 				    ((p->queue == PQ_INACTIVE) ||
28024a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
28124a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
28224a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
283f35329acSJohn Dyson 					mc[vm_pageout_page_count + i] = p;
28424a1cce3SDavid Greenman 					pageout_count++;
28524a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
28624a1cce3SDavid Greenman 						break;
28724a1cce3SDavid Greenman 				} else {
28824a1cce3SDavid Greenman 					forward_okay = FALSE;
289f6b04d2bSDavid Greenman 				}
29024a1cce3SDavid Greenman 			} else {
29124a1cce3SDavid Greenman 				forward_okay = FALSE;
29224a1cce3SDavid Greenman 			}
29324a1cce3SDavid Greenman 		}
29424a1cce3SDavid Greenman do_backward:
29524a1cce3SDavid Greenman 		/*
29624a1cce3SDavid Greenman 		 * See if backward page is clusterable.
29724a1cce3SDavid Greenman 		 */
29824a1cce3SDavid Greenman 		if (backward_okay) {
29924a1cce3SDavid Greenman 			/*
30024a1cce3SDavid Greenman 			 * Stop backward scan at beginning of object.
30124a1cce3SDavid Greenman 			 */
			/* Note: unlike the forward case, pindex 0 is itself a
			 * valid page, so we clear the flag but still examine
			 * it below; the flag only stops further iterations. */
302a316d390SJohn Dyson 			if ((pindex - i) == 0) {
30324a1cce3SDavid Greenman 				backward_okay = FALSE;
30424a1cce3SDavid Greenman 			}
305a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex - i);
30624a1cce3SDavid Greenman 			if (p) {
3075070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
3085070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
30924a1cce3SDavid Greenman 					backward_okay = FALSE;
31024a1cce3SDavid Greenman 					continue;
31124a1cce3SDavid Greenman 				}
31224a1cce3SDavid Greenman 				vm_page_test_dirty(p);
31324a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
314bd7e5f99SJohn Dyson 				    ((p->queue == PQ_INACTIVE) ||
31524a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
31624a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
31724a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
318f35329acSJohn Dyson 					mc[vm_pageout_page_count - i] = p;
31924a1cce3SDavid Greenman 					pageout_count++;
32024a1cce3SDavid Greenman 					page_base--;
32124a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
32224a1cce3SDavid Greenman 						break;
32324a1cce3SDavid Greenman 				} else {
32424a1cce3SDavid Greenman 					backward_okay = FALSE;
32524a1cce3SDavid Greenman 				}
32624a1cce3SDavid Greenman 			} else {
32724a1cce3SDavid Greenman 				backward_okay = FALSE;
32824a1cce3SDavid Greenman 			}
329f6b04d2bSDavid Greenman 		}
330f6b04d2bSDavid Greenman 	}
331f6b04d2bSDavid Greenman 
33267bf6868SJohn Dyson 	/*
33367bf6868SJohn Dyson 	 * we allow reads during pageouts...
33467bf6868SJohn Dyson 	 */
	/* Busy each page so faults block on it, and downgrade its mappings
	 * to read-only so any write during the pageout is still tracked. */
33524a1cce3SDavid Greenman 	for (i = page_base; i < (page_base + pageout_count); i++) {
33624a1cce3SDavid Greenman 		mc[i]->flags |= PG_BUSY;
33767bf6868SJohn Dyson 		vm_page_protect(mc[i], VM_PROT_READ);
33826f9a767SRodney W. Grimes 	}
33926f9a767SRodney W. Grimes 
340aef922f5SJohn Dyson 	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
341aef922f5SJohn Dyson }
342aef922f5SJohn Dyson 
/*
 * vm_pageout_flush:
 *
 * Hand "count" busied pages (mc[0..count-1], all belonging to the same
 * object) to the pager.  Returns the number of pages whose pageout
 * succeeded or is still pending; pages that fail are reactivated.
 */
343aef922f5SJohn Dyson int
344aef922f5SJohn Dyson vm_pageout_flush(mc, count, sync)
345aef922f5SJohn Dyson 	vm_page_t *mc;
346aef922f5SJohn Dyson 	int count;
347aef922f5SJohn Dyson 	int sync;
348aef922f5SJohn Dyson {
349aef922f5SJohn Dyson 	register vm_object_t object;
350aef922f5SJohn Dyson 	int pageout_status[count];
351aef922f5SJohn Dyson 	int anyok = 0;
352aef922f5SJohn Dyson 	int i;
353aef922f5SJohn Dyson 
	/* Take one paging-in-progress reference per page; each reference is
	 * dropped below as soon as that page's pageout is known complete. */
354aef922f5SJohn Dyson 	object = mc[0]->object;
355aef922f5SJohn Dyson 	object->paging_in_progress += count;
356aef922f5SJohn Dyson 
	/* Writes against the kernel object are always done synchronously. */
357aef922f5SJohn Dyson 	vm_pager_put_pages(object, mc, count,
35826f9a767SRodney W. Grimes 	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
35926f9a767SRodney W. Grimes 	    pageout_status);
36026f9a767SRodney W. Grimes 
361aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
362aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
36324a1cce3SDavid Greenman 
36426f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
36526f9a767SRodney W. Grimes 		case VM_PAGER_OK:
36626f9a767SRodney W. Grimes 			++anyok;
36726f9a767SRodney W. Grimes 			break;
36826f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
			/* Still in flight; counted as success, cleanup
			 * happens when the async I/O completes. */
36926f9a767SRodney W. Grimes 			++anyok;
37026f9a767SRodney W. Grimes 			break;
37126f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
37226f9a767SRodney W. Grimes 			/*
3730d94caffSDavid Greenman 			 * Page outside of range of object. Right now we
3740d94caffSDavid Greenman 			 * essentially lose the changes by pretending it
3750d94caffSDavid Greenman 			 * worked.
37626f9a767SRodney W. Grimes 			 */
37767bf6868SJohn Dyson 			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
37824a1cce3SDavid Greenman 			mt->dirty = 0;
37926f9a767SRodney W. Grimes 			break;
38026f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
38126f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
38226f9a767SRodney W. Grimes 			/*
3830d94caffSDavid Greenman 			 * If page couldn't be paged out, then reactivate the
3840d94caffSDavid Greenman 			 * page so it doesn't clog the inactive list.  (We
3850d94caffSDavid Greenman 			 * will try paging out it again later).
38626f9a767SRodney W. Grimes 			 */
387bd7e5f99SJohn Dyson 			if (mt->queue == PQ_INACTIVE)
38824a1cce3SDavid Greenman 				vm_page_activate(mt);
38926f9a767SRodney W. Grimes 			break;
39026f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
39126f9a767SRodney W. Grimes 			break;
39226f9a767SRodney W. Grimes 		}
39326f9a767SRodney W. Grimes 
39426f9a767SRodney W. Grimes 
39526f9a767SRodney W. Grimes 		/*
3960d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
3970d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
3980d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
3990d94caffSDavid Greenman 		 * collapse.
40026f9a767SRodney W. Grimes 		 */
40126f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
			/* Drop this page's pip reference and un-busy it,
			 * waking anyone sleeping on the page. */
402f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
40324a1cce3SDavid Greenman 			PAGE_WAKEUP(mt);
40426f9a767SRodney W. Grimes 		}
40526f9a767SRodney W. Grimes 	}
40626f9a767SRodney W. Grimes 	return anyok;
40726f9a767SRodney W. Grimes }
40826f9a767SRodney W. Grimes 
40938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
41026f9a767SRodney W. Grimes /*
41126f9a767SRodney W. Grimes  *	vm_pageout_object_deactivate_pages
41226f9a767SRodney W. Grimes  *
41326f9a767SRodney W. Grimes  *	deactivate enough pages to satisfy the inactive target
41426f9a767SRodney W. Grimes  *	requirements or if vm_page_proc_limit is set, then
41526f9a767SRodney W. Grimes  *	deactivate all of the pages in the object and its
41624a1cce3SDavid Greenman  *	backing_objects.
41726f9a767SRodney W. Grimes  *
41826f9a767SRodney W. Grimes  *	The object and map must be locked.
41926f9a767SRodney W. Grimes  */
42038efa82bSJohn Dyson static void
42138efa82bSJohn Dyson vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
42226f9a767SRodney W. Grimes 	vm_map_t map;
42326f9a767SRodney W. Grimes 	vm_object_t object;
42438efa82bSJohn Dyson 	vm_pindex_t desired;
4250d94caffSDavid Greenman 	int map_remove_only;
42626f9a767SRodney W. Grimes {
42726f9a767SRodney W. Grimes 	register vm_page_t p, next;
42826f9a767SRodney W. Grimes 	int rcount;
42938efa82bSJohn Dyson 	int remove_mode;
4301eeaa1e3SJohn Dyson 	int s;
43126f9a767SRodney W. Grimes 
	/* Device-pager objects have no pageable resident pages. */
43224a1cce3SDavid Greenman 	if (object->type == OBJT_DEVICE)
43338efa82bSJohn Dyson 		return;
4348f895206SDavid Greenman 
	/* Walk this object and its backing_object chain until the pmap's
	 * resident page count drops to "desired". */
43538efa82bSJohn Dyson 	while (object) {
43638efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
43738efa82bSJohn Dyson 			return;
		/* Don't interfere with an object that has I/O in flight. */
43824a1cce3SDavid Greenman 		if (object->paging_in_progress)
43938efa82bSJohn Dyson 			return;
44026f9a767SRodney W. Grimes 
		/* In remove mode we never deactivate, only requeue; forced
		 * when the object is shadowed more than once — presumably
		 * because other processes may still be using its pages. */
44138efa82bSJohn Dyson 		remove_mode = map_remove_only;
44238efa82bSJohn Dyson 		if (object->shadow_count > 1)
44338efa82bSJohn Dyson 			remove_mode = 1;
44426f9a767SRodney W. Grimes 	/*
44526f9a767SRodney W. Grimes 	 * scan the objects entire memory queue
44626f9a767SRodney W. Grimes 	 */
44726f9a767SRodney W. Grimes 		rcount = object->resident_page_count;
448b18bfc3dSJohn Dyson 		p = TAILQ_FIRST(&object->memq);
44926f9a767SRodney W. Grimes 		while (p && (rcount-- > 0)) {
4507e006499SJohn Dyson 			int actcount;
45138efa82bSJohn Dyson 			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
45238efa82bSJohn Dyson 				return;
453b18bfc3dSJohn Dyson 			next = TAILQ_NEXT(p, listq);
454a58d1fa1SDavid Greenman 			cnt.v_pdpages++;
			/* Skip wired, held, or busy pages, and pages not
			 * mapped into this particular process's pmap. */
4550d94caffSDavid Greenman 			if (p->wire_count != 0 ||
4560d94caffSDavid Greenman 			    p->hold_count != 0 ||
4570d94caffSDavid Greenman 			    p->busy != 0 ||
458bd7e5f99SJohn Dyson 			    (p->flags & PG_BUSY) ||
4590d94caffSDavid Greenman 			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
4600d94caffSDavid Greenman 				p = next;
4610d94caffSDavid Greenman 				continue;
4620d94caffSDavid Greenman 			}
463ef743ce6SJohn Dyson 
			/* Harvest (and clear) the hardware reference bits;
			 * fold a previously latched PG_REFERENCED into the
			 * activity count. */
4647e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
4657e006499SJohn Dyson 			if (actcount) {
466ef743ce6SJohn Dyson 				p->flags |= PG_REFERENCED;
467c8c4b40cSJohn Dyson 			} else if (p->flags & PG_REFERENCED) {
4687e006499SJohn Dyson 				actcount = 1;
469ef743ce6SJohn Dyson 			}
470ef743ce6SJohn Dyson 
			/* Referenced but not active: promote it. */
47138efa82bSJohn Dyson 			if ((p->queue != PQ_ACTIVE) &&
47238efa82bSJohn Dyson 				(p->flags & PG_REFERENCED)) {
473ef743ce6SJohn Dyson 				vm_page_activate(p);
4747e006499SJohn Dyson 				p->act_count += actcount;
475c8c4b40cSJohn Dyson 				p->flags &= ~PG_REFERENCED;
476c8c4b40cSJohn Dyson 			} else if (p->queue == PQ_ACTIVE) {
477ef743ce6SJohn Dyson 				if ((p->flags & PG_REFERENCED) == 0) {
					/* Unreferenced active page: decay its
					 * act_count and deactivate once it
					 * hits zero (or unconditionally under
					 * the pure-LRU algorithm). */
478c8c4b40cSJohn Dyson 					p->act_count -= min(p->act_count, ACT_DECLINE);
479c8c4b40cSJohn Dyson 					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
480b18bfc3dSJohn Dyson 						vm_page_protect(p, VM_PROT_NONE);
48126f9a767SRodney W. Grimes 						vm_page_deactivate(p);
48226f9a767SRodney W. Grimes 					} else {
						/* Still warm: rotate to the
						 * tail of the active queue.
						 * splvm protects the queue
						 * against interrupt-time
						 * manipulation. */
483c8c4b40cSJohn Dyson 						s = splvm();
484c8c4b40cSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
485c8c4b40cSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
486c8c4b40cSJohn Dyson 						splx(s);
487c8c4b40cSJohn Dyson 					}
488c8c4b40cSJohn Dyson 				} else {
					/* Referenced active page: reward it
					 * and rotate to the queue tail. */
489a647a309SDavid Greenman 					p->flags &= ~PG_REFERENCED;
49038efa82bSJohn Dyson 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
49138efa82bSJohn Dyson 						p->act_count += ACT_ADVANCE;
4921eeaa1e3SJohn Dyson 					s = splvm();
49326f9a767SRodney W. Grimes 					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
49426f9a767SRodney W. Grimes 					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
4951eeaa1e3SJohn Dyson 					splx(s);
49626f9a767SRodney W. Grimes 				}
497bd7e5f99SJohn Dyson 			} else if (p->queue == PQ_INACTIVE) {
				/* Already inactive: strip its mappings so it
				 * can move on toward cache/free. */
498f919ebdeSDavid Greenman 				vm_page_protect(p, VM_PROT_NONE);
49926f9a767SRodney W. Grimes 			}
50026f9a767SRodney W. Grimes 			p = next;
50126f9a767SRodney W. Grimes 		}
50238efa82bSJohn Dyson 		object = object->backing_object;
50338efa82bSJohn Dyson 	}
50438efa82bSJohn Dyson 	return;
50526f9a767SRodney W. Grimes }
50626f9a767SRodney W. Grimes 
50726f9a767SRodney W. Grimes /*
50826f9a767SRodney W. Grimes  * deactivate some number of pages in a map, try to do it fairly, but
50926f9a767SRodney W. Grimes  * that is really hard to do.
51026f9a767SRodney W. Grimes  */
511cd41fc12SDavid Greenman static void
51238efa82bSJohn Dyson vm_pageout_map_deactivate_pages(map, desired)
51326f9a767SRodney W. Grimes 	vm_map_t map;
51438efa82bSJohn Dyson 	vm_pindex_t desired;
51526f9a767SRodney W. Grimes {
51626f9a767SRodney W. Grimes 	vm_map_entry_t tmpe;
51738efa82bSJohn Dyson 	vm_object_t obj, bigobj;
5180d94caffSDavid Greenman 
	/* Reference the map and try-lock it (LK_NOWAIT): the daemon must
	 * not sleep on a contended map lock, so just give up instead. */
51926f9a767SRodney W. Grimes 	vm_map_reference(map);
520996c772fSJohn Dyson 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
52126f9a767SRodney W. Grimes 		vm_map_deallocate(map);
52226f9a767SRodney W. Grimes 		return;
52326f9a767SRodney W. Grimes 	}
52438efa82bSJohn Dyson 
52538efa82bSJohn Dyson 	bigobj = NULL;
52638efa82bSJohn Dyson 
52738efa82bSJohn Dyson 	/*
52838efa82bSJohn Dyson 	 * first, search out the biggest object, and try to free pages from
52938efa82bSJohn Dyson 	 * that.
53038efa82bSJohn Dyson 	 */
53126f9a767SRodney W. Grimes 	tmpe = map->header.next;
53238efa82bSJohn Dyson 	while (tmpe != &map->header) {
		/* Only plain map entries qualify; share maps and submaps
		 * are skipped. */
533afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
53438efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
			/* Track the unshared object (shadow_count <= 1) with
			 * the largest resident page count. */
53538efa82bSJohn Dyson 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
53638efa82bSJohn Dyson 				((bigobj == NULL) ||
53738efa82bSJohn Dyson 				 (bigobj->resident_page_count < obj->resident_page_count))) {
53838efa82bSJohn Dyson 				bigobj = obj;
53938efa82bSJohn Dyson 			}
54038efa82bSJohn Dyson 		}
54138efa82bSJohn Dyson 		tmpe = tmpe->next;
54238efa82bSJohn Dyson 	}
54338efa82bSJohn Dyson 
54438efa82bSJohn Dyson 	if (bigobj)
54538efa82bSJohn Dyson 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
54638efa82bSJohn Dyson 
54738efa82bSJohn Dyson 	/*
54838efa82bSJohn Dyson 	 * Next, hunt around for other pages to deactivate.  We actually
54938efa82bSJohn Dyson 	 * do this search sort of wrong -- .text first is not the best idea.
55038efa82bSJohn Dyson 	 */
55138efa82bSJohn Dyson 	tmpe = map->header.next;
55238efa82bSJohn Dyson 	while (tmpe != &map->header) {
		/* Stop as soon as the resident-set target is met. */
55338efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
55438efa82bSJohn Dyson 			break;
555afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
55638efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
55701155bd7SDavid Greenman 			if (obj)
55838efa82bSJohn Dyson 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
55938efa82bSJohn Dyson 		}
56026f9a767SRodney W. Grimes 		tmpe = tmpe->next;
56126f9a767SRodney W. Grimes 	};
56238efa82bSJohn Dyson 
56338efa82bSJohn Dyson 	/*
56438efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out, this will free page
56538efa82bSJohn Dyson 	 * table pages.
56638efa82bSJohn Dyson 	 */
56738efa82bSJohn Dyson 	if (desired == 0)
56838efa82bSJohn Dyson 		pmap_remove(vm_map_pmap(map),
56938efa82bSJohn Dyson 			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
57038efa82bSJohn Dyson 	vm_map_unlock(map);
57126f9a767SRodney W. Grimes 	vm_map_deallocate(map);
57226f9a767SRodney W. Grimes 	return;
57326f9a767SRodney W. Grimes }
57438efa82bSJohn Dyson #endif
575df8bae1dSRodney W. Grimes 
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	One pass: scan the inactive queue freeing/caching/laundering pages,
 *	then scan the active queue deactivating pages to cover any computed
 *	shortage, top up the interrupt-safe free reserve from the cache
 *	queue, kick the sync daemon / swapout as needed, and as a last
 *	resort kill the largest process when swap is exhausted.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, addl_page_shortage, maxscan, maxlaunder, pcount;
	int pages_freed;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Start scanning the inactive queue for pages we can free. We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	pages_freed = 0;
	addl_page_shortage = 0;

	/* Bound how many dirty pages one pagedaemon pass may launder. */
	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;
rescan0:
	maxscan = cnt.v_inactive_count;
	for( m = TAILQ_FIRST(&vm_page_queue_inactive);

		(m != NULL) && (maxscan-- > 0) &&
			((cnt.v_cache_count + cnt.v_free_count) <
			(cnt.v_cache_min + cnt.v_free_target));

		m = next) {

		cnt.v_pdpages++;

		/*
		 * Operations below can block, so the page may have been
		 * pulled off the inactive queue behind our back; if so,
		 * restart the scan from the head of the queue.
		 */
		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * Held pages cannot be reclaimed; requeue at the tail and
		 * account for them in the additional shortage.
		 */
		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue -- most likely they are being paged out already.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous references.
		 */
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the inactive
		 * queue, we bump the "activation count" upwards, making it less
		 * likely that the page will be added back to the inactive queue
		 * prematurely again.  Here we check the page tables (or emulated
		 * bits, if any), given the upper level VM system not knowing anything
		 * about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page references,
		 * we activate the page.  We also set the "activation count" higher
		 * than normal so that we will less likely place pages back onto the
		 * inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			m->flags &= ~PG_REFERENCED;
#if 0
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
#else
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
#endif
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about the
		 * page being dirty, we have to check for it again.  As far as the
		 * VM code knows, any partially dirty pages are fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else if (m->dirty != 0) {
			m->dirty = VM_PAGE_BITS_ALL;
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			cnt.v_dfree++;
			++pages_freed;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			++pages_freed;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			struct vnode *vp = NULL;

			object = m->object;

			/*
			 * We don't bother paging objects that are "dead".  Those
			 * objects are in a "rundown" state.
			 */
			if (object->flags & OBJ_DEAD) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * For vnode-backed objects we must get the vnode
			 * before cleaning; if that fails, requeue the page
			 * and remember that we skipped a dirty-able vnode.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) ||
				    vget(vp, LK_EXCLUSIVE, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
						(m->hold_count == 0) &&
						(m->busy == 0) &&
						(m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					continue;
				}

				/*
				 * The page might have been moved to another queue
				 * during potential blocking in vget() above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the blocking
				 * in vget();  We don't move the page back onto the
				 * end of the queue so that statistics are more
				 * correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);

			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */
	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}

	/*
	 * If the "inactive" loop finds that there is a shortage over and
	 * above the page statistics variables, then we need to accommodate
	 * that.  This avoids potential deadlocks due to pages being temporarily
	 * busy for I/O or other types of temporary wiring.
	 */
	if (addl_page_shortage) {
		if (page_shortage < 0)
			page_shortage = 0;
		page_shortage += addl_page_shortage;
	}

	/*
	 * Scan the active queue, moving pages to the inactive (or cache)
	 * queue until the shortage is covered.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		m->flags &= ~PG_REFERENCED;

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			/* Recently used: rotate to the tail of the active queue. */
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
				(m->object->ref_count == 0) || (m->act_count == 0)) {
				--page_shortage;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				/* Still has activity credit: keep it active. */
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();
	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover);
		if (!m)
			break;
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
		(cnt.v_free_target + cnt.v_cache_min) ) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swapping_enabled &&
			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout = 1;
		}
#endif
	}


	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 * (low pids are also spared while swap remains)
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size (resident pages)
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	/* force_wakeup is never set in this function; always returns 0. */
	return force_wakeup;
}
99826f9a767SRodney W. Grimes 
/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m,next;
	int pcount,tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;

	/*
	 * Normally only a fraction of the active queue is scanned per call,
	 * scaled by vm_pageout_stats_max; once fullintervalcount reaches
	 * vm_pageout_full_stats_interval the whole queue is scanned.
	 *
	 * NOTE(review): fullintervalcount is incremented here but never
	 * reset within this function, so after the first full interval
	 * elapses every subsequent call scans the entire active queue --
	 * confirm this is intended.
	 */
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		/* Stop if the page was moved off the active queue. */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/* Gather the page's reference activity from the VM flag
		 * and the pmap-level referenced bits. */
		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			m->flags &= ~PG_REFERENCED;
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			/* Referenced: credit activity and rotate to the tail. */
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have more accurate
				 * RSS stats.  We don't do this in the normal page deactivation
				 * when the system is loaded VM wise, because the cost of
				 * the large number of page protect operations would be higher
				 * than the value of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				/* Decay activity and keep the page active for now. */
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}
1082dc2efb27SJohn Dyson 
1083dc2efb27SJohn Dyson 
1084b182ec9eSJohn Dyson static int
1085b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1086b182ec9eSJohn Dyson vm_size_t count;
1087b182ec9eSJohn Dyson {
1088b182ec9eSJohn Dyson 	if (count < cnt.v_page_count)
1089b182ec9eSJohn Dyson 		 return 0;
1090b182ec9eSJohn Dyson 	/*
1091b182ec9eSJohn Dyson 	 * free_reserved needs to include enough for the largest swap pager
1092b182ec9eSJohn Dyson 	 * structures plus enough for any pv_entry structs when paging.
1093b182ec9eSJohn Dyson 	 */
1094b182ec9eSJohn Dyson 	if (cnt.v_page_count > 1024)
1095b182ec9eSJohn Dyson 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1096b182ec9eSJohn Dyson 	else
1097b182ec9eSJohn Dyson 		cnt.v_free_min = 4;
1098f35329acSJohn Dyson 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1099f35329acSJohn Dyson 		cnt.v_interrupt_free_min;
1100f35329acSJohn Dyson 	cnt.v_free_reserved = vm_pageout_page_count +
1101a2f4a846SJohn Dyson 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1102a2f4a846SJohn Dyson 	cnt.v_free_min += cnt.v_free_reserved;
1103b182ec9eSJohn Dyson 	return 1;
1104b182ec9eSJohn Dyson }
1105b182ec9eSJohn Dyson 
1106b182ec9eSJohn Dyson 
/*
 *	vm_pageout is the high level pageout daemon.
 *
 *	Computes the paging watermarks once at startup, then loops forever:
 *	sleeps until woken (or times out), and either runs the lightweight
 *	statistics scan or a full vm_pageout_scan() pass.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	/* Small-memory machines get a reduced pageout cluster size
	 * (presumably the default is larger -- set elsewhere in the file). */
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;

	/* Size the cache/inactive targets from the initial free count. */
	if (cnt.v_free_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 4;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 25;


	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int inactive_target;
		int error;
		int s = splvm();
		/*
		 * If there is no shortage, sleep with a timeout; a timed-out
		 * sleep with still no demand runs only the cheap statistics
		 * scan.  If memory is short, just pause briefly before
		 * rescanning.
		 */
		if (!vm_pages_needed ||
			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
				PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
		}
		/* Recompute the inactive target from the unwired page count,
		 * floored at twice v_free_min. */
		inactive_target =
			(cnt.v_page_count - cnt.v_wire_count) / 4;
		if (inactive_target < 2*cnt.v_free_min)
			inactive_target = 2*cnt.v_free_min;
		cnt.v_inactive_target = inactive_target;
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		/* Sync the pagers around the scan, then wake any threads
		 * sleeping on the free count. */
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
	}
}
119726f9a767SRodney W. Grimes 
1198e0c5a895SJohn Dyson void
1199e0c5a895SJohn Dyson pagedaemon_wakeup()
1200e0c5a895SJohn Dyson {
1201e0c5a895SJohn Dyson 	if (!vm_pages_needed && curproc != pageproc) {
1202e0c5a895SJohn Dyson 		vm_pages_needed++;
1203e0c5a895SJohn Dyson 		wakeup(&vm_pages_needed);
1204e0c5a895SJohn Dyson 	}
1205e0c5a895SJohn Dyson }
1206e0c5a895SJohn Dyson 
120738efa82bSJohn Dyson #if !defined(NO_SWAPPING)
12085afce282SDavid Greenman static void
12095afce282SDavid Greenman vm_req_vmdaemon()
12105afce282SDavid Greenman {
12115afce282SDavid Greenman 	static int lastrun = 0;
12125afce282SDavid Greenman 
1213b18bfc3dSJohn Dyson 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
12145afce282SDavid Greenman 		wakeup(&vm_daemon_needed);
12155afce282SDavid Greenman 		lastrun = ticks;
12165afce282SDavid Greenman 	}
12175afce282SDavid Greenman }
12185afce282SDavid Greenman 
/*
 * vm_daemon:  kernel process that performs swap-related housekeeping.
 * It sleeps until vm_req_vmdaemon() wakes it, then (1) swaps out whole
 * processes if that has been requested, (2) deactivates pages of
 * processes that exceed their RSS limit or are swapped out, and
 * (3) discards cached objects that have no resident pages left.
 */
static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		/* Block until vm_req_vmdaemon() announces work to do. */
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		/*
		 * First honor any outstanding request to swap entire
		 * processes out, then clear the request flag.
		 */
		if (vm_pageout_req_swapout) {
			swapout_procs();
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * let processes that are swapped out really be
			 * swapped out set the limit to nothing (will force a
			 * swap-out.)
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			/* Current RSS in bytes, taken from the pmap statistics. */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				/*
				 * Over the limit (or swapped out): deactivate
				 * pages until the map's RSS fits below the
				 * limit again.
				 * NOTE(review): "limit" is quad_t, so the
				 * "limit >= 0" test looks always-true here --
				 * confirm qmin() cannot yield a negative value.
				 */
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_pindex_t)(limit >> PAGE_SHIFT) );
			}
		}

		/*
		 * we remove cached objects that have no RSS...
		 */
restart:
		object = TAILQ_FIRST(&vm_object_cached_list);
		while (object) {
			/*
			 * if there are no resident pages -- get rid of the object
			 */
			if (object->resident_page_count == 0) {
				vm_object_reference(object);
				pager_cache(object, FALSE);
				/*
				 * pager_cache() may have freed the object and
				 * unlinked it from the cached list, so the
				 * saved pointer cannot be trusted -- restart
				 * the scan from the head of the list.
				 */
				goto restart;
			}
			object = TAILQ_NEXT(object, cached_list);
		}
	}
}
#endif
1294