xref: /freebsd/sys/vm/vm_pageout.c (revision faa5f8d8da352e425d6d4593165fcbd7a8657f52)
1df8bae1dSRodney W. Grimes /*
226f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
326f9a767SRodney W. Grimes  * All rights reserved.
426f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
726f9a767SRodney W. Grimes  * All rights reserved.
8df8bae1dSRodney W. Grimes  *
9df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
10df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
21df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
403c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  *
43df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44df8bae1dSRodney W. Grimes  * All rights reserved.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
49df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
50df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
51df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
52df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61df8bae1dSRodney W. Grimes  *  School of Computer Science
62df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
63df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
66df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
6726f9a767SRodney W. Grimes  *
68faa5f8d8SAndrzej Bialecki  * $Id: vm_pageout.c,v 1.126 1998/09/04 08:06:57 dfr Exp $
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
76df8bae1dSRodney W. Grimes #include <sys/param.h>
7726f9a767SRodney W. Grimes #include <sys/systm.h>
78b5e8ce9fSBruce Evans #include <sys/kernel.h>
7926f9a767SRodney W. Grimes #include <sys/proc.h>
8026f9a767SRodney W. Grimes #include <sys/resourcevar.h>
81d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
82f6b04d2bSDavid Greenman #include <sys/vnode.h>
83efeaf95aSDavid Greenman #include <sys/vmmeter.h>
8438efa82bSJohn Dyson #include <sys/sysctl.h>
85df8bae1dSRodney W. Grimes 
86df8bae1dSRodney W. Grimes #include <vm/vm.h>
87efeaf95aSDavid Greenman #include <vm/vm_param.h>
88efeaf95aSDavid Greenman #include <vm/vm_prot.h>
89996c772fSJohn Dyson #include <sys/lock.h>
90efeaf95aSDavid Greenman #include <vm/vm_object.h>
91df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
92efeaf95aSDavid Greenman #include <vm/vm_map.h>
93df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
9424a1cce3SDavid Greenman #include <vm/vm_pager.h>
9505f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
96efeaf95aSDavid Greenman #include <vm/vm_extern.h>
97df8bae1dSRodney W. Grimes 
982b14f991SJulian Elischer /*
992b14f991SJulian Elischer  * System initialization
1002b14f991SJulian Elischer  */
1012b14f991SJulian Elischer 
1022b14f991SJulian Elischer /* the kernel process "vm_pageout" */
1032b14f991SJulian Elischer static void vm_pageout __P((void));
1048f9110f6SJohn Dyson static int vm_pageout_clean __P((vm_page_t));
1053af76890SPoul-Henning Kamp static int vm_pageout_scan __P((void));
106f35329acSJohn Dyson static int vm_pageout_free_page_calc __P((vm_size_t count));
1072b14f991SJulian Elischer struct proc *pageproc;
1082b14f991SJulian Elischer 
1092b14f991SJulian Elischer static struct kproc_desc page_kp = {
1102b14f991SJulian Elischer 	"pagedaemon",
1112b14f991SJulian Elischer 	vm_pageout,
1122b14f991SJulian Elischer 	&pageproc
1132b14f991SJulian Elischer };
1144590fd3aSDavid Greenman SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1152b14f991SJulian Elischer 
11638efa82bSJohn Dyson #if !defined(NO_SWAPPING)
1172b14f991SJulian Elischer /* the kernel process "vm_daemon" */
1182b14f991SJulian Elischer static void vm_daemon __P((void));
119f708ef1bSPoul-Henning Kamp static struct	proc *vmproc;
1202b14f991SJulian Elischer 
1212b14f991SJulian Elischer static struct kproc_desc vm_kp = {
1222b14f991SJulian Elischer 	"vmdaemon",
1232b14f991SJulian Elischer 	vm_daemon,
1242b14f991SJulian Elischer 	&vmproc
1252b14f991SJulian Elischer };
1264590fd3aSDavid Greenman SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
12738efa82bSJohn Dyson #endif
1282b14f991SJulian Elischer 
1292b14f991SJulian Elischer 
1302d8acc0fSJohn Dyson int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
1312d8acc0fSJohn Dyson int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
1322d8acc0fSJohn Dyson int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */
13326f9a767SRodney W. Grimes 
13426f9a767SRodney W. Grimes extern int npendingio;
13538efa82bSJohn Dyson #if !defined(NO_SWAPPING)
136f708ef1bSPoul-Henning Kamp static int vm_pageout_req_swapout;	/* XXX */
137f708ef1bSPoul-Henning Kamp static int vm_daemon_needed;
13838efa82bSJohn Dyson #endif
13926f9a767SRodney W. Grimes extern int nswiodone;
1405663e6deSDavid Greenman extern int vm_swap_size;
141f6b04d2bSDavid Greenman extern int vfs_update_wakeup;
142303b270bSEivind Eklund static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
143303b270bSEivind Eklund static int vm_pageout_full_stats_interval = 0;
144303b270bSEivind Eklund static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
145303b270bSEivind Eklund static int defer_swap_pageouts=0;
146303b270bSEivind Eklund static int disable_swap_pageouts=0;
14770111b90SJohn Dyson 
148303b270bSEivind Eklund static int max_page_launder=100;
14938efa82bSJohn Dyson #if defined(NO_SWAPPING)
150303b270bSEivind Eklund static int vm_swap_enabled=0;
151303b270bSEivind Eklund static int vm_swap_idle_enabled=0;
15238efa82bSJohn Dyson #else
153303b270bSEivind Eklund static int vm_swap_enabled=1;
154303b270bSEivind Eklund static int vm_swap_idle_enabled=0;
15538efa82bSJohn Dyson #endif
15638efa82bSJohn Dyson 
15738efa82bSJohn Dyson SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
15838efa82bSJohn Dyson 	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");
15938efa82bSJohn Dyson 
160dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
161dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");
162dc2efb27SJohn Dyson 
163dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
164dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");
165dc2efb27SJohn Dyson 
166dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
167dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");
168dc2efb27SJohn Dyson 
169dc2efb27SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
170dc2efb27SJohn Dyson 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");
171dc2efb27SJohn Dyson 
17238efa82bSJohn Dyson #if defined(NO_SWAPPING)
173ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
174ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_enabled, 0, "");
175ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
176ceb0cf87SJohn Dyson 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
17738efa82bSJohn Dyson #else
178ceb0cf87SJohn Dyson SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
179ceb0cf87SJohn Dyson 	CTLFLAG_RW, &vm_swap_enabled, 0, "");
180ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
181ceb0cf87SJohn Dyson 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "");
18238efa82bSJohn Dyson #endif
18326f9a767SRodney W. Grimes 
184ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
18512ac6a1dSJohn Dyson 	CTLFLAG_RW, &defer_swap_pageouts, 0, "");
18612ac6a1dSJohn Dyson 
187ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
18812ac6a1dSJohn Dyson 	CTLFLAG_RW, &disable_swap_pageouts, 0, "");
18912ac6a1dSJohn Dyson 
190ceb0cf87SJohn Dyson SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
191ceb0cf87SJohn Dyson 	CTLFLAG_RW, &max_page_launder, 0, "");
19270111b90SJohn Dyson 
19326f9a767SRodney W. Grimes 
194ffc82b0aSJohn Dyson #define VM_PAGEOUT_PAGE_COUNT 16
195bbc0ec52SDavid Greenman int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
196df8bae1dSRodney W. Grimes 
197c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
198df8bae1dSRodney W. Grimes 
19938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
20038efa82bSJohn Dyson typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
20138efa82bSJohn Dyson static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
202cd41fc12SDavid Greenman static freeer_fcn_t vm_pageout_object_deactivate_pages;
203cd41fc12SDavid Greenman static void vm_req_vmdaemon __P((void));
20438efa82bSJohn Dyson #endif
205dc2efb27SJohn Dyson static void vm_pageout_page_stats(void);
2065985940eSJohn Dyson void pmap_collect(void);
207cd41fc12SDavid Greenman 
20826f9a767SRodney W. Grimes /*
20926f9a767SRodney W. Grimes  * vm_pageout_clean:
21024a1cce3SDavid Greenman  *
2110d94caffSDavid Greenman  * Clean the page and remove it from the laundry.
21226f9a767SRodney W. Grimes  *
2130d94caffSDavid Greenman  * We set the busy bit to cause potential page faults on this page to
21426f9a767SRodney W. Grimes  * block.
21526f9a767SRodney W. Grimes  *
2160d94caffSDavid Greenman  * And we set pageout-in-progress to keep the object from disappearing
2170d94caffSDavid Greenman  * during pageout.  This guarantees that the page won't move from the
2180d94caffSDavid Greenman  * inactive queue.  (However, any other page on the inactive queue may
2190d94caffSDavid Greenman  * move!)
22026f9a767SRodney W. Grimes  */
2213af76890SPoul-Henning Kamp static int
2228f9110f6SJohn Dyson vm_pageout_clean(m)
22324a1cce3SDavid Greenman 	vm_page_t m;
22424a1cce3SDavid Greenman {
22526f9a767SRodney W. Grimes 	register vm_object_t object;
226f35329acSJohn Dyson 	vm_page_t mc[2*vm_pageout_page_count];
22724a1cce3SDavid Greenman 	int pageout_count;
22824a1cce3SDavid Greenman 	int i, forward_okay, backward_okay, page_base;
229a316d390SJohn Dyson 	vm_pindex_t pindex = m->pindex;
23026f9a767SRodney W. Grimes 
23126f9a767SRodney W. Grimes 	object = m->object;
23224a1cce3SDavid Greenman 
23326f9a767SRodney W. Grimes 	/*
23424a1cce3SDavid Greenman 	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
23524a1cce3SDavid Greenman 	 * Try to avoid the deadlock.
23626f9a767SRodney W. Grimes 	 */
2378f9110f6SJohn Dyson 	if ((object->type == OBJT_DEFAULT) &&
23824a1cce3SDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
23926f9a767SRodney W. Grimes 		return 0;
24026f9a767SRodney W. Grimes 
24124a1cce3SDavid Greenman 	/*
24224a1cce3SDavid Greenman 	 * Don't mess with the page if it's busy.
24324a1cce3SDavid Greenman 	 */
2448f9110f6SJohn Dyson 	if ((m->hold_count != 0) ||
2450d94caffSDavid Greenman 	    ((m->busy != 0) || (m->flags & PG_BUSY)))
2460d94caffSDavid Greenman 		return 0;
2470d94caffSDavid Greenman 
24824a1cce3SDavid Greenman 	/*
24924a1cce3SDavid Greenman 	 * Try collapsing before it's too late.
25024a1cce3SDavid Greenman 	 */
2518f9110f6SJohn Dyson 	if (object->backing_object) {
25226f9a767SRodney W. Grimes 		vm_object_collapse(object);
25326f9a767SRodney W. Grimes 	}
2543c018e72SJohn Dyson 
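	/*
	 * The candidate page goes in the middle of the mc[] cluster array;
	 * forward neighbors fill the higher slots and backward neighbors the
	 * lower slots, with page_base tracking the lowest slot in use.
	 */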
255f35329acSJohn Dyson 	mc[vm_pageout_page_count] = m;
25626f9a767SRodney W. Grimes 	pageout_count = 1;
257f35329acSJohn Dyson 	page_base = vm_pageout_page_count;
25824a1cce3SDavid Greenman 	forward_okay = TRUE;
259a316d390SJohn Dyson 	if (pindex != 0)
26024a1cce3SDavid Greenman 		backward_okay = TRUE;
26126f9a767SRodney W. Grimes 	else
26224a1cce3SDavid Greenman 		backward_okay = FALSE;
26324a1cce3SDavid Greenman 	/*
26424a1cce3SDavid Greenman 	 * Scan object for clusterable pages.
26524a1cce3SDavid Greenman 	 *
26624a1cce3SDavid Greenman 	 * We can cluster ONLY if: ->> the page is NOT
26724a1cce3SDavid Greenman 	 * clean, wired, busy, held, or mapped into a
26824a1cce3SDavid Greenman 	 * buffer, and one of the following:
26924a1cce3SDavid Greenman 	 * 1) The page is inactive, or a seldom used
27024a1cce3SDavid Greenman 	 *    active page.
27124a1cce3SDavid Greenman 	 * -or-
27224a1cce3SDavid Greenman 	 * 2) we force the issue.
27324a1cce3SDavid Greenman 	 */
27424a1cce3SDavid Greenman 	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
27524a1cce3SDavid Greenman 		vm_page_t p;
276f6b04d2bSDavid Greenman 
27724a1cce3SDavid Greenman 		/*
27824a1cce3SDavid Greenman 		 * See if forward page is clusterable.
27924a1cce3SDavid Greenman 		 */
28024a1cce3SDavid Greenman 		if (forward_okay) {
28124a1cce3SDavid Greenman 			/*
28224a1cce3SDavid Greenman 			 * Stop forward scan at end of object.
28324a1cce3SDavid Greenman 			 */
284a316d390SJohn Dyson 			if ((pindex + i) > object->size) {
28524a1cce3SDavid Greenman 				forward_okay = FALSE;
28624a1cce3SDavid Greenman 				goto do_backward;
287f6b04d2bSDavid Greenman 			}
288a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex + i);
28924a1cce3SDavid Greenman 			if (p) {
2905070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
2915070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
29224a1cce3SDavid Greenman 					forward_okay = FALSE;
29324a1cce3SDavid Greenman 					goto do_backward;
294f6b04d2bSDavid Greenman 				}
29524a1cce3SDavid Greenman 				vm_page_test_dirty(p);
29624a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
2978f9110f6SJohn Dyson 				    (p->queue == PQ_INACTIVE) &&
29824a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
29924a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
300f35329acSJohn Dyson 					mc[vm_pageout_page_count + i] = p;
30124a1cce3SDavid Greenman 					pageout_count++;
30224a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
30324a1cce3SDavid Greenman 						break;
30424a1cce3SDavid Greenman 				} else {
30524a1cce3SDavid Greenman 					forward_okay = FALSE;
306f6b04d2bSDavid Greenman 				}
30724a1cce3SDavid Greenman 			} else {
30824a1cce3SDavid Greenman 				forward_okay = FALSE;
30924a1cce3SDavid Greenman 			}
31024a1cce3SDavid Greenman 		}
31124a1cce3SDavid Greenman do_backward:
31224a1cce3SDavid Greenman 		/*
31324a1cce3SDavid Greenman 		 * See if backward page is clusterable.
31424a1cce3SDavid Greenman 		 */
31524a1cce3SDavid Greenman 		if (backward_okay) {
31624a1cce3SDavid Greenman 			/*
31724a1cce3SDavid Greenman 			 * Stop backward scan at beginning of object.
31824a1cce3SDavid Greenman 			 */
319a316d390SJohn Dyson 			if ((pindex - i) == 0) {
32024a1cce3SDavid Greenman 				backward_okay = FALSE;
32124a1cce3SDavid Greenman 			}
322a316d390SJohn Dyson 			p = vm_page_lookup(object, pindex - i);
32324a1cce3SDavid Greenman 			if (p) {
3245070c7f8SJohn Dyson 				if (((p->queue - p->pc) == PQ_CACHE) ||
3255070c7f8SJohn Dyson 					(p->flags & PG_BUSY) || p->busy) {
32624a1cce3SDavid Greenman 					backward_okay = FALSE;
32724a1cce3SDavid Greenman 					continue;
32824a1cce3SDavid Greenman 				}
32924a1cce3SDavid Greenman 				vm_page_test_dirty(p);
33024a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
3318f9110f6SJohn Dyson 				    (p->queue == PQ_INACTIVE) &&
33224a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
33324a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
334f35329acSJohn Dyson 					mc[vm_pageout_page_count - i] = p;
33524a1cce3SDavid Greenman 					pageout_count++;
33624a1cce3SDavid Greenman 					page_base--;
33724a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
33824a1cce3SDavid Greenman 						break;
33924a1cce3SDavid Greenman 				} else {
34024a1cce3SDavid Greenman 					backward_okay = FALSE;
34124a1cce3SDavid Greenman 				}
34224a1cce3SDavid Greenman 			} else {
34324a1cce3SDavid Greenman 				backward_okay = FALSE;
34424a1cce3SDavid Greenman 			}
345f6b04d2bSDavid Greenman 		}
346f6b04d2bSDavid Greenman 	}
347f6b04d2bSDavid Greenman 
34867bf6868SJohn Dyson 	/*
34967bf6868SJohn Dyson 	 * we allow reads during pageouts...
35067bf6868SJohn Dyson 	 */
3518f9110f6SJohn Dyson 	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
352aef922f5SJohn Dyson }
353aef922f5SJohn Dyson 
354aef922f5SJohn Dyson int
3558f9110f6SJohn Dyson vm_pageout_flush(mc, count, flags)
356aef922f5SJohn Dyson 	vm_page_t *mc;
357aef922f5SJohn Dyson 	int count;
3588f9110f6SJohn Dyson 	int flags;
359aef922f5SJohn Dyson {
360aef922f5SJohn Dyson 	register vm_object_t object;
361aef922f5SJohn Dyson 	int pageout_status[count];
36295461b45SJohn Dyson 	int numpagedout = 0;
363aef922f5SJohn Dyson 	int i;
364aef922f5SJohn Dyson 
3658f9110f6SJohn Dyson 	for (i = 0; i < count; i++) {
366e69763a3SDoug Rabson 		vm_page_io_start(mc[i]);
3678f9110f6SJohn Dyson 		vm_page_protect(mc[i], VM_PROT_READ);
3688f9110f6SJohn Dyson 	}
3698f9110f6SJohn Dyson 
370aef922f5SJohn Dyson 	object = mc[0]->object;
371d474eaaaSDoug Rabson 	vm_object_pip_add(object, count);
372aef922f5SJohn Dyson 
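	/*
	 * Pageouts of the kernel object are forced synchronous (OBJPC_SYNC);
	 * other objects may complete asynchronously, in which case the pager
	 * reports VM_PAGER_PEND below.
	 */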
373aef922f5SJohn Dyson 	vm_pager_put_pages(object, mc, count,
3748f9110f6SJohn Dyson 	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
37526f9a767SRodney W. Grimes 	    pageout_status);
37626f9a767SRodney W. Grimes 
377aef922f5SJohn Dyson 	for (i = 0; i < count; i++) {
378aef922f5SJohn Dyson 		vm_page_t mt = mc[i];
37924a1cce3SDavid Greenman 
38026f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
38126f9a767SRodney W. Grimes 		case VM_PAGER_OK:
38295461b45SJohn Dyson 			numpagedout++;
38326f9a767SRodney W. Grimes 			break;
38426f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
38595461b45SJohn Dyson 			numpagedout++;
38626f9a767SRodney W. Grimes 			break;
38726f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
38826f9a767SRodney W. Grimes 			/*
3890d94caffSDavid Greenman 			 * Page outside of range of object. Right now we
3900d94caffSDavid Greenman 			 * essentially lose the changes by pretending it
3910d94caffSDavid Greenman 			 * worked.
39226f9a767SRodney W. Grimes 			 */
39367bf6868SJohn Dyson 			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
39424a1cce3SDavid Greenman 			mt->dirty = 0;
39526f9a767SRodney W. Grimes 			break;
39626f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
39726f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
39826f9a767SRodney W. Grimes 			/*
3990d94caffSDavid Greenman 			 * If page couldn't be paged out, then reactivate the
4000d94caffSDavid Greenman 			 * page so it doesn't clog the inactive list.  (We
4010d94caffSDavid Greenman 			 * will try paging it out again later).
40226f9a767SRodney W. Grimes 			 */
40324a1cce3SDavid Greenman 			vm_page_activate(mt);
40426f9a767SRodney W. Grimes 			break;
40526f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
40626f9a767SRodney W. Grimes 			break;
40726f9a767SRodney W. Grimes 		}
40826f9a767SRodney W. Grimes 
40926f9a767SRodney W. Grimes 		/*
4100d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
4110d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
4120d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
4130d94caffSDavid Greenman 		 * collapse.
41426f9a767SRodney W. Grimes 		 */
41526f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
416f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
417e69763a3SDoug Rabson 			vm_page_io_finish(mt);
41826f9a767SRodney W. Grimes 		}
41926f9a767SRodney W. Grimes 	}
42095461b45SJohn Dyson 	return numpagedout;
42126f9a767SRodney W. Grimes }
42226f9a767SRodney W. Grimes 
42338efa82bSJohn Dyson #if !defined(NO_SWAPPING)
42426f9a767SRodney W. Grimes /*
42526f9a767SRodney W. Grimes  *	vm_pageout_object_deactivate_pages
42626f9a767SRodney W. Grimes  *
42726f9a767SRodney W. Grimes  *	deactivate enough pages to satisfy the inactive target
42826f9a767SRodney W. Grimes  *	requirements, or if vm_page_proc_limit is set, then
42926f9a767SRodney W. Grimes  *	deactivate all of the pages in the object and its
43024a1cce3SDavid Greenman  *	backing_objects.
43126f9a767SRodney W. Grimes  *
43226f9a767SRodney W. Grimes  *	The object and map must be locked.
43326f9a767SRodney W. Grimes  */
43438efa82bSJohn Dyson static void
43538efa82bSJohn Dyson vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
43626f9a767SRodney W. Grimes 	vm_map_t map;
43726f9a767SRodney W. Grimes 	vm_object_t object;
43838efa82bSJohn Dyson 	vm_pindex_t desired;
4390d94caffSDavid Greenman 	int map_remove_only;
44026f9a767SRodney W. Grimes {
44126f9a767SRodney W. Grimes 	register vm_page_t p, next;
44226f9a767SRodney W. Grimes 	int rcount;
44338efa82bSJohn Dyson 	int remove_mode;
4441eeaa1e3SJohn Dyson 	int s;
44526f9a767SRodney W. Grimes 
44624a1cce3SDavid Greenman 	if (object->type == OBJT_DEVICE)
44738efa82bSJohn Dyson 		return;
4488f895206SDavid Greenman 
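	/*
	 * Walk the chain of backing objects, deactivating pages in each one
	 * until the pmap's resident count drops to the desired level or an
	 * object with paging in progress is encountered.
	 */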
44938efa82bSJohn Dyson 	while (object) {
45038efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
45138efa82bSJohn Dyson 			return;
45224a1cce3SDavid Greenman 		if (object->paging_in_progress)
45338efa82bSJohn Dyson 			return;
45426f9a767SRodney W. Grimes 
45538efa82bSJohn Dyson 		remove_mode = map_remove_only;
45638efa82bSJohn Dyson 		if (object->shadow_count > 1)
45738efa82bSJohn Dyson 			remove_mode = 1;
45826f9a767SRodney W. Grimes 	/*
45926f9a767SRodney W. Grimes 	 * scan the object's entire memory queue
46026f9a767SRodney W. Grimes 	 */
46126f9a767SRodney W. Grimes 		rcount = object->resident_page_count;
462b18bfc3dSJohn Dyson 		p = TAILQ_FIRST(&object->memq);
46326f9a767SRodney W. Grimes 		while (p && (rcount-- > 0)) {
4647e006499SJohn Dyson 			int actcount;
46538efa82bSJohn Dyson 			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
46638efa82bSJohn Dyson 				return;
467b18bfc3dSJohn Dyson 			next = TAILQ_NEXT(p, listq);
468a58d1fa1SDavid Greenman 			cnt.v_pdpages++;
4690d94caffSDavid Greenman 			if (p->wire_count != 0 ||
4700d94caffSDavid Greenman 			    p->hold_count != 0 ||
4710d94caffSDavid Greenman 			    p->busy != 0 ||
472bd7e5f99SJohn Dyson 			    (p->flags & PG_BUSY) ||
4730d94caffSDavid Greenman 			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
4740d94caffSDavid Greenman 				p = next;
4750d94caffSDavid Greenman 				continue;
4760d94caffSDavid Greenman 			}
477ef743ce6SJohn Dyson 
4787e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
4797e006499SJohn Dyson 			if (actcount) {
480e69763a3SDoug Rabson 				vm_page_flag_set(p, PG_REFERENCED);
481c8c4b40cSJohn Dyson 			} else if (p->flags & PG_REFERENCED) {
4827e006499SJohn Dyson 				actcount = 1;
483ef743ce6SJohn Dyson 			}
484ef743ce6SJohn Dyson 
48538efa82bSJohn Dyson 			if ((p->queue != PQ_ACTIVE) &&
48638efa82bSJohn Dyson 				(p->flags & PG_REFERENCED)) {
487ef743ce6SJohn Dyson 				vm_page_activate(p);
4887e006499SJohn Dyson 				p->act_count += actcount;
489e69763a3SDoug Rabson 				vm_page_flag_clear(p, PG_REFERENCED);
490c8c4b40cSJohn Dyson 			} else if (p->queue == PQ_ACTIVE) {
491ef743ce6SJohn Dyson 				if ((p->flags & PG_REFERENCED) == 0) {
492c8c4b40cSJohn Dyson 					p->act_count -= min(p->act_count, ACT_DECLINE);
493c8c4b40cSJohn Dyson 					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
494b18bfc3dSJohn Dyson 						vm_page_protect(p, VM_PROT_NONE);
49526f9a767SRodney W. Grimes 						vm_page_deactivate(p);
49626f9a767SRodney W. Grimes 					} else {
497c8c4b40cSJohn Dyson 						s = splvm();
498c8c4b40cSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
499c8c4b40cSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
500c8c4b40cSJohn Dyson 						splx(s);
501c8c4b40cSJohn Dyson 					}
502c8c4b40cSJohn Dyson 				} else {
503eaf13dd7SJohn Dyson 					vm_page_activate(p);
504e69763a3SDoug Rabson 					vm_page_flag_clear(p, PG_REFERENCED);
50538efa82bSJohn Dyson 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
50638efa82bSJohn Dyson 						p->act_count += ACT_ADVANCE;
5071eeaa1e3SJohn Dyson 					s = splvm();
50826f9a767SRodney W. Grimes 					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
50926f9a767SRodney W. Grimes 					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
5101eeaa1e3SJohn Dyson 					splx(s);
51126f9a767SRodney W. Grimes 				}
512bd7e5f99SJohn Dyson 			} else if (p->queue == PQ_INACTIVE) {
513f919ebdeSDavid Greenman 				vm_page_protect(p, VM_PROT_NONE);
51426f9a767SRodney W. Grimes 			}
51526f9a767SRodney W. Grimes 			p = next;
51626f9a767SRodney W. Grimes 		}
51738efa82bSJohn Dyson 		object = object->backing_object;
51838efa82bSJohn Dyson 	}
51938efa82bSJohn Dyson 	return;
52026f9a767SRodney W. Grimes }
52126f9a767SRodney W. Grimes 
52226f9a767SRodney W. Grimes /*
52326f9a767SRodney W. Grimes  * deactivate some number of pages in a map, try to do it fairly, but
52326f9a767SRodney W. Grimes  * deactivate some number of pages in a map; try to do it fairly, but
52526f9a767SRodney W. Grimes  */
526cd41fc12SDavid Greenman static void
52738efa82bSJohn Dyson vm_pageout_map_deactivate_pages(map, desired)
52826f9a767SRodney W. Grimes 	vm_map_t map;
52938efa82bSJohn Dyson 	vm_pindex_t desired;
53026f9a767SRodney W. Grimes {
53126f9a767SRodney W. Grimes 	vm_map_entry_t tmpe;
53238efa82bSJohn Dyson 	vm_object_t obj, bigobj;
5330d94caffSDavid Greenman 
534996c772fSJohn Dyson 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
53526f9a767SRodney W. Grimes 		return;
53626f9a767SRodney W. Grimes 	}
53738efa82bSJohn Dyson 
53838efa82bSJohn Dyson 	bigobj = NULL;
53938efa82bSJohn Dyson 
54038efa82bSJohn Dyson 	/*
54138efa82bSJohn Dyson 	 * first, search out the biggest object, and try to free pages from
54238efa82bSJohn Dyson 	 * that.
54338efa82bSJohn Dyson 	 */
54426f9a767SRodney W. Grimes 	tmpe = map->header.next;
54538efa82bSJohn Dyson 	while (tmpe != &map->header) {
546afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
54738efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
54838efa82bSJohn Dyson 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
54938efa82bSJohn Dyson 				((bigobj == NULL) ||
55038efa82bSJohn Dyson 				 (bigobj->resident_page_count < obj->resident_page_count))) {
55138efa82bSJohn Dyson 				bigobj = obj;
55238efa82bSJohn Dyson 			}
55338efa82bSJohn Dyson 		}
55438efa82bSJohn Dyson 		tmpe = tmpe->next;
55538efa82bSJohn Dyson 	}
55638efa82bSJohn Dyson 
55738efa82bSJohn Dyson 	if (bigobj)
55838efa82bSJohn Dyson 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
55938efa82bSJohn Dyson 
56038efa82bSJohn Dyson 	/*
56138efa82bSJohn Dyson 	 * Next, hunt around for other pages to deactivate.  We actually
56238efa82bSJohn Dyson 	 * do this search sort of wrong -- .text first is not the best idea.
56338efa82bSJohn Dyson 	 */
56438efa82bSJohn Dyson 	tmpe = map->header.next;
56538efa82bSJohn Dyson 	while (tmpe != &map->header) {
56638efa82bSJohn Dyson 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
56738efa82bSJohn Dyson 			break;
568afa07f7eSJohn Dyson 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
56938efa82bSJohn Dyson 			obj = tmpe->object.vm_object;
57001155bd7SDavid Greenman 			if (obj)
57138efa82bSJohn Dyson 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
57238efa82bSJohn Dyson 		}
57326f9a767SRodney W. Grimes 		tmpe = tmpe->next;
57426f9a767SRodney W. Grimes 	};
57538efa82bSJohn Dyson 
57638efa82bSJohn Dyson 	/*
57738efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out, this will free page
57838efa82bSJohn Dyson 	 * Remove all mappings if a process is swapped out; this will free page
57938efa82bSJohn Dyson 	 */
58038efa82bSJohn Dyson 	if (desired == 0)
58138efa82bSJohn Dyson 		pmap_remove(vm_map_pmap(map),
58238efa82bSJohn Dyson 			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
58338efa82bSJohn Dyson 	vm_map_unlock(map);
58426f9a767SRodney W. Grimes 	return;
58526f9a767SRodney W. Grimes }
58638efa82bSJohn Dyson #endif
587df8bae1dSRodney W. Grimes 
588925a3a41SJohn Dyson void
589925a3a41SJohn Dyson vm_pageout_page_free(vm_page_t m) {
59047221757SJohn Dyson 	struct vnode *vp;
59147221757SJohn Dyson 	vm_object_t object;
592925a3a41SJohn Dyson 
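	/*
	 * Take a transient reference on the object (and, for vnode objects,
	 * on the vnode) so that neither can be torn down while the page is
	 * freed; the reference is dropped by vm_object_deallocate() below.
	 */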
59347221757SJohn Dyson 	object = m->object;
59447221757SJohn Dyson 	object->ref_count++;
595eaf13dd7SJohn Dyson 
59647221757SJohn Dyson 	if (object->type == OBJT_VNODE) {
59747221757SJohn Dyson 		vp = object->handle;
59847221757SJohn Dyson 		vp->v_usecount++;
59947221757SJohn Dyson 		if (VSHOULDBUSY(vp))
60047221757SJohn Dyson 			vbusy(vp);
601925a3a41SJohn Dyson 	}
602eaf13dd7SJohn Dyson 
603e69763a3SDoug Rabson 	vm_page_busy(m);
604925a3a41SJohn Dyson 	vm_page_protect(m, VM_PROT_NONE);
605925a3a41SJohn Dyson 	vm_page_free(m);
60647221757SJohn Dyson 	vm_object_deallocate(object);
607925a3a41SJohn Dyson }
608925a3a41SJohn Dyson 
609df8bae1dSRodney W. Grimes /*
610df8bae1dSRodney W. Grimes  *	vm_pageout_scan does the dirty work for the pageout daemon.
611df8bae1dSRodney W. Grimes  */
6123af76890SPoul-Henning Kamp static int
613df8bae1dSRodney W. Grimes vm_pageout_scan()
614df8bae1dSRodney W. Grimes {
615502ba6e4SJohn Dyson 	vm_page_t m, next;
61670111b90SJohn Dyson 	int page_shortage, addl_page_shortage, maxscan, pcount;
61770111b90SJohn Dyson 	int maxlaunder;
6184e39a515SPoul-Henning Kamp 	int pages_freed;
6195663e6deSDavid Greenman 	struct proc *p, *bigproc;
6205663e6deSDavid Greenman 	vm_offset_t size, bigsize;
621df8bae1dSRodney W. Grimes 	vm_object_t object;
62226f9a767SRodney W. Grimes 	int force_wakeup = 0;
6237e006499SJohn Dyson 	int actcount;
624f6b04d2bSDavid Greenman 	int vnodes_skipped = 0;
6251eeaa1e3SJohn Dyson 	int s;
6260d94caffSDavid Greenman 
627df8bae1dSRodney W. Grimes 	/*
6285985940eSJohn Dyson 	 * Do whatever cleanup that the pmap code can.
6295985940eSJohn Dyson 	 */
6305985940eSJohn Dyson 	pmap_collect();
6315985940eSJohn Dyson 
6325985940eSJohn Dyson 	/*
6330d94caffSDavid Greenman 	 * Start scanning the inactive queue for pages we can free. We keep
6340d94caffSDavid Greenman 	 * scanning until we have enough free pages or we have scanned through
6350d94caffSDavid Greenman 	 * the entire queue.  If we encounter dirty pages, we start cleaning
6360d94caffSDavid Greenman 	 * them.
637df8bae1dSRodney W. Grimes 	 */
638df8bae1dSRodney W. Grimes 
639b182ec9eSJohn Dyson 	pages_freed = 0;
6402d8acc0fSJohn Dyson 	addl_page_shortage = vm_pageout_deficit;
64195461b45SJohn Dyson 	vm_pageout_deficit = 0;
642b182ec9eSJohn Dyson 
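	/*
	 * Limit the number of dirty pages laundered per pass to the smaller
	 * of the max_page_launder tunable and the inactive target.
	 */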
643ceb0cf87SJohn Dyson 	if (max_page_launder == 0)
644ceb0cf87SJohn Dyson 		max_page_launder = 1;
645ceb0cf87SJohn Dyson 	maxlaunder = (cnt.v_inactive_target > max_page_launder) ?
646ceb0cf87SJohn Dyson 	    max_page_launder : cnt.v_inactive_target;
64770111b90SJohn Dyson 
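	/*
	 * Scan the inactive queue until enough pages have been freed or
	 * cached, or the whole queue has been examined.  If the current page
	 * leaves the inactive queue behind our back, restart from the head
	 * of the queue (rescan0).
	 */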
64867bf6868SJohn Dyson rescan0:
649f6b04d2bSDavid Greenman 	maxscan = cnt.v_inactive_count;
650b182ec9eSJohn Dyson 	for( m = TAILQ_FIRST(&vm_page_queue_inactive);
651b182ec9eSJohn Dyson 
652b182ec9eSJohn Dyson 		(m != NULL) && (maxscan-- > 0) &&
653b18bfc3dSJohn Dyson 			((cnt.v_cache_count + cnt.v_free_count) <
654b182ec9eSJohn Dyson 			(cnt.v_cache_min + cnt.v_free_target));
655b182ec9eSJohn Dyson 
656b182ec9eSJohn Dyson 		m = next) {
657df8bae1dSRodney W. Grimes 
658a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
659b182ec9eSJohn Dyson 
660f35329acSJohn Dyson 		if (m->queue != PQ_INACTIVE) {
66167bf6868SJohn Dyson 			goto rescan0;
662f35329acSJohn Dyson 		}
663b182ec9eSJohn Dyson 
664b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
665df8bae1dSRodney W. Grimes 
666b182ec9eSJohn Dyson 		if (m->hold_count) {
667f35329acSJohn Dyson 			s = splvm();
668b182ec9eSJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
669b182ec9eSJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
670f35329acSJohn Dyson 			splx(s);
671b182ec9eSJohn Dyson 			addl_page_shortage++;
672b182ec9eSJohn Dyson 			continue;
673df8bae1dSRodney W. Grimes 		}
67426f9a767SRodney W. Grimes 		/*
675b18bfc3dSJohn Dyson 		 * Don't mess with busy pages; keep them at the front of the
676b18bfc3dSJohn Dyson 		 * queue, since they are most likely being paged out.
67726f9a767SRodney W. Grimes 		 */
678bd7e5f99SJohn Dyson 		if (m->busy || (m->flags & PG_BUSY)) {
679b182ec9eSJohn Dyson 			addl_page_shortage++;
68026f9a767SRodney W. Grimes 			continue;
68126f9a767SRodney W. Grimes 		}
682bd7e5f99SJohn Dyson 
6837e006499SJohn Dyson 		/*
6847e006499SJohn Dyson 		 * If the object is not being used, we ignore previous references.
6857e006499SJohn Dyson 		 */
6860d94caffSDavid Greenman 		if (m->object->ref_count == 0) {
687e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
68867bf6868SJohn Dyson 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
6897e006499SJohn Dyson 
6907e006499SJohn Dyson 		/*
6917e006499SJohn Dyson 		 * Otherwise, if the page has been referenced while in the inactive
6927e006499SJohn Dyson 		 * queue, we bump the "activation count" upwards, making it less
6937e006499SJohn Dyson 		 * likely that the page will be added back to the inactive queue
6947e006499SJohn Dyson 		 * prematurely again.  Here we check the page tables (or emulated
6957e006499SJohn Dyson 		 * bits, if any), since the upper level VM system does not know
6967e006499SJohn Dyson 		 * anything about existing references.
6977e006499SJohn Dyson 		 */
698ef743ce6SJohn Dyson 		} else if (((m->flags & PG_REFERENCED) == 0) &&
6997e006499SJohn Dyson 			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
700ef743ce6SJohn Dyson 			vm_page_activate(m);
7017e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE);
702ef743ce6SJohn Dyson 			continue;
7032fe6e4d7SDavid Greenman 		}
704ef743ce6SJohn Dyson 
7057e006499SJohn Dyson 		/*
7067e006499SJohn Dyson 		 * If the upper level VM system knows about any page references,
7077e006499SJohn Dyson 		 * we activate the page.  We also set the "activation count" higher
7087e006499SJohn Dyson 		 * than normal so that we are less likely to place the page back
7097e006499SJohn Dyson 		 * onto the inactive queue again.
7107e006499SJohn Dyson 		 */
711bd7e5f99SJohn Dyson 		if ((m->flags & PG_REFERENCED) != 0) {
712e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
7137e006499SJohn Dyson 			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
71426f9a767SRodney W. Grimes 			vm_page_activate(m);
7157e006499SJohn Dyson 			m->act_count += (actcount + ACT_ADVANCE + 1);
7160d94caffSDavid Greenman 			continue;
7170d94caffSDavid Greenman 		}
71867bf6868SJohn Dyson 
7197e006499SJohn Dyson 		/*
7207e006499SJohn Dyson 		 * If the upper level VM system doesn't know anything about the
7217e006499SJohn Dyson 		 * page being dirty, we have to check for it again.  As far as the
7227e006499SJohn Dyson 		 * VM code knows, any partially dirty pages are fully dirty.
7237e006499SJohn Dyson 		 */
724f6b04d2bSDavid Greenman 		if (m->dirty == 0) {
725bd7e5f99SJohn Dyson 			vm_page_test_dirty(m);
726427e99a0SAlexander Langer 		} else {
727bd7e5f99SJohn Dyson 			m->dirty = VM_PAGE_BITS_ALL;
72830dcfc09SJohn Dyson 		}
729ef743ce6SJohn Dyson 
7307e006499SJohn Dyson 		/*
7317e006499SJohn Dyson 		 * Invalid pages can be easily freed
7327e006499SJohn Dyson 		 */
7336d40c3d3SDavid Greenman 		if (m->valid == 0) {
734925a3a41SJohn Dyson 			vm_pageout_page_free(m);
73567bf6868SJohn Dyson 			cnt.v_dfree++;
736925a3a41SJohn Dyson 			pages_freed++;
7377e006499SJohn Dyson 
7387e006499SJohn Dyson 		/*
7397e006499SJohn Dyson 		 * Clean pages can be placed onto the cache queue.
7407e006499SJohn Dyson 		 */
741bd7e5f99SJohn Dyson 		} else if (m->dirty == 0) {
742bd7e5f99SJohn Dyson 			vm_page_cache(m);
743925a3a41SJohn Dyson 			pages_freed++;
7447e006499SJohn Dyson 
7457e006499SJohn Dyson 		/*
7467e006499SJohn Dyson 		 * Dirty pages need to be paged out.  Note that we clean
7477e006499SJohn Dyson 		 * only a limited number of pages per pagedaemon pass.
7487e006499SJohn Dyson 		 */
7490d94caffSDavid Greenman 		} else if (maxlaunder > 0) {
7500d94caffSDavid Greenman 			int written;
75112ac6a1dSJohn Dyson 			int swap_pageouts_ok;
752f6b04d2bSDavid Greenman 			struct vnode *vp = NULL;
7530d94caffSDavid Greenman 
7540d94caffSDavid Greenman 			object = m->object;
7557e006499SJohn Dyson 
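			/*
			 * Pageouts of vnode-backed objects are always permitted
			 * here; pageouts that would consume swap (OBJT_SWAP or
			 * OBJT_DEFAULT objects) honor the defer/disable
			 * tunables, except that deferred pageouts are forced
			 * once free + cache memory falls below the free minimum.
			 */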
75612ac6a1dSJohn Dyson 			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
75712ac6a1dSJohn Dyson 				swap_pageouts_ok = 1;
75812ac6a1dSJohn Dyson 			} else {
75912ac6a1dSJohn Dyson 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
76012ac6a1dSJohn Dyson 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
76112ac6a1dSJohn Dyson 					(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
76212ac6a1dSJohn Dyson 
76312ac6a1dSJohn Dyson 			}
76470111b90SJohn Dyson 
76570111b90SJohn Dyson 			/*
76670111b90SJohn Dyson 			 * We don't bother paging objects that are "dead".  Those
76770111b90SJohn Dyson 			 * objects are in a "rundown" state.
76870111b90SJohn Dyson 			 */
76970111b90SJohn Dyson 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
77012ac6a1dSJohn Dyson 				s = splvm();
77112ac6a1dSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
77212ac6a1dSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
77312ac6a1dSJohn Dyson 				splx(s);
77412ac6a1dSJohn Dyson 				continue;
77512ac6a1dSJohn Dyson 			}
77612ac6a1dSJohn Dyson 
777eaf13dd7SJohn Dyson 			if ((object->type == OBJT_VNODE) &&
778eaf13dd7SJohn Dyson 				(object->flags & OBJ_DEAD) == 0) {
77924a1cce3SDavid Greenman 				vp = object->handle;
780996c772fSJohn Dyson 				if (VOP_ISLOCKED(vp) ||
78147221757SJohn Dyson 				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
782b182ec9eSJohn Dyson 					if ((m->queue == PQ_INACTIVE) &&
783b182ec9eSJohn Dyson 						(m->hold_count == 0) &&
784b182ec9eSJohn Dyson 						(m->busy == 0) &&
785b182ec9eSJohn Dyson 						(m->flags & PG_BUSY) == 0) {
786f35329acSJohn Dyson 						s = splvm();
78785a376ebSJohn Dyson 						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
78885a376ebSJohn Dyson 						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
789f35329acSJohn Dyson 						splx(s);
79085a376ebSJohn Dyson 					}
791aef922f5SJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
792925a3a41SJohn Dyson 						vnodes_skipped++;
793b182ec9eSJohn Dyson 					continue;
79485a376ebSJohn Dyson 				}
795b182ec9eSJohn Dyson 
796f35329acSJohn Dyson 				/*
797f35329acSJohn Dyson 				 * The page might have been moved to another queue
798f35329acSJohn Dyson 				 * during potential blocking in vget() above.
799f35329acSJohn Dyson 				 */
800b182ec9eSJohn Dyson 				if (m->queue != PQ_INACTIVE) {
801b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
802925a3a41SJohn Dyson 						vnodes_skipped++;
803b182ec9eSJohn Dyson 					vput(vp);
804b182ec9eSJohn Dyson 					continue;
805b182ec9eSJohn Dyson 				}
806b182ec9eSJohn Dyson 
807f35329acSJohn Dyson 				/*
808f35329acSJohn Dyson 				 * The page may have been busied during the blocking in
809f35329acSJohn Dyson 				 * vget() above; we don't move the page back onto the end of
810f35329acSJohn Dyson 				 * the queue, so that the statistics stay more accurate.
811f35329acSJohn Dyson 				 */
812b182ec9eSJohn Dyson 				if (m->busy || (m->flags & PG_BUSY)) {
813b182ec9eSJohn Dyson 					vput(vp);
814b182ec9eSJohn Dyson 					continue;
815b182ec9eSJohn Dyson 				}
816b182ec9eSJohn Dyson 
817f35329acSJohn Dyson 				/*
818f35329acSJohn Dyson 				 * If the page has become held, then skip it
819f35329acSJohn Dyson 				 */
820b182ec9eSJohn Dyson 				if (m->hold_count) {
821f35329acSJohn Dyson 					s = splvm();
822b182ec9eSJohn Dyson 					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
823b182ec9eSJohn Dyson 					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
824f35329acSJohn Dyson 					splx(s);
825b182ec9eSJohn Dyson 					if (object->flags & OBJ_MIGHTBEDIRTY)
826925a3a41SJohn Dyson 						vnodes_skipped++;
827b182ec9eSJohn Dyson 					vput(vp);
828f6b04d2bSDavid Greenman 					continue;
829f6b04d2bSDavid Greenman 				}
830f6b04d2bSDavid Greenman 			}
831f6b04d2bSDavid Greenman 
8320d94caffSDavid Greenman 			/*
8330d94caffSDavid Greenman 			 * If a page is dirty, then it is either being washed
8340d94caffSDavid Greenman 			 * (but not yet cleaned) or it is still in the
8350d94caffSDavid Greenman 			 * laundry.  If it is still in the laundry, then we
8360d94caffSDavid Greenman 			 * start the cleaning operation.
8370d94caffSDavid Greenman 			 */
8388f9110f6SJohn Dyson 			written = vm_pageout_clean(m);
839f6b04d2bSDavid Greenman 			if (vp)
840f6b04d2bSDavid Greenman 				vput(vp);
841f6b04d2bSDavid Greenman 
8420d94caffSDavid Greenman 			maxlaunder -= written;
8430d94caffSDavid Greenman 		}
844df8bae1dSRodney W. Grimes 	}
84526f9a767SRodney W. Grimes 
846df8bae1dSRodney W. Grimes 	/*
8470d94caffSDavid Greenman 	 * Compute the page shortage.  If we are still very low on memory,
8480d94caffSDavid Greenman 	 * make sure that we move at least a minimal number of pages from
8490d94caffSDavid Greenman 	 * active to inactive.
850df8bae1dSRodney W. Grimes 	 */
851b182ec9eSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
8520d94caffSDavid Greenman 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
853b182ec9eSJohn Dyson 	page_shortage += addl_page_shortage;
854bef608bdSJohn Dyson 	if (page_shortage <= 0) {
855bef608bdSJohn Dyson 		page_shortage = 0;
856b182ec9eSJohn Dyson 	}
85726f9a767SRodney W. Grimes 
858b18bfc3dSJohn Dyson 	pcount = cnt.v_active_count;
859b18bfc3dSJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
860b18bfc3dSJohn Dyson 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
861f35329acSJohn Dyson 
8627e006499SJohn Dyson 		/*
8637e006499SJohn Dyson 		 * This is a consistency check, and should likely be a panic
8647e006499SJohn Dyson 		 * or warning.
8657e006499SJohn Dyson 		 */
866f35329acSJohn Dyson 		if (m->queue != PQ_ACTIVE) {
86738efa82bSJohn Dyson 			break;
868f35329acSJohn Dyson 		}
869f35329acSJohn Dyson 
870b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(m, pageq);
871df8bae1dSRodney W. Grimes 		/*
87226f9a767SRodney W. Grimes 		 * Don't deactivate pages that are busy.
873df8bae1dSRodney W. Grimes 		 */
874a647a309SDavid Greenman 		if ((m->busy != 0) ||
8750d94caffSDavid Greenman 		    (m->flags & PG_BUSY) ||
876f6b04d2bSDavid Greenman 		    (m->hold_count != 0)) {
877f35329acSJohn Dyson 			s = splvm();
8786d40c3d3SDavid Greenman 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
8796d40c3d3SDavid Greenman 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
880f35329acSJohn Dyson 			splx(s);
88126f9a767SRodney W. Grimes 			m = next;
88226f9a767SRodney W. Grimes 			continue;
883df8bae1dSRodney W. Grimes 		}
884b18bfc3dSJohn Dyson 
885b18bfc3dSJohn Dyson 		/*
886b18bfc3dSJohn Dyson 		 * The count for pagedaemon pages is done after checking the
887b18bfc3dSJohn Dyson 		 * page for eligibility...
888b18bfc3dSJohn Dyson 		 */
889b18bfc3dSJohn Dyson 		cnt.v_pdpages++;
890ef743ce6SJohn Dyson 
8917e006499SJohn Dyson 		/*
8927e006499SJohn Dyson 		 * Check to see "how much" the page has been used.
8937e006499SJohn Dyson 		 */
8947e006499SJohn Dyson 		actcount = 0;
895ef743ce6SJohn Dyson 		if (m->object->ref_count != 0) {
896ef743ce6SJohn Dyson 			if (m->flags & PG_REFERENCED) {
8977e006499SJohn Dyson 				actcount += 1;
8980d94caffSDavid Greenman 			}
8997e006499SJohn Dyson 			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
9007e006499SJohn Dyson 			if (actcount) {
9017e006499SJohn Dyson 				m->act_count += ACT_ADVANCE + actcount;
90238efa82bSJohn Dyson 				if (m->act_count > ACT_MAX)
90338efa82bSJohn Dyson 					m->act_count = ACT_MAX;
90438efa82bSJohn Dyson 			}
905b18bfc3dSJohn Dyson 		}
906ef743ce6SJohn Dyson 
9077e006499SJohn Dyson 		/*
9087e006499SJohn Dyson 		 * Since we have "tested" this bit, we need to clear it now.
9097e006499SJohn Dyson 		 */
910e69763a3SDoug Rabson 		vm_page_flag_clear(m, PG_REFERENCED);
911ef743ce6SJohn Dyson 
9127e006499SJohn Dyson 		/*
9137e006499SJohn Dyson 		 * Only if an object is currently being used do we use the
9147e006499SJohn Dyson 		 * page activation count stats.
9157e006499SJohn Dyson 		 */
9167e006499SJohn Dyson 		if (actcount && (m->object->ref_count != 0)) {
917f35329acSJohn Dyson 			s = splvm();
91826f9a767SRodney W. Grimes 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
91926f9a767SRodney W. Grimes 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
920f35329acSJohn Dyson 			splx(s);
92126f9a767SRodney W. Grimes 		} else {
92238efa82bSJohn Dyson 			m->act_count -= min(m->act_count, ACT_DECLINE);
92338efa82bSJohn Dyson 			if (vm_pageout_algorithm_lru ||
92438efa82bSJohn Dyson 				(m->object->ref_count == 0) || (m->act_count == 0)) {
925925a3a41SJohn Dyson 				page_shortage--;
926d4a272dbSJohn Dyson 				if (m->object->ref_count == 0) {
927ef743ce6SJohn Dyson 					vm_page_protect(m, VM_PROT_NONE);
928d4a272dbSJohn Dyson 					if (m->dirty == 0)
9290d94caffSDavid Greenman 						vm_page_cache(m);
930d4a272dbSJohn Dyson 					else
931d4a272dbSJohn Dyson 						vm_page_deactivate(m);
9320d94caffSDavid Greenman 				} else {
93326f9a767SRodney W. Grimes 					vm_page_deactivate(m);
934df8bae1dSRodney W. Grimes 				}
93538efa82bSJohn Dyson 			} else {
93638efa82bSJohn Dyson 				s = splvm();
93738efa82bSJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
93838efa82bSJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
93938efa82bSJohn Dyson 				splx(s);
94038efa82bSJohn Dyson 			}
941df8bae1dSRodney W. Grimes 		}
94226f9a767SRodney W. Grimes 		m = next;
94326f9a767SRodney W. Grimes 	}
944df8bae1dSRodney W. Grimes 
945f35329acSJohn Dyson 	s = splvm();
946df8bae1dSRodney W. Grimes 	/*
9470d94caffSDavid Greenman 	 * We try to maintain some *really* free pages; this allows interrupt
9480d94caffSDavid Greenman 	 * code to be guaranteed space.
949df8bae1dSRodney W. Grimes 	 */
950a1f6d91cSDavid Greenman 	while (cnt.v_free_count < cnt.v_free_reserved) {
9515070c7f8SJohn Dyson 		static int cache_rover = 0;
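		/*
		 * Stepping cache_rover by PQ_PRIME2 is presumably intended to
		 * spread these frees across the PQ_CACHE page-color queues
		 * instead of draining a single color.
		 */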
9525070c7f8SJohn Dyson 		m = vm_page_list_find(PQ_CACHE, cache_rover);
9530d94caffSDavid Greenman 		if (!m)
9540d94caffSDavid Greenman 			break;
9555070c7f8SJohn Dyson 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
956925a3a41SJohn Dyson 		vm_pageout_page_free(m);
9570bb3a0d2SDavid Greenman 		cnt.v_dfree++;
95826f9a767SRodney W. Grimes 	}
959f35329acSJohn Dyson 	splx(s);
9605663e6deSDavid Greenman 
961ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING)
962ceb0cf87SJohn Dyson 	/*
963ceb0cf87SJohn Dyson 	 * Idle process swapout -- run once per second.
964ceb0cf87SJohn Dyson 	 */
965ceb0cf87SJohn Dyson 	if (vm_swap_idle_enabled) {
966ceb0cf87SJohn Dyson 		static long lsec;
967227ee8a1SPoul-Henning Kamp 		if (time_second != lsec) {
968ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
969ceb0cf87SJohn Dyson 			vm_req_vmdaemon();
970227ee8a1SPoul-Henning Kamp 			lsec = time_second;
971ceb0cf87SJohn Dyson 		}
972ceb0cf87SJohn Dyson 	}
973ceb0cf87SJohn Dyson #endif
974ceb0cf87SJohn Dyson 
9755663e6deSDavid Greenman 	/*
976f6b04d2bSDavid Greenman 	 * If we didn't get enough free pages, and we have skipped a vnode
9774c1f8ee9SDavid Greenman 	 * in a writeable object, wake up the sync daemon.  Also kick off
9784c1f8ee9SDavid Greenman 	 * swapout if we did not get enough free pages.
979f6b04d2bSDavid Greenman 	 */
980bd7e5f99SJohn Dyson 	if ((cnt.v_cache_count + cnt.v_free_count) <
981bd7e5f99SJohn Dyson 		(cnt.v_free_target + cnt.v_cache_min) ) {
982f6b04d2bSDavid Greenman 		if (vnodes_skipped &&
983f6b04d2bSDavid Greenman 		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
984f6b04d2bSDavid Greenman 			if (!vfs_update_wakeup) {
985f6b04d2bSDavid Greenman 				vfs_update_wakeup = 1;
98624a1cce3SDavid Greenman 				wakeup(&vfs_update_wakeup);
987f6b04d2bSDavid Greenman 			}
988f6b04d2bSDavid Greenman 		}
98938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
990ceb0cf87SJohn Dyson 		if (vm_swap_enabled &&
99138efa82bSJohn Dyson 			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
9924c1f8ee9SDavid Greenman 			vm_req_vmdaemon();
993ceb0cf87SJohn Dyson 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
9944c1f8ee9SDavid Greenman 		}
9955afce282SDavid Greenman #endif
9964c1f8ee9SDavid Greenman 	}
9974c1f8ee9SDavid Greenman 
998f6b04d2bSDavid Greenman 
999f6b04d2bSDavid Greenman 	/*
10000d94caffSDavid Greenman 	 * Make sure that we have swap space.  If we are low on both memory
10010d94caffSDavid Greenman 	 * and swap, kill the biggest process.
10025663e6deSDavid Greenman 	 */
10035663e6deSDavid Greenman 	if ((vm_swap_size == 0 || swap_pager_full) &&
10040d94caffSDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
10055663e6deSDavid Greenman 		bigproc = NULL;
10065663e6deSDavid Greenman 		bigsize = 0;
10071b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
10085663e6deSDavid Greenman 			/*
10095663e6deSDavid Greenman 			 * if this is a system process, skip it
10105663e6deSDavid Greenman 			 */
101179221631SDavid Greenman 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
101279221631SDavid Greenman 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
10135663e6deSDavid Greenman 				continue;
10145663e6deSDavid Greenman 			}
10155663e6deSDavid Greenman 			/*
10165663e6deSDavid Greenman 			 * if the process is in a non-running type state,
10175663e6deSDavid Greenman 			 * don't touch it.
10185663e6deSDavid Greenman 			 */
10195663e6deSDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
10205663e6deSDavid Greenman 				continue;
10215663e6deSDavid Greenman 			}
10225663e6deSDavid Greenman 			/*
10235663e6deSDavid Greenman 			 * get the process size
10245663e6deSDavid Greenman 			 */
10255663e6deSDavid Greenman 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
10265663e6deSDavid Greenman 			/*
10275663e6deSDavid Greenman 			 * if this process is bigger than the biggest one,
10285663e6deSDavid Greenman 			 * remember it.
10295663e6deSDavid Greenman 			 */
10305663e6deSDavid Greenman 			if (size > bigsize) {
10315663e6deSDavid Greenman 				bigproc = p;
10325663e6deSDavid Greenman 				bigsize = size;
10335663e6deSDavid Greenman 			}
10345663e6deSDavid Greenman 		}
10355663e6deSDavid Greenman 		if (bigproc != NULL) {
1036729b1e51SDavid Greenman 			killproc(bigproc, "out of swap space");
10375663e6deSDavid Greenman 			bigproc->p_estcpu = 0;
10385663e6deSDavid Greenman 			bigproc->p_nice = PRIO_MIN;
10395663e6deSDavid Greenman 			resetpriority(bigproc);
104024a1cce3SDavid Greenman 			wakeup(&cnt.v_free_count);
10415663e6deSDavid Greenman 		}
10425663e6deSDavid Greenman 	}
104326f9a767SRodney W. Grimes 	return force_wakeup;
104426f9a767SRodney W. Grimes }
104526f9a767SRodney W. Grimes 
1046dc2efb27SJohn Dyson /*
1047dc2efb27SJohn Dyson  * This routine tries to maintain the pseudo-LRU active queue,
1048dc2efb27SJohn Dyson  * so that during long periods of time when there is no paging,
1049dc2efb27SJohn Dyson  * some statistic accumulation still occurs.  This code
1050dc2efb27SJohn Dyson  * helps the situation where paging just starts to occur.
1051dc2efb27SJohn Dyson  */
1052dc2efb27SJohn Dyson static void
1053dc2efb27SJohn Dyson vm_pageout_page_stats()
1054dc2efb27SJohn Dyson {
1055dc2efb27SJohn Dyson 	int s;
1056dc2efb27SJohn Dyson 	vm_page_t m,next;
1057dc2efb27SJohn Dyson 	int pcount,tpcount;		/* Number of pages to check */
1058dc2efb27SJohn Dyson 	static int fullintervalcount = 0;
1059bef608bdSJohn Dyson 	int page_shortage;
1060bef608bdSJohn Dyson 
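	/*
	 * Only bother gathering page statistics once the combined free,
	 * inactive and cache counts have dropped below the target
	 * thresholds; otherwise the scan is not worth its cost.
	 */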
1061bef608bdSJohn Dyson 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1062bef608bdSJohn Dyson 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1063bef608bdSJohn Dyson 	if (page_shortage <= 0)
1064bef608bdSJohn Dyson 		return;
1065dc2efb27SJohn Dyson 
1066dc2efb27SJohn Dyson 	pcount = cnt.v_active_count;
1067dc2efb27SJohn Dyson 	fullintervalcount += vm_pageout_stats_interval;
1068dc2efb27SJohn Dyson 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1069dc2efb27SJohn Dyson 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1070dc2efb27SJohn Dyson 		if (pcount > tpcount)
1071dc2efb27SJohn Dyson 			pcount = tpcount;
1072dc2efb27SJohn Dyson 	}
1073dc2efb27SJohn Dyson 
1074dc2efb27SJohn Dyson 	m = TAILQ_FIRST(&vm_page_queue_active);
1075dc2efb27SJohn Dyson 	while ((m != NULL) && (pcount-- > 0)) {
10767e006499SJohn Dyson 		int actcount;
1077dc2efb27SJohn Dyson 
1078dc2efb27SJohn Dyson 		if (m->queue != PQ_ACTIVE) {
1079dc2efb27SJohn Dyson 			break;
1080dc2efb27SJohn Dyson 		}
1081dc2efb27SJohn Dyson 
1082dc2efb27SJohn Dyson 		next = TAILQ_NEXT(m, pageq);
1083dc2efb27SJohn Dyson 		/*
1084dc2efb27SJohn Dyson 		 * Don't deactivate pages that are busy.
1085dc2efb27SJohn Dyson 		 */
1086dc2efb27SJohn Dyson 		if ((m->busy != 0) ||
1087dc2efb27SJohn Dyson 		    (m->flags & PG_BUSY) ||
1088dc2efb27SJohn Dyson 		    (m->hold_count != 0)) {
1089dc2efb27SJohn Dyson 			s = splvm();
1090dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1091dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1092dc2efb27SJohn Dyson 			splx(s);
1093dc2efb27SJohn Dyson 			m = next;
1094dc2efb27SJohn Dyson 			continue;
1095dc2efb27SJohn Dyson 		}
1096dc2efb27SJohn Dyson 
10977e006499SJohn Dyson 		actcount = 0;
1098dc2efb27SJohn Dyson 		if (m->flags & PG_REFERENCED) {
1099e69763a3SDoug Rabson 			vm_page_flag_clear(m, PG_REFERENCED);
11007e006499SJohn Dyson 			actcount += 1;
1101dc2efb27SJohn Dyson 		}
1102dc2efb27SJohn Dyson 
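		/*
		 * pmap_ts_referenced() returns the number of reference bits
		 * found for the page (clearing them as a side effect).
		 */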
11037e006499SJohn Dyson 		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
11047e006499SJohn Dyson 		if (actcount) {
11057e006499SJohn Dyson 			m->act_count += ACT_ADVANCE + actcount;
1106dc2efb27SJohn Dyson 			if (m->act_count > ACT_MAX)
1107dc2efb27SJohn Dyson 				m->act_count = ACT_MAX;
1108dc2efb27SJohn Dyson 			s = splvm();
1109dc2efb27SJohn Dyson 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1110dc2efb27SJohn Dyson 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1111dc2efb27SJohn Dyson 			splx(s);
1112dc2efb27SJohn Dyson 		} else {
1113dc2efb27SJohn Dyson 			if (m->act_count == 0) {
11147e006499SJohn Dyson 				/*
11157e006499SJohn Dyson 				 * We turn off page access so that we have more accurate
11167e006499SJohn Dyson 				 * RSS stats.  We don't do this in the normal page
11177e006499SJohn Dyson 				 * deactivation path when the system is under VM load,
11187e006499SJohn Dyson 				 * because the cost of the large number of page protect
11197e006499SJohn Dyson 				 * operations would outweigh the improved accuracy.
11207e006499SJohn Dyson 				 */
1121dc2efb27SJohn Dyson 				vm_page_protect(m, VM_PROT_NONE);
1122dc2efb27SJohn Dyson 				vm_page_deactivate(m);
1123dc2efb27SJohn Dyson 			} else {
1124dc2efb27SJohn Dyson 				m->act_count -= min(m->act_count, ACT_DECLINE);
1125dc2efb27SJohn Dyson 				s = splvm();
1126dc2efb27SJohn Dyson 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1127dc2efb27SJohn Dyson 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1128dc2efb27SJohn Dyson 				splx(s);
1129dc2efb27SJohn Dyson 			}
1130dc2efb27SJohn Dyson 		}
1131dc2efb27SJohn Dyson 
1132dc2efb27SJohn Dyson 		m = next;
1133dc2efb27SJohn Dyson 	}
1134dc2efb27SJohn Dyson }
1135dc2efb27SJohn Dyson 
1136b182ec9eSJohn Dyson static int
1137b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1138b182ec9eSJohn Dyson vm_size_t count;
1139b182ec9eSJohn Dyson {
1140b182ec9eSJohn Dyson 	if (count < cnt.v_page_count)
1141b182ec9eSJohn Dyson 		 return 0;
1142b182ec9eSJohn Dyson 	/*
1143b182ec9eSJohn Dyson 	 * free_reserved needs to include enough for the largest swap pager
1144b182ec9eSJohn Dyson 	 * structures plus enough for any pv_entry structs when paging.
1145b182ec9eSJohn Dyson 	 */
1146b182ec9eSJohn Dyson 	if (cnt.v_page_count > 1024)
1147b182ec9eSJohn Dyson 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1148b182ec9eSJohn Dyson 	else
1149b182ec9eSJohn Dyson 		cnt.v_free_min = 4;
1150f35329acSJohn Dyson 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1151f35329acSJohn Dyson 		cnt.v_interrupt_free_min;
1152f35329acSJohn Dyson 	cnt.v_free_reserved = vm_pageout_page_count +
1153a15403deSJohn Dyson 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1154a2f4a846SJohn Dyson 	cnt.v_free_min += cnt.v_free_reserved;
1155b182ec9eSJohn Dyson 	return 1;
1156b182ec9eSJohn Dyson }
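/*
 * Illustrative sketch only (not part of the code above): with assumed
 * values of PAGE_SIZE = 4K, MAXBSIZE = 64K, vm_pageout_page_count = 16,
 * v_interrupt_free_min = 2 and PQ_L2_SIZE = 256, a machine with 8192
 * pages (32MB) would end up with roughly:
 *
 *	v_free_min         = 4 + (8192 - 1024) / 200     = 39
 *	v_pageout_free_min = (2 * 64K) / 4K + 2          = 34
 *	v_free_reserved    = 16 + 34 + 8192 / 768 + 256  = 316
 *	v_free_min        += v_free_reserved             = 355
 */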
1157b182ec9eSJohn Dyson 
1158b182ec9eSJohn Dyson 
1159df8bae1dSRodney W. Grimes /*
1160df8bae1dSRodney W. Grimes  *	vm_pageout is the high level pageout daemon.
1161df8bae1dSRodney W. Grimes  */
11622b14f991SJulian Elischer static void
116326f9a767SRodney W. Grimes vm_pageout()
1164df8bae1dSRodney W. Grimes {
1165df8bae1dSRodney W. Grimes 	/*
1166df8bae1dSRodney W. Grimes 	 * Initialize some paging parameters.
1167df8bae1dSRodney W. Grimes 	 */
1168df8bae1dSRodney W. Grimes 
1169f6b04d2bSDavid Greenman 	cnt.v_interrupt_free_min = 2;
1170f35329acSJohn Dyson 	if (cnt.v_page_count < 2000)
1171f35329acSJohn Dyson 		vm_pageout_page_count = 8;
1172f6b04d2bSDavid Greenman 
1173b182ec9eSJohn Dyson 	vm_pageout_free_page_calc(cnt.v_page_count);
1174ed74321bSDavid Greenman 	/*
11750d94caffSDavid Greenman 	 * v_free_target and v_cache_min control the pageout hysteresis;
11760d94caffSDavid Greenman 	 * they are scaled from the amount of free memory at boot.
1177ed74321bSDavid Greenman 	 */
1178a15403deSJohn Dyson 	if (cnt.v_free_count > 6144)
11790d94caffSDavid Greenman 		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
1180a15403deSJohn Dyson 	else
1181a15403deSJohn Dyson 		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
11826f2b142eSDavid Greenman 
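	/*
	 * On larger machines (more than 2048 pages free at boot) keep a
	 * pool of cache pages and a correspondingly larger inactive target;
	 * on small machines the cache queue targets are simply disabled.
	 */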
1183a15403deSJohn Dyson 	if (cnt.v_free_count > 2048) {
1184a15403deSJohn Dyson 		cnt.v_cache_min = cnt.v_free_target;
1185a15403deSJohn Dyson 		cnt.v_cache_max = 2 * cnt.v_cache_min;
1186a15403deSJohn Dyson 		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
11870d94caffSDavid Greenman 	} else {
11880d94caffSDavid Greenman 		cnt.v_cache_min = 0;
11890d94caffSDavid Greenman 		cnt.v_cache_max = 0;
11906f2b142eSDavid Greenman 		cnt.v_inactive_target = cnt.v_free_count / 4;
11910d94caffSDavid Greenman 	}
1192e47ed70bSJohn Dyson 	if (cnt.v_inactive_target > cnt.v_free_count / 3)
1193e47ed70bSJohn Dyson 		cnt.v_inactive_target = cnt.v_free_count / 3;
1194df8bae1dSRodney W. Grimes 
1195df8bae1dSRodney W. Grimes 	/* XXX does not really belong here */
1196df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
1197df8bae1dSRodney W. Grimes 		vm_page_max_wired = cnt.v_free_count / 3;
1198df8bae1dSRodney W. Grimes 
1199dc2efb27SJohn Dyson 	if (vm_pageout_stats_max == 0)
1200dc2efb27SJohn Dyson 		vm_pageout_stats_max = cnt.v_free_target;
1201dc2efb27SJohn Dyson 
1202dc2efb27SJohn Dyson 	/*
1203dc2efb27SJohn Dyson 	 * Set interval in seconds for stats scan.
1204dc2efb27SJohn Dyson 	 */
1205dc2efb27SJohn Dyson 	if (vm_pageout_stats_interval == 0)
1206bef608bdSJohn Dyson 		vm_pageout_stats_interval = 5;
1207dc2efb27SJohn Dyson 	if (vm_pageout_full_stats_interval == 0)
1208dc2efb27SJohn Dyson 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1209dc2efb27SJohn Dyson 
1210dc2efb27SJohn Dyson 
1211dc2efb27SJohn Dyson 	/*
1212dc2efb27SJohn Dyson 	 * Set maximum free per pass
1213dc2efb27SJohn Dyson 	 */
1214dc2efb27SJohn Dyson 	if (vm_pageout_stats_free_max == 0)
1215bef608bdSJohn Dyson 		vm_pageout_stats_free_max = 5;
1216dc2efb27SJohn Dyson 
1217ceb0cf87SJohn Dyson 	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);
121826f9a767SRodney W. Grimes 
121924a1cce3SDavid Greenman 	swap_pager_swap_init();
1220df8bae1dSRodney W. Grimes 	/*
12210d94caffSDavid Greenman 	 * The pageout daemon is never done, so loop forever.
1222df8bae1dSRodney W. Grimes 	 */
1223df8bae1dSRodney W. Grimes 	while (TRUE) {
122485a376ebSJohn Dyson 		int inactive_target;
1225dc2efb27SJohn Dyson 		int error;
1226b18bfc3dSJohn Dyson 		int s = splvm();
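		/*
		 * If no shortage has been declared (or free + cache pages
		 * are still above v_free_min), sleep for the stats interval;
		 * a timeout with no shortage pending just runs the cheap
		 * stats scan.  Otherwise nap briefly so that back-to-back
		 * shortages do not keep the scan running continuously.
		 */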
1227f919ebdeSDavid Greenman 		if (!vm_pages_needed ||
1228545901f7SJohn Dyson 			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
1229f919ebdeSDavid Greenman 			vm_pages_needed = 0;
1230dc2efb27SJohn Dyson 			error = tsleep(&vm_pages_needed,
1231dc2efb27SJohn Dyson 				PVM, "psleep", vm_pageout_stats_interval * hz);
1232dc2efb27SJohn Dyson 			if (error && !vm_pages_needed) {
1233dc2efb27SJohn Dyson 				splx(s);
1234dc2efb27SJohn Dyson 				vm_pageout_page_stats();
1235dc2efb27SJohn Dyson 				continue;
1236dc2efb27SJohn Dyson 			}
1237dc2efb27SJohn Dyson 		} else if (vm_pages_needed) {
1238e47ed70bSJohn Dyson 			vm_pages_needed = 0;
1239e47ed70bSJohn Dyson 			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
1240f919ebdeSDavid Greenman 		}
1241e47ed70bSJohn Dyson 
1242b18bfc3dSJohn Dyson 		if (vm_pages_needed)
1243b18bfc3dSJohn Dyson 			cnt.v_pdwakeups++;
1244f919ebdeSDavid Greenman 		vm_pages_needed = 0;
1245f919ebdeSDavid Greenman 		splx(s);
1246df8bae1dSRodney W. Grimes 		vm_pager_sync();
12470d94caffSDavid Greenman 		vm_pageout_scan();
12482d8acc0fSJohn Dyson 		vm_pageout_deficit = 0;
124926f9a767SRodney W. Grimes 		vm_pager_sync();
125024a1cce3SDavid Greenman 		wakeup(&cnt.v_free_count);
1251df8bae1dSRodney W. Grimes 	}
1252df8bae1dSRodney W. Grimes }
125326f9a767SRodney W. Grimes 
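/*
 * Wake up the pageout daemon, unless a wakeup is already pending or the
 * caller is the pagedaemon itself.
 */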
1254e0c5a895SJohn Dyson void
1255e0c5a895SJohn Dyson pagedaemon_wakeup()
1256e0c5a895SJohn Dyson {
1257e0c5a895SJohn Dyson 	if (!vm_pages_needed && curproc != pageproc) {
1258e0c5a895SJohn Dyson 		vm_pages_needed++;
1259e0c5a895SJohn Dyson 		wakeup(&vm_pages_needed);
1260e0c5a895SJohn Dyson 	}
1261e0c5a895SJohn Dyson }
1262e0c5a895SJohn Dyson 
126338efa82bSJohn Dyson #if !defined(NO_SWAPPING)
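/*
 * Request a run of the vm_daemon, rate limited to at most one wakeup per
 * second; the (ticks < lastrun) test handles tick counter wraparound.
 */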
12645afce282SDavid Greenman static void
12655afce282SDavid Greenman vm_req_vmdaemon()
12665afce282SDavid Greenman {
12675afce282SDavid Greenman 	static int lastrun = 0;
12685afce282SDavid Greenman 
1269b18bfc3dSJohn Dyson 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
12705afce282SDavid Greenman 		wakeup(&vm_daemon_needed);
12715afce282SDavid Greenman 		lastrun = ticks;
12725afce282SDavid Greenman 	}
12735afce282SDavid Greenman }
12745afce282SDavid Greenman 
12752b14f991SJulian Elischer static void
12764f9fb771SBruce Evans vm_daemon()
12770d94caffSDavid Greenman {
12782fe6e4d7SDavid Greenman 	vm_object_t object;
12792fe6e4d7SDavid Greenman 	struct proc *p;
12800d94caffSDavid Greenman 
12812fe6e4d7SDavid Greenman 	while (TRUE) {
1282e8f36785SJohn Dyson 		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
12834c1f8ee9SDavid Greenman 		if (vm_pageout_req_swapout) {
1284ceb0cf87SJohn Dyson 			swapout_procs(vm_pageout_req_swapout);
12854c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 0;
12864c1f8ee9SDavid Greenman 		}
12872fe6e4d7SDavid Greenman 		/*
12880d94caffSDavid Greenman 		 * Scan the processes for those exceeding their RSS rlimits
12890d94caffSDavid Greenman 		 * or swapped out -- deactivate their pages.
12902fe6e4d7SDavid Greenman 		 */
12912fe6e4d7SDavid Greenman 
12921b67ec6dSJeffrey Hsu 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
12932fe6e4d7SDavid Greenman 			quad_t limit;
12942fe6e4d7SDavid Greenman 			vm_offset_t size;
12952fe6e4d7SDavid Greenman 
12962fe6e4d7SDavid Greenman 			/*
12972fe6e4d7SDavid Greenman 			 * if this is a system process or the process is
12982fe6e4d7SDavid Greenman 			 * exiting, skip it.
12992fe6e4d7SDavid Greenman 			 */
13002fe6e4d7SDavid Greenman 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
13012fe6e4d7SDavid Greenman 				continue;
13022fe6e4d7SDavid Greenman 			}
13032fe6e4d7SDavid Greenman 			/*
13042fe6e4d7SDavid Greenman 			 * if the process is neither runnable nor sleeping,
13052fe6e4d7SDavid Greenman 			 * don't touch it.
13062fe6e4d7SDavid Greenman 			 */
13072fe6e4d7SDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
13082fe6e4d7SDavid Greenman 				continue;
13092fe6e4d7SDavid Greenman 			}
13102fe6e4d7SDavid Greenman 			/*
13112fe6e4d7SDavid Greenman 			 * get a limit
13122fe6e4d7SDavid Greenman 			 */
13132fe6e4d7SDavid Greenman 			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
13142fe6e4d7SDavid Greenman 			    p->p_rlimit[RLIMIT_RSS].rlim_max);
13152fe6e4d7SDavid Greenman 
13162fe6e4d7SDavid Greenman 			/*
13170d94caffSDavid Greenman 			 * let processes that are swapped out really be
13180d94caffSDavid Greenman 			 * swapped out: set the limit to nothing (this will
13190d94caffSDavid Greenman 			 * force a swap-out of all their pages).
13202fe6e4d7SDavid Greenman 			 */
13212fe6e4d7SDavid Greenman 			if ((p->p_flag & P_INMEM) == 0)
13220d94caffSDavid Greenman 				limit = 0;	/* XXX */
13232fe6e4d7SDavid Greenman 
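			/*
			 * resident_count is in pages; convert to bytes so it
			 * can be compared against the RSS limit.
			 */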
1324a91c5a7eSJohn Dyson 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
13252fe6e4d7SDavid Greenman 			if (limit >= 0 && size >= limit) {
13262fe6e4d7SDavid Greenman 				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
132738efa82bSJohn Dyson 				    (vm_pindex_t)(limit >> PAGE_SHIFT) );
13282fe6e4d7SDavid Greenman 			}
13292fe6e4d7SDavid Greenman 		}
133024a1cce3SDavid Greenman 	}
13312fe6e4d7SDavid Greenman }
133238efa82bSJohn Dyson #endif
1333