xref: /freebsd/sys/vm/vm_pageout.c (revision cd41fc123ea3ae3bdc43c65f4fcdeb07f27059f8)
1df8bae1dSRodney W. Grimes /*
226f9a767SRodney W. Grimes  * Copyright (c) 1991 Regents of the University of California.
326f9a767SRodney W. Grimes  * All rights reserved.
426f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
526f9a767SRodney W. Grimes  * All rights reserved.
626f9a767SRodney W. Grimes  * Copyright (c) 1994 David Greenman
726f9a767SRodney W. Grimes  * All rights reserved.
8df8bae1dSRodney W. Grimes  *
9df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
10df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
21df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
403c4dd356SDavid Greenman  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  *
43df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44df8bae1dSRodney W. Grimes  * All rights reserved.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
49df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
50df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
51df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
52df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57df8bae1dSRodney W. Grimes  *
58df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
59df8bae1dSRodney W. Grimes  *
60df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61df8bae1dSRodney W. Grimes  *  School of Computer Science
62df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
63df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
66df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
6726f9a767SRodney W. Grimes  *
68cd41fc12SDavid Greenman  * $Id: vm_pageout.c,v 1.56 1995/10/06 09:42:11 phk Exp $
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes /*
72df8bae1dSRodney W. Grimes  *	The proverbial page-out daemon.
73df8bae1dSRodney W. Grimes  */
74df8bae1dSRodney W. Grimes 
75df8bae1dSRodney W. Grimes #include <sys/param.h>
7626f9a767SRodney W. Grimes #include <sys/systm.h>
77b5e8ce9fSBruce Evans #include <sys/kernel.h>
7826f9a767SRodney W. Grimes #include <sys/proc.h>
7926f9a767SRodney W. Grimes #include <sys/resourcevar.h>
8026f9a767SRodney W. Grimes #include <sys/malloc.h>
82d2fc5315SPoul-Henning Kamp #include <sys/signalvar.h>
83f6b04d2bSDavid Greenman #include <sys/vnode.h>
84df8bae1dSRodney W. Grimes 
85df8bae1dSRodney W. Grimes #include <vm/vm.h>
86df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
87df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
887c0414d0SDavid Greenman #include <vm/vm_kern.h>
8924a1cce3SDavid Greenman #include <vm/vm_pager.h>
9005f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
91df8bae1dSRodney W. Grimes 
922b14f991SJulian Elischer /*
932b14f991SJulian Elischer  * System initialization
942b14f991SJulian Elischer  */
952b14f991SJulian Elischer 
962b14f991SJulian Elischer /* the kernel process "vm_pageout" */
972b14f991SJulian Elischer static void vm_pageout __P((void));
982b14f991SJulian Elischer struct proc *pageproc;
992b14f991SJulian Elischer 
1002b14f991SJulian Elischer static struct kproc_desc page_kp = {
1012b14f991SJulian Elischer 	"pagedaemon",
1022b14f991SJulian Elischer 	vm_pageout,
1032b14f991SJulian Elischer 	&pageproc
1042b14f991SJulian Elischer };
1054590fd3aSDavid Greenman SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1062b14f991SJulian Elischer 
1072b14f991SJulian Elischer /* the kernel process "vm_daemon" */
1082b14f991SJulian Elischer static void vm_daemon __P((void));
1092b14f991SJulian Elischer struct	proc *vmproc;
1102b14f991SJulian Elischer 
1112b14f991SJulian Elischer static struct kproc_desc vm_kp = {
1122b14f991SJulian Elischer 	"vmdaemon",
1132b14f991SJulian Elischer 	vm_daemon,
1142b14f991SJulian Elischer 	&vmproc
1152b14f991SJulian Elischer };
1164590fd3aSDavid Greenman SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
1172b14f991SJulian Elischer 
1182b14f991SJulian Elischer 
119df8bae1dSRodney W. Grimes int vm_pages_needed;		/* Event on which pageout daemon sleeps */
12026f9a767SRodney W. Grimes 
121c3cb3e12SDavid Greenman int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
12226f9a767SRodney W. Grimes 
12326f9a767SRodney W. Grimes extern int npendingio;
1247c0414d0SDavid Greenman int vm_pageout_req_swapout;	/* XXX */
1252fe6e4d7SDavid Greenman int vm_daemon_needed;
12626f9a767SRodney W. Grimes extern int nswiodone;
1275663e6deSDavid Greenman extern int vm_swap_size;
128f6b04d2bSDavid Greenman extern int vfs_update_wakeup;
12926f9a767SRodney W. Grimes 
1301ed81ef2SDavid Greenman #define MAXSCAN 1024		/* maximum number of pages to scan in queues */
13126f9a767SRodney W. Grimes 
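/*
 * MAXLAUNDER bounds the number of dirty pages that a single
 * vm_pageout_scan() pass will start cleaning; larger systems are
 * allowed to launder more pages per pass.
 */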
1320d94caffSDavid Greenman #define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)
13326f9a767SRodney W. Grimes 
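/*
 * VM_PAGEOUT_PAGE_COUNT is the default pageout cluster size:
 * vm_pageout_clean() tries to gather up to vm_pageout_page_count
 * adjacent dirty pages into a single pageout operation.
 */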
13426f9a767SRodney W. Grimes #define VM_PAGEOUT_PAGE_COUNT 8
135bbc0ec52SDavid Greenman int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
136df8bae1dSRodney W. Grimes 
137c3cb3e12SDavid Greenman int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
138df8bae1dSRodney W. Grimes 
139cd41fc12SDavid Greenman typedef int freeer_fcn_t __P((vm_map_t, vm_object_t, int, int));
140cd41fc12SDavid Greenman static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_map_entry_t,
141cd41fc12SDavid Greenman 						 int *, freeer_fcn_t *));
142cd41fc12SDavid Greenman static freeer_fcn_t vm_pageout_object_deactivate_pages;
143cd41fc12SDavid Greenman static void vm_req_vmdaemon __P((void));
144cd41fc12SDavid Greenman 
14526f9a767SRodney W. Grimes /*
14626f9a767SRodney W. Grimes  * vm_pageout_clean:
14724a1cce3SDavid Greenman  *
1480d94caffSDavid Greenman  * Clean the page and remove it from the laundry.
14926f9a767SRodney W. Grimes  *
1500d94caffSDavid Greenman  * We set the busy bit to cause potential page faults on this page to
15126f9a767SRodney W. Grimes  * block.
15226f9a767SRodney W. Grimes  *
1530d94caffSDavid Greenman  * And we set pageout-in-progress to keep the object from disappearing
1540d94caffSDavid Greenman  * during pageout.  This guarantees that the page won't move from the
1550d94caffSDavid Greenman  * inactive queue.  (However, any other page on the inactive queue may
1560d94caffSDavid Greenman  * move!)
15726f9a767SRodney W. Grimes  */
15824a1cce3SDavid Greenman int
15924a1cce3SDavid Greenman vm_pageout_clean(m, sync)
16024a1cce3SDavid Greenman 	vm_page_t m;
16124a1cce3SDavid Greenman 	int sync;
16224a1cce3SDavid Greenman {
16326f9a767SRodney W. Grimes 	register vm_object_t object;
16426f9a767SRodney W. Grimes 	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
16524a1cce3SDavid Greenman 	vm_page_t mc[2*VM_PAGEOUT_PAGE_COUNT];
16624a1cce3SDavid Greenman 	int pageout_count;
16726f9a767SRodney W. Grimes 	int anyok = 0;
16824a1cce3SDavid Greenman 	int i, forward_okay, backward_okay, page_base;
16926f9a767SRodney W. Grimes 	vm_offset_t offset = m->offset;
17026f9a767SRodney W. Grimes 
17126f9a767SRodney W. Grimes 	object = m->object;
17224a1cce3SDavid Greenman 
17326f9a767SRodney W. Grimes 	/*
17424a1cce3SDavid Greenman 	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
17524a1cce3SDavid Greenman 	 * Try to avoid the deadlock.
17626f9a767SRodney W. Grimes 	 */
17724a1cce3SDavid Greenman 	if ((sync != VM_PAGEOUT_FORCE) &&
17824a1cce3SDavid Greenman 	    (object->type != OBJT_SWAP) &&
17924a1cce3SDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
18026f9a767SRodney W. Grimes 		return 0;
18126f9a767SRodney W. Grimes 
18224a1cce3SDavid Greenman 	/*
18324a1cce3SDavid Greenman 	 * Don't mess with the page if it's busy.
18424a1cce3SDavid Greenman 	 */
185f6b04d2bSDavid Greenman 	if ((!sync && m->hold_count != 0) ||
1860d94caffSDavid Greenman 	    ((m->busy != 0) || (m->flags & PG_BUSY)))
1870d94caffSDavid Greenman 		return 0;
1880d94caffSDavid Greenman 
18924a1cce3SDavid Greenman 	/*
19024a1cce3SDavid Greenman 	 * Try collapsing before it's too late.
19124a1cce3SDavid Greenman 	 */
19224a1cce3SDavid Greenman 	if (!sync && object->backing_object) {
19326f9a767SRodney W. Grimes 		vm_object_collapse(object);
19426f9a767SRodney W. Grimes 	}
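	/*
	 * Build the pageout cluster around the middle of mc[]: the original
	 * page goes at index VM_PAGEOUT_PAGE_COUNT, pages that follow it in
	 * the object fill the slots above, pages that precede it fill the
	 * slots below, and page_base tracks the lowest slot in use.
	 */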
19524a1cce3SDavid Greenman 	mc[VM_PAGEOUT_PAGE_COUNT] = m;
19626f9a767SRodney W. Grimes 	pageout_count = 1;
19724a1cce3SDavid Greenman 	page_base = VM_PAGEOUT_PAGE_COUNT;
19824a1cce3SDavid Greenman 	forward_okay = TRUE;
19924a1cce3SDavid Greenman 	if (offset != 0)
20024a1cce3SDavid Greenman 		backward_okay = TRUE;
20126f9a767SRodney W. Grimes 	else
20224a1cce3SDavid Greenman 		backward_okay = FALSE;
20324a1cce3SDavid Greenman 	/*
20424a1cce3SDavid Greenman 	 * Scan object for clusterable pages.
20524a1cce3SDavid Greenman 	 *
20624a1cce3SDavid Greenman 	 * We can cluster ONLY if the page is NOT
20724a1cce3SDavid Greenman 	 * clean, wired, busy, held, or mapped into a
20824a1cce3SDavid Greenman 	 * buffer, and one of the following holds:
20924a1cce3SDavid Greenman 	 * 1) the page is inactive, or is a seldom-used
21024a1cce3SDavid Greenman 	 *    active page.
21124a1cce3SDavid Greenman 	 * -or-
21224a1cce3SDavid Greenman 	 * 2) we force the issue.
21324a1cce3SDavid Greenman 	 */
21424a1cce3SDavid Greenman 	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
21524a1cce3SDavid Greenman 		vm_page_t p;
216f6b04d2bSDavid Greenman 
21724a1cce3SDavid Greenman 		/*
21824a1cce3SDavid Greenman 		 * See if forward page is clusterable.
21924a1cce3SDavid Greenman 		 */
22024a1cce3SDavid Greenman 		if (forward_okay) {
22124a1cce3SDavid Greenman 			/*
22224a1cce3SDavid Greenman 			 * Stop forward scan at end of object.
22324a1cce3SDavid Greenman 			 */
22424a1cce3SDavid Greenman 			if ((offset + i * PAGE_SIZE) > object->size) {
22524a1cce3SDavid Greenman 				forward_okay = FALSE;
22624a1cce3SDavid Greenman 				goto do_backward;
227f6b04d2bSDavid Greenman 			}
22824a1cce3SDavid Greenman 			p = vm_page_lookup(object, offset + i * PAGE_SIZE);
22924a1cce3SDavid Greenman 			if (p) {
23024a1cce3SDavid Greenman 				if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
23124a1cce3SDavid Greenman 					forward_okay = FALSE;
23224a1cce3SDavid Greenman 					goto do_backward;
233f6b04d2bSDavid Greenman 				}
23424a1cce3SDavid Greenman 				vm_page_test_dirty(p);
23524a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
23624a1cce3SDavid Greenman 				    ((p->flags & PG_INACTIVE) ||
23724a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
23824a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
23924a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
24024a1cce3SDavid Greenman 					mc[VM_PAGEOUT_PAGE_COUNT + i] = p;
24124a1cce3SDavid Greenman 					pageout_count++;
24224a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
24324a1cce3SDavid Greenman 						break;
24424a1cce3SDavid Greenman 				} else {
24524a1cce3SDavid Greenman 					forward_okay = FALSE;
246f6b04d2bSDavid Greenman 				}
24724a1cce3SDavid Greenman 			} else {
24824a1cce3SDavid Greenman 				forward_okay = FALSE;
24924a1cce3SDavid Greenman 			}
25024a1cce3SDavid Greenman 		}
25124a1cce3SDavid Greenman do_backward:
25224a1cce3SDavid Greenman 		/*
25324a1cce3SDavid Greenman 		 * See if backward page is clusterable.
25424a1cce3SDavid Greenman 		 */
25524a1cce3SDavid Greenman 		if (backward_okay) {
25624a1cce3SDavid Greenman 			/*
25724a1cce3SDavid Greenman 			 * Stop backward scan at beginning of object.
25824a1cce3SDavid Greenman 			 */
25924a1cce3SDavid Greenman 			if ((offset - i * PAGE_SIZE) == 0) {
26024a1cce3SDavid Greenman 				backward_okay = FALSE;
26124a1cce3SDavid Greenman 			}
26224a1cce3SDavid Greenman 			p = vm_page_lookup(object, offset - i * PAGE_SIZE);
26324a1cce3SDavid Greenman 			if (p) {
26424a1cce3SDavid Greenman 				if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
26524a1cce3SDavid Greenman 					backward_okay = FALSE;
26624a1cce3SDavid Greenman 					continue;
26724a1cce3SDavid Greenman 				}
26824a1cce3SDavid Greenman 				vm_page_test_dirty(p);
26924a1cce3SDavid Greenman 				if ((p->dirty & p->valid) != 0 &&
27024a1cce3SDavid Greenman 				    ((p->flags & PG_INACTIVE) ||
27124a1cce3SDavid Greenman 				     (sync == VM_PAGEOUT_FORCE)) &&
27224a1cce3SDavid Greenman 				    (p->wire_count == 0) &&
27324a1cce3SDavid Greenman 				    (p->hold_count == 0)) {
27424a1cce3SDavid Greenman 					mc[VM_PAGEOUT_PAGE_COUNT - i] = p;
27524a1cce3SDavid Greenman 					pageout_count++;
27624a1cce3SDavid Greenman 					page_base--;
27724a1cce3SDavid Greenman 					if (pageout_count == vm_pageout_page_count)
27824a1cce3SDavid Greenman 						break;
27924a1cce3SDavid Greenman 				} else {
28024a1cce3SDavid Greenman 					backward_okay = FALSE;
28124a1cce3SDavid Greenman 				}
28224a1cce3SDavid Greenman 			} else {
28324a1cce3SDavid Greenman 				backward_okay = FALSE;
28424a1cce3SDavid Greenman 			}
285f6b04d2bSDavid Greenman 		}
286f6b04d2bSDavid Greenman 	}
287f6b04d2bSDavid Greenman 
2880d94caffSDavid Greenman 	/*
2890d94caffSDavid Greenman 	 * we allow reads during pageouts...
2900d94caffSDavid Greenman 	 */
29124a1cce3SDavid Greenman 	for (i = page_base; i < (page_base + pageout_count); i++) {
29224a1cce3SDavid Greenman 		mc[i]->flags |= PG_BUSY;
29324a1cce3SDavid Greenman 		vm_page_protect(mc[i], VM_PROT_READ);
29426f9a767SRodney W. Grimes 	}
29526f9a767SRodney W. Grimes 	object->paging_in_progress += pageout_count;
29626f9a767SRodney W. Grimes 
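	/*
	 * Hand the cluster to the pager.  Pageouts of kernel_object pages,
	 * and pageouts explicitly requested as synchronous, are done
	 * synchronously; everything else may complete later and is reported
	 * back as VM_PAGER_PEND.
	 */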
29724a1cce3SDavid Greenman 	vm_pager_put_pages(object, &mc[page_base], pageout_count,
29826f9a767SRodney W. Grimes 	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
29926f9a767SRodney W. Grimes 	    pageout_status);
30026f9a767SRodney W. Grimes 
30126f9a767SRodney W. Grimes 	for (i = 0; i < pageout_count; i++) {
30224a1cce3SDavid Greenman 		vm_page_t mt = mc[page_base + i];
30324a1cce3SDavid Greenman 
30426f9a767SRodney W. Grimes 		switch (pageout_status[i]) {
30526f9a767SRodney W. Grimes 		case VM_PAGER_OK:
30626f9a767SRodney W. Grimes 			++anyok;
30726f9a767SRodney W. Grimes 			break;
30826f9a767SRodney W. Grimes 		case VM_PAGER_PEND:
30926f9a767SRodney W. Grimes 			++anyok;
31026f9a767SRodney W. Grimes 			break;
31126f9a767SRodney W. Grimes 		case VM_PAGER_BAD:
31226f9a767SRodney W. Grimes 			/*
3130d94caffSDavid Greenman 			 * Page outside of range of object. Right now we
3140d94caffSDavid Greenman 			 * essentially lose the changes by pretending it
3150d94caffSDavid Greenman 			 * worked.
31626f9a767SRodney W. Grimes 			 */
31724a1cce3SDavid Greenman 			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
31824a1cce3SDavid Greenman 			mt->dirty = 0;
31926f9a767SRodney W. Grimes 			break;
32026f9a767SRodney W. Grimes 		case VM_PAGER_ERROR:
32126f9a767SRodney W. Grimes 		case VM_PAGER_FAIL:
32226f9a767SRodney W. Grimes 			/*
3230d94caffSDavid Greenman 			 * If the page couldn't be paged out, reactivate it
3240d94caffSDavid Greenman 			 * so it doesn't clog the inactive list.  (We will
3250d94caffSDavid Greenman 			 * try paging it out again later.)
32626f9a767SRodney W. Grimes 			 */
32724a1cce3SDavid Greenman 			if (mt->flags & PG_INACTIVE)
32824a1cce3SDavid Greenman 				vm_page_activate(mt);
32926f9a767SRodney W. Grimes 			break;
33026f9a767SRodney W. Grimes 		case VM_PAGER_AGAIN:
33126f9a767SRodney W. Grimes 			break;
33226f9a767SRodney W. Grimes 		}
33326f9a767SRodney W. Grimes 
33426f9a767SRodney W. Grimes 
33526f9a767SRodney W. Grimes 		/*
3360d94caffSDavid Greenman 		 * If the operation is still going, leave the page busy to
3370d94caffSDavid Greenman 		 * block all other accesses. Also, leave the paging in
3380d94caffSDavid Greenman 		 * progress indicator set so that we don't attempt an object
3390d94caffSDavid Greenman 		 * collapse.
34026f9a767SRodney W. Grimes 		 */
34126f9a767SRodney W. Grimes 		if (pageout_status[i] != VM_PAGER_PEND) {
342f919ebdeSDavid Greenman 			vm_object_pip_wakeup(object);
34324a1cce3SDavid Greenman 			if ((mt->flags & (PG_REFERENCED|PG_WANTED)) ||
34424a1cce3SDavid Greenman 			    pmap_is_referenced(VM_PAGE_TO_PHYS(mt))) {
34524a1cce3SDavid Greenman 				pmap_clear_reference(VM_PAGE_TO_PHYS(mt));
34624a1cce3SDavid Greenman 				mt->flags &= ~PG_REFERENCED;
34724a1cce3SDavid Greenman 				if (mt->flags & PG_INACTIVE)
34824a1cce3SDavid Greenman 					vm_page_activate(mt);
34926f9a767SRodney W. Grimes 			}
35024a1cce3SDavid Greenman 			PAGE_WAKEUP(mt);
35126f9a767SRodney W. Grimes 		}
35226f9a767SRodney W. Grimes 	}
35326f9a767SRodney W. Grimes 	return anyok;
35426f9a767SRodney W. Grimes }
35526f9a767SRodney W. Grimes 
35626f9a767SRodney W. Grimes /*
35726f9a767SRodney W. Grimes  *	vm_pageout_object_deactivate_pages
35826f9a767SRodney W. Grimes  *
35926f9a767SRodney W. Grimes  *	deactivate enough pages to satisfy the inactive target
36026f9a767SRodney W. Grimes  *	requirements; or, if vm_page_proc_limit is set, then
36126f9a767SRodney W. Grimes  *	deactivate all of the pages in the object and its
36224a1cce3SDavid Greenman  *	backing_objects.
36326f9a767SRodney W. Grimes  *
36426f9a767SRodney W. Grimes  *	The object and map must be locked.
36526f9a767SRodney W. Grimes  */
366cd41fc12SDavid Greenman static int
3670d94caffSDavid Greenman vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
36826f9a767SRodney W. Grimes 	vm_map_t map;
36926f9a767SRodney W. Grimes 	vm_object_t object;
37026f9a767SRodney W. Grimes 	int count;
3710d94caffSDavid Greenman 	int map_remove_only;
37226f9a767SRodney W. Grimes {
37326f9a767SRodney W. Grimes 	register vm_page_t p, next;
37426f9a767SRodney W. Grimes 	int rcount;
37526f9a767SRodney W. Grimes 	int dcount;
37626f9a767SRodney W. Grimes 
37726f9a767SRodney W. Grimes 	dcount = 0;
37826f9a767SRodney W. Grimes 	if (count == 0)
37926f9a767SRodney W. Grimes 		count = 1;
38026f9a767SRodney W. Grimes 
38124a1cce3SDavid Greenman 	if (object->type == OBJT_DEVICE)
3828f895206SDavid Greenman 		return 0;
3838f895206SDavid Greenman 
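	/*
	 * Work on the backing object first.  If it is only referenced by
	 * this object, deactivate there as well, splitting the count;
	 * otherwise just remove mappings from it (map_remove_only).
	 */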
38424a1cce3SDavid Greenman 	if (object->backing_object) {
38524a1cce3SDavid Greenman 		if (object->backing_object->ref_count == 1)
38624a1cce3SDavid Greenman 			dcount += vm_pageout_object_deactivate_pages(map,
38724a1cce3SDavid Greenman 			    object->backing_object, count / 2 + 1, map_remove_only);
3880d94caffSDavid Greenman 		else
38924a1cce3SDavid Greenman 			vm_pageout_object_deactivate_pages(map,
39024a1cce3SDavid Greenman 			    object->backing_object, count, 1);
3912fe6e4d7SDavid Greenman 	}
39224a1cce3SDavid Greenman 	if (object->paging_in_progress)
39326f9a767SRodney W. Grimes 		return dcount;
39426f9a767SRodney W. Grimes 
39526f9a767SRodney W. Grimes 	/*
39626f9a767SRodney W. Grimes 	 * scan the object's entire memory queue
39726f9a767SRodney W. Grimes 	 */
39826f9a767SRodney W. Grimes 	rcount = object->resident_page_count;
39926f9a767SRodney W. Grimes 	p = object->memq.tqh_first;
40026f9a767SRodney W. Grimes 	while (p && (rcount-- > 0)) {
40126f9a767SRodney W. Grimes 		next = p->listq.tqe_next;
402a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
4030d94caffSDavid Greenman 		if (p->wire_count != 0 ||
4040d94caffSDavid Greenman 		    p->hold_count != 0 ||
4050d94caffSDavid Greenman 		    p->busy != 0 ||
4060d94caffSDavid Greenman 		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
4070d94caffSDavid Greenman 			p = next;
4080d94caffSDavid Greenman 			continue;
4090d94caffSDavid Greenman 		}
41026f9a767SRodney W. Grimes 		/*
4110d94caffSDavid Greenman 		 * if a page is active, not wired and is in the process's
4120d94caffSDavid Greenman 		 * pmap, then deactivate the page.
41326f9a767SRodney W. Grimes 		 */
4140d94caffSDavid Greenman 		if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
415a647a309SDavid Greenman 			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
4161ed81ef2SDavid Greenman 			    (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
41726f9a767SRodney W. Grimes 				p->act_count -= min(p->act_count, ACT_DECLINE);
41826f9a767SRodney W. Grimes 				/*
4190d94caffSDavid Greenman 				 * if the page act_count is zero -- then we
4200d94caffSDavid Greenman 				 * deactivate
42126f9a767SRodney W. Grimes 				 */
42226f9a767SRodney W. Grimes 				if (!p->act_count) {
4230d94caffSDavid Greenman 					if (!map_remove_only)
42426f9a767SRodney W. Grimes 						vm_page_deactivate(p);
425f919ebdeSDavid Greenman 					vm_page_protect(p, VM_PROT_NONE);
42626f9a767SRodney W. Grimes 					/*
4270d94caffSDavid Greenman 					 * otherwise, if we will deactivate
4280d94caffSDavid Greenman 					 * the page on the next go-around,
4290d94caffSDavid Greenman 					 * place it at the end of the queue
4300d94caffSDavid Greenman 					 * so that the other pages in memory
4310d94caffSDavid Greenman 					 * get a chance to age.
43226f9a767SRodney W. Grimes 					 */
43326f9a767SRodney W. Grimes 				} else {
43426f9a767SRodney W. Grimes 					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
43526f9a767SRodney W. Grimes 					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
43626f9a767SRodney W. Grimes 				}
43726f9a767SRodney W. Grimes 				/*
43826f9a767SRodney W. Grimes 				 * see if we are done yet
43926f9a767SRodney W. Grimes 				 */
44026f9a767SRodney W. Grimes 				if (p->flags & PG_INACTIVE) {
44126f9a767SRodney W. Grimes 					--count;
44226f9a767SRodney W. Grimes 					++dcount;
44326f9a767SRodney W. Grimes 					if (count <= 0 &&
44426f9a767SRodney W. Grimes 					    cnt.v_inactive_count > cnt.v_inactive_target) {
44526f9a767SRodney W. Grimes 						return dcount;
44626f9a767SRodney W. Grimes 					}
44726f9a767SRodney W. Grimes 				}
44826f9a767SRodney W. Grimes 			} else {
44926f9a767SRodney W. Grimes 				/*
45026f9a767SRodney W. Grimes 				 * Move the page to the bottom of the queue.
45126f9a767SRodney W. Grimes 				 */
45226f9a767SRodney W. Grimes 				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
453a647a309SDavid Greenman 				p->flags &= ~PG_REFERENCED;
45426f9a767SRodney W. Grimes 				if (p->act_count < ACT_MAX)
45526f9a767SRodney W. Grimes 					p->act_count += ACT_ADVANCE;
45626f9a767SRodney W. Grimes 
45726f9a767SRodney W. Grimes 				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
45826f9a767SRodney W. Grimes 				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
45926f9a767SRodney W. Grimes 			}
4600d94caffSDavid Greenman 		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
461f919ebdeSDavid Greenman 			vm_page_protect(p, VM_PROT_NONE);
46226f9a767SRodney W. Grimes 		}
46326f9a767SRodney W. Grimes 		p = next;
46426f9a767SRodney W. Grimes 	}
46526f9a767SRodney W. Grimes 	return dcount;
46626f9a767SRodney W. Grimes }
46726f9a767SRodney W. Grimes 
46826f9a767SRodney W. Grimes 
46926f9a767SRodney W. Grimes /*
47026f9a767SRodney W. Grimes  * deactivate some number of pages in a map; try to do it fairly, but
47126f9a767SRodney W. Grimes  * that is really hard to do.
47226f9a767SRodney W. Grimes  */
47326f9a767SRodney W. Grimes 
474cd41fc12SDavid Greenman static void
47526f9a767SRodney W. Grimes vm_pageout_map_deactivate_pages(map, entry, count, freeer)
47626f9a767SRodney W. Grimes 	vm_map_t map;
47726f9a767SRodney W. Grimes 	vm_map_entry_t entry;
47826f9a767SRodney W. Grimes 	int *count;
479cd41fc12SDavid Greenman 	freeer_fcn_t *freeer;
48026f9a767SRodney W. Grimes {
48126f9a767SRodney W. Grimes 	vm_map_t tmpm;
48226f9a767SRodney W. Grimes 	vm_map_entry_t tmpe;
48326f9a767SRodney W. Grimes 	vm_object_t obj;
4840d94caffSDavid Greenman 
48526f9a767SRodney W. Grimes 	if (*count <= 0)
48626f9a767SRodney W. Grimes 		return;
48726f9a767SRodney W. Grimes 	vm_map_reference(map);
48826f9a767SRodney W. Grimes 	if (!lock_try_read(&map->lock)) {
48926f9a767SRodney W. Grimes 		vm_map_deallocate(map);
49026f9a767SRodney W. Grimes 		return;
49126f9a767SRodney W. Grimes 	}
49226f9a767SRodney W. Grimes 	if (entry == 0) {
49326f9a767SRodney W. Grimes 		tmpe = map->header.next;
49426f9a767SRodney W. Grimes 		while (tmpe != &map->header && *count > 0) {
495cd41fc12SDavid Greenman 			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
49626f9a767SRodney W. Grimes 			tmpe = tmpe->next;
49726f9a767SRodney W. Grimes 		};
49826f9a767SRodney W. Grimes 	} else if (entry->is_sub_map || entry->is_a_map) {
49926f9a767SRodney W. Grimes 		tmpm = entry->object.share_map;
50026f9a767SRodney W. Grimes 		tmpe = tmpm->header.next;
50126f9a767SRodney W. Grimes 		while (tmpe != &tmpm->header && *count > 0) {
502cd41fc12SDavid Greenman 			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
50326f9a767SRodney W. Grimes 			tmpe = tmpe->next;
50426f9a767SRodney W. Grimes 		};
5054e39a515SPoul-Henning Kamp 	} else if ((obj = entry->object.vm_object) != 0) {
506cd41fc12SDavid Greenman 		*count -= (*freeer) (map, obj, *count, TRUE);
50726f9a767SRodney W. Grimes 	}
50826f9a767SRodney W. Grimes 	lock_read_done(&map->lock);
50926f9a767SRodney W. Grimes 	vm_map_deallocate(map);
51026f9a767SRodney W. Grimes 	return;
51126f9a767SRodney W. Grimes }
512df8bae1dSRodney W. Grimes 
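/*
 * Ask the vm_daemon process to run, but wake it at most once every
 * hz/10 ticks (the ticks < lastrun test covers the tick counter wrapping).
 */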
513cd41fc12SDavid Greenman static void
5140d94caffSDavid Greenman vm_req_vmdaemon()
5150d94caffSDavid Greenman {
5160d94caffSDavid Greenman 	static int lastrun = 0;
5170d94caffSDavid Greenman 
5182fe6e4d7SDavid Greenman 	if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
51924a1cce3SDavid Greenman 		wakeup(&vm_daemon_needed);
5202fe6e4d7SDavid Greenman 		lastrun = ticks;
5212fe6e4d7SDavid Greenman 	}
5222fe6e4d7SDavid Greenman }
5232fe6e4d7SDavid Greenman 
524df8bae1dSRodney W. Grimes /*
525df8bae1dSRodney W. Grimes  *	vm_pageout_scan does the dirty work for the pageout daemon.
526df8bae1dSRodney W. Grimes  */
52726f9a767SRodney W. Grimes int
528df8bae1dSRodney W. Grimes vm_pageout_scan()
529df8bae1dSRodney W. Grimes {
53026f9a767SRodney W. Grimes 	vm_page_t m;
531f6b04d2bSDavid Greenman 	int page_shortage, maxscan, maxlaunder, pcount;
5324e39a515SPoul-Henning Kamp 	int pages_freed;
53326f9a767SRodney W. Grimes 	vm_page_t next;
5345663e6deSDavid Greenman 	struct proc *p, *bigproc;
5355663e6deSDavid Greenman 	vm_offset_t size, bigsize;
536df8bae1dSRodney W. Grimes 	vm_object_t object;
53726f9a767SRodney W. Grimes 	int force_wakeup = 0;
538f6b04d2bSDavid Greenman 	int vnodes_skipped = 0;
5390d94caffSDavid Greenman 
54026f9a767SRodney W. Grimes 	pages_freed = 0;
541df8bae1dSRodney W. Grimes 
542df8bae1dSRodney W. Grimes 	/*
5430d94caffSDavid Greenman 	 * Start scanning the inactive queue for pages we can free. We keep
5440d94caffSDavid Greenman 	 * scanning until we have enough free pages or we have scanned through
5450d94caffSDavid Greenman 	 * the entire queue.  If we encounter dirty pages, we start cleaning
5460d94caffSDavid Greenman 	 * them.
547df8bae1dSRodney W. Grimes 	 */
548df8bae1dSRodney W. Grimes 
5490d94caffSDavid Greenman 	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
5500d94caffSDavid Greenman 	    MAXLAUNDER : cnt.v_inactive_target;
5510d94caffSDavid Greenman 
55226f9a767SRodney W. Grimes rescan1:
553f6b04d2bSDavid Greenman 	maxscan = cnt.v_inactive_count;
55426f9a767SRodney W. Grimes 	m = vm_page_queue_inactive.tqh_first;
555f6b04d2bSDavid Greenman 	while ((m != NULL) && (maxscan-- > 0) &&
556f6b04d2bSDavid Greenman 	    ((cnt.v_cache_count + cnt.v_free_count) < (cnt.v_cache_min + cnt.v_free_target))) {
55726f9a767SRodney W. Grimes 		vm_page_t next;
558df8bae1dSRodney W. Grimes 
559a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
560df8bae1dSRodney W. Grimes 		next = m->pageq.tqe_next;
561df8bae1dSRodney W. Grimes 
5620d94caffSDavid Greenman #if defined(VM_DIAGNOSE)
56326f9a767SRodney W. Grimes 		if ((m->flags & PG_INACTIVE) == 0) {
564832f3afdSAndreas Schulz 			printf("vm_pageout_scan: page not inactive?\n");
5650d94caffSDavid Greenman 			break;
566df8bae1dSRodney W. Grimes 		}
5670d94caffSDavid Greenman #endif
56826f9a767SRodney W. Grimes 
56926f9a767SRodney W. Grimes 		/*
57026f9a767SRodney W. Grimes 		 * don't mess with busy pages
57126f9a767SRodney W. Grimes 		 */
572f6b04d2bSDavid Greenman 		if (m->hold_count || m->busy || (m->flags & PG_BUSY)) {
5730d94caffSDavid Greenman 			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
5740d94caffSDavid Greenman 			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
57526f9a767SRodney W. Grimes 			m = next;
57626f9a767SRodney W. Grimes 			continue;
57726f9a767SRodney W. Grimes 		}
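		/*
		 * Fold the pmap reference bit into PG_REFERENCED, ignore
		 * references on pages whose object is no longer referenced,
		 * and reactivate any page that has been referenced or is
		 * wanted.
		 */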
5780d94caffSDavid Greenman 		if (((m->flags & PG_REFERENCED) == 0) &&
5790d94caffSDavid Greenman 		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
5802fe6e4d7SDavid Greenman 			m->flags |= PG_REFERENCED;
5810d94caffSDavid Greenman 		}
5820d94caffSDavid Greenman 		if (m->object->ref_count == 0) {
5830d94caffSDavid Greenman 			m->flags &= ~PG_REFERENCED;
5842fe6e4d7SDavid Greenman 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
5852fe6e4d7SDavid Greenman 		}
5861ed81ef2SDavid Greenman 		if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
587a647a309SDavid Greenman 			m->flags &= ~PG_REFERENCED;
5880d94caffSDavid Greenman 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
58926f9a767SRodney W. Grimes 			vm_page_activate(m);
5906d40c3d3SDavid Greenman 			if (m->act_count < ACT_MAX)
5916d40c3d3SDavid Greenman 				m->act_count += ACT_ADVANCE;
5920d94caffSDavid Greenman 			m = next;
5930d94caffSDavid Greenman 			continue;
5940d94caffSDavid Greenman 		}
5950d94caffSDavid Greenman 
596f6b04d2bSDavid Greenman 		vm_page_test_dirty(m);
597f6b04d2bSDavid Greenman 		if (m->dirty == 0) {
598f6b04d2bSDavid Greenman 			if (m->bmapped == 0) {
5996d40c3d3SDavid Greenman 				if (m->valid == 0) {
6006d40c3d3SDavid Greenman 					pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
6016d40c3d3SDavid Greenman 					vm_page_free(m);
6020bb3a0d2SDavid Greenman 					cnt.v_dfree++;
6036f2b142eSDavid Greenman 				} else {
6040d94caffSDavid Greenman 					vm_page_cache(m);
6056d40c3d3SDavid Greenman 				}
606f6b04d2bSDavid Greenman 				++pages_freed;
607f6b04d2bSDavid Greenman 			} else {
608f6b04d2bSDavid Greenman 				m = next;
609f6b04d2bSDavid Greenman 				continue;
610f6b04d2bSDavid Greenman 			}
6110d94caffSDavid Greenman 		} else if (maxlaunder > 0) {
6120d94caffSDavid Greenman 			int written;
613f6b04d2bSDavid Greenman 			struct vnode *vp = NULL;
6140d94caffSDavid Greenman 
6150d94caffSDavid Greenman 			object = m->object;
61624a1cce3SDavid Greenman 			if (object->flags & OBJ_DEAD) {
6170d94caffSDavid Greenman 				m = next;
6180d94caffSDavid Greenman 				continue;
6190d94caffSDavid Greenman 			}
620f6b04d2bSDavid Greenman 
62124a1cce3SDavid Greenman 			if (object->type == OBJT_VNODE) {
62224a1cce3SDavid Greenman 				vp = object->handle;
623f6b04d2bSDavid Greenman 				if (VOP_ISLOCKED(vp) || vget(vp, 1)) {
624f6b04d2bSDavid Greenman 					if (object->flags & OBJ_WRITEABLE)
625f6b04d2bSDavid Greenman 						++vnodes_skipped;
626f6b04d2bSDavid Greenman 					m = next;
627f6b04d2bSDavid Greenman 					continue;
628f6b04d2bSDavid Greenman 				}
629f6b04d2bSDavid Greenman 			}
630f6b04d2bSDavid Greenman 
6310d94caffSDavid Greenman 			/*
6320d94caffSDavid Greenman 			 * If a page is dirty, then it is either being washed
6330d94caffSDavid Greenman 			 * (but not yet cleaned) or it is still in the
6340d94caffSDavid Greenman 			 * laundry.  If it is still in the laundry, then we
6350d94caffSDavid Greenman 			 * start the cleaning operation.
6360d94caffSDavid Greenman 			 */
6370d94caffSDavid Greenman 			written = vm_pageout_clean(m, 0);
638f6b04d2bSDavid Greenman 
639f6b04d2bSDavid Greenman 			if (vp)
640f6b04d2bSDavid Greenman 				vput(vp);
641f6b04d2bSDavid Greenman 
6420d94caffSDavid Greenman 			if (!next) {
6430d94caffSDavid Greenman 				break;
6440d94caffSDavid Greenman 			}
6450d94caffSDavid Greenman 			maxlaunder -= written;
6460d94caffSDavid Greenman 			/*
6470d94caffSDavid Greenman 			 * if the next page has been re-activated, start
6480d94caffSDavid Greenman 			 * scanning again
6490d94caffSDavid Greenman 			 */
6500d94caffSDavid Greenman 			if ((next->flags & PG_INACTIVE) == 0) {
6510d94caffSDavid Greenman 				goto rescan1;
6520d94caffSDavid Greenman 			}
653df8bae1dSRodney W. Grimes 		}
65426f9a767SRodney W. Grimes 		m = next;
65526f9a767SRodney W. Grimes 	}
65626f9a767SRodney W. Grimes 
657df8bae1dSRodney W. Grimes 	/*
6580d94caffSDavid Greenman 	 * Compute the page shortage.  If we are still very low on memory, be
6590d94caffSDavid Greenman 	 * sure that we will move a minimal number of pages from active to
6600d94caffSDavid Greenman 	 * inactive.
661df8bae1dSRodney W. Grimes 	 */
662df8bae1dSRodney W. Grimes 
66326f9a767SRodney W. Grimes 	page_shortage = cnt.v_inactive_target -
6640d94caffSDavid Greenman 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
66526f9a767SRodney W. Grimes 	if (page_shortage <= 0) {
66626f9a767SRodney W. Grimes 		if (pages_freed == 0) {
66717c4c408SDavid Greenman 			page_shortage = cnt.v_free_min - cnt.v_free_count;
668f6b04d2bSDavid Greenman 		} else {
669f6b04d2bSDavid Greenman 			page_shortage = 1;
67026f9a767SRodney W. Grimes 		}
671df8bae1dSRodney W. Grimes 	}
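	/*
	 * Now scan the active queue, aging pages and moving enough of them
	 * to the inactive (or cache) queue to make up the page shortage
	 * computed above.
	 */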
672f6b04d2bSDavid Greenman 	maxscan = MAXSCAN;
673f6b04d2bSDavid Greenman 	pcount = cnt.v_active_count;
67426f9a767SRodney W. Grimes 	m = vm_page_queue_active.tqh_first;
675f6b04d2bSDavid Greenman 	while ((m != NULL) && (maxscan > 0) && (pcount-- > 0) && (page_shortage > 0)) {
67626f9a767SRodney W. Grimes 
677a58d1fa1SDavid Greenman 		cnt.v_pdpages++;
67826f9a767SRodney W. Grimes 		next = m->pageq.tqe_next;
679df8bae1dSRodney W. Grimes 
680df8bae1dSRodney W. Grimes 		/*
68126f9a767SRodney W. Grimes 		 * Don't deactivate pages that are busy.
682df8bae1dSRodney W. Grimes 		 */
683a647a309SDavid Greenman 		if ((m->busy != 0) ||
6840d94caffSDavid Greenman 		    (m->flags & PG_BUSY) ||
685f6b04d2bSDavid Greenman 		    (m->hold_count != 0)) {
6866d40c3d3SDavid Greenman 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
6876d40c3d3SDavid Greenman 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
68826f9a767SRodney W. Grimes 			m = next;
68926f9a767SRodney W. Grimes 			continue;
690df8bae1dSRodney W. Grimes 		}
6911ed81ef2SDavid Greenman 		if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
6920d94caffSDavid Greenman 			pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
6930d94caffSDavid Greenman 			int s;
694df8bae1dSRodney W. Grimes 
695df8bae1dSRodney W. Grimes 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
696a647a309SDavid Greenman 			m->flags &= ~PG_REFERENCED;
6970d94caffSDavid Greenman 			if (m->act_count < ACT_MAX) {
69826f9a767SRodney W. Grimes 				m->act_count += ACT_ADVANCE;
6990d94caffSDavid Greenman 			}
70026f9a767SRodney W. Grimes 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
70126f9a767SRodney W. Grimes 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
70226f9a767SRodney W. Grimes 		} else {
7036d40c3d3SDavid Greenman 			m->flags &= ~PG_REFERENCED;
7046d40c3d3SDavid Greenman 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
70526f9a767SRodney W. Grimes 			m->act_count -= min(m->act_count, ACT_DECLINE);
706df8bae1dSRodney W. Grimes 
707df8bae1dSRodney W. Grimes 			/*
70826f9a767SRodney W. Grimes 			 * if the page act_count is zero -- then we deactivate
709df8bae1dSRodney W. Grimes 			 */
7100d94caffSDavid Greenman 			if (!m->act_count && (page_shortage > 0)) {
7110d94caffSDavid Greenman 				if (m->object->ref_count == 0) {
7120d94caffSDavid Greenman 					--page_shortage;
713f6b04d2bSDavid Greenman 					vm_page_test_dirty(m);
714f6b04d2bSDavid Greenman 					if ((m->bmapped == 0) && (m->dirty == 0) ) {
7150d94caffSDavid Greenman 						m->act_count = 0;
7160d94caffSDavid Greenman 						vm_page_cache(m);
7170d94caffSDavid Greenman 					} else {
7180d94caffSDavid Greenman 						vm_page_deactivate(m);
7190d94caffSDavid Greenman 					}
7200d94caffSDavid Greenman 				} else {
72126f9a767SRodney W. Grimes 					vm_page_deactivate(m);
72226f9a767SRodney W. Grimes 					--page_shortage;
7230d94caffSDavid Greenman 				}
7246d40c3d3SDavid Greenman 			} else if (m->act_count) {
72526f9a767SRodney W. Grimes 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
72626f9a767SRodney W. Grimes 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
727df8bae1dSRodney W. Grimes 			}
728df8bae1dSRodney W. Grimes 		}
729f6b04d2bSDavid Greenman 		maxscan--;
73026f9a767SRodney W. Grimes 		m = next;
73126f9a767SRodney W. Grimes 	}
732df8bae1dSRodney W. Grimes 
733df8bae1dSRodney W. Grimes 	/*
7340d94caffSDavid Greenman 	 * We try to maintain some *really* free pages; this allows interrupt
7350d94caffSDavid Greenman 	 * code to be guaranteed space.
736df8bae1dSRodney W. Grimes 	 */
737a1f6d91cSDavid Greenman 	while (cnt.v_free_count < cnt.v_free_reserved) {
7380d94caffSDavid Greenman 		m = vm_page_queue_cache.tqh_first;
7390d94caffSDavid Greenman 		if (!m)
7400d94caffSDavid Greenman 			break;
7410d94caffSDavid Greenman 		vm_page_free(m);
7420bb3a0d2SDavid Greenman 		cnt.v_dfree++;
74326f9a767SRodney W. Grimes 	}
7445663e6deSDavid Greenman 
7455663e6deSDavid Greenman 	/*
746f6b04d2bSDavid Greenman 	 * If we didn't get enough free pages and we have skipped a vnode
7474c1f8ee9SDavid Greenman 	 * in a writeable object, wake up the sync daemon.  Also kick off
7484c1f8ee9SDavid Greenman 	 * swapout if we did not get enough free pages.
749f6b04d2bSDavid Greenman 	 */
7504c1f8ee9SDavid Greenman 	if ((cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_target) {
751f6b04d2bSDavid Greenman 		if (vnodes_skipped &&
752f6b04d2bSDavid Greenman 		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
753f6b04d2bSDavid Greenman 			if (!vfs_update_wakeup) {
754f6b04d2bSDavid Greenman 				vfs_update_wakeup = 1;
75524a1cce3SDavid Greenman 				wakeup(&vfs_update_wakeup);
756f6b04d2bSDavid Greenman 			}
757f6b04d2bSDavid Greenman 		}
7584c1f8ee9SDavid Greenman 		/*
7594c1f8ee9SDavid Greenman 		 * now swap processes out if we are in low memory conditions
7604c1f8ee9SDavid Greenman 		 */
7614c1f8ee9SDavid Greenman 		if (!swap_pager_full && vm_swap_size &&
7624c1f8ee9SDavid Greenman 			vm_pageout_req_swapout == 0) {
7634c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 1;
7644c1f8ee9SDavid Greenman 			vm_req_vmdaemon();
7654c1f8ee9SDavid Greenman 		}
7664c1f8ee9SDavid Greenman 	}
7674c1f8ee9SDavid Greenman 
7684c1f8ee9SDavid Greenman 	if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
7694c1f8ee9SDavid Greenman 	    (cnt.v_inactive_target + cnt.v_free_min)) {
7704c1f8ee9SDavid Greenman 		vm_req_vmdaemon();
7714c1f8ee9SDavid Greenman 	}
772f6b04d2bSDavid Greenman 
773f6b04d2bSDavid Greenman 	/*
7740d94caffSDavid Greenman 	 * make sure that we have swap space -- if we are low on both memory
7750d94caffSDavid Greenman 	 * and swap, then kill the biggest process.
7765663e6deSDavid Greenman 	 */
7775663e6deSDavid Greenman 	if ((vm_swap_size == 0 || swap_pager_full) &&
7780d94caffSDavid Greenman 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
7795663e6deSDavid Greenman 		bigproc = NULL;
7805663e6deSDavid Greenman 		bigsize = 0;
7815663e6deSDavid Greenman 		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
7825663e6deSDavid Greenman 			/*
7835663e6deSDavid Greenman 			 * if this is a system process, skip it
7845663e6deSDavid Greenman 			 */
78579221631SDavid Greenman 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
78679221631SDavid Greenman 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
7875663e6deSDavid Greenman 				continue;
7885663e6deSDavid Greenman 			}
7895663e6deSDavid Greenman 			/*
7905663e6deSDavid Greenman 			 * if the process is in a non-running type state,
7915663e6deSDavid Greenman 			 * don't touch it.
7925663e6deSDavid Greenman 			 */
7935663e6deSDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
7945663e6deSDavid Greenman 				continue;
7955663e6deSDavid Greenman 			}
7965663e6deSDavid Greenman 			/*
7975663e6deSDavid Greenman 			 * get the process size
7985663e6deSDavid Greenman 			 */
7995663e6deSDavid Greenman 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
8005663e6deSDavid Greenman 			/*
8015663e6deSDavid Greenman 			 * if this process is bigger than the biggest one so
8025663e6deSDavid Greenman 			 * far, remember it.
8035663e6deSDavid Greenman 			 */
8045663e6deSDavid Greenman 			if (size > bigsize) {
8055663e6deSDavid Greenman 				bigproc = p;
8065663e6deSDavid Greenman 				bigsize = size;
8075663e6deSDavid Greenman 			}
8085663e6deSDavid Greenman 		}
8095663e6deSDavid Greenman 		if (bigproc != NULL) {
8105663e6deSDavid Greenman 			printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
8115663e6deSDavid Greenman 			psignal(bigproc, SIGKILL);
8125663e6deSDavid Greenman 			bigproc->p_estcpu = 0;
8135663e6deSDavid Greenman 			bigproc->p_nice = PRIO_MIN;
8145663e6deSDavid Greenman 			resetpriority(bigproc);
81524a1cce3SDavid Greenman 			wakeup(&cnt.v_free_count);
8165663e6deSDavid Greenman 		}
8175663e6deSDavid Greenman 	}
81826f9a767SRodney W. Grimes 	return force_wakeup;
81926f9a767SRodney W. Grimes }
82026f9a767SRodney W. Grimes 
821df8bae1dSRodney W. Grimes /*
822df8bae1dSRodney W. Grimes  *	vm_pageout is the high level pageout daemon.
823df8bae1dSRodney W. Grimes  */
8242b14f991SJulian Elischer static void
82526f9a767SRodney W. Grimes vm_pageout()
826df8bae1dSRodney W. Grimes {
827df8bae1dSRodney W. Grimes 	(void) spl0();
828df8bae1dSRodney W. Grimes 
829df8bae1dSRodney W. Grimes 	/*
830df8bae1dSRodney W. Grimes 	 * Initialize some paging parameters.
831df8bae1dSRodney W. Grimes 	 */
832df8bae1dSRodney W. Grimes 
833f6b04d2bSDavid Greenman 	cnt.v_interrupt_free_min = 2;
834f6b04d2bSDavid Greenman 
8350d94caffSDavid Greenman 	if (cnt.v_page_count > 1024)
8360d94caffSDavid Greenman 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
8370d94caffSDavid Greenman 	else
8380d94caffSDavid Greenman 		cnt.v_free_min = 4;
839ed74321bSDavid Greenman 	/*
8400d94caffSDavid Greenman 	 * free_reserved needs to include enough for the largest swap pager
8410d94caffSDavid Greenman 	 * structures plus enough for any pv_entry structs when paging.
842ed74321bSDavid Greenman 	 */
843f6b04d2bSDavid Greenman 	cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024 +
844f6b04d2bSDavid Greenman 				cnt.v_interrupt_free_min;
84561f5d510SDavid Greenman 	cnt.v_free_reserved = cnt.v_pageout_free_min + 6;
8460d94caffSDavid Greenman 	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
84726f9a767SRodney W. Grimes 	cnt.v_free_min += cnt.v_free_reserved;
8486f2b142eSDavid Greenman 
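	/*
	 * Size the page cache targets.  Machines with more than 1024 pages
	 * scale v_cache_max, v_cache_min and v_inactive_target from the free
	 * page count; smaller machines run without a cache target and aim to
	 * keep a quarter of their pages inactive.
	 */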
8490d94caffSDavid Greenman 	if (cnt.v_page_count > 1024) {
8500d94caffSDavid Greenman 		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
8516f2b142eSDavid Greenman 		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
8526f2b142eSDavid Greenman 		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
8530d94caffSDavid Greenman 	} else {
8540d94caffSDavid Greenman 		cnt.v_cache_min = 0;
8550d94caffSDavid Greenman 		cnt.v_cache_max = 0;
8566f2b142eSDavid Greenman 		cnt.v_inactive_target = cnt.v_free_count / 4;
8570d94caffSDavid Greenman 	}
858df8bae1dSRodney W. Grimes 
859df8bae1dSRodney W. Grimes 	/* XXX does not really belong here */
860df8bae1dSRodney W. Grimes 	if (vm_page_max_wired == 0)
861df8bae1dSRodney W. Grimes 		vm_page_max_wired = cnt.v_free_count / 3;
862df8bae1dSRodney W. Grimes 
86326f9a767SRodney W. Grimes 
86424a1cce3SDavid Greenman 	swap_pager_swap_init();
865df8bae1dSRodney W. Grimes 	/*
8660d94caffSDavid Greenman 	 * The pageout daemon is never done, so loop forever.
867df8bae1dSRodney W. Grimes 	 */
868df8bae1dSRodney W. Grimes 	while (TRUE) {
869f919ebdeSDavid Greenman 		int s = splhigh();
870f919ebdeSDavid Greenman 
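		/*
		 * Sleep until a page shortage is signalled.  If no shortage
		 * is pending, or free memory is still above the reserved and
		 * minimum thresholds, block here; each wakeup is followed by
		 * one scan pass below.
		 */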
871f919ebdeSDavid Greenman 		if (!vm_pages_needed ||
872f919ebdeSDavid Greenman 			((cnt.v_free_count >= cnt.v_free_reserved) &&
873f919ebdeSDavid Greenman 			 (cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
874f919ebdeSDavid Greenman 			vm_pages_needed = 0;
87524a1cce3SDavid Greenman 			tsleep(&vm_pages_needed, PVM, "psleep", 0);
876f919ebdeSDavid Greenman 		}
877f919ebdeSDavid Greenman 		vm_pages_needed = 0;
878f919ebdeSDavid Greenman 		splx(s);
879a58d1fa1SDavid Greenman 		cnt.v_pdwakeups++;
880df8bae1dSRodney W. Grimes 		vm_pager_sync();
8810d94caffSDavid Greenman 		vm_pageout_scan();
88226f9a767SRodney W. Grimes 		vm_pager_sync();
88324a1cce3SDavid Greenman 		wakeup(&cnt.v_free_count);
88424a1cce3SDavid Greenman 		wakeup(kmem_map);
885df8bae1dSRodney W. Grimes 	}
886df8bae1dSRodney W. Grimes }
88726f9a767SRodney W. Grimes 
8882b14f991SJulian Elischer static void
8894f9fb771SBruce Evans vm_daemon()
8900d94caffSDavid Greenman {
8912fe6e4d7SDavid Greenman 	vm_object_t object;
8922fe6e4d7SDavid Greenman 	struct proc *p;
8930d94caffSDavid Greenman 
8942fe6e4d7SDavid Greenman 	while (TRUE) {
89524a1cce3SDavid Greenman 		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
8964c1f8ee9SDavid Greenman 		if (vm_pageout_req_swapout) {
8976306c897SDavid Greenman 			swapout_procs();
8984c1f8ee9SDavid Greenman 			vm_pageout_req_swapout = 0;
8994c1f8ee9SDavid Greenman 		}
9002fe6e4d7SDavid Greenman 		/*
9010d94caffSDavid Greenman 		 * scan the processes; if a process exceeds its rlimits or is
9020d94caffSDavid Greenman 		 * swapped out, deactivate some of its pages
9032fe6e4d7SDavid Greenman 		 */
9042fe6e4d7SDavid Greenman 
9052fe6e4d7SDavid Greenman 		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
9062fe6e4d7SDavid Greenman 			int overage;
9072fe6e4d7SDavid Greenman 			quad_t limit;
9082fe6e4d7SDavid Greenman 			vm_offset_t size;
9092fe6e4d7SDavid Greenman 
9102fe6e4d7SDavid Greenman 			/*
9112fe6e4d7SDavid Greenman 			 * if this is a system process or the process is
9122fe6e4d7SDavid Greenman 			 * exiting, skip it.
9132fe6e4d7SDavid Greenman 			 */
9142fe6e4d7SDavid Greenman 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
9152fe6e4d7SDavid Greenman 				continue;
9162fe6e4d7SDavid Greenman 			}
9172fe6e4d7SDavid Greenman 			/*
9182fe6e4d7SDavid Greenman 			 * if the process is in a non-running type state,
9192fe6e4d7SDavid Greenman 			 * don't touch it.
9202fe6e4d7SDavid Greenman 			 */
9212fe6e4d7SDavid Greenman 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
9222fe6e4d7SDavid Greenman 				continue;
9232fe6e4d7SDavid Greenman 			}
9242fe6e4d7SDavid Greenman 			/*
9252fe6e4d7SDavid Greenman 			 * get a limit
9262fe6e4d7SDavid Greenman 			 */
9272fe6e4d7SDavid Greenman 			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
9282fe6e4d7SDavid Greenman 			    p->p_rlimit[RLIMIT_RSS].rlim_max);
9292fe6e4d7SDavid Greenman 
9302fe6e4d7SDavid Greenman 			/*
9310d94caffSDavid Greenman 			 * let processes that are swapped out really be
9320d94caffSDavid Greenman 			 * swapped out: set the limit to nothing (this will
9330d94caffSDavid Greenman 			 * force a swap-out).
9342fe6e4d7SDavid Greenman 			 */
9352fe6e4d7SDavid Greenman 			if ((p->p_flag & P_INMEM) == 0)
9360d94caffSDavid Greenman 				limit = 0;	/* XXX */
9372fe6e4d7SDavid Greenman 
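			/*
			 * If the resident set size exceeds the limit,
			 * deactivate enough pages (the overage, in pages)
			 * from the process's map to bring it back under
			 * the limit.
			 */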
9382fe6e4d7SDavid Greenman 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
9392fe6e4d7SDavid Greenman 			if (limit >= 0 && size >= limit) {
940a5eb0e27SPoul-Henning Kamp 				overage = (size - limit) >> PAGE_SHIFT;
9412fe6e4d7SDavid Greenman 				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
9422fe6e4d7SDavid Greenman 				    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
9432fe6e4d7SDavid Greenman 			}
9442fe6e4d7SDavid Greenman 		}
9452fe6e4d7SDavid Greenman 
9460d94caffSDavid Greenman 		/*
9470d94caffSDavid Greenman 		 * we remove cached objects that have no RSS...
9480d94caffSDavid Greenman 		 */
9490d94caffSDavid Greenman restart:
9502fe6e4d7SDavid Greenman 		object = vm_object_cached_list.tqh_first;
9512fe6e4d7SDavid Greenman 		while (object) {
9522fe6e4d7SDavid Greenman 			/*
9532fe6e4d7SDavid Greenman 			 * if there are no resident pages -- get rid of the object
9542fe6e4d7SDavid Greenman 			 */
9552fe6e4d7SDavid Greenman 			if (object->resident_page_count == 0) {
95624a1cce3SDavid Greenman 				vm_object_reference(object);
9572fe6e4d7SDavid Greenman 				pager_cache(object, FALSE);
9582fe6e4d7SDavid Greenman 				goto restart;
9592fe6e4d7SDavid Greenman 			}
9602fe6e4d7SDavid Greenman 			object = object->cached_list.tqe_next;
9612fe6e4d7SDavid Greenman 		}
96224a1cce3SDavid Greenman 	}
9632fe6e4d7SDavid Greenman }
964