xref: /freebsd/sys/vm/vm_pageout.c (revision d50c19943070845750cecfaf32cde1a31c047491)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.141 1999/04/23 20:29:57 dt Exp $
 */

/*
 *	The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct	proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

static int max_page_launder=100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to page out OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

#if 0
	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;
#endif

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

#if 0
	/*
	 * XXX REMOVED XXX.  vm_object_collapse() can block, which can
	 * change the page state.  Calling vm_object_collapse() might also
	 * destroy or rename the page because we have not busied it yet!!!
	 * So this code segment is removed.
	 */
	/*
	 * Try collapsing before it's too late.   XXX huh?  Why are we doing
	 * this here?
	 */
	if (object->backing_object) {
		vm_object_collapse(object);
	}
#endif

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
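	/*
	 * Layout of the clustering window (a sketch of what the code below
	 * maintains): mc[] has 2*vm_pageout_page_count slots.  The target
	 * page sits at mc[vm_pageout_page_count]; pages found by the
	 * forward scan fill the slots above it, pages found by the backward
	 * scan fill the slots below it, and page_base tracks the lowest
	 * occupied slot so a contiguous run can be handed to
	 * vm_pageout_flush().
	 */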
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if the page is NOT clean, wired, busy, held,
	 * or mapped into a buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
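				/*
				 * Note: dirty is masked with valid below so
				 * that only dirty bits backed by valid data
				 * count; dirt in an invalid region has
				 * nothing worth writing.
				 */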
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e. busy the page), mark it read-only, and bump the object
 *	paging-in-progress count all in here rather than in the parent.  If
 *	we want the parent to do more sophisticated things we may have to
 *	change the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 */

	for (i = 0; i < count; i++) {
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

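	/*
	 * Pageouts of the kernel object are forced synchronous
	 * (OBJPC_SYNC); everything else is issued with the caller's flags
	 * and may complete asynchronously, which is reported back as
	 * VM_PAGER_PEND below.
	 */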
	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements; or, if vm_page_proc_limit is set, deactivate
 *	all of the pages in the object and its backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

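		/*
		 * If the object is shadowed by more than one object, other
		 * mappings may still depend on its pages, so restrict
		 * ourselves to removing mappings rather than deactivating
		 * pages outright.
		 */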
		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
				(p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
				((bigobj == NULL) ||
				 (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

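	/*
	 * Hold a temporary reference so the object cannot be torn down
	 * while the page is freed.  This is only safe for OBJT_SWAP and
	 * OBJT_DEFAULT objects; referencing a vnode-backed object here
	 * could entangle us in the vnode locking this function is trying
	 * to avoid.
	 */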
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	int launder_loop = 0;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */

	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage_init;
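
	/*
	 * In other words: this pass must recover the gap between the
	 * steady-state goal (v_free_target + v_cache_min) and what is
	 * actually free or cached right now, plus any pages already
	 * promised to allocators (the deficit accumulated since the
	 * last pass).
	 */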

	/*
	 * Figure out what to do with dirty pages when they are encountered.
	 * Assume that 1/3 of the pages on the inactive list are clean.  If
	 * we think we can reach our target, disable laundering (do not
	 * clean any dirty pages).  If we miss the target we will loop back
	 * up and do a laundering run.
	 */

	if (cnt.v_inactive_count / 3 > page_shortage) {
		maxlaunder = 0;
		launder_loop = 0;
	} else {
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		launder_loop = 1;
	}

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (
	    m = TAILQ_FIRST(&vm_page_queue_inactive);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next
	) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system knows nothing about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place the page back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

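			/*
			 * Swap-backed pageouts can be deferred (performed
			 * only when free memory is critically low) or
			 * disabled outright via the
			 * vm.defer_swapspace_pageouts and
			 * vm.disable_swapspace_pageouts sysctls; other
			 * object types are always eligible.
			 */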
			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
					(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * For now we protect against potential memory
			 * deadlocks by requiring significant memory to be
			 * free if the object is not OBJT_DEFAULT or OBJT_SWAP.
			 * We do not 'trust' any other object type to operate
			 * with low memory, not even OBJT_DEVICE.  The VM
			 * allocator will special case allocations done by
			 * the pageout daemon so the check below actually
			 * does have some hysteresis in it.  It isn't the best
			 * solution, though.
			 */

			if (
			    object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP &&
			    cnt.v_free_count < cnt.v_free_reserved
			) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * Presumably we have sufficient free memory to do
			 * the more sophisticated checks and locking required
			 * for vnodes.
			 *
			 * The object is already known NOT to be dead.  The
			 * vget() may still block, though, because
			 * VOP_ISLOCKED() doesn't check to see if an inode
			 * (v_data) is associated with the vnode.  If it isn't,
			 * vget() will load it in from disk.  Worse, vget()
			 * may actually get stuck waiting on "inode" if another
			 * process is in the process of bringing the inode in.
			 * This is bad news for us either way.
			 *
			 * So for the moment we check v_data == NULL as a
			 * workaround.  This means that vnodes which do not
			 * use v_data in the way we expect probably will not
			 * wind up being paged out by the pager and it will be
			 * up to the syncer to get them.  That's better than
			 * us blocking here.
			 *
			 * This whole code section is bogus - we need to fix
			 * the vnode pager to handle vm_page_t's without us
			 * having to do any sophisticated VOP tests.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				if (VOP_ISLOCKED(vp) ||
				    vp->v_data == NULL ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
						(m->hold_count == 0) &&
						(m->busy == 0) &&
						(m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another queue
				 * during potential blocking in vget() above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied while we were
				 * blocked in vget() above.  We don't move the
				 * page back onto the end of the queue, which
				 * keeps the statistics more correct.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * If we still have a page shortage and we didn't launder anything,
	 * run the inactive scan again and launder something this time.
	 */

	if (launder_loop == 0 && page_shortage > 0) {
		launder_loop = 1;
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		goto rescan0;
	}

	/*
	 * Compute the page shortage from the point of view of having to
	 * move pages from the active queue to the inactive queue.
	 */

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
				(m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
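		/*
		 * The rover walks the PQ_CACHE sub-queues in a prime stride
		 * (PQ_PRIME2) so that pages of every color are drained
		 * roughly evenly, rather than one sub-queue being emptied
		 * first.
		 */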
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
		(cnt.v_free_target + cnt.v_cache_min) ) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			(void) speedup_syncer();
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * Make sure that we have swap space -- if we are low on both memory
	 * and swap, then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * If this is a system process, skip it.  init, held
			 * processes, and (while swap remains) low-pid
			 * processes are also exempt.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * Get the process size.
			 */
			size = vmspace_resident_count(p->p_vmspace);
			/*
			 * If this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods without paging, some statistic accumulation still
 * occurs.  This code helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}
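	/*
	 * Between full-stats intervals, only a portion of the active queue,
	 * scaled by vm_pageout_stats_max, is examined; once the full
	 * interval has elapsed, the entire queue is scanned.
	 */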

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
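	/*
	 * A worked example (a sketch, assuming 4K pages, a 64K MAXBSIZE,
	 * and a 16384-page machine): v_free_min starts at
	 * 4 + (16384 - 1024) / 200 = 80 pages; v_pageout_free_min is
	 * 2 * 64K / 4K + v_interrupt_free_min = 32 + 2 = 34 pages; and
	 * v_free_reserved is 16 + 34 + 16384 / 768 + PQ_L2_SIZE
	 * = 71 + PQ_L2_SIZE pages.  v_free_min then grows by
	 * v_free_reserved below.
	 */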
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
		cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * Scale v_free_target with the amount of memory: be more generous
	 * on larger machines.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;
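
	/*
	 * The net effect: v_free_target sits a small multiple above
	 * v_free_min, v_cache_min tracks v_free_target on machines with
	 * enough memory, and v_inactive_target is capped at a third of
	 * the initially free pages.
	 */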

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();
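
		/*
		 * Two sleep modes: when memory is not scarce, sleep for the
		 * stats interval and run vm_pageout_page_stats() on timeout;
		 * when a shortage persists across passes, nap briefly (hz/2)
		 * so back-to-back scans don't monopolize the CPU.
		 */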
		if (!vm_pages_needed ||
			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
				PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pageout_scan();
		vm_pageout_deficit = 0;
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

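	/*
	 * Rate-limit vmdaemon wakeups to roughly one per second; the
	 * second test restarts the limiter if the ticks counter wraps.
	 */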
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes for exceeding their rlimits, or if a
		 * process is swapped out -- deactivate pages.
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or one that is
			 * exiting, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * Get a limit.
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));
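			/*
			 * (The limit is in pages, via OFF_TO_IDX; qmin()
			 * guards against a soft rlimit set above the hard
			 * limit.)
			 */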

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out of their pages.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
	}
}
#endif