/* xref: /freebsd/sys/vm/vm_pageout.c (revision 77a0943ded95b9e6438f7db70c4a28e4d93946d4) */
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct	proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif
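
/*
 * The kproc_desc/SYSINIT pairs above hand each descriptor to kproc_start()
 * at SI_SUB_KTHREAD_PAGE/SI_SUB_KTHREAD_VM time; kproc_start() forks a
 * kernel-only process that begins execution in the listed function and
 * stores its struct proc pointer in the named variable.
 */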


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

static int max_page_launder=100;
static int vm_pageout_actcmp=0;
#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_actcmp,
	CTLFLAG_RD, &vm_pageout_actcmp, 0, "pagedaemon aggressiveness");


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and until then we cannot do anything that will mess with the page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do
	 * the reverse scan first and attempt to align our
	 * cluster, then do a forward scan if room remains.
	 */

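	/*
	 * The cluster is assembled in mc[], an array of
	 * 2 * vm_pageout_page_count slots, with the target page seeded
	 * into the middle slot.  For example, with
	 * vm_pageout_page_count == 16:
	 *
	 *	slot:	0 ... 15 | 16 | 17 ... 31
	 *
	 * m goes into slot 16; the reverse scan fills slots 15, 14, ...
	 * via mc[--page_base], and the forward scan fills slots 17, 18,
	 * ... via mc[page_base + pageout_count].  Thus &mc[page_base]
	 * always points at a run of pageout_count pages that are
	 * contiguous in the object.
	 */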
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 */

	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL &&
		    mc[i]->dirty == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush page %p index %d/%d: partially dirty page",
		    mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
				(p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode &&
					    (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
				((bigobj == NULL) ||
				 (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCKs and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}
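
/*
 * The temporary reference taken above for OBJT_SWAP/OBJT_DEFAULT objects
 * keeps the object from being torn down while its page is being freed;
 * vm_object_deallocate() then drops the reference, possibly destroying an
 * object that just lost its last page.
 */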

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.  Be more aggressive if we aren't making our target.
	 */

	page_shortage = vm_paging_target() +
		addl_page_shortage_init + vm_pageout_actcmp;

	/*
	 * Figure out how aggressively we should flush dirty pages.
	 */
	{
		int factor = vm_pageout_actcmp;

		maxlaunder = cnt.v_inactive_target / 3 + factor;
		if (maxlaunder > max_page_launder + factor)
			maxlaunder = max_page_launder + factor;
	}
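
	/*
	 * For example, with cnt.v_inactive_target == 512, max_page_launder
	 * == 32 and vm_pageout_actcmp == 0 (illustrative numbers), this
	 * computes maxlaunder = min(512 / 3, 32) = 32, i.e. at most 32
	 * successful vm_pageout_clean() calls (clusters, not single pages)
	 * per pass.
	 */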

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;
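
	/*
	 * The marker is a fake page that other code never touches:
	 * PG_FICTITIOUS and the wire_count keep it from being freed or
	 * moved, and PG_MARKER makes this scan skip over it.  When a
	 * laundering operation below may block, the marker is inserted
	 * after the current page so that TAILQ_NEXT(&marker, pageq) can
	 * recover the scan position afterwards, even if the page itself
	 * was freed or requeued in the meantime.
	 */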

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			(actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts &&
				    defer_swap_pageouts && vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * Presumably we have sufficient free memory to do
			 * the more sophisticated checks and locking required
			 * for vnodes.
			 *
			 * The object is already known NOT to be dead.  The
			 * vget() may still block, though, because
			 * VOP_ISLOCKED() doesn't check to see if an inode
			 * (v_data) is associated with the vnode.  If it isn't,
			 * vget() will load it in from disk.  Worse, vget()
			 * may actually get stuck waiting on "inode" if another
			 * process is in the process of bringing the inode in.
			 * This is bad news for us either way.
			 *
			 * So for the moment we check v_data == NULL as a
			 * workaround.  This means that vnodes which do not
			 * use v_data in the way we expect probably will not
			 * wind up being paged out by the pager and it will be
			 * up to the syncer to get them.  That's better than
			 * us blocking here.
			 *
			 * This whole code section is bogus - we need to fix
			 * the vnode pager to handle vm_page_t's without us
			 * having to do any sophisticated VOP tests.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (VOP_ISLOCKED(vp, NULL) ||
				    vp->v_data == NULL ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					vn_finished_write(mp);
					if ((m->queue == PQ_INACTIVE) &&
						(m->hold_count == 0) &&
						(m->busy == 0) &&
						(m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue so
				 * that statistics are more correct if we
				 * don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.  maxlaunder nominally
			 * counts I/O cost (seeks) rather than bytes.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0)
				--maxlaunder;
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * If we were not able to meet our target, increase actcmp
	 */

	if (vm_page_count_min()) {
		if (vm_pageout_actcmp < ACT_MAX / 2)
			vm_pageout_actcmp += ACT_ADVANCE;
	} else {
		if (vm_pageout_actcmp < ACT_DECLINE)
			vm_pageout_actcmp = 0;
		else
			vm_pageout_actcmp -= ACT_DECLINE;
	}
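
	/*
	 * vm_pageout_actcmp acts as feedback: raising it here both inflates
	 * the next pass's page_shortage and lets the active-queue scan
	 * below deactivate pages with act_count values up to
	 * vm_pageout_actcmp.  It decays by ACT_DECLINE per pass once the
	 * minimum free target is being met.
	 */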

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */

	page_shortage = vm_paging_target() +
		cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;
	page_shortage += vm_pageout_actcmp;

	/*
	 * Scan the active queue for things we can deactivate. We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) ||
			    (m->act_count <= vm_pageout_actcmp)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

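	/*
	 * PQ_CACHE is split into PQ_L2_SIZE sub-queues, one per page color.
	 * cache_rover below steps through them with a stride of PQ_PRIME2
	 * (prime, so every color is eventually visited) masked by
	 * PQ_L2_MASK, which spreads the frees across colors instead of
	 * draining one color's cache queue completely.
	 */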
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages and we skipped a vnode in a
	 * writeable object, wake up the sync daemon.  Also kick off a
	 * swapout if we failed to reach the target.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * Make sure that we have swap space -- if we are low on both memory
	 * and swap, then kill the biggest process.
	 */
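	/*
	 * Victim selection is a simple biggest-process heuristic: killing
	 * the process with the largest resident set should free the most
	 * memory.  System processes, init, processes locked against
	 * swapout, and (while any swap remains) low-pid daemons are exempt.
	 */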
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
		bigproc = NULL;
		bigsize = 0;
		lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one
			 * so far, remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods of time when there is no paging, some statistics
 * accumulation still occurs.  This code helps the situation where paging
 * just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

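	/*
	 * Between full scans only a slice of the active queue proportional
	 * to its share of memory is examined: e.g. (illustratively) with
	 * vm_pageout_stats_max == 1024 and the active queue holding a
	 * quarter of all pages, tpcount = 1024 / 4 = 256 pages per partial
	 * scan.  Once fullintervalcount reaches
	 * vm_pageout_full_stats_interval, the entire queue is scanned and
	 * the counter resets.
	 */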
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM-wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
		cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}
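
/*
 * Worked example for vm_pageout_free_page_calc() (illustrative; assumes
 * 4K pages, a 64K MAXBSIZE, vm_pageout_page_count of 16, and the
 * v_interrupt_free_min of 2 set in vm_pageout() below): with 32768 pages
 * (128MB),
 *
 *	v_free_min         = 4 + (32768 - 1024) / 200 = 162
 *	v_pageout_free_min = (2 * 65536) / 4096 + 2   = 34
 *	v_free_reserved    = 16 + 34 + 32768 / 768 + PQ_L2_SIZE
 *	                   = 92 + PQ_L2_SIZE  (configuration-dependent)
 *
 * after which v_free_severe becomes 81 + v_free_reserved and the final
 * v_free_min becomes 162 + v_free_reserved.
 */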


/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{

	mtx_enter(&Giant, MTX_DEF);

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * Scale the free target with the amount of memory available at
	 * boot.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	curproc->p_flag |= P_BUFEXHAUST;
	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, sleep a bit and go again
			 */
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats
			 */
			error = tsleep(&vm_pages_needed,
				PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				if (vm_pageout_actcmp > 0)
					--vm_pageout_actcmp;
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan();
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}
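
/*
 * The handshake uses two wait channels: memory consumers call
 * pagedaemon_wakeup(), setting vm_pages_needed and waking the daemon
 * sleeping on &vm_pages_needed; the daemon in turn does a wakeup on
 * &cnt.v_free_count once enough memory is free.  The curproc check keeps
 * the pageout daemon from trying to wake itself.
 */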

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}
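
/*
 * This limits vmdaemon wakeups to roughly one per second: "ticks >
 * lastrun + hz" spaces the requests, while the "ticks < lastrun" test
 * re-arms the limiter if the tick counter has wrapped around.
 */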

static void
vm_daemon()
{
	struct proc *p;

	mtx_enter(&Giant, MTX_DEF);

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes for exceeding their rlimits, or if a
		 * process is swapped out -- deactivate pages.
		 */

		lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or a process that is
			 * already exiting, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));
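
			/*
			 * Example (assuming 4K pages): an RSS rlimit of 8MB
			 * yields limit = OFF_TO_IDX(8388608) = 2048 pages;
			 * any process resident above that is trimmed back by
			 * vm_pageout_map_deactivate_pages() below.
			 */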

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out.)
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
	}
}
#endif
1490