xref: /freebsd/sys/vm/vm_pageout.c (revision 1b6c76a2fe091c74f08427e6c870851025a9cf67)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static void vm_pageout_scan __P((int pass));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
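/*
 * The SYSINIT above causes kproc_start() to fork the "pagedaemon"
 * kernel process at SI_SUB_KTHREAD_PAGE; it runs vm_pageout() below.
 */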

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif

int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
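/*
 * A clustered pageout flushes up to vm_pageout_page_count pages at a
 * time; vm_pageout() lowers this to 8 on small-memory systems.
 */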

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and until then we cannot do anything that would mess with the
 * page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_mtx, MA_OWNED);
	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;
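	/*
	 * m sits at the midpoint of mc[]: the reverse scan below fills
	 * slots downward from page_base and the forward scan fills
	 * upward, so the final cluster is contiguous in mc[].
	 */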

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do
	 * the reverse scan first and attempt to align our
	 * cluster, then do a forward scan if room remains.
	 */

more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e. busy the page), mark it read-only, and bump the object's
 *	paging-in-progress count all in here rather than in the parent.  If
 *	we want the parent to do more sophisticated things we may have to
 *	change the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_mtx, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */

	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush page %p index %d/%d: partially invalid page",
		    mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

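	/*
	 * Pageouts to the kernel object are forced synchronous; all
	 * others may complete asynchronously, returning VM_PAGER_PEND.
	 */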
	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
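			/*
			 * The write is still in flight; count it as paged
			 * out, the pager completes it asynchronously.
			 */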
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, reactivate it
			 * so it doesn't clog the inactive list.  (We will
			 * try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements; or, if vm_page_proc_limit is set, deactivate
 *	all of the pages in the object and its backing_objects.
 *
 *	The object and map must be locked.
 *
 *	Requires the vm_mtx.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	mtx_assert(&vm_mtx, MA_OWNED);
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}

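			/*
			 * Harvest and clear the hardware reference bits via
			 * the pmap layer; fold the result into the software
			 * PG_REFERENCED flag.
			 */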
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
				(p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, though
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	mtx_assert(&vm_mtx, MA_OWNED);
	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
				((bigobj == NULL) ||
				 (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	mtx_assert(&vm_mtx, MA_OWNED);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;

	mtx_assert(&Giant, MA_OWNED);
	mtx_assert(&vm_mtx, MA_OWNED);
	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;
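	/*
	 * The marker is a fictitious page used only to record a queue
	 * position across blocking operations; PG_MARKER makes queue
	 * scans skip it, and the nonzero wire count guards against it
	 * ever being freed.
	 */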

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */

	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; they stay at the front of
		 * the queue and are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			(actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are
		 * less likely to place the page back onto the inactive
		 * queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			s = splvm();
			vm_page_flag_set(m, PG_WINATCFLS);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts &&
				    defer_swap_pageouts && vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * XXX we need to be able to apply a timeout to the
			 * vget() lock attempt.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				mtx_unlock(&vm_mtx);
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					vn_finished_write(mp);
					mtx_lock(&vm_mtx);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}
				mtx_lock(&vm_mtx);

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					mtx_unlock(&vm_mtx);
					vput(vp);
					vn_finished_write(mp);
					mtx_lock(&vm_mtx);
					continue;
				}

				/*
				 * The page may have been busied while we
				 * were blocked in vget() above.  We don't
				 * move the page back onto the end of the
				 * queue; the statistics are more correct
				 * if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					mtx_unlock(&vm_mtx);
					vput(vp);
					vn_finished_write(mp);
					mtx_lock(&vm_mtx);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					mtx_unlock(&vm_mtx);
					vput(vp);
					vn_finished_write(mp);
					mtx_lock(&vm_mtx);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * Decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				mtx_unlock(&vm_mtx);
				vput(vp);
				vn_finished_write(mp);
				mtx_lock(&vm_mtx);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
		cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
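		/*
		 * cache_rover sweeps the PQ_CACHE queues; the PQ_PRIME2
		 * stride below is chosen so that, over time, every cache
		 * queue (page color) gets visited.
		 */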
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		mtx_unlock(&vm_mtx);
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		mtx_lock(&vm_mtx);
		LIST_FOREACH(p, &allproc, p_list) {
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace) +
				vmspace_swap_count(p->p_vmspace);
			/*
			 * If this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}

/*
 * This routine tries to maintain the pseudo-LRU active queue so that
 * some statistics accumulation still occurs during long periods when
 * there is no paging.  It helps the situation where paging just starts
 * to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

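	/*
	 * There is nothing to do unless the free + inactive + cache page
	 * total has dipped below the combined targets.
	 */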
	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
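	/*
	 * For example, assuming 4K pages, a 128MB machine has 32768
	 * pages, giving a base v_free_min of 4 + 31744 / 200 = 162 pages
	 * before v_free_reserved is folded in below.
	 */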
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
		cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int pass;

	mtx_lock(&Giant);
	mtx_lock(&vm_mtx);

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than of the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;
	mtx_unlock(&vm_mtx);

	PROC_LOCK(curproc);
	curproc->p_flag |= P_BUFEXHAUST;
	PROC_UNLOCK(curproc);
	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	mtx_lock(&vm_mtx);
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done; take a second pass without
			 * waiting (unlimited dirty cleaning), otherwise
			 * sleep a bit and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed, &vm_mtx, PVM,
				       "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed, &vm_mtx,
				PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan(pass);
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;
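	/*
	 * Wake the vm_daemon at most about once per second; the second
	 * clause of the test restarts the interval if the ticks counter
	 * has wrapped around.
	 */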

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	mtx_lock(&Giant);
	while (TRUE) {
		mtx_lock(&vm_mtx);
		msleep(&vm_daemon_needed, &vm_mtx, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			mtx_assert(&vm_mtx, MA_OWNED);
			vm_pageout_req_swapout = 0;
		}
		mtx_unlock(&vm_mtx);
		/*
		 * Scan the processes for those exceeding their rlimits
		 * or swapped out -- deactivate their pages.
		 */

		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process, or the process is
			 * exiting, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			mtx_lock(&vm_mtx);
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_unlock_spin(&sched_lock);
				mtx_unlock(&vm_mtx);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out; set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			mtx_unlock_spin(&sched_lock);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
			mtx_unlock(&vm_mtx);
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif