xref: /freebsd/sys/vm/vm_pageout.c (revision f9218d3d4fd34f082473b3a021c6d4d109fb47cf)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41  *
42  *
43  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44  * All rights reserved.
45  *
46  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47  *
48  * Permission to use, copy, modify and distribute this software and
49  * its documentation is hereby granted, provided that both the copyright
50  * notice and this permission notice appear in all copies of the
51  * software, derivative works or modified versions, and any portions
52  * thereof, and that both notices appear in supporting documentation.
53  *
54  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57  *
58  * Carnegie Mellon requests users of this software to return to
59  *
60  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61  *  School of Computer Science
62  *  Carnegie Mellon University
63  *  Pittsburgh PA 15213-3890
64  *
65  * any improvements or extensions that they make and grant Carnegie the
66  * rights to redistribute these changes.
67  *
68  * $FreeBSD$
69  */
70 
71 /*
72  *	The proverbial page-out daemon.
73  */
74 
75 #include "opt_vm.h"
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/eventhandler.h>
80 #include <sys/lock.h>
81 #include <sys/mutex.h>
82 #include <sys/proc.h>
83 #include <sys/kthread.h>
84 #include <sys/ktr.h>
85 #include <sys/resourcevar.h>
86 #include <sys/sched.h>
87 #include <sys/signalvar.h>
88 #include <sys/vnode.h>
89 #include <sys/vmmeter.h>
90 #include <sys/sx.h>
91 #include <sys/sysctl.h>
92 
93 #include <vm/vm.h>
94 #include <vm/vm_param.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_page.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_pager.h>
100 #include <vm/swap_pager.h>
101 #include <vm/vm_extern.h>
102 #include <vm/uma.h>
103 
104 #include <machine/mutex.h>
105 
106 /*
107  * System initialization
108  */
109 
110 /* the kernel process "vm_pageout" */
111 static void vm_pageout(void);
112 static int vm_pageout_clean(vm_page_t);
113 static void vm_pageout_page_free(vm_page_t);
114 static void vm_pageout_pmap_collect(void);
115 static void vm_pageout_scan(int pass);
116 static int vm_pageout_free_page_calc(vm_size_t count);
117 struct proc *pageproc;
118 
119 static struct kproc_desc page_kp = {
120 	"pagedaemon",
121 	vm_pageout,
122 	&pageproc
123 };
124 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
125 
126 #if !defined(NO_SWAPPING)
127 /* the kernel process "vm_daemon" */
128 static void vm_daemon(void);
129 static struct	proc *vmproc;
130 
131 static struct kproc_desc vm_kp = {
132 	"vmdaemon",
133 	vm_daemon,
134 	&vmproc
135 };
136 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
137 #endif
138 
139 
140 int vm_pages_needed;		/* Event on which pageout daemon sleeps */
141 int vm_pageout_deficit;		/* Estimated number of pages deficit */
142 int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
143 
144 #if !defined(NO_SWAPPING)
145 static int vm_pageout_req_swapout;	/* XXX */
146 static int vm_daemon_needed;
147 #endif
148 static int vm_max_launder = 32;
149 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
150 static int vm_pageout_full_stats_interval = 0;
151 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
152 static int defer_swap_pageouts=0;
153 static int disable_swap_pageouts=0;
154 
155 #if defined(NO_SWAPPING)
156 static int vm_swap_enabled=0;
157 static int vm_swap_idle_enabled=0;
158 #else
159 static int vm_swap_enabled=1;
160 static int vm_swap_idle_enabled=0;
161 #endif
162 
163 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
164 	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
165 
166 SYSCTL_INT(_vm, OID_AUTO, max_launder,
167 	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
168 
169 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
170 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
171 
172 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
173 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
174 
175 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
176 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
177 
178 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
179 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
180 
181 #if defined(NO_SWAPPING)
182 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
183 	CTLFLAG_RD, &vm_swap_enabled, 0, "");
184 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
185 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
186 #else
187 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
188 	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
189 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
190 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
191 #endif
192 
193 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
194 	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
195 
196 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
197 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
198 
199 static int pageout_lock_miss;
200 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
201 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
202 
203 #define VM_PAGEOUT_PAGE_COUNT 16
204 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
205 
206 int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
207 
208 #if !defined(NO_SWAPPING)
209 typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
210 static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
211 static freeer_fcn_t vm_pageout_object_deactivate_pages;
212 static void vm_req_vmdaemon(void);
213 #endif
214 static void vm_pageout_page_stats(void);
215 
216 /*
217  * vm_pageout_clean:
218  *
219  * Clean the page and remove it from the laundry.
220  *
221  * We set the busy bit to cause potential page faults on this page to
222  * block.  Note the careful timing, however: the busy bit isn't set until
223  * late, and we cannot do anything that will mess with the page before then.
224  */
225 static int
226 vm_pageout_clean(m)
227 	vm_page_t m;
228 {
229 	vm_object_t object;
230 	vm_page_t mc[2*vm_pageout_page_count];
231 	int pageout_count;
232 	int ib, is, page_base;
233 	vm_pindex_t pindex = m->pindex;
234 
235 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
236 
237 	object = m->object;
238 
239 	/*
240 	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
241 	 * with the new swapper, but we could have serious problems paging
242 	 * out other object types if there is insufficient memory.
243 	 *
244 	 * Unfortunately, checking free memory here is far too late, so the
245 	 * check has been moved up a procedural level.
246 	 */
247 
248 	/*
249 	 * Don't mess with the page if it's busy, held, or special
250 	 */
251 	if ((m->hold_count != 0) ||
252 	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
253 		return 0;
254 	}
255 
256 	mc[vm_pageout_page_count] = m;
257 	pageout_count = 1;
258 	page_base = vm_pageout_page_count;
259 	ib = 1;
260 	is = 1;
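
	/*
	 * Layout of the cluster array: mc[] has room for 2 *
	 * vm_pageout_page_count entries and the candidate page starts in
	 * the middle, at index vm_pageout_page_count.  The backward scan
	 * below fills slots downward from page_base while the forward scan
	 * fills upward, so the final flush covers the contiguous run
	 * mc[page_base .. page_base + pageout_count - 1].
	 */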
261 
262 	/*
263 	 * Scan object for clusterable pages.
264 	 *
265 	 * We can cluster ONLY if: ->> the page is NOT
266 	 * clean, wired, busy, held, or mapped into a
267 	 * buffer, and one of the following:
268 	 * 1) The page is inactive, or a seldom used
269 	 *    active page.
270 	 * -or-
271 	 * 2) we force the issue.
272 	 *
273 	 * During heavy mmap/modification loads the pageout
274 	 * daemon can really fragment the underlying file
275 	 * due to flushing pages out of order and not trying to
276 	 * align the clusters (which leaves sporadic out-of-order
277 	 * holes).  To solve this problem we do the reverse scan
278 	 * first and attempt to align our cluster, then do a
279 	 * forward scan if room remains.
280 	 */
281 more:
282 	while (ib && pageout_count < vm_pageout_page_count) {
283 		vm_page_t p;
284 
285 		if (ib > pindex) {
286 			ib = 0;
287 			break;
288 		}
289 
290 		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
291 			ib = 0;
292 			break;
293 		}
294 		if (((p->queue - p->pc) == PQ_CACHE) ||
295 		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
296 			ib = 0;
297 			break;
298 		}
299 		vm_page_test_dirty(p);
300 		if ((p->dirty & p->valid) == 0 ||
301 		    p->queue != PQ_INACTIVE ||
302 		    p->wire_count != 0 ||	/* may be held by buf cache */
303 		    p->hold_count != 0) {	/* may be undergoing I/O */
304 			ib = 0;
305 			break;
306 		}
307 		mc[--page_base] = p;
308 		++pageout_count;
309 		++ib;
310 		/*
311 		 * alignment boundary, stop here and switch directions.  Do
312 		 * not clear ib.
313 		 */
314 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
315 			break;
316 	}
317 
318 	while (pageout_count < vm_pageout_page_count &&
319 	    pindex + is < object->size) {
320 		vm_page_t p;
321 
322 		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
323 			break;
324 		if (((p->queue - p->pc) == PQ_CACHE) ||
325 		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
326 			break;
327 		}
328 		vm_page_test_dirty(p);
329 		if ((p->dirty & p->valid) == 0 ||
330 		    p->queue != PQ_INACTIVE ||
331 		    p->wire_count != 0 ||	/* may be held by buf cache */
332 		    p->hold_count != 0) {	/* may be undergoing I/O */
333 			break;
334 		}
335 		mc[page_base + pageout_count] = p;
336 		++pageout_count;
337 		++is;
338 	}
339 
340 	/*
341 	 * If we exhausted our forward scan, continue with the reverse scan
342 	 * when possible, even past a page boundary.  This catches boundary
343 	 * conditions.
344 	 */
345 	if (ib && pageout_count < vm_pageout_page_count)
346 		goto more;
347 
348 	/*
349 	 * we allow reads during pageouts...
350 	 */
351 	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
352 }
353 
354 /*
355  * vm_pageout_flush() - launder the given pages
356  *
357  *	The given pages are laundered.  Note that we set up for the start of
358  *	I/O (i.e. busy the page), mark it read-only, and bump the object
359  *	reference count all in here rather than in the parent.  If we want
360  *	the parent to do more sophisticated things we may have to change
361  *	the ordering.
362  */
363 int
364 vm_pageout_flush(mc, count, flags)
365 	vm_page_t *mc;
366 	int count;
367 	int flags;
368 {
369 	vm_object_t object;
370 	int pageout_status[count];
371 	int numpagedout = 0;
372 	int i;
373 
374 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
375 	/*
376 	 * Initiate I/O.  Bump the vm_page_t->busy counter and
377 	 * mark the pages read-only.
378 	 *
379 	 * We do not have to fixup the clean/dirty bits here... we can
380 	 * allow the pager to do it after the I/O completes.
381 	 *
382 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
383 	 * edge case with file fragments.
384 	 */
385 	for (i = 0; i < count; i++) {
386 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
387 		vm_page_io_start(mc[i]);
388 		pmap_page_protect(mc[i], VM_PROT_READ);
389 	}
390 	object = mc[0]->object;
391 	vm_page_unlock_queues();
392 	vm_object_pip_add(object, count);
393 
394 	vm_pager_put_pages(object, mc, count,
395 	    (flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
396 	    pageout_status);
397 
398 	vm_page_lock_queues();
399 	for (i = 0; i < count; i++) {
400 		vm_page_t mt = mc[i];
401 
402 		switch (pageout_status[i]) {
403 		case VM_PAGER_OK:
404 			numpagedout++;
405 			break;
406 		case VM_PAGER_PEND:
407 			numpagedout++;
408 			break;
409 		case VM_PAGER_BAD:
410 			/*
411 			 * Page outside of range of object. Right now we
412 			 * essentially lose the changes by pretending it
413 			 * worked.
414 			 */
415 			pmap_clear_modify(mt);
416 			vm_page_undirty(mt);
417 			break;
418 		case VM_PAGER_ERROR:
419 		case VM_PAGER_FAIL:
420 			/*
421 			 * If the page couldn't be paged out, then reactivate the
422 			 * page so it doesn't clog the inactive list.  (We
423 			 * will try paging it out again later.)
424 			 */
425 			vm_page_activate(mt);
426 			break;
427 		case VM_PAGER_AGAIN:
428 			break;
429 		}
430 
431 		/*
432 		 * If the operation is still going, leave the page busy to
433 		 * block all other accesses. Also, leave the paging in
434 		 * progress indicator set so that we don't attempt an object
435 		 * collapse.
436 		 */
437 		if (pageout_status[i] != VM_PAGER_PEND) {
438 			vm_object_pip_wakeup(object);
439 			vm_page_io_finish(mt);
440 			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
441 				pmap_page_protect(mt, VM_PROT_READ);
442 		}
443 	}
444 	return numpagedout;
445 }
446 
447 #if !defined(NO_SWAPPING)
448 /*
449  *	vm_pageout_object_deactivate_pages
450  *
451  *	deactivate enough pages to satisfy the inactive target
452  *	requirements, or, if vm_page_proc_limit is set,
453  *	deactivate all of the pages in the object and its
454  *	backing_objects.
455  *
456  *	The object and map must be locked.
457  */
458 static void
459 vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
460 	vm_map_t map;
461 	vm_object_t object;
462 	vm_pindex_t desired;
463 	int map_remove_only;
464 {
465 	vm_page_t p, next;
466 	int actcount, rcount, remove_mode;
467 
468 	GIANT_REQUIRED;
469 	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
470 		return;
471 
472 	while (object) {
473 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
474 			return;
475 		if (object->paging_in_progress)
476 			return;
477 
478 		remove_mode = map_remove_only;
479 		if (object->shadow_count > 1)
480 			remove_mode = 1;
481 		/*
482 		 * scan the object's entire memory queue
483 		 */
484 		rcount = object->resident_page_count;
485 		p = TAILQ_FIRST(&object->memq);
486 		vm_page_lock_queues();
487 		while (p && (rcount-- > 0)) {
488 			if (pmap_resident_count(map->pmap) <= desired) {
489 				vm_page_unlock_queues();
490 				return;
491 			}
492 			next = TAILQ_NEXT(p, listq);
493 			cnt.v_pdpages++;
494 			if (p->wire_count != 0 ||
495 			    p->hold_count != 0 ||
496 			    p->busy != 0 ||
497 			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
498 			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
499 				p = next;
500 				continue;
501 			}
502 			actcount = pmap_ts_referenced(p);
503 			if (actcount) {
504 				vm_page_flag_set(p, PG_REFERENCED);
505 			} else if (p->flags & PG_REFERENCED) {
506 				actcount = 1;
507 			}
508 			if ((p->queue != PQ_ACTIVE) &&
509 				(p->flags & PG_REFERENCED)) {
510 				vm_page_activate(p);
511 				p->act_count += actcount;
512 				vm_page_flag_clear(p, PG_REFERENCED);
513 			} else if (p->queue == PQ_ACTIVE) {
514 				if ((p->flags & PG_REFERENCED) == 0) {
515 					p->act_count -= min(p->act_count, ACT_DECLINE);
516 					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
517 						pmap_remove_all(p);
518 						vm_page_deactivate(p);
519 					} else {
520 						vm_pageq_requeue(p);
521 					}
522 				} else {
523 					vm_page_activate(p);
524 					vm_page_flag_clear(p, PG_REFERENCED);
525 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
526 						p->act_count += ACT_ADVANCE;
527 					vm_pageq_requeue(p);
528 				}
529 			} else if (p->queue == PQ_INACTIVE) {
530 				pmap_remove_all(p);
531 			}
532 			p = next;
533 		}
534 		vm_page_unlock_queues();
535 		object = object->backing_object;
536 	}
537 }
538 
539 /*
540  * deactivate some number of pages in a map; try to do it fairly, but
541  * that is really hard to do.
542  */
543 static void
544 vm_pageout_map_deactivate_pages(map, desired)
545 	vm_map_t map;
546 	vm_pindex_t desired;
547 {
548 	vm_map_entry_t tmpe;
549 	vm_object_t obj, bigobj;
550 	int nothingwired;
551 
552 	GIANT_REQUIRED;
553 	if (!vm_map_trylock(map))
554 		return;
555 
556 	bigobj = NULL;
557 	nothingwired = TRUE;
558 
559 	/*
560 	 * first, search out the biggest object, and try to free pages from
561 	 * that.
562 	 */
563 	tmpe = map->header.next;
564 	while (tmpe != &map->header) {
565 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
566 			obj = tmpe->object.vm_object;
567 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
568 				((bigobj == NULL) ||
569 				 (bigobj->resident_page_count < obj->resident_page_count))) {
570 				bigobj = obj;
571 			}
572 		}
573 		if (tmpe->wired_count > 0)
574 			nothingwired = FALSE;
575 		tmpe = tmpe->next;
576 	}
577 
578 	if (bigobj)
579 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
580 
581 	/*
582 	 * Next, hunt around for other pages to deactivate.  We actually
583 	 * do this search sort of wrong -- .text first is not the best idea.
584 	 */
585 	tmpe = map->header.next;
586 	while (tmpe != &map->header) {
587 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
588 			break;
589 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
590 			obj = tmpe->object.vm_object;
591 			if (obj)
592 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
593 		}
594 		tmpe = tmpe->next;
595 	}
596 
597 	/*
598 	 * Remove all mappings if a process is swapped out; this will free page
599 	 * table pages.
600 	 */
601 	if (desired == 0 && nothingwired) {
602 		vm_page_lock_queues();
603 		pmap_remove(vm_map_pmap(map), vm_map_min(map),
604 		    vm_map_max(map));
605 		vm_page_unlock_queues();
606 	}
607 	vm_map_unlock(map);
608 }
609 #endif		/* !defined(NO_SWAPPING) */
610 
611 /*
612  * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
613  * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
614  * which we know can be trivially freed.
615  */
616 static void
617 vm_pageout_page_free(vm_page_t m)
618 {
619 	vm_object_t object = m->object;
620 	int type = object->type;
621 
622 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
623 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
624 		vm_object_reference(object);
625 	vm_page_busy(m);
626 	pmap_remove_all(m);
627 	vm_page_free(m);
628 	cnt.v_dfree++;
629 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
630 		vm_object_deallocate(object);
631 }
632 
633 /*
634  * This routine is very drastic, but can save the system
635  * in a pinch.
636  */
637 static void
638 vm_pageout_pmap_collect(void)
639 {
640 	int i;
641 	vm_page_t m;
642 	static int warningdone;
643 
644 	if (pmap_pagedaemon_waken == 0)
645 		return;
646 	if (warningdone < 5) {
647 		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
648 		warningdone++;
649 	}
650 	vm_page_lock_queues();
651 	for (i = 0; i < vm_page_array_size; i++) {
652 		m = &vm_page_array[i];
653 		if (m->wire_count || m->hold_count || m->busy ||
654 		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
655 			continue;
656 		pmap_remove_all(m);
657 	}
658 	vm_page_unlock_queues();
659 	pmap_pagedaemon_waken = 0;
660 }
661 
662 /*
663  *	vm_pageout_scan does the dirty work for the pageout daemon.
664  */
665 static void
666 vm_pageout_scan(int pass)
667 {
668 	vm_page_t m, next;
669 	struct vm_page marker;
670 	int save_page_shortage;
671 	int save_inactive_count;
672 	int page_shortage, maxscan, pcount;
673 	int addl_page_shortage, addl_page_shortage_init;
674 	struct proc *p, *bigproc;
675 	vm_offset_t size, bigsize;
676 	vm_object_t object;
677 	int actcount;
678 	int vnodes_skipped = 0;
679 	int maxlaunder;
680 	int s;
681 	struct thread *td;
682 
683 	GIANT_REQUIRED;
684 	/*
685 	 * Decrease registered cache sizes.
686 	 */
687 	EVENTHANDLER_INVOKE(vm_lowmem, 0);
688 	/*
689 	 * We do this explicitly after the caches have been drained above.
690 	 */
691 	uma_reclaim();
692 	/*
693 	 * Do whatever cleanup that the pmap code can.
694 	 */
695 	vm_pageout_pmap_collect();
696 
697 	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);
698 
699 	/*
700 	 * Calculate the number of pages we want to either free or move
701 	 * to the cache.
702 	 */
703 	page_shortage = vm_paging_target() + addl_page_shortage_init;
704 	save_page_shortage = page_shortage;
705 	save_inactive_count = cnt.v_inactive_count;
706 
707 	/*
708 	 * Initialize our marker
709 	 */
710 	bzero(&marker, sizeof(marker));
711 	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
712 	marker.queue = PQ_INACTIVE;
713 	marker.wire_count = 1;
714 
715 	/*
716 	 * Start scanning the inactive queue for pages we can move to the
717 	 * cache or free.  The scan will stop when the target is reached or
718 	 * we have scanned the entire inactive queue.  Note that m->act_count
719 	 * is not used to form decisions for the inactive queue, only for the
720 	 * active queue.
721 	 *
722 	 * maxlaunder limits the number of dirty pages we flush per scan.
723 	 * For most systems a smaller value (16 or 32) is more robust under
724 	 * extreme memory and disk pressure because any unnecessary writes
725 	 * to disk can result in extreme performance degradation.  However,
726 	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
727 	 * used) will die horribly with limited laundering.  If the pageout
728 	 * daemon cannot clean enough pages in the first pass, we let it go
729 	 * all out in succeeding passes.
730 	 */
731 	if ((maxlaunder = vm_max_launder) <= 1)
732 		maxlaunder = 1;
733 	if (pass)
734 		maxlaunder = 10000;
735 rescan0:
736 	addl_page_shortage = addl_page_shortage_init;
737 	maxscan = cnt.v_inactive_count;
738 
739 	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
740 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
741 	     m = next) {
742 
743 		cnt.v_pdpages++;
744 
745 		if (m->queue != PQ_INACTIVE) {
746 			goto rescan0;
747 		}
748 
749 		next = TAILQ_NEXT(m, pageq);
750 
751 		/*
752 		 * skip marker pages
753 		 */
754 		if (m->flags & PG_MARKER)
755 			continue;
756 
757 		/*
758 		 * A held page may be undergoing I/O, so skip it.
759 		 */
760 		if (m->hold_count) {
761 			vm_pageq_requeue(m);
762 			addl_page_shortage++;
763 			continue;
764 		}
765 		/*
766 		 * Don't mess with busy pages; keep them at the front of the
767 		 * queue, as they are most likely being paged out.
768 		 */
769 		if (m->busy || (m->flags & PG_BUSY)) {
770 			addl_page_shortage++;
771 			continue;
772 		}
773 
774 		vm_page_lock_queues();
775 		/*
776 		 * If the object is not being used, we ignore previous
777 		 * references.
778 		 */
779 		if (m->object->ref_count == 0) {
780 			vm_page_flag_clear(m, PG_REFERENCED);
781 			pmap_clear_reference(m);
782 
783 		/*
784 		 * Otherwise, if the page has been referenced while in the
785 		 * inactive queue, we bump the "activation count" upwards,
786 		 * making it less likely that the page will be added back to
787 		 * the inactive queue prematurely again.  Here we check the
788 		 * page tables (or emulated bits, if any), since the upper
789 		 * level VM system knows nothing about existing
790 		 * references.
791 		 */
792 		} else if (((m->flags & PG_REFERENCED) == 0) &&
793 			(actcount = pmap_ts_referenced(m))) {
794 			vm_page_activate(m);
795 			vm_page_unlock_queues();
796 			m->act_count += (actcount + ACT_ADVANCE);
797 			continue;
798 		}
799 
800 		/*
801 		 * If the upper level VM system knows about any page
802 		 * references, we activate the page.  We also set the
803 		 * "activation count" higher than normal so that we are less
804 		 * likely to place pages back onto the inactive queue again.
805 		 */
806 		if ((m->flags & PG_REFERENCED) != 0) {
807 			vm_page_flag_clear(m, PG_REFERENCED);
808 			actcount = pmap_ts_referenced(m);
809 			vm_page_activate(m);
810 			vm_page_unlock_queues();
811 			m->act_count += (actcount + ACT_ADVANCE + 1);
812 			continue;
813 		}
814 
815 		/*
816 		 * If the upper level VM system doesn't know anything about
817 		 * the page being dirty, we have to check for it again.  As
818 		 * far as the VM code knows, any partially dirty pages are
819 		 * fully dirty.
820 		 */
821 		if (m->dirty == 0) {
822 			vm_page_test_dirty(m);
823 		} else {
824 			vm_page_dirty(m);
825 		}
826 		vm_page_unlock_queues();
827 
828 		/*
829 		 * Invalid pages can be easily freed
830 		 */
831 		if (m->valid == 0) {
832 			vm_page_lock_queues();
833 			vm_pageout_page_free(m);
834 			vm_page_unlock_queues();
835 			--page_shortage;
836 
837 		/*
838 		 * Clean pages can be placed onto the cache queue.  This
839 		 * effectively frees them.
840 		 */
841 		} else if (m->dirty == 0) {
842 			vm_page_lock_queues();
843 			vm_page_cache(m);
844 			vm_page_unlock_queues();
845 			--page_shortage;
846 		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
847 			/*
848 			 * Dirty pages need to be paged out, but flushing
849 			 * a page is extremely expensive versus freeing
850 			 * a clean page.  Rather than artificially limiting
851 			 * the number of pages we can flush, we instead give
852 			 * dirty pages extra priority on the inactive queue
853 			 * by forcing them to be cycled through the queue
854 			 * twice before being flushed, after which the
855 			 * (now clean) page will cycle through once more
856 			 * before being freed.  This significantly extends
857 			 * the thrash point for a heavily loaded machine.
858 			 */
859 			vm_page_lock_queues();
860 			vm_page_flag_set(m, PG_WINATCFLS);
861 			vm_pageq_requeue(m);
862 			vm_page_unlock_queues();
863 		} else if (maxlaunder > 0) {
864 			/*
865 			 * We always want to try to flush some dirty pages if
866 			 * we encounter them, to keep the system stable.
867 			 * Normally this number is small, but under extreme
868 			 * pressure where there are insufficient clean pages
869 			 * on the inactive queue, we may have to go all out.
870 			 */
871 			int swap_pageouts_ok;
872 			struct vnode *vp = NULL;
873 			struct mount *mp;
874 
875 			object = m->object;
876 
877 			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
878 				swap_pageouts_ok = 1;
879 			} else {
880 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
881 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
882 				vm_page_count_min());
883 
884 			}
885 
886 			/*
887 			 * We don't bother paging objects that are "dead".
888 			 * Those objects are in a "rundown" state.
889 			 */
890 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
891 				vm_pageq_requeue(m);
892 				continue;
893 			}
894 
895 			/*
896 			 * The object is already known NOT to be dead.   It
897 			 * is possible for the vget() to block the whole
898 			 * pageout daemon, but the new low-memory handling
899 			 * code should prevent it.
900 			 *
901 			 * The previous code skipped locked vnodes and, worse,
902 			 * reordered pages in the queue.  This results in
903 			 * completely non-deterministic operation and, on a
904 			 * busy system, can lead to extremely non-optimal
905 			 * pageouts.  For example, it can cause clean pages
906 			 * to be freed and dirty pages to be moved to the end
907 			 * of the queue.  Since dirty pages are also moved to
908 			 * the end of the queue once-cleaned, this gives
909 			 * way too large a weighting to deferring the freeing
910 			 * of dirty pages.
911 			 *
912 			 * We can't wait forever for the vnode lock, we might
913 			 * deadlock due to a vn_read() getting stuck in
914 			 * vm_wait while holding this vnode.  We skip the
915 			 * vnode if we can't get it in a reasonable amount
916 			 * of time.
917 			 */
918 			if (object->type == OBJT_VNODE) {
919 				vp = object->handle;
920 
921 				mp = NULL;
922 				if (vp->v_type == VREG)
923 					vn_start_write(vp, &mp, V_NOWAIT);
924 				if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) {
925 					++pageout_lock_miss;
926 					vn_finished_write(mp);
927 					if (object->flags & OBJ_MIGHTBEDIRTY)
928 						vnodes_skipped++;
929 					continue;
930 				}
931 
932 				/*
933 				 * The page might have been moved to another
934 				 * queue during potential blocking in vget()
935 				 * above.  The page might have been freed and
936 				 * reused for another vnode.  The object might
937 				 * have been reused for another vnode.
938 				 */
939 				if (m->queue != PQ_INACTIVE ||
940 				    m->object != object ||
941 				    object->handle != vp) {
942 					if (object->flags & OBJ_MIGHTBEDIRTY)
943 						vnodes_skipped++;
944 					vput(vp);
945 					vn_finished_write(mp);
946 					continue;
947 				}
948 
949 				/*
950 				 * The page may have been busied during the
951 				 * blocking in vget(); we don't move the
952 				 * page back onto the end of the queue, so that
953 				 * our statistics remain more correct.
954 				 */
955 				if (m->busy || (m->flags & PG_BUSY)) {
956 					vput(vp);
957 					vn_finished_write(mp);
958 					continue;
959 				}
960 
961 				/*
962 				 * If the page has become held, it might
963 				 * be undergoing I/O, so skip it
964 				 */
965 				if (m->hold_count) {
966 					vm_pageq_requeue(m);
967 					if (object->flags & OBJ_MIGHTBEDIRTY)
968 						vnodes_skipped++;
969 					vput(vp);
970 					vn_finished_write(mp);
971 					continue;
972 				}
973 			}
974 
975 			/*
976 			 * If a page is dirty, then it is either being washed
977 			 * (but not yet cleaned) or it is still in the
978 			 * laundry.  If it is still in the laundry, then we
979 			 * start the cleaning operation.
980 			 *
981 			 * This operation may cluster, invalidating the 'next'
982 			 * pointer.  To prevent an inordinate number of
983 			 * restarts we use our marker to remember our place.
984 			 *
985 			 * decrement page_shortage on success to account for
986 			 * the (future) cleaned page.  Otherwise we could wind
987 			 * up laundering or cleaning too many pages.
988 			 */
989 			vm_page_lock_queues();
990 			s = splvm();
991 			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
992 			splx(s);
993 			if (vm_pageout_clean(m) != 0) {
994 				--page_shortage;
995 				--maxlaunder;
996 			}
997 			s = splvm();
998 			next = TAILQ_NEXT(&marker, pageq);
999 			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
1000 			splx(s);
1001 			vm_page_unlock_queues();
1002 			if (vp) {
1003 				vput(vp);
1004 				vn_finished_write(mp);
1005 			}
1006 		}
1007 	}
1008 
1009 	/*
1010 	 * Compute the number of pages we want to try to move from the
1011 	 * active queue to the inactive queue.
1012 	 */
1013 	page_shortage = vm_paging_target() +
1014 		cnt.v_inactive_target - cnt.v_inactive_count;
1015 	page_shortage += addl_page_shortage;
1016 
1017 	vm_page_lock_queues();
1018 	/*
1019 	 * Scan the active queue for things we can deactivate. We nominally
1020 	 * track the per-page activity counter and use it to locate
1021 	 * deactivation candidates.
1022 	 */
1023 	pcount = cnt.v_active_count;
1024 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1025 
1026 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
1027 
1028 		/*
1029 		 * This is a consistency check, and should likely be a panic
1030 		 * or warning.
1031 		 */
1032 		if (m->queue != PQ_ACTIVE) {
1033 			break;
1034 		}
1035 
1036 		next = TAILQ_NEXT(m, pageq);
1037 		/*
1038 		 * Don't deactivate pages that are busy.
1039 		 */
1040 		if ((m->busy != 0) ||
1041 		    (m->flags & PG_BUSY) ||
1042 		    (m->hold_count != 0)) {
1043 			vm_pageq_requeue(m);
1044 			m = next;
1045 			continue;
1046 		}
1047 
1048 		/*
1049 		 * The count for pagedaemon pages is done after checking the
1050 		 * page for eligibility...
1051 		 */
1052 		cnt.v_pdpages++;
1053 
1054 		/*
1055 		 * Check to see "how much" the page has been used.
1056 		 */
1057 		actcount = 0;
1058 		if (m->object->ref_count != 0) {
1059 			if (m->flags & PG_REFERENCED) {
1060 				actcount += 1;
1061 			}
1062 			actcount += pmap_ts_referenced(m);
1063 			if (actcount) {
1064 				m->act_count += ACT_ADVANCE + actcount;
1065 				if (m->act_count > ACT_MAX)
1066 					m->act_count = ACT_MAX;
1067 			}
1068 		}
1069 
1070 		/*
1071 		 * Since we have "tested" this bit, we need to clear it now.
1072 		 */
1073 		vm_page_flag_clear(m, PG_REFERENCED);
1074 
1075 		/*
1076 		 * Only if an object is currently being used, do we use the
1077 		 * page activation count stats.
1078 		 */
1079 		if (actcount && (m->object->ref_count != 0)) {
1080 			vm_pageq_requeue(m);
1081 		} else {
1082 			m->act_count -= min(m->act_count, ACT_DECLINE);
1083 			if (vm_pageout_algorithm ||
1084 			    m->object->ref_count == 0 ||
1085 			    m->act_count == 0) {
1086 				page_shortage--;
1087 				if (m->object->ref_count == 0) {
1088 					pmap_remove_all(m);
1089 					if (m->dirty == 0)
1090 						vm_page_cache(m);
1091 					else
1092 						vm_page_deactivate(m);
1093 				} else {
1094 					vm_page_deactivate(m);
1095 				}
1096 			} else {
1097 				vm_pageq_requeue(m);
1098 			}
1099 		}
1100 		m = next;
1101 	}
1102 	s = splvm();
1103 
1104 	/*
1105 	 * We try to maintain some *really* free pages; this allows interrupt
1106 	 * code to be guaranteed space.  Since both cache and free queues
1107 	 * are considered basically 'free', moving pages from cache to free
1108 	 * does not affect other calculations.
1109 	 */
1110 	while (cnt.v_free_count < cnt.v_free_reserved) {
1111 		static int cache_rover = 0;
1112 		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
1113 		if (!m)
1114 			break;
1115 		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
1116 		    m->busy ||
1117 		    m->hold_count ||
1118 		    m->wire_count) {
1119 #ifdef INVARIANTS
1120 			printf("Warning: busy page %p found in cache\n", m);
1121 #endif
1122 			vm_page_deactivate(m);
1123 			continue;
1124 		}
1125 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
1126 		vm_pageout_page_free(m);
1127 	}
1128 	splx(s);
1129 	vm_page_unlock_queues();
1130 #if !defined(NO_SWAPPING)
1131 	/*
1132 	 * Idle process swapout -- run once per second.
1133 	 */
1134 	if (vm_swap_idle_enabled) {
1135 		static long lsec;
1136 		if (time_second != lsec) {
1137 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
1138 			vm_req_vmdaemon();
1139 			lsec = time_second;
1140 		}
1141 	}
1142 #endif
1143 
1144 	/*
1145 	 * If we didn't get enough free pages, and we have skipped a vnode
1146 	 * in a writeable object, wakeup the sync daemon.  And kick swapout
1147 	 * if we did not get enough free pages.
1148 	 */
1149 	if (vm_paging_target() > 0) {
1150 		if (vnodes_skipped && vm_page_count_min())
1151 			(void) speedup_syncer();
1152 #if !defined(NO_SWAPPING)
1153 		if (vm_swap_enabled && vm_page_count_target()) {
1154 			vm_req_vmdaemon();
1155 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1156 		}
1157 #endif
1158 	}
1159 
1160 	/*
1161 	 * If we are out of swap and were not able to reach our paging
1162 	 * target, kill the largest process.
1163 	 *
1164 	 * We keep the process bigproc locked once we find it to keep anyone
1165 	 * from messing with it; however, there is a possibility of
1166 	 * deadlock if process B is bigproc and one of its child processes
1167 	 * attempts to propagate a signal to B while we are waiting for A's
1168 	 * lock while walking this list.  To avoid this, we don't block on
1169 	 * the process lock but just skip a process if it is already locked.
1170 	 */
1171 	if ((vm_swap_size < 64 && vm_page_count_min()) ||
1172 	    (swap_pager_full && vm_paging_target() > 0)) {
1173 #if 0
1174 	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
1175 #endif
1176 		bigproc = NULL;
1177 		bigsize = 0;
1178 		sx_slock(&allproc_lock);
1179 		FOREACH_PROC_IN_SYSTEM(p) {
1180 			int breakout;
1181 			/*
1182 			 * If this process is already locked, skip it.
1183 			 */
1184 			if (PROC_TRYLOCK(p) == 0)
1185 				continue;
1186 			/*
1187 			 * if this is a system process, skip it
1188 			 */
1189 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
1190 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
1191 				PROC_UNLOCK(p);
1192 				continue;
1193 			}
1194 			/*
1195 			 * if the process is in a non-running type state,
1196 			 * don't touch it. Check all the threads individually.
1197 			 */
1198 			mtx_lock_spin(&sched_lock);
1199 			breakout = 0;
1200 			FOREACH_THREAD_IN_PROC(p, td) {
1201 				if (!TD_ON_RUNQ(td) &&
1202 				    !TD_IS_RUNNING(td) &&
1203 				    !TD_IS_SLEEPING(td)) {
1204 					breakout = 1;
1205 					break;
1206 				}
1207 			}
1208 			if (breakout) {
1209 				mtx_unlock_spin(&sched_lock);
1210 				PROC_UNLOCK(p);
1211 				continue;
1212 			}
1213 			mtx_unlock_spin(&sched_lock);
1214 			/*
1215 			 * get the process size
1216 			 */
1217 			size = vmspace_resident_count(p->p_vmspace) +
1218 				vmspace_swap_count(p->p_vmspace);
1219 			/*
1220 			 * if this process is bigger than the biggest one,
1221 			 * remember it.
1222 			 */
1223 			if (size > bigsize) {
1224 				if (bigproc != NULL)
1225 					PROC_UNLOCK(bigproc);
1226 				bigproc = p;
1227 				bigsize = size;
1228 			} else
1229 				PROC_UNLOCK(p);
1230 		}
1231 		sx_sunlock(&allproc_lock);
1232 		if (bigproc != NULL) {
1233 			struct ksegrp *kg;
1234 			killproc(bigproc, "out of swap space");
1235 			mtx_lock_spin(&sched_lock);
1236 			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
1237 				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
1238 			}
1239 			mtx_unlock_spin(&sched_lock);
1240 			PROC_UNLOCK(bigproc);
1241 			wakeup(&cnt.v_free_count);
1242 		}
1243 	}
1244 }
1245 
1246 /*
1247  * This routine tries to maintain the pseudo-LRU active queue,
1248  * so that during long periods of time when there is no paging,
1249  * some statistics accumulation still occurs.  This code
1250  * helps the situation where paging just starts to occur.
1251  */
1252 static void
1253 vm_pageout_page_stats()
1254 {
1255 	vm_page_t m,next;
1256 	int pcount,tpcount;		/* Number of pages to check */
1257 	static int fullintervalcount = 0;
1258 	int page_shortage;
1259 	int s0;
1260 
1261 	page_shortage =
1262 	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1263 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1264 
1265 	if (page_shortage <= 0)
1266 		return;
1267 
1268 	s0 = splvm();
1269 	vm_page_lock_queues();
1270 	pcount = cnt.v_active_count;
1271 	fullintervalcount += vm_pageout_stats_interval;
1272 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1273 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1274 		if (pcount > tpcount)
1275 			pcount = tpcount;
1276 	} else {
1277 		fullintervalcount = 0;
1278 	}
1279 
1280 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1281 	while ((m != NULL) && (pcount-- > 0)) {
1282 		int actcount;
1283 
1284 		if (m->queue != PQ_ACTIVE) {
1285 			break;
1286 		}
1287 
1288 		next = TAILQ_NEXT(m, pageq);
1289 		/*
1290 		 * Don't deactivate pages that are busy.
1291 		 */
1292 		if ((m->busy != 0) ||
1293 		    (m->flags & PG_BUSY) ||
1294 		    (m->hold_count != 0)) {
1295 			vm_pageq_requeue(m);
1296 			m = next;
1297 			continue;
1298 		}
1299 
1300 		actcount = 0;
1301 		if (m->flags & PG_REFERENCED) {
1302 			vm_page_flag_clear(m, PG_REFERENCED);
1303 			actcount += 1;
1304 		}
1305 
1306 		actcount += pmap_ts_referenced(m);
1307 		if (actcount) {
1308 			m->act_count += ACT_ADVANCE + actcount;
1309 			if (m->act_count > ACT_MAX)
1310 				m->act_count = ACT_MAX;
1311 			vm_pageq_requeue(m);
1312 		} else {
1313 			if (m->act_count == 0) {
1314 				/*
1315 				 * We turn off page access, so that we have
1316 				 * more accurate RSS stats.  We don't do this
1317 				 * in the normal page deactivation when the
1318 				 * system is loaded VM-wise, because the
1319 				 * cost of the large number of page protect
1320 				 * operations would be higher than the value
1321 				 * of doing the operation.
1322 				 */
1323 				pmap_remove_all(m);
1324 				vm_page_deactivate(m);
1325 			} else {
1326 				m->act_count -= min(m->act_count, ACT_DECLINE);
1327 				vm_pageq_requeue(m);
1328 			}
1329 		}
1330 
1331 		m = next;
1332 	}
1333 	vm_page_unlock_queues();
1334 	splx(s0);
1335 }
1336 
1337 static int
1338 vm_pageout_free_page_calc(count)
1339 vm_size_t count;
1340 {
1341 	if (count < cnt.v_page_count)
1342 		 return 0;
1343 	/*
1344 	 * free_reserved needs to include enough for the largest swap pager
1345 	 * structures plus enough for any pv_entry structs when paging.
1346 	 */
1347 	if (cnt.v_page_count > 1024)
1348 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1349 	else
1350 		cnt.v_free_min = 4;
1351 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1352 		cnt.v_interrupt_free_min;
1353 	cnt.v_free_reserved = vm_pageout_page_count +
1354 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1355 	cnt.v_free_severe = cnt.v_free_min / 2;
1356 	cnt.v_free_min += cnt.v_free_reserved;
1357 	cnt.v_free_severe += cnt.v_free_reserved;
1358 	return 1;
1359 }
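
/*
 * Rough illustration of the arithmetic above (hypothetical numbers only):
 * on a machine with 32768 pages (128MB at 4K pages), and assuming
 * MAXBSIZE is 64K, PQ_L2_SIZE is 256, and vm_pageout_page_count is the
 * default 16:
 *
 *	v_free_min         = 4 + (32768 - 1024) / 200       = 162
 *	v_pageout_free_min = (2 * 65536) / 4096 + 2          = 34
 *	v_free_reserved    = 16 + 34 + 32768 / 768 + 256     = 348
 *	v_free_severe      = 162 / 2 + 348                   = 429
 *	v_free_min (final) = 162 + 348                       = 510
 *
 * The exact values depend on the kernel configuration; this only sketches
 * how the thresholds relate to one another.
 */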
1360 
1361 /*
1362  *	vm_pageout is the high level pageout daemon.
1363  */
1364 static void
1365 vm_pageout()
1366 {
1367 	int error, pass, s;
1368 
1369 	mtx_lock(&Giant);
1370 
1371 	/*
1372 	 * Initialize some paging parameters.
1373 	 */
1374 	cnt.v_interrupt_free_min = 2;
1375 	if (cnt.v_page_count < 2000)
1376 		vm_pageout_page_count = 8;
1377 
1378 	vm_pageout_free_page_calc(cnt.v_page_count);
1379 	/*
1380 	 * v_free_target and v_cache_min control pageout hysteresis.  Note
1381 	 * that these are more a measure of the VM cache queue hysteresis
1382 	 * than the VM free queue.  Specifically, v_free_target is the
1383 	 * high water mark (free+cache pages).
1384 	 *
1385 	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1386 	 * low water mark, while v_free_min is the stop.  v_cache_min must
1387 	 * be big enough to handle memory needs while the pageout daemon
1388 	 * is signalled and run to free more pages.
1389 	 */
1390 	if (cnt.v_free_count > 6144)
1391 		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1392 	else
1393 		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
1394 
1395 	if (cnt.v_free_count > 2048) {
1396 		cnt.v_cache_min = cnt.v_free_target;
1397 		cnt.v_cache_max = 2 * cnt.v_cache_min;
1398 		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
1399 	} else {
1400 		cnt.v_cache_min = 0;
1401 		cnt.v_cache_max = 0;
1402 		cnt.v_inactive_target = cnt.v_free_count / 4;
1403 	}
1404 	if (cnt.v_inactive_target > cnt.v_free_count / 3)
1405 		cnt.v_inactive_target = cnt.v_free_count / 3;
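
	/*
	 * Continuing the hypothetical example sketched after
	 * vm_pageout_free_page_calc() above: with v_free_min = 510,
	 * v_free_reserved = 348, and most of the 32768 pages still free
	 * at boot, v_free_target becomes 4 * 510 + 348 = 2388,
	 * v_cache_min = 2388, v_cache_max = 4776, and v_inactive_target =
	 * 3 * 2388 / 2 = 3582 (well under the v_free_count / 3 clamp).
	 */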
1406 
1407 	/* XXX does not really belong here */
1408 	if (vm_page_max_wired == 0)
1409 		vm_page_max_wired = cnt.v_free_count / 3;
1410 
1411 	if (vm_pageout_stats_max == 0)
1412 		vm_pageout_stats_max = cnt.v_free_target;
1413 
1414 	/*
1415 	 * Set interval in seconds for stats scan.
1416 	 */
1417 	if (vm_pageout_stats_interval == 0)
1418 		vm_pageout_stats_interval = 5;
1419 	if (vm_pageout_full_stats_interval == 0)
1420 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1421 
1422 	/*
1423 	 * Set maximum free per pass
1424 	 */
1425 	if (vm_pageout_stats_free_max == 0)
1426 		vm_pageout_stats_free_max = 5;
1427 
1428 	swap_pager_swap_init();
1429 	pass = 0;
1430 	/*
1431 	 * The pageout daemon is never done, so loop forever.
1432 	 */
1433 	while (TRUE) {
1434 		s = splvm();
1435 		vm_page_lock_queues();
1436 		/*
1437 		 * If we have enough free memory, wakeup waiters.  Do
1438 		 * not clear vm_pages_needed until we reach our target,
1439 		 * otherwise we may be woken up over and over again and
1440 		 * waste a lot of cpu.
1441 		 */
1442 		if (vm_pages_needed && !vm_page_count_min()) {
1443 			if (!vm_paging_needed())
1444 				vm_pages_needed = 0;
1445 			wakeup(&cnt.v_free_count);
1446 		}
1447 		if (vm_pages_needed) {
1448 			/*
1449 			 * Still not done, take a second pass without waiting
1450 			 * (unlimited dirty cleaning), otherwise sleep a bit
1451 			 * and try again.
1452 			 */
1453 			++pass;
1454 			if (pass > 1)
1455 				msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
1456 				       "psleep", hz/2);
1457 		} else {
1458 			/*
1459 			 * Good enough, sleep & handle stats.  Prime the pass
1460 			 * for the next run.
1461 			 */
1462 			if (pass > 1)
1463 				pass = 1;
1464 			else
1465 				pass = 0;
1466 			error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
1467 				    "psleep", vm_pageout_stats_interval * hz);
1468 			if (error && !vm_pages_needed) {
1469 				vm_page_unlock_queues();
1470 				splx(s);
1471 				pass = 0;
1472 				vm_pageout_page_stats();
1473 				continue;
1474 			}
1475 		}
1476 		if (vm_pages_needed)
1477 			cnt.v_pdwakeups++;
1478 		vm_page_unlock_queues();
1479 		splx(s);
1480 		vm_pageout_scan(pass);
1481 	}
1482 }
1483 
1484 /*
1485  * Unless the page queue lock is held by the caller, this function
1486  * should be regarded as advisory.  Specifically, the caller should
1487  * not msleep() on &cnt.v_free_count following this function unless
1488  * the page queue lock is held until the msleep() is performed.
1489  */
1490 void
1491 pagedaemon_wakeup()
1492 {
1493 
1494 	if (!vm_pages_needed && curthread->td_proc != pageproc) {
1495 		vm_pages_needed = 1;
1496 		wakeup(&vm_pages_needed);
1497 	}
1498 }
1499 
1500 #if !defined(NO_SWAPPING)
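/*
 * Wake up the vm_daemon, rate-limited to roughly one wakeup per second;
 * the "ticks < lastrun" test catches the tick counter wrapping around.
 */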
1501 static void
1502 vm_req_vmdaemon()
1503 {
1504 	static int lastrun = 0;
1505 
1506 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
1507 		wakeup(&vm_daemon_needed);
1508 		lastrun = ticks;
1509 	}
1510 }
1511 
1512 static void
1513 vm_daemon()
1514 {
1515 	struct proc *p;
1516 	int breakout;
1517 	struct thread *td;
1518 
1519 	mtx_lock(&Giant);
1520 	while (TRUE) {
1521 		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
1522 		if (vm_pageout_req_swapout) {
1523 			swapout_procs(vm_pageout_req_swapout);
1524 			vm_pageout_req_swapout = 0;
1525 		}
1526 		/*
1527 		 * scan the processes for those exceeding their rlimits or
1528 		 * swapped out -- deactivate their pages
1529 		 */
1530 		sx_slock(&allproc_lock);
1531 		LIST_FOREACH(p, &allproc, p_list) {
1532 			vm_pindex_t limit, size;
1533 
1534 			/*
1535 			 * if this is a system process or if we have already
1536 			 * looked at this process, skip it.
1537 			 */
1538 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
1539 				continue;
1540 			}
1541 			/*
1542 			 * if the process is in a non-running type state,
1543 			 * don't touch it.
1544 			 */
1545 			mtx_lock_spin(&sched_lock);
1546 			breakout = 0;
1547 			FOREACH_THREAD_IN_PROC(p, td) {
1548 				if (!TD_ON_RUNQ(td) &&
1549 				    !TD_IS_RUNNING(td) &&
1550 				    !TD_IS_SLEEPING(td)) {
1551 					breakout = 1;
1552 					break;
1553 				}
1554 			}
1555 			if (breakout) {
1556 				mtx_unlock_spin(&sched_lock);
1557 				continue;
1558 			}
1559 			/*
1560 			 * get a limit
1561 			 */
1562 			limit = OFF_TO_IDX(
1563 			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
1564 				p->p_rlimit[RLIMIT_RSS].rlim_max));
1565 
1566 			/*
1567 			 * let processes that are swapped out really be
1568 			 * swapped out: set the limit to nothing (this will
1569 			 * force a swap-out).
1570 			 */
1571 			if ((p->p_sflag & PS_INMEM) == 0)
1572 				limit = 0;	/* XXX */
1573 			mtx_unlock_spin(&sched_lock);
1574 
1575 			size = vmspace_resident_count(p->p_vmspace);
1576 			if (limit >= 0 && size >= limit) {
1577 				vm_pageout_map_deactivate_pages(
1578 				    &p->p_vmspace->vm_map, limit);
1579 			}
1580 		}
1581 		sx_sunlock(&allproc_lock);
1582 	}
1583 }
1584 #endif			/* !defined(NO_SWAPPING) */
1585