xref: /freebsd/sys/vm/vm_pageout.c (revision a3e8fd0b7f663db7eafff527d5c3ca3bcfa8a537)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41  *
42  *
43  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44  * All rights reserved.
45  *
46  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47  *
48  * Permission to use, copy, modify and distribute this software and
49  * its documentation is hereby granted, provided that both the copyright
50  * notice and this permission notice appear in all copies of the
51  * software, derivative works or modified versions, and any portions
52  * thereof, and that both notices appear in supporting documentation.
53  *
54  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57  *
58  * Carnegie Mellon requests users of this software to return to
59  *
60  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61  *  School of Computer Science
62  *  Carnegie Mellon University
63  *  Pittsburgh PA 15213-3890
64  *
65  * any improvements or extensions that they make and grant Carnegie the
66  * rights to redistribute these changes.
67  *
68  * $FreeBSD$
69  */
70 
71 /*
72  *	The proverbial page-out daemon.
73  */
74 
75 #include "opt_vm.h"
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/lock.h>
80 #include <sys/mutex.h>
81 #include <sys/proc.h>
82 #include <sys/kthread.h>
83 #include <sys/ktr.h>
84 #include <sys/resourcevar.h>
85 #include <sys/sched.h>
86 #include <sys/signalvar.h>
87 #include <sys/vnode.h>
88 #include <sys/vmmeter.h>
89 #include <sys/sx.h>
90 #include <sys/sysctl.h>
91 
92 #include <vm/vm.h>
93 #include <vm/vm_param.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_page.h>
96 #include <vm/vm_map.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_pager.h>
99 #include <vm/swap_pager.h>
100 #include <vm/vm_extern.h>
101 #include <vm/uma.h>
102 
103 #include <machine/mutex.h>
104 
105 /*
106  * System initialization
107  */
108 
109 /* the kernel process "vm_pageout"*/
110 static void vm_pageout(void);
111 static int vm_pageout_clean(vm_page_t);
112 static void vm_pageout_scan(int pass);
113 static int vm_pageout_free_page_calc(vm_size_t count);
114 struct proc *pageproc;
115 
116 static struct kproc_desc page_kp = {
117 	"pagedaemon",
118 	vm_pageout,
119 	&pageproc
120 };
121 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
122 
123 #if !defined(NO_SWAPPING)
124 /* the kernel process "vm_daemon"*/
125 static void vm_daemon(void);
126 static struct	proc *vmproc;
127 
128 static struct kproc_desc vm_kp = {
129 	"vmdaemon",
130 	vm_daemon,
131 	&vmproc
132 };
133 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
134 #endif
135 
136 
137 int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
138 int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
139 int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */
140 
141 #if !defined(NO_SWAPPING)
142 static int vm_pageout_req_swapout;	/* XXX */
143 static int vm_daemon_needed;
144 #endif
145 extern int vm_swap_size;
146 static int vm_max_launder = 32;
147 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
148 static int vm_pageout_full_stats_interval = 0;
149 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
150 static int defer_swap_pageouts=0;
151 static int disable_swap_pageouts=0;
152 
153 #if defined(NO_SWAPPING)
154 static int vm_swap_enabled=0;
155 static int vm_swap_idle_enabled=0;
156 #else
157 static int vm_swap_enabled=1;
158 static int vm_swap_idle_enabled=0;
159 #endif
160 
161 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
162 	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
163 
164 SYSCTL_INT(_vm, OID_AUTO, max_launder,
165 	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
166 
167 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
168 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
169 
170 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
171 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
172 
173 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
174 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
175 
176 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
177 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
178 
179 #if defined(NO_SWAPPING)
180 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
181 	CTLFLAG_RD, &vm_swap_enabled, 0, "");
182 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
183 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
184 #else
185 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
186 	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
187 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
188 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
189 #endif
190 
191 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
192 	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
193 
194 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
195 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
196 
197 static int pageout_lock_miss;
198 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
199 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
200 
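/*
 * Maximum number of pages clustered into a single pageout I/O by
 * vm_pageout_clean()/vm_pageout_flush().  The default may be lowered at
 * startup on small-memory machines (see vm_pageout() below).
 */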
201 #define VM_PAGEOUT_PAGE_COUNT 16
202 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
203 
204 int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
205 
206 #if !defined(NO_SWAPPING)
207 typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
208 static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
209 static freeer_fcn_t vm_pageout_object_deactivate_pages;
210 static void vm_req_vmdaemon(void);
211 #endif
212 static void vm_pageout_page_stats(void);
213 
214 /*
215  * vm_pageout_clean:
216  *
217  * Clean the page and remove it from the laundry.
218  *
219  * We set the busy bit to cause potential page faults on this page to
220  * block.  Note the careful timing, however: the busy bit isn't set until
221  * late, and until then we cannot do anything that would mess with the page.
222  */
223 static int
224 vm_pageout_clean(m)
225 	vm_page_t m;
226 {
227 	vm_object_t object;
228 	vm_page_t mc[2*vm_pageout_page_count];
229 	int pageout_count;
230 	int ib, is, page_base;
231 	vm_pindex_t pindex = m->pindex;
232 
233 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
234 
235 	object = m->object;
236 
237 	/*
238 	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
239 	 * with the new swapper, but we could have serious problems paging
240 	 * out other object types if there is insufficient memory.
241 	 *
242 	 * Unfortunately, checking free memory here is far too late, so the
243 	 * check has been moved up a procedural level.
244 	 */
245 
246 	/*
247 	 * Don't mess with the page if it's busy, held, or special
248 	 */
249 	if ((m->hold_count != 0) ||
250 	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
251 		return 0;
252 	}
253 
254 	mc[vm_pageout_page_count] = m;
255 	pageout_count = 1;
256 	page_base = vm_pageout_page_count;
257 	ib = 1;
258 	is = 1;
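	/*
	 * The candidate page sits at the midpoint of mc[]; the reverse scan
	 * below fills slots downward from page_base while the forward scan
	 * fills slots upward, so the final cluster is the contiguous range
	 * mc[page_base .. page_base + pageout_count - 1].
	 */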
259 
260 	/*
261 	 * Scan object for clusterable pages.
262 	 *
263 	 * We can cluster ONLY if: ->> the page is NOT
264 	 * clean, wired, busy, held, or mapped into a
265 	 * buffer, and one of the following:
266 	 * 1) The page is inactive, or a seldom used
267 	 *    active page.
268 	 * -or-
269 	 * 2) we force the issue.
270 	 *
271 	 * During heavy mmap/modification loads the pageout
272 	 * daemon can really fragment the underlying file
273 	 * due to flushing pages out of order and not trying to
274 	 * align the clusters (which leaves sporadic out-of-order
275 	 * holes).  To solve this problem we do the reverse scan
276 	 * first and attempt to align our cluster, then do a
277 	 * forward scan if room remains.
278 	 */
279 more:
280 	while (ib && pageout_count < vm_pageout_page_count) {
281 		vm_page_t p;
282 
283 		if (ib > pindex) {
284 			ib = 0;
285 			break;
286 		}
287 
288 		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
289 			ib = 0;
290 			break;
291 		}
292 		if (((p->queue - p->pc) == PQ_CACHE) ||
293 		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
294 			ib = 0;
295 			break;
296 		}
297 		vm_page_test_dirty(p);
298 		if ((p->dirty & p->valid) == 0 ||
299 		    p->queue != PQ_INACTIVE ||
300 		    p->wire_count != 0 ||	/* may be held by buf cache */
301 		    p->hold_count != 0) {	/* may be undergoing I/O */
302 			ib = 0;
303 			break;
304 		}
305 		mc[--page_base] = p;
306 		++pageout_count;
307 		++ib;
308 		/*
309 		 * alignment boundary, stop here and switch directions.  Do
310 		 * not clear ib.
311 		 */
312 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
313 			break;
314 	}
315 
316 	while (pageout_count < vm_pageout_page_count &&
317 	    pindex + is < object->size) {
318 		vm_page_t p;
319 
320 		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
321 			break;
322 		if (((p->queue - p->pc) == PQ_CACHE) ||
323 		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
324 			break;
325 		}
326 		vm_page_test_dirty(p);
327 		if ((p->dirty & p->valid) == 0 ||
328 		    p->queue != PQ_INACTIVE ||
329 		    p->wire_count != 0 ||	/* may be held by buf cache */
330 		    p->hold_count != 0) {	/* may be undergoing I/O */
331 			break;
332 		}
333 		mc[page_base + pageout_count] = p;
334 		++pageout_count;
335 		++is;
336 	}
337 
338 	/*
339 	 * If we exhausted our forward scan, continue with the reverse scan
340  * when possible, even past a page boundary.  This catches boundary
341 	 * conditions.
342 	 */
343 	if (ib && pageout_count < vm_pageout_page_count)
344 		goto more;
345 
346 	/*
347 	 * we allow reads during pageouts...
348 	 */
349 	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
350 }
351 
352 /*
353  * vm_pageout_flush() - launder the given pages
354  *
355  *	The given pages are laundered.  Note that we set up for the start of
356  *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
357  *	reference count all in here rather than in the parent.  If we want
358  *	the parent to do more sophisticated things we may have to change
359  *	the ordering.
360  */
361 int
362 vm_pageout_flush(mc, count, flags)
363 	vm_page_t *mc;
364 	int count;
365 	int flags;
366 {
367 	vm_object_t object;
368 	int pageout_status[count];
369 	int numpagedout = 0;
370 	int i;
371 
372 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
373 	/*
374 	 * Initiate I/O.  Bump the vm_page_t->busy counter and
375 	 * mark the pages read-only.
376 	 *
377 	 * We do not have to fix up the clean/dirty bits here... we can
378 	 * allow the pager to do it after the I/O completes.
379 	 *
380 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
381 	 * edge case with file fragments.
382 	 */
383 	for (i = 0; i < count; i++) {
384 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
385 		vm_page_io_start(mc[i]);
386 		vm_page_protect(mc[i], VM_PROT_READ);
387 	}
388 	object = mc[0]->object;
389 	vm_page_unlock_queues();
390 	vm_object_pip_add(object, count);
391 
392 	vm_pager_put_pages(object, mc, count,
393 	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
394 	    pageout_status);
395 
396 	vm_page_lock_queues();
397 	for (i = 0; i < count; i++) {
398 		vm_page_t mt = mc[i];
399 
400 		switch (pageout_status[i]) {
401 		case VM_PAGER_OK:
402 			numpagedout++;
403 			break;
404 		case VM_PAGER_PEND:
405 			numpagedout++;
406 			break;
407 		case VM_PAGER_BAD:
408 			/*
409 			 * Page outside of range of object. Right now we
410 			 * essentially lose the changes by pretending it
411 			 * worked.
412 			 */
413 			pmap_clear_modify(mt);
414 			vm_page_undirty(mt);
415 			break;
416 		case VM_PAGER_ERROR:
417 		case VM_PAGER_FAIL:
418 			/*
419 			 * If the page couldn't be paged out, then reactivate the
420 			 * page so it doesn't clog the inactive list.  (We
421 			 * will try paging it out again later.)
422 			 */
423 			vm_page_activate(mt);
424 			break;
425 		case VM_PAGER_AGAIN:
426 			break;
427 		}
428 
429 		/*
430 		 * If the operation is still going, leave the page busy to
431 		 * block all other accesses. Also, leave the paging in
432 		 * progress indicator set so that we don't attempt an object
433 		 * collapse.
434 		 */
435 		if (pageout_status[i] != VM_PAGER_PEND) {
436 			vm_object_pip_wakeup(object);
437 			vm_page_io_finish(mt);
438 			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
439 				vm_page_protect(mt, VM_PROT_READ);
440 		}
441 	}
442 	return numpagedout;
443 }
444 
445 #if !defined(NO_SWAPPING)
446 /*
447  *	vm_pageout_object_deactivate_pages
448  *
449  *	Deactivate enough pages to satisfy the inactive target
450  *	requirements or, if vm_page_proc_limit is set,
451  *	deactivate all of the pages in the object and its
452  *	backing_objects.
453  *
454  *	The object and map must be locked.
455  */
456 static void
457 vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
458 	vm_map_t map;
459 	vm_object_t object;
460 	vm_pindex_t desired;
461 	int map_remove_only;
462 {
463 	vm_page_t p, next;
464 	int actcount, rcount, remove_mode;
465 
466 	GIANT_REQUIRED;
467 	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
468 		return;
469 
470 	while (object) {
471 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
472 			return;
473 		if (object->paging_in_progress)
474 			return;
475 
476 		remove_mode = map_remove_only;
477 		if (object->shadow_count > 1)
478 			remove_mode = 1;
479 		/*
480 		 * scan the object's entire memory queue
481 		 */
482 		rcount = object->resident_page_count;
483 		p = TAILQ_FIRST(&object->memq);
484 		vm_page_lock_queues();
485 		while (p && (rcount-- > 0)) {
486 			if (pmap_resident_count(map->pmap) <= desired) {
487 				vm_page_unlock_queues();
488 				return;
489 			}
490 			next = TAILQ_NEXT(p, listq);
491 			cnt.v_pdpages++;
492 			if (p->wire_count != 0 ||
493 			    p->hold_count != 0 ||
494 			    p->busy != 0 ||
495 			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
496 			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
497 				p = next;
498 				continue;
499 			}
500 			actcount = pmap_ts_referenced(p);
501 			if (actcount) {
502 				vm_page_flag_set(p, PG_REFERENCED);
503 			} else if (p->flags & PG_REFERENCED) {
504 				actcount = 1;
505 			}
506 			if ((p->queue != PQ_ACTIVE) &&
507 				(p->flags & PG_REFERENCED)) {
508 				vm_page_activate(p);
509 				p->act_count += actcount;
510 				vm_page_flag_clear(p, PG_REFERENCED);
511 			} else if (p->queue == PQ_ACTIVE) {
512 				if ((p->flags & PG_REFERENCED) == 0) {
513 					p->act_count -= min(p->act_count, ACT_DECLINE);
514 					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
515 						vm_page_protect(p, VM_PROT_NONE);
516 						vm_page_deactivate(p);
517 					} else {
518 						vm_pageq_requeue(p);
519 					}
520 				} else {
521 					vm_page_activate(p);
522 					vm_page_flag_clear(p, PG_REFERENCED);
523 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
524 						p->act_count += ACT_ADVANCE;
525 					vm_pageq_requeue(p);
526 				}
527 			} else if (p->queue == PQ_INACTIVE) {
528 				vm_page_protect(p, VM_PROT_NONE);
529 			}
530 			p = next;
531 		}
532 		vm_page_unlock_queues();
533 		object = object->backing_object;
534 	}
535 }
536 
537 /*
538  * Deactivate some number of pages in a map; try to do it fairly, but
539  * that is really hard to do.
540  */
541 static void
542 vm_pageout_map_deactivate_pages(map, desired)
543 	vm_map_t map;
544 	vm_pindex_t desired;
545 {
546 	vm_map_entry_t tmpe;
547 	vm_object_t obj, bigobj;
548 	int nothingwired;
549 
550 	GIANT_REQUIRED;
551 	if (!vm_map_trylock(map))
552 		return;
553 
554 	bigobj = NULL;
555 	nothingwired = TRUE;
556 
557 	/*
558 	 * first, search out the biggest object, and try to free pages from
559 	 * that.
560 	 */
561 	tmpe = map->header.next;
562 	while (tmpe != &map->header) {
563 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
564 			obj = tmpe->object.vm_object;
565 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
566 				((bigobj == NULL) ||
567 				 (bigobj->resident_page_count < obj->resident_page_count))) {
568 				bigobj = obj;
569 			}
570 		}
571 		if (tmpe->wired_count > 0)
572 			nothingwired = FALSE;
573 		tmpe = tmpe->next;
574 	}
575 
576 	if (bigobj)
577 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
578 
579 	/*
580 	 * Next, hunt around for other pages to deactivate.  We actually
581 	 * do this search sort of wrong -- .text first is not the best idea.
582 	 */
583 	tmpe = map->header.next;
584 	while (tmpe != &map->header) {
585 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
586 			break;
587 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
588 			obj = tmpe->object.vm_object;
589 			if (obj)
590 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
591 		}
592 		tmpe = tmpe->next;
593 	}
594 
595 	/*
596 	 * Remove all mappings if a process is swapped out; this will free page
597 	 * table pages.
598 	 */
599 	if (desired == 0 && nothingwired)
600 		pmap_remove(vm_map_pmap(map), vm_map_min(map),
601 		    vm_map_max(map));
602 	vm_map_unlock(map);
603 	return;
604 }
605 #endif		/* !defined(NO_SWAPPING) */
606 
607 /*
608  * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
609  * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
610  * which we know can be trivially freed.
611  */
612 void
613 vm_pageout_page_free(vm_page_t m) {
614 	vm_object_t object = m->object;
615 	int type = object->type;
616 
617 	GIANT_REQUIRED;
618 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
619 		vm_object_reference(object);
620 	vm_page_busy(m);
621 	vm_page_protect(m, VM_PROT_NONE);
622 	vm_page_free(m);
623 	cnt.v_dfree++;
624 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
625 		vm_object_deallocate(object);
626 }
627 
628 /*
629  *	vm_pageout_scan does the dirty work for the pageout daemon.
630  */
631 static void
632 vm_pageout_scan(int pass)
633 {
634 	vm_page_t m, next;
635 	struct vm_page marker;
636 	int save_page_shortage;
637 	int save_inactive_count;
638 	int page_shortage, maxscan, pcount;
639 	int addl_page_shortage, addl_page_shortage_init;
640 	struct proc *p, *bigproc;
641 	vm_offset_t size, bigsize;
642 	vm_object_t object;
643 	int actcount;
644 	int vnodes_skipped = 0;
645 	int maxlaunder;
646 	int s;
647 	struct thread *td;
648 
649 	GIANT_REQUIRED;
650 	/*
651 	 * Do whatever cleanup that the pmap code can.
652 	 */
653 	pmap_collect();
654 	uma_reclaim();
655 
656 	addl_page_shortage_init = vm_pageout_deficit;
657 	vm_pageout_deficit = 0;
658 
659 	/*
660 	 * Calculate the number of pages we want to either free or move
661 	 * to the cache.
662 	 */
663 	page_shortage = vm_paging_target() + addl_page_shortage_init;
664 	save_page_shortage = page_shortage;
665 	save_inactive_count = cnt.v_inactive_count;
666 
667 	/*
668 	 * Initialize our marker
669 	 */
670 	bzero(&marker, sizeof(marker));
671 	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
672 	marker.queue = PQ_INACTIVE;
673 	marker.wire_count = 1;
674 
675 	/*
676 	 * Start scanning the inactive queue for pages we can move to the
677 	 * cache or free.  The scan will stop when the target is reached or
678 	 * we have scanned the entire inactive queue.  Note that m->act_count
679 	 * is not used to form decisions for the inactive queue, only for the
680 	 * active queue.
681 	 *
682 	 * maxlaunder limits the number of dirty pages we flush per scan.
683 	 * For most systems a smaller value (16 or 32) is more robust under
684 	 * extreme memory and disk pressure because any unnecessary writes
685  * to disk can result in extreme performance degradation.  However,
686 	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
687 	 * used) will die horribly with limited laundering.  If the pageout
688 	 * daemon cannot clean enough pages in the first pass, we let it go
689 	 * all out in succeeding passes.
690 	 */
691 	if ((maxlaunder = vm_max_launder) <= 1)
692 		maxlaunder = 1;
693 	if (pass)
694 		maxlaunder = 10000;
695 rescan0:
696 	addl_page_shortage = addl_page_shortage_init;
697 	maxscan = cnt.v_inactive_count;
698 
699 	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
700 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
701 	     m = next) {
702 
703 		cnt.v_pdpages++;
704 
705 		if (m->queue != PQ_INACTIVE) {
706 			goto rescan0;
707 		}
708 
709 		next = TAILQ_NEXT(m, pageq);
710 
711 		/*
712 		 * skip marker pages
713 		 */
714 		if (m->flags & PG_MARKER)
715 			continue;
716 
717 		/*
718 		 * A held page may be undergoing I/O, so skip it.
719 		 */
720 		if (m->hold_count) {
721 			vm_pageq_requeue(m);
722 			addl_page_shortage++;
723 			continue;
724 		}
725 		/*
726 		 * Don't mess with busy pages; keep them at the front of the
727 		 * queue, as they are most likely being paged out.
728 		 */
729 		if (m->busy || (m->flags & PG_BUSY)) {
730 			addl_page_shortage++;
731 			continue;
732 		}
733 
734 		/*
735 		 * If the object is not being used, we ignore previous
736 		 * references.
737 		 */
738 		if (m->object->ref_count == 0) {
739 			vm_page_flag_clear(m, PG_REFERENCED);
740 			pmap_clear_reference(m);
741 
742 		/*
743 		 * Otherwise, if the page has been referenced while in the
744 		 * inactive queue, we bump the "activation count" upwards,
745 		 * making it less likely that the page will be added back to
746 		 * the inactive queue prematurely again.  Here we check the
747 		 * page tables (or emulated bits, if any), since the upper
748 		 * level VM system does not know anything about existing
749 		 * references.
750 		 */
751 		} else if (((m->flags & PG_REFERENCED) == 0) &&
752 			(actcount = pmap_ts_referenced(m))) {
753 			vm_page_lock_queues();
754 			vm_page_activate(m);
755 			vm_page_unlock_queues();
756 			m->act_count += (actcount + ACT_ADVANCE);
757 			continue;
758 		}
759 
760 		/*
761 		 * If the upper level VM system knows about any page
762 		 * references, we activate the page.  We also set the
763 		 * "activation count" higher than normal so that we are less
764 		 * likely to place the page back onto the inactive queue again.
765 		 */
766 		if ((m->flags & PG_REFERENCED) != 0) {
767 			vm_page_flag_clear(m, PG_REFERENCED);
768 			actcount = pmap_ts_referenced(m);
769 			vm_page_lock_queues();
770 			vm_page_activate(m);
771 			vm_page_unlock_queues();
772 			m->act_count += (actcount + ACT_ADVANCE + 1);
773 			continue;
774 		}
775 
776 		/*
777 		 * If the upper level VM system doesn't know anything about
778 		 * the page being dirty, we have to check for it again.  As
779 		 * far as the VM code knows, any partially dirty pages are
780 		 * fully dirty.
781 		 */
782 		if (m->dirty == 0) {
783 			vm_page_test_dirty(m);
784 		} else {
785 			vm_page_dirty(m);
786 		}
787 
788 		/*
789 		 * Invalid pages can be easily freed
790 		 */
791 		if (m->valid == 0) {
792 			vm_page_lock_queues();
793 			vm_pageout_page_free(m);
794 			vm_page_unlock_queues();
795 			--page_shortage;
796 
797 		/*
798 		 * Clean pages can be placed onto the cache queue.  This
799 		 * effectively frees them.
800 		 */
801 		} else if (m->dirty == 0) {
802 			vm_page_lock_queues();
803 			vm_page_cache(m);
804 			vm_page_unlock_queues();
805 			--page_shortage;
806 		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
807 			/*
808 			 * Dirty pages need to be paged out, but flushing
809 			 * a page is extremely expensive verses freeing
810 			 * a page is extremely expensive versus freeing
811 			 * a clean page.  Rather than artificially limiting
812 			 * dirty pages extra priority on the inactive queue
813 			 * by forcing them to be cycled through the queue
814 			 * twice before being flushed, after which the
815 			 * (now clean) page will cycle through once more
816 			 * before being freed.  This significantly extends
817 			 * the thrash point for a heavily loaded machine.
818 			 */
819 			vm_page_flag_set(m, PG_WINATCFLS);
820 			vm_pageq_requeue(m);
821 		} else if (maxlaunder > 0) {
822 			/*
823 			 * We always want to try to flush some dirty pages if
824 			 * we encounter them, to keep the system stable.
825 			 * Normally this number is small, but under extreme
826 			 * pressure where there are insufficient clean pages
827 			 * on the inactive queue, we may have to go all out.
828 			 */
829 			int swap_pageouts_ok;
830 			struct vnode *vp = NULL;
831 			struct mount *mp;
832 
833 			object = m->object;
834 
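			/*
			 * Decide whether a swap-backed pageout is permitted:
			 * non-swap-backed objects are always eligible, while
			 * OBJT_SWAP/OBJT_DEFAULT objects are skipped when swap
			 * pageouts are disabled, or deferred unless free
			 * memory has already fallen to the minimum.
			 */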
835 			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
836 				swap_pageouts_ok = 1;
837 			} else {
838 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
839 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
840 				vm_page_count_min());
841 
842 			}
843 
844 			/*
845 			 * We don't bother paging objects that are "dead".
846 			 * Those objects are in a "rundown" state.
847 			 */
848 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
849 				vm_pageq_requeue(m);
850 				continue;
851 			}
852 
853 			/*
854 			 * The object is already known NOT to be dead.   It
855 			 * is possible for the vget() to block the whole
856 			 * pageout daemon, but the new low-memory handling
857 			 * code should prevent it.
858 			 *
859 			 * The previous code skipped locked vnodes and, worse,
860 			 * reordered pages in the queue.  This results in
861 			 * completely non-deterministic operation and, on a
862 			 * busy system, can lead to extremely non-optimal
863 			 * pageouts.  For example, it can cause clean pages
864 			 * to be freed and dirty pages to be moved to the end
865 			 * of the queue.  Since dirty pages are also moved to
866 			 * the end of the queue once-cleaned, this gives
867 			 * way too large a weighting to deferring the freeing
868 			 * of dirty pages.
869 			 *
870 			 * We can't wait forever for the vnode lock, we might
871 			 * deadlock due to a vn_read() getting stuck in
872 			 * vm_wait while holding this vnode.  We skip the
873 			 * vnode if we can't get it in a reasonable amount
874 			 * of time.
875 			 */
876 			if (object->type == OBJT_VNODE) {
877 				vp = object->handle;
878 
879 				mp = NULL;
880 				if (vp->v_type == VREG)
881 					vn_start_write(vp, &mp, V_NOWAIT);
882 				if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) {
883 					++pageout_lock_miss;
884 					vn_finished_write(mp);
885 					if (object->flags & OBJ_MIGHTBEDIRTY)
886 						vnodes_skipped++;
887 					continue;
888 				}
889 
890 				/*
891 				 * The page might have been moved to another
892 				 * queue during potential blocking in vget()
893 				 * above.  The page might have been freed and
894 				 * reused for another vnode.  The object might
895 				 * have been reused for another vnode.
896 				 */
897 				if (m->queue != PQ_INACTIVE ||
898 				    m->object != object ||
899 				    object->handle != vp) {
900 					if (object->flags & OBJ_MIGHTBEDIRTY)
901 						vnodes_skipped++;
902 					vput(vp);
903 					vn_finished_write(mp);
904 					continue;
905 				}
906 
907 				/*
908 				 * The page may have been busied during the
909 				 * blocking in vget() above.  We don't move the
910 				 * page back onto the end of the queue; the
911 				 * statistics are more correct if we don't.
912 				 */
913 				if (m->busy || (m->flags & PG_BUSY)) {
914 					vput(vp);
915 					vn_finished_write(mp);
916 					continue;
917 				}
918 
919 				/*
920 				 * If the page has become held it might
921 				 * be undergoing I/O, so skip it
922 				 */
923 				if (m->hold_count) {
924 					vm_pageq_requeue(m);
925 					if (object->flags & OBJ_MIGHTBEDIRTY)
926 						vnodes_skipped++;
927 					vput(vp);
928 					vn_finished_write(mp);
929 					continue;
930 				}
931 			}
932 
933 			/*
934 			 * If a page is dirty, then it is either being washed
935 			 * (but not yet cleaned) or it is still in the
936 			 * laundry.  If it is still in the laundry, then we
937 			 * start the cleaning operation.
938 			 *
939 			 * This operation may cluster, invalidating the 'next'
940 			 * pointer.  To prevent an inordinate number of
941 			 * restarts we use our marker to remember our place.
942 			 *
943 			 * decrement page_shortage on success to account for
944 			 * the (future) cleaned page.  Otherwise we could wind
945 			 * up laundering or cleaning too many pages.
946 			 */
947 			vm_page_lock_queues();
948 			s = splvm();
949 			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
950 			splx(s);
951 			if (vm_pageout_clean(m) != 0) {
952 				--page_shortage;
953 				--maxlaunder;
954 			}
955 			s = splvm();
956 			next = TAILQ_NEXT(&marker, pageq);
957 			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
958 			splx(s);
959 			vm_page_unlock_queues();
960 			if (vp) {
961 				vput(vp);
962 				vn_finished_write(mp);
963 			}
964 		}
965 	}
966 
967 	/*
968 	 * Compute the number of pages we want to try to move from the
969 	 * active queue to the inactive queue.
970 	 */
971 	page_shortage = vm_paging_target() +
972 		cnt.v_inactive_target - cnt.v_inactive_count;
973 	page_shortage += addl_page_shortage;
974 
975 	vm_page_lock_queues();
976 	/*
977 	 * Scan the active queue for things we can deactivate. We nominally
978 	 * track the per-page activity counter and use it to locate
979 	 * deactivation candidates.
980 	 */
981 	pcount = cnt.v_active_count;
982 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
983 
984 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
985 
986 		/*
987 		 * This is a consistency check, and should likely be a panic
988 		 * or warning.
989 		 */
990 		if (m->queue != PQ_ACTIVE) {
991 			break;
992 		}
993 
994 		next = TAILQ_NEXT(m, pageq);
995 		/*
996 		 * Don't deactivate pages that are busy.
997 		 */
998 		if ((m->busy != 0) ||
999 		    (m->flags & PG_BUSY) ||
1000 		    (m->hold_count != 0)) {
1001 			vm_pageq_requeue(m);
1002 			m = next;
1003 			continue;
1004 		}
1005 
1006 		/*
1007 		 * The count for pagedaemon pages is done after checking the
1008 		 * page for eligibility...
1009 		 */
1010 		cnt.v_pdpages++;
1011 
1012 		/*
1013 		 * Check to see "how much" the page has been used.
1014 		 */
1015 		actcount = 0;
1016 		if (m->object->ref_count != 0) {
1017 			if (m->flags & PG_REFERENCED) {
1018 				actcount += 1;
1019 			}
1020 			actcount += pmap_ts_referenced(m);
1021 			if (actcount) {
1022 				m->act_count += ACT_ADVANCE + actcount;
1023 				if (m->act_count > ACT_MAX)
1024 					m->act_count = ACT_MAX;
1025 			}
1026 		}
1027 
1028 		/*
1029 		 * Since we have "tested" this bit, we need to clear it now.
1030 		 */
1031 		vm_page_flag_clear(m, PG_REFERENCED);
1032 
1033 		/*
1034 		 * We use the page activation count stats only if the object
1035 		 * is currently being used.
1036 		 */
1037 		if (actcount && (m->object->ref_count != 0)) {
1038 			vm_pageq_requeue(m);
1039 		} else {
1040 			m->act_count -= min(m->act_count, ACT_DECLINE);
1041 			if (vm_pageout_algorithm ||
1042 			    m->object->ref_count == 0 ||
1043 			    m->act_count == 0) {
1044 				page_shortage--;
1045 				if (m->object->ref_count == 0) {
1046 					vm_page_protect(m, VM_PROT_NONE);
1047 					if (m->dirty == 0)
1048 						vm_page_cache(m);
1049 					else
1050 						vm_page_deactivate(m);
1051 				} else {
1052 					vm_page_deactivate(m);
1053 				}
1054 			} else {
1055 				vm_pageq_requeue(m);
1056 			}
1057 		}
1058 		m = next;
1059 	}
1060 	s = splvm();
1061 
1062 	/*
1063 	 * We try to maintain some *really* free pages; this allows interrupt
1064 	 * code to be guaranteed space.  Since both cache and free queues
1065 	 * are considered basically 'free', moving pages from cache to free
1066 	 * does not affect other calculations.
1067 	 */
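	/*
	 * cache_rover steps through the PQ_CACHE color sub-queues by
	 * PQ_PRIME2, presumably so that frees are spread across page
	 * colors rather than always draining a single sub-queue.
	 */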
1068 	while (cnt.v_free_count < cnt.v_free_reserved) {
1069 		static int cache_rover = 0;
1070 		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
1071 		if (!m)
1072 			break;
1073 		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
1074 		    m->busy ||
1075 		    m->hold_count ||
1076 		    m->wire_count) {
1077 #ifdef INVARIANTS
1078 			printf("Warning: busy page %p found in cache\n", m);
1079 #endif
1080 			vm_page_deactivate(m);
1081 			continue;
1082 		}
1083 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
1084 		vm_pageout_page_free(m);
1085 	}
1086 	splx(s);
1087 	vm_page_unlock_queues();
1088 #if !defined(NO_SWAPPING)
1089 	/*
1090 	 * Idle process swapout -- run once per second.
1091 	 */
1092 	if (vm_swap_idle_enabled) {
1093 		static long lsec;
1094 		if (time_second != lsec) {
1095 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
1096 			vm_req_vmdaemon();
1097 			lsec = time_second;
1098 		}
1099 	}
1100 #endif
1101 
1102 	/*
1103 	 * If we didn't get enough free pages and we have skipped a vnode
1104 	 * in a writeable object, wake up the sync daemon.  Also kick off
1105 	 * swapout if we did not get enough free pages.
1106 	 */
1107 	if (vm_paging_target() > 0) {
1108 		if (vnodes_skipped && vm_page_count_min())
1109 			(void) speedup_syncer();
1110 #if !defined(NO_SWAPPING)
1111 		if (vm_swap_enabled && vm_page_count_target()) {
1112 			vm_req_vmdaemon();
1113 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1114 		}
1115 #endif
1116 	}
1117 
1118 	/*
1119 	 * If we are out of swap and were not able to reach our paging
1120 	 * target, kill the largest process.
1121 	 *
1122 	 * We keep the process bigproc locked once we find it to keep anyone
1123 	 * from messing with it; however, there is a possibility of
1124 	 * deadlock if process B is bigproc and one of its child processes
1125 	 * attempts to propagate a signal to B while we are waiting for A's
1126 	 * lock while walking this list.  To avoid this, we don't block on
1127 	 * the process lock but just skip a process if it is already locked.
1128 	 */
1129 	if ((vm_swap_size < 64 && vm_page_count_min()) ||
1130 	    (swap_pager_full && vm_paging_target() > 0)) {
1131 #if 0
1132 	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
1133 #endif
1134 		bigproc = NULL;
1135 		bigsize = 0;
1136 		sx_slock(&allproc_lock);
1137 		FOREACH_PROC_IN_SYSTEM(p) {
1138 			int breakout;
1139 			/*
1140 			 * If this process is already locked, skip it.
1141 			 */
1142 			if (PROC_TRYLOCK(p) == 0)
1143 				continue;
1144 			/*
1145 			 * if this is a system process, skip it
1146 			 */
1147 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
1148 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
1149 				PROC_UNLOCK(p);
1150 				continue;
1151 			}
1152 			/*
1153 			 * if the process is in a non-running type state,
1154 			 * don't touch it. Check all the threads individually.
1155 			 */
1156 			mtx_lock_spin(&sched_lock);
1157 			breakout = 0;
1158 			FOREACH_THREAD_IN_PROC(p, td) {
1159 				if (!TD_ON_RUNQ(td) &&
1160 				    !TD_IS_RUNNING(td) &&
1161 				    !TD_IS_SLEEPING(td)) {
1162 					breakout = 1;
1163 					break;
1164 				}
1165 			}
1166 			if (breakout) {
1167 				mtx_unlock_spin(&sched_lock);
1168 				PROC_UNLOCK(p);
1169 				continue;
1170 			}
1171 			mtx_unlock_spin(&sched_lock);
1172 			/*
1173 			 * get the process size
1174 			 */
1175 			size = vmspace_resident_count(p->p_vmspace) +
1176 				vmspace_swap_count(p->p_vmspace);
1177 			/*
1178 			 * if this process is bigger than the biggest one so far,
1179 			 * remember it.
1180 			 */
1181 			if (size > bigsize) {
1182 				if (bigproc != NULL)
1183 					PROC_UNLOCK(bigproc);
1184 				bigproc = p;
1185 				bigsize = size;
1186 			} else
1187 				PROC_UNLOCK(p);
1188 		}
1189 		sx_sunlock(&allproc_lock);
1190 		if (bigproc != NULL) {
1191 			struct ksegrp *kg;
1192 			killproc(bigproc, "out of swap space");
1193 			mtx_lock_spin(&sched_lock);
1194 			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
1195 				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
1196 			}
1197 			mtx_unlock_spin(&sched_lock);
1198 			PROC_UNLOCK(bigproc);
1199 			wakeup(&cnt.v_free_count);
1200 		}
1201 	}
1202 }
1203 
1204 /*
1205  * This routine tries to maintain the pseudo LRU active queue,
1206  * so that during long periods of time when there is no paging,
1207  * some statistics accumulation still occurs.  This code
1208  * helps the situation where paging just starts to occur.
1209  */
1210 static void
1211 vm_pageout_page_stats()
1212 {
1213 	vm_page_t m,next;
1214 	int pcount,tpcount;		/* Number of pages to check */
1215 	static int fullintervalcount = 0;
1216 	int page_shortage;
1217 	int s0;
1218 
1219 	page_shortage =
1220 	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1221 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1222 
1223 	if (page_shortage <= 0)
1224 		return;
1225 
1226 	s0 = splvm();
1227 	vm_page_lock_queues();
1228 	pcount = cnt.v_active_count;
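	/*
	 * Most invocations perform only a partial scan: pcount is scaled
	 * down in proportion to vm_pageout_stats_max over the total page
	 * count.  Once the accumulated partial intervals reach
	 * vm_pageout_full_stats_interval, the whole active queue is scanned.
	 */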
1229 	fullintervalcount += vm_pageout_stats_interval;
1230 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1231 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1232 		if (pcount > tpcount)
1233 			pcount = tpcount;
1234 	} else {
1235 		fullintervalcount = 0;
1236 	}
1237 
1238 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1239 	while ((m != NULL) && (pcount-- > 0)) {
1240 		int actcount;
1241 
1242 		if (m->queue != PQ_ACTIVE) {
1243 			break;
1244 		}
1245 
1246 		next = TAILQ_NEXT(m, pageq);
1247 		/*
1248 		 * Don't deactivate pages that are busy.
1249 		 */
1250 		if ((m->busy != 0) ||
1251 		    (m->flags & PG_BUSY) ||
1252 		    (m->hold_count != 0)) {
1253 			vm_pageq_requeue(m);
1254 			m = next;
1255 			continue;
1256 		}
1257 
1258 		actcount = 0;
1259 		if (m->flags & PG_REFERENCED) {
1260 			vm_page_flag_clear(m, PG_REFERENCED);
1261 			actcount += 1;
1262 		}
1263 
1264 		actcount += pmap_ts_referenced(m);
1265 		if (actcount) {
1266 			m->act_count += ACT_ADVANCE + actcount;
1267 			if (m->act_count > ACT_MAX)
1268 				m->act_count = ACT_MAX;
1269 			vm_pageq_requeue(m);
1270 		} else {
1271 			if (m->act_count == 0) {
1272 				/*
1273 				 * We turn off page access, so that we have
1274 				 * more accurate RSS stats.  We don't do this
1275 				 * in the normal page deactivation when the
1276 				 * system is loaded VM wise, because the
1277 				 * cost of the large number of page protect
1278 				 * operations would be higher than the value
1279 				 * of doing the operation.
1280 				 */
1281 				vm_page_protect(m, VM_PROT_NONE);
1282 				vm_page_deactivate(m);
1283 			} else {
1284 				m->act_count -= min(m->act_count, ACT_DECLINE);
1285 				vm_pageq_requeue(m);
1286 			}
1287 		}
1288 
1289 		m = next;
1290 	}
1291 	vm_page_unlock_queues();
1292 	splx(s0);
1293 }
1294 
1295 static int
1296 vm_pageout_free_page_calc(count)
1297 vm_size_t count;
1298 {
1299 	if (count < cnt.v_page_count)
1300 		 return 0;
1301 	/*
1302 	 * free_reserved needs to include enough for the largest swap pager
1303 	 * structures plus enough for any pv_entry structs when paging.
1304 	 */
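	/*
	 * A hypothetical example, assuming 4 KB pages: with 65536 managed
	 * pages (256 MB), v_free_min starts at 4 + (65536 - 1024) / 200 =
	 * 326 pages before v_free_reserved is added to it below.
	 */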
1305 	if (cnt.v_page_count > 1024)
1306 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1307 	else
1308 		cnt.v_free_min = 4;
1309 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1310 		cnt.v_interrupt_free_min;
1311 	cnt.v_free_reserved = vm_pageout_page_count +
1312 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1313 	cnt.v_free_severe = cnt.v_free_min / 2;
1314 	cnt.v_free_min += cnt.v_free_reserved;
1315 	cnt.v_free_severe += cnt.v_free_reserved;
1316 	return 1;
1317 }
1318 
1319 /*
1320  *	vm_pageout is the high level pageout daemon.
1321  */
1322 static void
1323 vm_pageout()
1324 {
1325 	int pass;
1326 
1327 	mtx_lock(&Giant);
1328 
1329 	/*
1330 	 * Initialize some paging parameters.
1331 	 */
1332 	cnt.v_interrupt_free_min = 2;
1333 	if (cnt.v_page_count < 2000)
1334 		vm_pageout_page_count = 8;
1335 
1336 	vm_pageout_free_page_calc(cnt.v_page_count);
1337 	/*
1338 	 * v_free_target and v_cache_min control pageout hysteresis.  Note
1339 	 * that these are more a measure of the VM cache queue hysteresis
1340 	 * than the VM free queue.  Specifically, v_free_target is the
1341 	 * high water mark (free+cache pages).
1342 	 *
1343 	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1344 	 * low water mark, while v_free_min is the stop.  v_cache_min must
1345 	 * be big enough to handle memory needs while the pageout daemon
1346 	 * is signalled and run to free more pages.
1347 	 */
1348 	if (cnt.v_free_count > 6144)
1349 		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1350 	else
1351 		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
1352 
1353 	if (cnt.v_free_count > 2048) {
1354 		cnt.v_cache_min = cnt.v_free_target;
1355 		cnt.v_cache_max = 2 * cnt.v_cache_min;
1356 		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
1357 	} else {
1358 		cnt.v_cache_min = 0;
1359 		cnt.v_cache_max = 0;
1360 		cnt.v_inactive_target = cnt.v_free_count / 4;
1361 	}
1362 	if (cnt.v_inactive_target > cnt.v_free_count / 3)
1363 		cnt.v_inactive_target = cnt.v_free_count / 3;
1364 
1365 	/* XXX does not really belong here */
1366 	if (vm_page_max_wired == 0)
1367 		vm_page_max_wired = cnt.v_free_count / 3;
1368 
1369 	if (vm_pageout_stats_max == 0)
1370 		vm_pageout_stats_max = cnt.v_free_target;
1371 
1372 	/*
1373 	 * Set interval in seconds for stats scan.
1374 	 */
1375 	if (vm_pageout_stats_interval == 0)
1376 		vm_pageout_stats_interval = 5;
1377 	if (vm_pageout_full_stats_interval == 0)
1378 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1379 
1380 	/*
1381 	 * Set maximum free per pass
1382 	 */
1383 	if (vm_pageout_stats_free_max == 0)
1384 		vm_pageout_stats_free_max = 5;
1385 
1386 	swap_pager_swap_init();
1387 	pass = 0;
1388 	/*
1389 	 * The pageout daemon is never done, so loop forever.
1390 	 */
1391 	while (TRUE) {
1392 		int error;
1393 		int s = splvm();
1394 
1395 		/*
1396 		 * If we have enough free memory, wakeup waiters.  Do
1397 		 * not clear vm_pages_needed until we reach our target,
1398 		 * otherwise we may be woken up over and over again and
1399 		 * waste a lot of cpu.
1400 		 */
1401 		if (vm_pages_needed && !vm_page_count_min()) {
1402 			if (vm_paging_needed() <= 0)
1403 				vm_pages_needed = 0;
1404 			wakeup(&cnt.v_free_count);
1405 		}
1406 		if (vm_pages_needed) {
1407 			/*
1408 			 * Still not done; take a second pass without waiting
1409 			 * (unlimited dirty cleaning), otherwise sleep a bit
1410 			 * and try again.
1411 			 */
1412 			++pass;
1413 			if (pass > 1)
1414 				tsleep(&vm_pages_needed, PVM,
1415 				       "psleep", hz/2);
1416 		} else {
1417 			/*
1418 			 * Good enough, sleep & handle stats.  Prime the pass
1419 			 * for the next run.
1420 			 */
1421 			if (pass > 1)
1422 				pass = 1;
1423 			else
1424 				pass = 0;
1425 			error = tsleep(&vm_pages_needed, PVM,
1426 				    "psleep", vm_pageout_stats_interval * hz);
1427 			if (error && !vm_pages_needed) {
1428 				splx(s);
1429 				pass = 0;
1430 				vm_pageout_page_stats();
1431 				continue;
1432 			}
1433 		}
1434 
1435 		if (vm_pages_needed)
1436 			cnt.v_pdwakeups++;
1437 		splx(s);
1438 		vm_pageout_scan(pass);
1439 		vm_pageout_deficit = 0;
1440 	}
1441 }
1442 
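/*
 * Wake the pageout daemon.  Callers other than the daemon itself use this
 * when they notice a shortage of free pages; the vm_pages_needed check
 * keeps repeated callers from issuing redundant wakeups.
 */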
1443 void
1444 pagedaemon_wakeup()
1445 {
1446 	if (!vm_pages_needed && curthread->td_proc != pageproc) {
1447 		vm_pages_needed++;
1448 		wakeup(&vm_pages_needed);
1449 	}
1450 }
1451 
1452 #if !defined(NO_SWAPPING)
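/*
 * Request that vm_daemon run.  Wakeups are rate-limited to roughly one per
 * second; the (ticks < lastrun) test also handles ticks wrapping around.
 */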
1453 static void
1454 vm_req_vmdaemon()
1455 {
1456 	static int lastrun = 0;
1457 
1458 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
1459 		wakeup(&vm_daemon_needed);
1460 		lastrun = ticks;
1461 	}
1462 }
1463 
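/*
 * vm_daemon handles deferred whole-process swapouts requested via
 * vm_pageout_req_swapout and enforces per-process RSS limits by
 * deactivating pages in the maps of processes that exceed them.
 */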
1464 static void
1465 vm_daemon()
1466 {
1467 	struct proc *p;
1468 	int breakout;
1469 	struct thread *td;
1470 
1471 	mtx_lock(&Giant);
1472 	while (TRUE) {
1473 		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
1474 		if (vm_pageout_req_swapout) {
1475 			swapout_procs(vm_pageout_req_swapout);
1476 			vm_pageout_req_swapout = 0;
1477 		}
1478 		/*
1479 		 * scan the processes for those exceeding their rlimits or
1480 		 * that are swapped out -- deactivate pages
1481 		 */
1482 		sx_slock(&allproc_lock);
1483 		LIST_FOREACH(p, &allproc, p_list) {
1484 			vm_pindex_t limit, size;
1485 
1486 			/*
1487 			 * if this is a system process or if we have already
1488 			 * looked at this process, skip it.
1489 			 */
1490 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
1491 				continue;
1492 			}
1493 			/*
1494 			 * if the process is in a non-running type state,
1495 			 * don't touch it.
1496 			 */
1497 			mtx_lock_spin(&sched_lock);
1498 			breakout = 0;
1499 			FOREACH_THREAD_IN_PROC(p, td) {
1500 				if (!TD_ON_RUNQ(td) &&
1501 				    !TD_IS_RUNNING(td) &&
1502 				    !TD_IS_SLEEPING(td)) {
1503 					breakout = 1;
1504 					break;
1505 				}
1506 			}
1507 			if (breakout) {
1508 				mtx_unlock_spin(&sched_lock);
1509 				continue;
1510 			}
1511 			/*
1512 			 * get a limit
1513 			 */
1514 			limit = OFF_TO_IDX(
1515 			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
1516 				p->p_rlimit[RLIMIT_RSS].rlim_max));
1517 
1518 			/*
1519 			 * let processes that are swapped out really be
1520 			 * swapped out: set the limit to nothing (this will
1521 			 * force a swap-out.)
1522 			 */
1523 			if ((p->p_sflag & PS_INMEM) == 0)
1524 				limit = 0;	/* XXX */
1525 			mtx_unlock_spin(&sched_lock);
1526 
1527 			size = vmspace_resident_count(p->p_vmspace);
1528 			if (limit >= 0 && size >= limit) {
1529 				vm_pageout_map_deactivate_pages(
1530 				    &p->p_vmspace->vm_map, limit);
1531 			}
1532 		}
1533 		sx_sunlock(&allproc_lock);
1534 	}
1535 }
1536 #endif			/* !defined(NO_SWAPPING) */
1537