xref: /freebsd/sys/vm/vm_pageout.c (revision 0de89efe5c443f213c7ea28773ef2dc6cf3af2ed)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
41  *
42  *
43  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
44  * All rights reserved.
45  *
46  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
47  *
48  * Permission to use, copy, modify and distribute this software and
49  * its documentation is hereby granted, provided that both the copyright
50  * notice and this permission notice appear in all copies of the
51  * software, derivative works or modified versions, and any portions
52  * thereof, and that both notices appear in supporting documentation.
53  *
54  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57  *
58  * Carnegie Mellon requests users of this software to return to
59  *
60  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
61  *  School of Computer Science
62  *  Carnegie Mellon University
63  *  Pittsburgh PA 15213-3890
64  *
65  * any improvements or extensions that they make and grant Carnegie the
66  * rights to redistribute these changes.
67  *
68  * $Id: vm_pageout.c,v 1.97 1997/07/27 04:49:19 dyson Exp $
69  */
70 
71 /*
72  *	The proverbial page-out daemon.
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
78 #include <sys/proc.h>
79 #include <sys/resourcevar.h>
80 #include <sys/signalvar.h>
81 #include <sys/vnode.h>
82 #include <sys/vmmeter.h>
83 #include <sys/sysctl.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/vm_prot.h>
88 #include <sys/lock.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_pager.h>
94 #include <vm/swap_pager.h>
95 #include <vm/vm_extern.h>
96 
97 /*
98  * System initialization
99  */
100 
101 /* the kernel process "vm_pageout"*/
102 static void vm_pageout __P((void));
103 static int vm_pageout_clean __P((vm_page_t, int));
104 static int vm_pageout_scan __P((void));
105 static int vm_pageout_free_page_calc __P((vm_size_t count));
106 struct proc *pageproc;
107 
108 static struct kproc_desc page_kp = {
109 	"pagedaemon",
110 	vm_pageout,
111 	&pageproc
112 };
113 SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
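/*
 * SYSINIT_KT registers the descriptor above so that kproc_start() spawns
 * the "pagedaemon" kernel process during boot (at SI_SUB_KTHREAD_PAGE).
 */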
114 
115 #if !defined(NO_SWAPPING)
116 /* the kernel process "vm_daemon"*/
117 static void vm_daemon __P((void));
118 static struct	proc *vmproc;
119 
120 static struct kproc_desc vm_kp = {
121 	"vmdaemon",
122 	vm_daemon,
123 	&vmproc
124 };
125 SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
126 #endif
127 
128 
129 int vm_pages_needed;		/* Event on which pageout daemon sleeps */
130 
131 int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
132 
133 extern int npendingio;
134 #if !defined(NO_SWAPPING)
135 static int vm_pageout_req_swapout;	/* XXX */
136 static int vm_daemon_needed;
137 #endif
138 extern int nswiodone;
139 extern int vm_swap_size;
140 extern int vfs_update_wakeup;
141 int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
142 int vm_pageout_full_stats_interval = 0;
143 int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
144 #if defined(NO_SWAPPING)
145 int vm_swapping_enabled=0;
146 #else
147 int vm_swapping_enabled=1;
148 #endif
149 
150 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
151 	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");
152 
153 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
154 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");
155 
156 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
157 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");
158 
159 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
160 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");
161 
162 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
163 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");
164 
165 #if defined(NO_SWAPPING)
166 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
167 	CTLFLAG_RD, &vm_swapping_enabled, 0, "");
168 #else
169 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
170 	CTLFLAG_RW, &vm_swapping_enabled, 0, "");
171 #endif
172 
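/*
 * MAXLAUNDER bounds how many dirty pages one vm_pageout_scan() pass may
 * push to the pager: 32 on machines with more than 1800 pages (roughly
 * 7MB, assuming 4K pages), 16 on smaller ones.
 */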
173 #define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)
174 
175 #define VM_PAGEOUT_PAGE_COUNT 16
176 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
177 
178 int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
179 
180 #if !defined(NO_SWAPPING)
181 typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
182 static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
183 static freeer_fcn_t vm_pageout_object_deactivate_pages;
184 static void vm_req_vmdaemon __P((void));
185 #endif
186 static void vm_pageout_page_stats(void);
187 
188 /*
189  * vm_pageout_clean:
190  *
191  * Clean the page and remove it from the laundry.
192  *
193  * We set the busy bit to cause potential page faults on this page to
194  * block.
195  *
196  * And we set pageout-in-progress to keep the object from disappearing
197  * during pageout.  This guarantees that the page won't move from the
198  * inactive queue.  (However, any other page on the inactive queue may
199  * move!)
200  */
201 static int
202 vm_pageout_clean(m, sync)
203 	vm_page_t m;
204 	int sync;
205 {
206 	register vm_object_t object;
207 	vm_page_t mc[2*vm_pageout_page_count];
208 	int pageout_count;
209 	int i, forward_okay, backward_okay, page_base;
210 	vm_pindex_t pindex = m->pindex;
211 
212 	object = m->object;
213 
214 	/*
215 	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
216 	 * Try to avoid the deadlock.
217 	 */
218 	if ((sync != VM_PAGEOUT_FORCE) &&
219 	    (object->type == OBJT_DEFAULT) &&
220 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
221 		return 0;
222 
223 	/*
224 	 * Don't mess with the page if it's busy.
225 	 */
226 	if ((!sync && m->hold_count != 0) ||
227 	    ((m->busy != 0) || (m->flags & PG_BUSY)))
228 		return 0;
229 
230 	/*
231 	 * Try collapsing before it's too late.
232 	 */
233 	if (!sync && object->backing_object) {
234 		vm_object_collapse(object);
235 	}
236 
237 	mc[vm_pageout_page_count] = m;
238 	pageout_count = 1;
239 	page_base = vm_pageout_page_count;
240 	forward_okay = TRUE;
241 	if (pindex != 0)
242 		backward_okay = TRUE;
243 	else
244 		backward_okay = FALSE;
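	/*
	 * Cluster layout: mc[] holds 2 * vm_pageout_page_count slots with
	 * the target page at the midpoint.  The forward scan below fills
	 * slots above the midpoint and the backward scan fills slots below
	 * it, so mc[page_base .. page_base + pageout_count - 1] ends up as
	 * one run that is contiguous in object offset order.
	 */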
245 	/*
246 	 * Scan object for clusterable pages.
247 	 *
248 	 * We can cluster ONLY if the page is NOT
249 	 * clean, wired, busy, held, or mapped into a
250 	 * buffer, and one of the following:
251 	 * 1) The page is inactive, or a seldom used
252 	 *    active page.
253 	 * -or-
254 	 * 2) we force the issue.
255 	 */
256 	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
257 		vm_page_t p;
258 
259 		/*
260 		 * See if forward page is clusterable.
261 		 */
262 		if (forward_okay) {
263 			/*
264 			 * Stop forward scan at end of object.
265 			 */
266 			if ((pindex + i) > object->size) {
267 				forward_okay = FALSE;
268 				goto do_backward;
269 			}
270 			p = vm_page_lookup(object, pindex + i);
271 			if (p) {
272 				if (((p->queue - p->pc) == PQ_CACHE) ||
273 					(p->flags & PG_BUSY) || p->busy) {
274 					forward_okay = FALSE;
275 					goto do_backward;
276 				}
277 				vm_page_test_dirty(p);
278 				if ((p->dirty & p->valid) != 0 &&
279 				    ((p->queue == PQ_INACTIVE) ||
280 				     (sync == VM_PAGEOUT_FORCE)) &&
281 				    (p->wire_count == 0) &&
282 				    (p->hold_count == 0)) {
283 					mc[vm_pageout_page_count + i] = p;
284 					pageout_count++;
285 					if (pageout_count == vm_pageout_page_count)
286 						break;
287 				} else {
288 					forward_okay = FALSE;
289 				}
290 			} else {
291 				forward_okay = FALSE;
292 			}
293 		}
294 do_backward:
295 		/*
296 		 * See if backward page is clusterable.
297 		 */
298 		if (backward_okay) {
299 			/*
300 			 * Stop backward scan at beginning of object.
301 			 */
302 			if ((pindex - i) == 0) {
303 				backward_okay = FALSE;
304 			}
305 			p = vm_page_lookup(object, pindex - i);
306 			if (p) {
307 				if (((p->queue - p->pc) == PQ_CACHE) ||
308 					(p->flags & PG_BUSY) || p->busy) {
309 					backward_okay = FALSE;
310 					continue;
311 				}
312 				vm_page_test_dirty(p);
313 				if ((p->dirty & p->valid) != 0 &&
314 				    ((p->queue == PQ_INACTIVE) ||
315 				     (sync == VM_PAGEOUT_FORCE)) &&
316 				    (p->wire_count == 0) &&
317 				    (p->hold_count == 0)) {
318 					mc[vm_pageout_page_count - i] = p;
319 					pageout_count++;
320 					page_base--;
321 					if (pageout_count == vm_pageout_page_count)
322 						break;
323 				} else {
324 					backward_okay = FALSE;
325 				}
326 			} else {
327 				backward_okay = FALSE;
328 			}
329 		}
330 	}
331 
332 	/*
333 	 * we allow reads during pageouts...
334 	 */
335 	for (i = page_base; i < (page_base + pageout_count); i++) {
336 		mc[i]->flags |= PG_BUSY;
337 		vm_page_protect(mc[i], VM_PROT_READ);
338 	}
339 
340 	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
341 }
342 
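/*
 * vm_pageout_flush() hands the passed run of busied pages to the object's
 * pager and returns the number of pages the pager accepted (VM_PAGER_OK or
 * VM_PAGER_PEND).  Pages whose pageout completes synchronously are unbusied
 * here; pending pages stay busy until the pager finishes with them.
 */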
343 int
344 vm_pageout_flush(mc, count, sync)
345 	vm_page_t *mc;
346 	int count;
347 	int sync;
348 {
349 	register vm_object_t object;
350 	int pageout_status[count];
351 	int anyok = 0;
352 	int i;
353 
354 	object = mc[0]->object;
355 	object->paging_in_progress += count;
356 
357 	vm_pager_put_pages(object, mc, count,
358 	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
359 	    pageout_status);
360 
361 	for (i = 0; i < count; i++) {
362 		vm_page_t mt = mc[i];
363 
364 		switch (pageout_status[i]) {
365 		case VM_PAGER_OK:
366 			++anyok;
367 			break;
368 		case VM_PAGER_PEND:
369 			++anyok;
370 			break;
371 		case VM_PAGER_BAD:
372 			/*
373 			 * Page is outside the object's range.  Right now we
374 			 * essentially lose the changes by pretending it
375 			 * worked.
376 			 */
377 			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
378 			mt->dirty = 0;
379 			break;
380 		case VM_PAGER_ERROR:
381 		case VM_PAGER_FAIL:
382 			/*
383 			 * If page couldn't be paged out, then reactivate the
384 			 * page so it doesn't clog the inactive list.  (We
385 			 * will try paging it out again later.)
386 			 */
387 			if (mt->queue == PQ_INACTIVE)
388 				vm_page_activate(mt);
389 			break;
390 		case VM_PAGER_AGAIN:
391 			break;
392 		}
393 
394 
395 		/*
396 		 * If the operation is still going, leave the page busy to
397 		 * block all other accesses. Also, leave the paging in
398 		 * progress indicator set so that we don't attempt an object
399 		 * collapse.
400 		 */
401 		if (pageout_status[i] != VM_PAGER_PEND) {
402 			vm_object_pip_wakeup(object);
403 			PAGE_WAKEUP(mt);
404 		}
405 	}
406 	return anyok;
407 }
408 
409 #if !defined(NO_SWAPPING)
410 /*
411  *	vm_pageout_object_deactivate_pages
412  *
413  *	deactivate enough pages to satisfy the inactive target
414  *	requirements or, if vm_page_proc_limit is set,
415  *	deactivate all of the pages in the object and its
416  *	backing_objects.
417  *
418  *	The object and map must be locked.
419  */
420 static void
421 vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
422 	vm_map_t map;
423 	vm_object_t object;
424 	vm_pindex_t desired;
425 	int map_remove_only;
426 {
427 	register vm_page_t p, next;
428 	int rcount;
429 	int remove_mode;
430 	int s;
431 
432 	if (object->type == OBJT_DEVICE)
433 		return;
434 
435 	while (object) {
436 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
437 			return;
438 		if (object->paging_in_progress)
439 			return;
440 
441 		remove_mode = map_remove_only;
442 		if (object->shadow_count > 1)
443 			remove_mode = 1;
444 	/*
445 	 * scan the object's entire memory queue
446 	 */
447 		rcount = object->resident_page_count;
448 		p = TAILQ_FIRST(&object->memq);
449 		while (p && (rcount-- > 0)) {
450 			int refcount;
451 			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
452 				return;
453 			next = TAILQ_NEXT(p, listq);
454 			cnt.v_pdpages++;
455 			if (p->wire_count != 0 ||
456 			    p->hold_count != 0 ||
457 			    p->busy != 0 ||
458 			    (p->flags & PG_BUSY) ||
459 			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
460 				p = next;
461 				continue;
462 			}
463 
464 			refcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
465 			if (refcount) {
466 				p->flags |= PG_REFERENCED;
467 			} else if (p->flags & PG_REFERENCED) {
468 				refcount = 1;
469 			}
470 
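			/*
			 * Page aging: a referenced page is (re)activated and
			 * credited with the reference count; an unreferenced
			 * active page loses ACT_DECLINE and, unless
			 * remove_mode defers it, is deactivated once its
			 * act_count reaches zero (or immediately under the
			 * pure-LRU algorithm).  Requeued pages go to the
			 * tail of the active queue.
			 */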
471 			if ((p->queue != PQ_ACTIVE) &&
472 				(p->flags & PG_REFERENCED)) {
473 				vm_page_activate(p);
474 				p->act_count += refcount;
475 				p->flags &= ~PG_REFERENCED;
476 			} else if (p->queue == PQ_ACTIVE) {
477 				if ((p->flags & PG_REFERENCED) == 0) {
478 					p->act_count -= min(p->act_count, ACT_DECLINE);
479 					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
480 						vm_page_protect(p, VM_PROT_NONE);
481 						vm_page_deactivate(p);
482 					} else {
483 						s = splvm();
484 						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
485 						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
486 						splx(s);
487 					}
488 				} else {
489 					p->flags &= ~PG_REFERENCED;
490 					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
491 						p->act_count += ACT_ADVANCE;
492 					s = splvm();
493 					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
494 					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
495 					splx(s);
496 				}
497 			} else if (p->queue == PQ_INACTIVE) {
498 				vm_page_protect(p, VM_PROT_NONE);
499 			}
500 			p = next;
501 		}
502 		object = object->backing_object;
503 	}
504 	return;
505 }
506 
507 /*
508  * Deactivate some number of pages in a map; try to do it fairly, but
509  * that is really hard to do.
510  */
511 static void
512 vm_pageout_map_deactivate_pages(map, desired)
513 	vm_map_t map;
514 	vm_pindex_t desired;
515 {
516 	vm_map_entry_t tmpe;
517 	vm_object_t obj, bigobj;
518 
519 	vm_map_reference(map);
520 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
521 		vm_map_deallocate(map);
522 		return;
523 	}
524 
525 	bigobj = NULL;
526 
527 	/*
528 	 * first, search out the biggest object, and try to free pages from
529 	 * that.
530 	 */
531 	tmpe = map->header.next;
532 	while (tmpe != &map->header) {
533 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
534 			obj = tmpe->object.vm_object;
535 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
536 				((bigobj == NULL) ||
537 				 (bigobj->resident_page_count < obj->resident_page_count))) {
538 				bigobj = obj;
539 			}
540 		}
541 		tmpe = tmpe->next;
542 	}
543 
544 	if (bigobj)
545 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
546 
547 	/*
548 	 * Next, hunt around for other pages to deactivate.  We actually
549 	 * do this search sort of wrong -- .text first is not the best idea.
550 	 */
551 	tmpe = map->header.next;
552 	while (tmpe != &map->header) {
553 		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
554 			break;
555 		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
556 			obj = tmpe->object.vm_object;
557 			if (obj)
558 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
559 		}
560 		tmpe = tmpe->next;
561 	}
562 
563 	/*
564 	 * Remove all mappings if a process is swapped out, this will free page
565 	 * table pages.
566 	 */
567 	if (desired == 0)
568 		pmap_remove(vm_map_pmap(map),
569 			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
570 	vm_map_unlock(map);
571 	vm_map_deallocate(map);
572 	return;
573 }
574 #endif
575 
576 /*
577  *	vm_pageout_scan does the dirty work for the pageout daemon.
578  */
579 static int
580 vm_pageout_scan()
581 {
582 	vm_page_t m, next;
583 	int page_shortage, addl_page_shortage, maxscan, maxlaunder, pcount;
584 	int pages_freed;
585 	struct proc *p, *bigproc;
586 	vm_offset_t size, bigsize;
587 	vm_object_t object;
588 	int force_wakeup = 0;
589 	int vnodes_skipped = 0;
590 	int s;
591 
592 	/*
593 	 * Start scanning the inactive queue for pages we can free. We keep
594 	 * scanning until we have enough free pages or we have scanned through
595 	 * the entire queue.  If we encounter dirty pages, we start cleaning
596 	 * them.
597 	 */
598 
599 	pages_freed = 0;
600 	addl_page_shortage = 0;
601 
602 	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
603 	    MAXLAUNDER : cnt.v_inactive_target;
604 rescan0:
605 	maxscan = cnt.v_inactive_count;
606 	for (m = TAILQ_FIRST(&vm_page_queue_inactive);
607 
608 		(m != NULL) && (maxscan-- > 0) &&
609 			((cnt.v_cache_count + cnt.v_free_count) <
610 			(cnt.v_cache_min + cnt.v_free_target));
611 
612 		m = next) {
613 
614 		cnt.v_pdpages++;
615 
616 		if (m->queue != PQ_INACTIVE) {
617 			goto rescan0;
618 		}
619 
620 		next = TAILQ_NEXT(m, pageq);
621 
622 		if (m->hold_count) {
623 			s = splvm();
624 			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
625 			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
626 			splx(s);
627 			addl_page_shortage++;
628 			continue;
629 		}
630 		/*
631 		 * Don't mess with busy pages; keep them at the front of the
632 		 * queue, since they are most likely being paged out.
633 		 */
634 		if (m->busy || (m->flags & PG_BUSY)) {
635 			addl_page_shortage++;
636 			continue;
637 		}
638 
639 		if (m->object->ref_count == 0) {
640 			m->flags &= ~PG_REFERENCED;
641 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
642 		} else if (((m->flags & PG_REFERENCED) == 0) &&
643 			pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
644 			vm_page_activate(m);
645 			continue;
646 		}
647 
648 		if ((m->flags & PG_REFERENCED) != 0) {
649 			m->flags &= ~PG_REFERENCED;
650 			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
651 			vm_page_activate(m);
652 			continue;
653 		}
654 
655 		if (m->dirty == 0) {
656 			vm_page_test_dirty(m);
657 		} else {
658 			m->dirty = VM_PAGE_BITS_ALL;
659 		}
660 
661 		if (m->valid == 0) {
662 			vm_page_protect(m, VM_PROT_NONE);
663 			vm_page_free(m);
664 			cnt.v_dfree++;
665 			++pages_freed;
666 		} else if (m->dirty == 0) {
667 			vm_page_cache(m);
668 			++pages_freed;
669 		} else if (maxlaunder > 0) {
670 			int written;
671 			struct vnode *vp = NULL;
672 
673 			object = m->object;
674 			if (object->flags & OBJ_DEAD) {
675 				s = splvm();
676 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
677 				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
678 				splx(s);
679 				continue;
680 			}
681 
682 			if (object->type == OBJT_VNODE) {
683 				vp = object->handle;
684 				if (VOP_ISLOCKED(vp) ||
685 				    vget(vp, LK_EXCLUSIVE, curproc)) {
686 					if ((m->queue == PQ_INACTIVE) &&
687 						(m->hold_count == 0) &&
688 						(m->busy == 0) &&
689 						(m->flags & PG_BUSY) == 0) {
690 						s = splvm();
691 						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
692 						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
693 						splx(s);
694 					}
695 					if (object->flags & OBJ_MIGHTBEDIRTY)
696 						++vnodes_skipped;
697 					continue;
698 				}
699 
700 				/*
701 				 * The page might have been moved to another queue
702 				 * during potential blocking in vget() above.
703 				 */
704 				if (m->queue != PQ_INACTIVE) {
705 					if (object->flags & OBJ_MIGHTBEDIRTY)
706 						++vnodes_skipped;
707 					vput(vp);
708 					continue;
709 				}
710 
711 				/*
712 				 * The page may have been busied during the blocking in
713 				 * vget(); we don't move the page back onto the end of
714 				 * the queue, so that the statistics stay more accurate.
715 				 */
716 				if (m->busy || (m->flags & PG_BUSY)) {
717 					vput(vp);
718 					continue;
719 				}
720 
721 				/*
722 				 * If the page has become held, then skip it
723 				 */
724 				if (m->hold_count) {
725 					s = splvm();
726 					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
727 					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
728 					splx(s);
729 					if (object->flags & OBJ_MIGHTBEDIRTY)
730 						++vnodes_skipped;
731 					vput(vp);
732 					continue;
733 				}
734 			}
735 
736 			/*
737 			 * If a page is dirty, then it is either being washed
738 			 * (but not yet cleaned) or it is still in the
739 			 * laundry.  If it is still in the laundry, then we
740 			 * start the cleaning operation.
741 			 */
742 			written = vm_pageout_clean(m, 0);
743 
744 			if (vp)
745 				vput(vp);
746 
747 			maxlaunder -= written;
748 		}
749 	}
750 
751 	/*
752 	 * Compute the page shortage.  If we are still very low on memory, make
753 	 * sure that we move at least a minimal number of pages from active to
754 	 * inactive.
755 	 */
756 
757 	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
758 	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
759 	if (page_shortage <= 0) {
760 		if (pages_freed == 0) {
761 			page_shortage = cnt.v_free_min - cnt.v_free_count;
762 		} else {
763 			page_shortage = 1;
764 		}
765 	}
766 	if (addl_page_shortage) {
767 		if (page_shortage < 0)
768 			page_shortage = 0;
769 		page_shortage += addl_page_shortage;
770 	}
771 
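	/*
	 * Example with hypothetical numbers: an inactive target of 512, a
	 * cache minimum of 256, and 300 free + 200 inactive + 100 cached
	 * pages give page_shortage = (512 + 256) - (300 + 200 + 100) = 168
	 * pages to be moved from the active queue below.
	 */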
772 	pcount = cnt.v_active_count;
773 	m = TAILQ_FIRST(&vm_page_queue_active);
774 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
775 		int refcount;
776 
777 		if (m->queue != PQ_ACTIVE) {
778 			break;
779 		}
780 
781 		next = TAILQ_NEXT(m, pageq);
782 		/*
783 		 * Don't deactivate pages that are busy.
784 		 */
785 		if ((m->busy != 0) ||
786 		    (m->flags & PG_BUSY) ||
787 		    (m->hold_count != 0)) {
788 			s = splvm();
789 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
790 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
791 			splx(s);
792 			m = next;
793 			continue;
794 		}
795 
796 		/*
797 		 * The count for pagedaemon pages is done after checking the
798 		 * page for eligibility...
799 		 */
800 		cnt.v_pdpages++;
801 
802 		refcount = 0;
803 		if (m->object->ref_count != 0) {
804 			if (m->flags & PG_REFERENCED) {
805 				refcount += 1;
806 			}
807 			refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
808 			if (refcount) {
809 				m->act_count += ACT_ADVANCE + refcount;
810 				if (m->act_count > ACT_MAX)
811 					m->act_count = ACT_MAX;
812 			}
813 		}
814 
815 		m->flags &= ~PG_REFERENCED;
816 
817 		if (refcount && (m->object->ref_count != 0)) {
818 			s = splvm();
819 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
820 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
821 			splx(s);
822 		} else {
823 			m->act_count -= min(m->act_count, ACT_DECLINE);
824 			if (vm_pageout_algorithm_lru ||
825 				(m->object->ref_count == 0) || (m->act_count == 0)) {
826 				--page_shortage;
827 				if (m->object->ref_count == 0) {
828 					vm_page_protect(m, VM_PROT_NONE);
829 					if (m->dirty == 0)
830 						vm_page_cache(m);
831 					else
832 						vm_page_deactivate(m);
833 				} else {
834 					vm_page_deactivate(m);
835 				}
836 			} else {
837 				s = splvm();
838 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
839 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
840 				splx(s);
841 			}
842 		}
843 		m = next;
844 	}
845 
846 	s = splvm();
847 	/*
848 	 * We try to maintain some *really* free pages; this allows interrupt
849 	 * code to be guaranteed space.
850 	 */
851 	while (cnt.v_free_count < cnt.v_free_reserved) {
852 		static int cache_rover = 0;
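		/*
		 * vm_page_list_find() starts looking at the cache_rover
		 * color; advancing the rover by the prime PQ_PRIME2 (masked
		 * by PQ_L2_MASK) spreads these frees across the page colors.
		 */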
853 		m = vm_page_list_find(PQ_CACHE, cache_rover);
854 		if (!m)
855 			break;
856 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
857 		vm_page_free(m);
858 		cnt.v_dfree++;
859 	}
860 	splx(s);
861 
862 	/*
863 	 * If we didn't get enough free pages and we have skipped a vnode
864 	 * in a writeable object, wake up the sync daemon.  Also kick off
865 	 * swapout if we did not get enough free pages.
866 	 */
867 	if ((cnt.v_cache_count + cnt.v_free_count) <
868 		(cnt.v_free_target + cnt.v_cache_min) ) {
869 		if (vnodes_skipped &&
870 		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
871 			if (!vfs_update_wakeup) {
872 				vfs_update_wakeup = 1;
873 				wakeup(&vfs_update_wakeup);
874 			}
875 		}
876 #if !defined(NO_SWAPPING)
877 		if (vm_swapping_enabled &&
878 			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
879 			vm_req_vmdaemon();
880 			vm_pageout_req_swapout = 1;
881 		}
882 #endif
883 	}
884 
885 
886 	/*
887 	 * Make sure that we have swap space: if we are low on both memory
888 	 * and swap, then kill the biggest process.
889 	 */
890 	if ((vm_swap_size == 0 || swap_pager_full) &&
891 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
892 		bigproc = NULL;
893 		bigsize = 0;
894 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
895 			/*
896 			 * if this is a system process, skip it
897 			 */
898 			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
899 			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
900 				continue;
901 			}
902 			/*
903 			 * if the process is neither running nor sleeping,
904 			 * don't touch it.
905 			 */
906 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
907 				continue;
908 			}
909 			/*
910 			 * get the process size
911 			 */
912 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
913 			/*
914 			 * if this process is bigger than the biggest one,
915 			 * remember it.
916 			 */
917 			if (size > bigsize) {
918 				bigproc = p;
919 				bigsize = size;
920 			}
921 		}
922 		if (bigproc != NULL) {
923 			killproc(bigproc, "out of swap space");
924 			bigproc->p_estcpu = 0;
925 			bigproc->p_nice = PRIO_MIN;
926 			resetpriority(bigproc);
927 			wakeup(&cnt.v_free_count);
928 		}
929 	}
930 	return force_wakeup;
931 }
932 
933 /*
934  * This routine tries to maintain the pseudo-LRU active queue so that
935  * some statistics accumulation still occurs during long periods when
936  * there is no paging.  This code helps the situation where paging just
937  * starts to occur.
938  */
939 static void
940 vm_pageout_page_stats()
941 {
942 	int s;
943 	vm_page_t m,next;
944 	int pcount,tpcount;		/* Number of pages to check */
945 	static int fullintervalcount = 0;
946 
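	/*
	 * Between full scans only part of the active queue is examined,
	 * proportional to the active share of memory.  With hypothetical
	 * numbers: stats_max = 1000 and 4000 active pages of 8000 total
	 * give tpcount = (1000 * 4000) / 8000 = 500 pages per partial scan.
	 */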
947 	pcount = cnt.v_active_count;
948 	fullintervalcount += vm_pageout_stats_interval;
949 	if (fullintervalcount < vm_pageout_full_stats_interval) {
950 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
951 		if (pcount > tpcount)
952 			pcount = tpcount;
953 	}
954 
955 	m = TAILQ_FIRST(&vm_page_queue_active);
956 	while ((m != NULL) && (pcount-- > 0)) {
957 		int refcount;
958 
959 		if (m->queue != PQ_ACTIVE) {
960 			break;
961 		}
962 
963 		next = TAILQ_NEXT(m, pageq);
964 		/*
965 		 * Don't deactivate pages that are busy.
966 		 */
967 		if ((m->busy != 0) ||
968 		    (m->flags & PG_BUSY) ||
969 		    (m->hold_count != 0)) {
970 			s = splvm();
971 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
972 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
973 			splx(s);
974 			m = next;
975 			continue;
976 		}
977 
978 		refcount = 0;
979 		if (m->flags & PG_REFERENCED) {
980 			m->flags &= ~PG_REFERENCED;
981 			refcount += 1;
982 		}
983 
984 		refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
985 		if (refcount) {
986 			m->act_count += ACT_ADVANCE + refcount;
987 			if (m->act_count > ACT_MAX)
988 				m->act_count = ACT_MAX;
989 			s = splvm();
990 			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
991 			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
992 			splx(s);
993 		} else {
994 			if (m->act_count == 0) {
995 				vm_page_protect(m, VM_PROT_NONE);
996 				vm_page_deactivate(m);
997 			} else {
998 				m->act_count -= min(m->act_count, ACT_DECLINE);
999 				s = splvm();
1000 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
1001 				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1002 				splx(s);
1003 			}
1004 		}
1005 
1006 		m = next;
1007 	}
1008 }
1009 
1010 
1011 static int
1012 vm_pageout_free_page_calc(count)
1013 	vm_size_t count;
1014 {
1015 	if (count < cnt.v_page_count)
1016 		return 0;
1017 	/*
1018 	 * free_reserved needs to include enough for the largest swap pager
1019 	 * structures plus enough for any pv_entry structs when paging.
1020 	 */
1021 	if (cnt.v_page_count > 1024)
1022 		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1023 	else
1024 		cnt.v_free_min = 4;
1025 	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1026 		cnt.v_interrupt_free_min;
1027 	cnt.v_free_reserved = vm_pageout_page_count +
1028 		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
1029 	cnt.v_free_min += cnt.v_free_reserved;
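	/*
	 * Worked example, assuming 4K pages and a 64K MAXBSIZE: for count =
	 * 8192 pages (32MB), v_free_min starts at 4 + (8192 - 1024) / 200 =
	 * 39, v_pageout_free_min = 32 + 2 = 34 (v_interrupt_free_min is 2),
	 * and v_free_reserved = 16 + 34 + 10 + PQ_L2_SIZE; v_free_min then
	 * grows by v_free_reserved.
	 */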
1030 	return 1;
1031 }
1032 
1033 
1034 /*
1035  *	vm_pageout is the high level pageout daemon.
1036  */
1037 static void
1038 vm_pageout()
1039 {
1040 	/*
1041 	 * Initialize some paging parameters.
1042 	 */
1043 
1044 	cnt.v_interrupt_free_min = 2;
1045 	if (cnt.v_page_count < 2000)
1046 		vm_pageout_page_count = 8;
1047 
1048 	vm_pageout_free_page_calc(cnt.v_page_count);
1049 	/*
1050 	 * free_reserved needs to include enough for the largest swap pager
1051 	 * structures plus enough for any pv_entry structs when paging.
1052 	 */
1053 	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
1054 
1055 	if (cnt.v_free_count > 1024) {
1056 		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
1057 		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
1058 		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
1059 	} else {
1060 		cnt.v_cache_min = 0;
1061 		cnt.v_cache_max = 0;
1062 		cnt.v_inactive_target = cnt.v_free_count / 4;
1063 	}
1064 
1065 	/* XXX does not really belong here */
1066 	if (vm_page_max_wired == 0)
1067 		vm_page_max_wired = cnt.v_free_count / 3;
1068 
1069 	if (vm_pageout_stats_max == 0)
1070 		vm_pageout_stats_max = cnt.v_free_target;
1071 
1072 	/*
1073 	 * Set interval in seconds for stats scan.
1074 	 */
1075 	if (vm_pageout_stats_interval == 0)
1076 		vm_pageout_stats_interval = 4;
1077 	if (vm_pageout_full_stats_interval == 0)
1078 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1079 
1080 
1081 	/*
1082 	 * Set maximum free per pass
1083 	 */
1084 	if (vm_pageout_stats_free_max == 0)
1085 		vm_pageout_stats_free_max = 25;
1086 
1087 
1088 	swap_pager_swap_init();
1089 	/*
1090 	 * The pageout daemon is never done, so loop forever.
1091 	 */
1092 	while (TRUE) {
1093 		int inactive_target;
1094 		int error;
1095 		int s = splvm();
1096 		if (!vm_pages_needed ||
1097 			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
1098 			vm_pages_needed = 0;
1099 			error = tsleep(&vm_pages_needed,
1100 				PVM, "psleep", vm_pageout_stats_interval * hz);
1101 			if (error && !vm_pages_needed) {
1102 				splx(s);
1103 				vm_pageout_page_stats();
1104 				continue;
1105 			}
1106 		} else if (vm_pages_needed) {
1107 			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
1108 		}
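		/*
		 * Retune the inactive target on every pass: one quarter of
		 * unwired memory, but never less than twice v_free_min.
		 */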
1109 		inactive_target =
1110 			(cnt.v_page_count - cnt.v_wire_count) / 4;
1111 		if (inactive_target < 2*cnt.v_free_min)
1112 			inactive_target = 2*cnt.v_free_min;
1113 		cnt.v_inactive_target = inactive_target;
1114 		if (vm_pages_needed)
1115 			cnt.v_pdwakeups++;
1116 		vm_pages_needed = 0;
1117 		splx(s);
1118 		vm_pager_sync();
1119 		vm_pageout_scan();
1120 		vm_pager_sync();
1121 		wakeup(&cnt.v_free_count);
1122 	}
1123 }
1124 
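/*
 * Wake the pageout daemon when a free-page shortage is noticed; callers
 * outside the daemon (for example the page allocator -- an assumption, the
 * call sites are not shown here) use this, and the daemon never signals
 * itself.
 */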
1125 void
1126 pagedaemon_wakeup()
1127 {
1128 	if (!vm_pages_needed && curproc != pageproc) {
1129 		vm_pages_needed++;
1130 		wakeup(&vm_pages_needed);
1131 	}
1132 }
1133 
1134 #if !defined(NO_SWAPPING)
1135 static void
1136 vm_req_vmdaemon()
1137 {
1138 	static int lastrun = 0;
1139 
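	/*
	 * Rate-limit wakeups to roughly one per second; the second test
	 * catches wraparound of the ticks counter.
	 */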
1140 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
1141 		wakeup(&vm_daemon_needed);
1142 		lastrun = ticks;
1143 	}
1144 }
1145 
1146 static void
1147 vm_daemon()
1148 {
1149 	vm_object_t object;
1150 	struct proc *p;
1151 
1152 	while (TRUE) {
1153 		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
1154 		if (vm_pageout_req_swapout) {
1155 			swapout_procs();
1156 			vm_pageout_req_swapout = 0;
1157 		}
1158 		/*
1159 		 * Scan the processes: deactivate pages of any process that
1160 		 * exceeds its RSS rlimit or that is swapped out.
1161 		 */
1162 
1163 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1164 			quad_t limit;
1165 			vm_offset_t size;
1166 
1167 			/*
1168 			 * if this is a system process or the process is
1169 			 * exiting, skip it.
1170 			 */
1171 			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
1172 				continue;
1173 			}
1174 			/*
1175 			 * if the process is neither running nor sleeping,
1176 			 * don't touch it.
1177 			 */
1178 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
1179 				continue;
1180 			}
1181 			/*
1182 			 * get a limit
1183 			 */
1184 			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
1185 			    p->p_rlimit[RLIMIT_RSS].rlim_max);
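			/*
			 * limit and the resident size computed below are
			 * both in bytes; the deactivation target handed to
			 * vm_pageout_map_deactivate_pages() is converted to
			 * pages via PAGE_SHIFT.
			 */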
1186 
1187 			/*
1188 			 * let processes that are swapped out really be
1189 			 * swapped out set the limit to nothing (will force a
1190 			 * swap-out.)
1191 			 */
1192 			if ((p->p_flag & P_INMEM) == 0)
1193 				limit = 0;	/* XXX */
1194 
1195 			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
1196 			if (limit >= 0 && size >= limit) {
1197 				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
1198 				    (vm_pindex_t)(limit >> PAGE_SHIFT) );
1199 			}
1200 		}
1201 
1202 		/*
1203 		 * we remove cached objects that have no resident pages...
1204 		 */
1205 restart:
1206 		object = TAILQ_FIRST(&vm_object_cached_list);
1207 		while (object) {
1208 			/*
1209 			 * if there are no resident pages -- get rid of the object
1210 			 */
1211 			if (object->resident_page_count == 0) {
1212 				vm_object_reference(object);
1213 				pager_cache(object, FALSE);
1214 				goto restart;
1215 			}
1216 			object = TAILQ_NEXT(object, cached_list);
1217 		}
1218 	}
1219 }
1220 #endif
1221