/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.85 1996/09/08 20:44:48 dyson Exp $
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

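/*
 * SYSINIT_KT registers kproc_start with this descriptor, so the
 * "pagedaemon" kernel process is created automatically at boot.
 */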
static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct	proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
int vm_pageout_algorithm_lru=0;
#if defined(NO_SWAPPING)
int vm_swapping_enabled=0;
#else
int vm_swapping_enabled=1;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
	CTLFLAG_RD, &vm_swapping_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
	CTLFLAG_RW, &vm_swapping_enabled, 0, "");
#endif

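/*
 * MAXLAUNDER bounds how many dirty pages may be laundered during one
 * pass over the inactive queue; machines with more than 1800 pages of
 * memory get the larger budget.
 */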
#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
static int
vm_pageout_clean(m, sync)
	vm_page_t m;
	int sync;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((sync != VM_PAGEOUT_FORCE) &&
	    (object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((!sync && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (!sync && object->backing_object) {
		vm_object_collapse(object);
	}
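	/*
	 * Seed the cluster array at its midpoint: forward neighbors are
	 * filed at increasing indices and backward neighbors at decreasing
	 * ones, so page_base always marks the start of the run to flush.
	 */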
	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if the page is NOT clean, wired, busy, held,
	 * or mapped into a buffer, and one of the following holds:
	 * 1) The page is inactive, or a seldom used active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	for (i = page_base; i < (page_base + pageout_count); i++) {
		mc[i]->flags |= PG_BUSY;
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}

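/*
 * vm_pageout_flush:
 *
 * Hand a run of busied pages to the pager and account for the result.
 * Returns the number of pages whose pageout succeeded or is pending.
 */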
int
vm_pageout_flush(mc, count, sync)
	vm_page_t *mc;
	int count;
	int sync;
{
	register vm_object_t object;
	int pageout_status[count];
	int anyok = 0;
	int i;

	object = mc[0]->object;
	object->paging_in_progress += count;

	vm_pager_put_pages(object, mc, count,
	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			if (mt->queue == PQ_INACTIVE)
				vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			PAGE_WAKEUP(mt);
		}
	}
	return anyok;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements, or, if vm_page_proc_limit is set, deactivate
 *	all of the pages in the object and its backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int refcount;
			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

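			/*
			 * pmap_ts_referenced() collects and clears the
			 * hardware reference bits; PG_REFERENCED is the
			 * software latch remembering a reference that has
			 * already been harvested.
			 */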
			refcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (refcount) {
				p->flags |= PG_REFERENCED;
			} else if (p->flags & PG_REFERENCED) {
				refcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
				(p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += refcount;
				p->flags &= ~PG_REFERENCED;
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					p->flags &= ~PG_REFERENCED;
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

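	/*
	 * Take the map lock without blocking; if somebody else holds it,
	 * just skip this map rather than stall the daemon.
	 */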
	vm_map_reference(map);
	if (!lock_try_write(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
				((bigobj == NULL) ||
				 (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			break;
		if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	vm_map_deallocate(map);
	return;
}
#endif

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, addl_page_shortage, maxscan, maxlaunder, pcount;
	int pages_freed;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Start scanning the inactive queue for pages we can free. We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	pages_freed = 0;
	addl_page_shortage = 0;

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;
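	/*
	 * If the page under inspection moves off the inactive queue while
	 * we are blocked, the queue linkage is stale, so restart the whole
	 * scan from the head (rescan0).
	 */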
rescan0:
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queue_inactive);
		(m != NULL) && (maxscan-- > 0) &&
			((cnt.v_cache_count + cnt.v_free_count) <
			(cnt.v_cache_min + cnt.v_free_target));
		m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
			vm_page_activate(m);
			continue;
		}

		if ((m->flags & PG_REFERENCED) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			continue;
		}

		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else if (m->dirty != 0) {
			m->dirty = VM_PAGE_BITS_ALL;
		}

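		/*
		 * Disposition: pages with no valid contents are freed
		 * outright, clean pages move to the cache queue, and dirty
		 * pages are laundered while the maxlaunder budget lasts.
		 */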
		if (m->valid == 0) {
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			cnt.v_dfree++;
			++pages_freed;
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			++pages_freed;
		} else if (maxlaunder > 0) {
			int written;
			struct vnode *vp = NULL;

			object = m->object;
			if (object->flags & OBJ_DEAD) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) || vget(vp, 1)) {
					if ((m->queue == PQ_INACTIVE) &&
						(m->hold_count == 0) &&
						(m->busy == 0) &&
						(m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					continue;
				}

				/*
				 * The page might have been moved to another queue
				 * during potential blocking in vget() above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget() above.  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);

			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}
	if (addl_page_shortage) {
		if (page_shortage < 0)
			page_shortage = 0;
		page_shortage += addl_page_shortage;
	}

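	/*
	 * Scan the active queue, aging pages and moving enough of them to
	 * the inactive (or cache) queue to cover the computed shortage.
	 */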
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
		int refcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		refcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				refcount += 1;
			}
			refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (refcount) {
				m->act_count += ACT_ADVANCE + refcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		m->flags &= ~PG_REFERENCED;

		if (refcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
				(m->object->ref_count == 0) || (m->act_count == 0)) {
				--page_shortage;
				vm_page_protect(m, VM_PROT_NONE);
				if ((m->dirty == 0) &&
					(m->object->ref_count == 0)) {
					vm_page_cache(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();
	/*
	 * We try to maintain some *really* free pages; this allows
	 * interrupt code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover);
		if (!m)
			break;
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

	/*
	 * If we didn't get enough free pages and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  Also kick off
	 * swapout if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
		(cnt.v_free_target + cnt.v_cache_min) ) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swapping_enabled &&
			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout = 1;
		}
#endif
	}

	/*
	 * Make sure that we have swap space -- if we are low on memory and
	 * swap, then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
		cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}
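
/*
 * Illustrative numbers only (the real values depend on the kernel
 * configuration): on a machine with 4096 pages of 4K (16MB) and a
 * MAXBSIZE of 64K, v_free_min starts at 4 + 3072/200 = 19,
 * v_pageout_free_min is 32 + v_interrupt_free_min, and v_free_reserved
 * adds the cluster size, 4096/768 = 5 pages, and the PQ_L2_SIZE
 * page-coloring constant.
 */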

#ifdef unused
int
vm_pageout_free_pages(object, add)
vm_object_t object;
int add;
{
	return vm_pageout_free_page_calc(object->size);
}
#endif

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * The free target sits well above the minimum so that one pageout
	 * pass produces a comfortable cushion of free pages.
	 */
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int inactive_target;
		int s = splvm();
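		/*
		 * XXX the else-if branch below is unreachable: when
		 * vm_pages_needed is zero the first test already matches,
		 * so the short hz/10 sleep is never taken.
		 */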
		if (!vm_pages_needed ||
			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", 0);
		} else if (!vm_pages_needed) {
			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
		}
		inactive_target =
			(cnt.v_page_count - cnt.v_wire_count) / 4;
		if (inactive_target < 2*cnt.v_free_min)
			inactive_target = 2*cnt.v_free_min;
		cnt.v_inactive_target = inactive_target;
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
	}
}

#if !defined(NO_SWAPPING)
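/*
 * Wake the vm daemon at most once per second; the second test guards
 * against the ticks counter wrapping around.
 */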
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	(void) spl0();

	while (TRUE) {
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs();
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes; deactivate pages for any process that
		 * exceeds its rlimits or that is swapped out.
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which will
			 * force a swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_pindex_t)(limit >> PAGE_SHIFT) );
			}
		}

		/*
		 * we remove cached objects that have no RSS...
		 */
restart:
		object = TAILQ_FIRST(&vm_object_cached_list);
		while (object) {
			/*
			 * if there are no resident pages -- get rid of the object
			 */
			if (object->resident_page_count == 0) {
				vm_object_reference(object);
				pager_cache(object, FALSE);
				goto restart;
			}
			object = TAILQ_NEXT(object, cached_list);
		}
	}
}
#endif
1107