/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.57 1995/10/07 19:02:55 davidg Exp $
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
struct	proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)


int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
int vm_pageout_req_swapout;	/* XXX */
int vm_daemon_needed;
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;

#define MAXSCAN 1024		/* maximum number of pages to scan in queues */

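/*
 * Cap on the number of dirty pages laundered (handed to the pager for
 * cleaning) in one pass of vm_pageout_scan; larger machines launder more
 * pages per pass.
 */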
#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

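/*
 * Default number of pages clustered into a single pageout I/O by
 * vm_pageout_clean(); vm_pageout_page_count is the run-time value.
 */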
#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

typedef int freeer_fcn_t __P((vm_map_t, vm_object_t, int, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_map_entry_t,
						 int *, freeer_fcn_t *));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
int
vm_pageout_clean(m, sync)
	vm_page_t m;
	int sync;
{
	register vm_object_t object;
	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
	vm_page_t mc[2*VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int anyok = 0;
	int i, forward_okay, backward_okay, page_base;
	vm_offset_t offset = m->offset;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((sync != VM_PAGEOUT_FORCE) &&
	    (object->type != OBJT_SWAP) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((!sync && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (!sync && object->backing_object) {
		vm_object_collapse(object);
	}
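	/*
	 * The mc[] array is a window of 2*VM_PAGEOUT_PAGE_COUNT slots with
	 * the target page anchored at index VM_PAGEOUT_PAGE_COUNT.  The
	 * forward scan below fills slots above the anchor and the backward
	 * scan fills slots below it, so the final cluster occupies the
	 * contiguous range mc[page_base .. page_base + pageout_count - 1]
	 * in ascending object offset order.
	 */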
	mc[VM_PAGEOUT_PAGE_COUNT] = m;
	pageout_count = 1;
	page_base = VM_PAGEOUT_PAGE_COUNT;
	forward_okay = TRUE;
	if (offset != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan the object for clusterable pages.
	 *
	 * A page can join the cluster ONLY if it is dirty and is not
	 * wired, busy, held, or mapped into a buffer, and one of the
	 * following holds:
	 * 1) The page is inactive, or a seldom-used active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((offset + i * PAGE_SIZE) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, offset + i * PAGE_SIZE);
			if (p) {
				if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->flags & PG_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[VM_PAGEOUT_PAGE_COUNT + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((offset - i * PAGE_SIZE) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, offset - i * PAGE_SIZE);
			if (p) {
				if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->flags & PG_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[VM_PAGEOUT_PAGE_COUNT - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	for (i = page_base; i < (page_base + pageout_count); i++) {
		mc[i]->flags |= PG_BUSY;
		vm_page_protect(mc[i], VM_PROT_READ);
	}
	object->paging_in_progress += pageout_count;

	vm_pager_put_pages(object, &mc[page_base], pageout_count,
	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
	    pageout_status);
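
	/*
	 * Pageouts to the kernel object are forced synchronous above; the
	 * pager hands back a per-page result in the matching slot of
	 * pageout_status[].
	 */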
	for (i = 0; i < pageout_count; i++) {
		vm_page_t mt = mc[page_base + i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We will
			 * try paging it out again later.)
			 */
			if (mt->flags & PG_INACTIVE)
				vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			if ((mt->flags & (PG_REFERENCED|PG_WANTED)) ||
			    pmap_is_referenced(VM_PAGE_TO_PHYS(mt))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(mt));
				mt->flags &= ~PG_REFERENCED;
				if (mt->flags & PG_INACTIVE)
					vm_page_activate(mt);
			}
			PAGE_WAKEUP(mt);
		}
	}
	return anyok;
}

/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	int count;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

	if (object->type == OBJT_DEVICE)
		return 0;

	if (object->backing_object) {
		if (object->backing_object->ref_count == 1)
			dcount += vm_pageout_object_deactivate_pages(map,
			    object->backing_object, count / 2 + 1, map_remove_only);
		else
			vm_pageout_object_deactivate_pages(map,
			    object->backing_object, count, 1);
	}
	if (object->paging_in_progress)
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		cnt.v_pdpages++;
		if (p->wire_count != 0 ||
		    p->hold_count != 0 ||
		    p->busy != 0 ||
		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			p = next;
			continue;
		}
		/*
		 * if a page is active, not wired and is in the process's
		 * pmap, then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
			    (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we
				 * deactivate
				 */
				if (!p->act_count) {
					if (!map_remove_only)
						vm_page_deactivate(p);
					vm_page_protect(p, VM_PROT_NONE);
				} else {
					/*
					 * The page still has act_count left;
					 * requeue it at the tail of the
					 * active queue so the other pages in
					 * memory age, and deactivate it on a
					 * later go-around once its act_count
					 * drains to zero.
					 */
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						return dcount;
					}
				}
			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				p->flags &= ~PG_REFERENCED;
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
			}
		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
			vm_page_protect(p, VM_PROT_NONE);
		}
		p = next;
	}
	return dcount;
}


/*
 * Deactivate some number of pages in a map; try to do it fairly, though
 * that is really hard to do.
 */

static void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	freeer_fcn_t *freeer;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (*count <= 0)
		return;
	vm_map_reference(map);
	if (!lock_try_read(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != 0) {
		*count -= (*freeer) (map, obj, *count, TRUE);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

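	/*
	 * Rate-limit vmdaemon wakeups to roughly ten per second (hz / 10
	 * ticks apart); the "ticks < lastrun" test restarts the interval
	 * if the tick counter has wrapped.
	 */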
	if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder, pcount;
	int pages_freed;
	vm_page_t next;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int vnodes_skipped = 0;

	pages_freed = 0;

	/*
	 * Start scanning the inactive queue for pages we can free. We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;

rescan1:
	maxscan = cnt.v_inactive_count;
	m = vm_page_queue_inactive.tqh_first;
	while ((m != NULL) && (maxscan-- > 0) &&
	    ((cnt.v_cache_count + cnt.v_free_count) < (cnt.v_cache_min + cnt.v_free_target))) {
		vm_page_t next;

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?\n");
			break;
		}
#endif

		/*
		 * don't mess with busy pages
		 */
		if (m->hold_count || m->busy || (m->flags & PG_BUSY)) {
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			m = next;
			continue;
		}
		if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			m->flags |= PG_REFERENCED;
		}
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		}
		if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			if (m->act_count < ACT_MAX)
				m->act_count += ACT_ADVANCE;
			m = next;
			continue;
		}

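		/*
		 * A clean page that is not mapped into any buffer can be
		 * reclaimed immediately: a completely invalid page is freed
		 * outright, while a valid one is moved to the cache queue,
		 * from which it can still be reclaimed cheaply.
		 */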
		vm_page_test_dirty(m);
		if (m->dirty == 0) {
			if (m->bmapped == 0) {
				if (m->valid == 0) {
					pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
					vm_page_free(m);
					cnt.v_dfree++;
				} else {
					vm_page_cache(m);
				}
				++pages_freed;
			} else {
				m = next;
				continue;
			}
		} else if (maxlaunder > 0) {
			int written;
			struct vnode *vp = NULL;

			object = m->object;
			if (object->flags & OBJ_DEAD) {
				m = next;
				continue;
			}

			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) || vget(vp, 1)) {
					if (object->flags & OBJ_WRITEABLE)
						++vnodes_skipped;
					m = next;
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);

			if (vp)
				vput(vp);

			if (!next) {
				break;
			}
			maxlaunder -= written;
			/*
			 * if the next page has been re-activated, start
			 * scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0) {
				goto rescan1;
			}
		}
		m = next;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}
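
	/*
	 * Scan the active queue, at most MAXSCAN pages per pass, moving
	 * pages whose act_count has drained to the inactive queue until
	 * the shortage computed above is covered.
	 */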
	maxscan = MAXSCAN;
	pcount = cnt.v_active_count;
	m = vm_page_queue_active.tqh_first;
	while ((m != NULL) && (maxscan > 0) && (pcount-- > 0) && (page_shortage > 0)) {

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			m = next;
			continue;
		}
		if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
			pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			if (m->act_count < ACT_MAX) {
				m->act_count += ACT_ADVANCE;
			}
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		} else {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count && (page_shortage > 0)) {
				if (m->object->ref_count == 0) {
					--page_shortage;
					vm_page_test_dirty(m);
					if ((m->bmapped == 0) && (m->dirty == 0)) {
						m->act_count = 0;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
					}
				} else {
					vm_page_deactivate(m);
					--page_shortage;
				}
			} else if (m->act_count) {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			}
		}
		maxscan--;
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages; this allows
	 * interrupt-level code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		m = vm_page_queue_cache.tqh_first;
		if (!m)
			break;
		vm_page_free(m);
		cnt.v_dfree++;
	}

	/*
	 * If we didn't get enough free pages and we skipped a vnode in a
	 * writeable object, wake up the sync daemon; also kick off a
	 * swapout while memory remains short.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_target) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
		/*
		 * now swap processes out if we are in low memory conditions
		 */
		if (!swap_pager_full && vm_swap_size &&
			vm_pageout_req_swapout == 0) {
			vm_pageout_req_swapout = 1;
			vm_req_vmdaemon();
		}
	}

	if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_inactive_target + cnt.v_free_min)) {
		vm_req_vmdaemon();
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			/*
			 * skip system processes, init, and (while swap
			 * remains) other low-pid system daemons
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest so
			 * far, remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
			psignal(bigproc, SIGKILL);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;

	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024 +
				cnt.v_interrupt_free_min;
	cnt.v_free_reserved = cnt.v_pageout_free_min + 6;
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_free_min += cnt.v_free_reserved;
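	/*
	 * For example, on a 4096-page (16MB at 4K pages) machine the above
	 * yields: v_free_min = 4 + 3072/200 = 19, v_pageout_free_min =
	 * 6 + 4 + 2 = 12, v_free_reserved = 18, v_free_target = 3*19 + 18
	 * = 75, and the final v_free_min = 19 + 18 = 37 pages.
	 */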

	if (cnt.v_page_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;


	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int s = splhigh();

		if (!vm_pages_needed ||
			((cnt.v_free_count >= cnt.v_free_reserved) &&
			 (cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", 0);
		}
		vm_pages_needed = 0;
		splx(s);
		cnt.v_pdwakeups++;
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
		wakeup(kmem_map);
	}
}

static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs();
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan all processes; deactivate pages from any process
		 * that exceeds its RSS rlimit or that has been swapped out.
		 */

		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			int overage;
			quad_t limit;
			vm_offset_t size;

			/*
			 * skip system processes and processes that are
			 * already exiting.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which
			 * forces all of their pages to be deactivated.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

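			/*
			 * overage is the number of resident pages beyond
			 * the limit; hand exactly that many to the
			 * deactivation walk over the process's map.
			 */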
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				overage = (size - limit) >> PAGE_SHIFT;
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
			}
		}

		/*
		 * discard cached objects that no longer have any resident
		 * pages
		 */
restart:
		object = vm_object_cached_list.tqh_first;
		while (object) {
			/*
			 * if there are no resident pages -- get rid of the
			 * object
			 */
			if (object->resident_page_count == 0) {
				vm_object_reference(object);
				pager_cache(object, FALSE);
				goto restart;
			}
			object = object->cached_list.tqe_next;
		}
	}
}
964