/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.34 1995/02/09 07:41:42 davidg Exp $
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

extern vm_map_t kmem_map;
int	vm_pages_needed;	/* Event on which pageout daemon sleeps */
int	vm_pagescanner;		/* Event on which pagescanner sleeps */

int	vm_pageout_pages_needed = 0;	/* flag saying that the pageout daemon needs pages */
int	vm_page_pagesfreed;

extern int npendingio;
int	vm_pageout_proc_limit;
int	vm_pageout_req_swapout;
int	vm_daemon_needed;
extern int nswiodone;
extern int swap_pager_full;
extern int vm_swap_size;
extern int swap_pager_ready();

#define MAXSCAN 1024		/* maximum number of pages to scan in queues */
#define ACT_DECLINE	1
#define ACT_ADVANCE	3
#define ACT_MAX		100

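/*
 * Page aging parameters: a page's act_count is decremented by ACT_DECLINE
 * each time it is found unreferenced and advanced by ACT_ADVANCE (capped at
 * ACT_MAX) each time it is found referenced; pages whose act_count reaches
 * zero become candidates for deactivation.  MAXLAUNDER below bounds how many
 * dirty pages a single inactive-queue scan will start cleaning.
 */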
#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
int vm_pageout_req_do_stats;

int vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 * vm_pageout_clean:
 *	cleans a vm_page
 */
int
vm_pageout_clean(m, sync)
	register vm_page_t m;
	int sync;
{
	/*
	 * Clean the page and remove it from the laundry.
	 *
	 * We set the busy bit to cause potential page faults on this page to
	 * block.
	 *
	 * And we set pageout-in-progress to keep the object from disappearing
	 * during pageout.  This guarantees that the page won't move from the
	 * inactive queue.  (However, any other page on the inactive queue may
	 * move!)
	 */

	register vm_object_t object;
	register vm_pager_t pager;
	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
	vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int anyok = 0;
	int i;
	vm_offset_t offset = m->offset;

	object = m->object;
	if (!object) {
		printf("pager: object missing\n");
		return 0;
	}
	if (!object->pager && (object->flags & OBJ_INTERNAL) == 0) {
		printf("pager: non internal obj without pager\n");
	}
	/*
	 * Try to collapse the object before making a pager for it.  We must
	 * unlock the page queues first.  We try to defer the creation of a
	 * pager until all shadows are not paging.  This allows
	 * vm_object_collapse to work better and helps control swap space
	 * size. (J. Dyson 11 Nov 93)
	 */

	if (!object->pager &&
	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		return 0;

	if ((!sync && m->bmapped != 0 && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	if (!sync && object->shadow) {
		vm_object_collapse(object);
	}
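	/*
	 * Start the pageout cluster with the page we were handed; if the
	 * object already has a pager, try to grow the cluster with adjacent
	 * dirty pages that are safe to write (see the conditions below).
	 */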
	pageout_count = 1;
	ms[0] = m;

	pager = object->pager;
	if (pager) {
		for (i = 1; i < vm_pageout_page_count; i++) {
			vm_page_t mt;

			ms[i] = mt = vm_page_lookup(object, offset + i * NBPG);
			if (mt) {
				vm_page_test_dirty(mt);
				/*
				 * We can cluster ONLY if: the page is NOT
				 * clean, and is not wired, busy, held, or
				 * mapped into a buffer, and one of the
				 * following: 1) the page is inactive, or a
				 * seldom used active page, or 2) we force
				 * the issue.
				 */
				if ((mt->dirty & mt->valid) != 0
				    && (((mt->flags & (PG_BUSY | PG_INACTIVE)) == PG_INACTIVE)
					|| sync == VM_PAGEOUT_FORCE)
				    && (mt->wire_count == 0)
				    && (mt->busy == 0)
				    && (mt->hold_count == 0)
				    && (mt->bmapped == 0))
					pageout_count++;
				else
					break;
			} else
				break;
		}
		/*
		 * we allow reads during pageouts...
		 */
		for (i = 0; i < pageout_count; i++) {
			ms[i]->flags |= PG_BUSY;
			pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
		}
		object->paging_in_progress += pageout_count;
	} else {

		m->flags |= PG_BUSY;

		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);

		object->paging_in_progress++;

		pager = vm_pager_allocate(PG_DFLT, (caddr_t) 0,
		    object->size, VM_PROT_ALL, 0);
		if (pager != NULL) {
			vm_object_setpager(object, pager, 0, FALSE);
		}
	}

	/*
	 * If there is no pager for the page, use the default pager.  If
	 * there's no place to put the page at the moment, leave it in the
	 * laundry and hope that there will be paging space later.
	 */

	if ((pager && pager->pg_type == PG_SWAP) ||
	    (cnt.v_free_count + cnt.v_cache_count) >= cnt.v_pageout_free_min) {
		if (pageout_count == 1) {
			pageout_status[0] = pager ?
			    vm_pager_put(pager, m,
			    ((sync || (object == kernel_object)) ? TRUE : FALSE)) :
			    VM_PAGER_FAIL;
		} else {
			if (!pager) {
				for (i = 0; i < pageout_count; i++)
					pageout_status[i] = VM_PAGER_FAIL;
			} else {
				vm_pager_put_pages(pager, ms, pageout_count,
				    ((sync || (object == kernel_object)) ? TRUE : FALSE),
				    pageout_status);
			}
		}
	} else {
		for (i = 0; i < pageout_count; i++)
			pageout_status[i] = VM_PAGER_FAIL;
	}

	for (i = 0; i < pageout_count; i++) {
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
			ms[i]->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * the page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			if (ms[i]->flags & PG_INACTIVE)
				vm_page_activate(ms[i]);
			break;
		case VM_PAGER_AGAIN:
			break;
		}


		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			if (--object->paging_in_progress == 0)
				wakeup((caddr_t) object);
			if ((ms[i]->flags & (PG_REFERENCED|PG_WANTED)) ||
			    pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
				ms[i]->flags &= ~PG_REFERENCED;
				if (ms[i]->flags & PG_INACTIVE)
					vm_page_activate(ms[i]);
			}
			PAGE_WAKEUP(ms[i]);
		}
	}
	return anyok;
}

/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	shadows.
 *
 *	The object and map must be locked.
 */
int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	int count;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

	if (object->pager && (object->pager->pg_type == PG_DEVICE))
		return 0;

	if (object->shadow) {
		if (object->shadow->ref_count == 1)
			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
		else
			vm_pageout_object_deactivate_pages(map, object->shadow, count, 1);
	}
	if (object->paging_in_progress || !vm_object_lock_try(object))
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		cnt.v_pdpages++;
		vm_page_lock_queues();
		if (p->wire_count != 0 ||
		    p->hold_count != 0 ||
		    p->bmapped != 0 ||
		    p->busy != 0 ||
		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			p = next;
			continue;
		}
		/*
		 * if a page is active, not wired and is in the process's
		 * pmap, then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
			    (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we
				 * deactivate
				 */
				if (!p->act_count) {
					if (!map_remove_only)
						vm_page_deactivate(p);
					pmap_page_protect(VM_PAGE_TO_PHYS(p),
					    VM_PROT_NONE);
					/*
					 * else: if on the next go-around we
					 * will deactivate the page, we need
					 * to place the page on the end of
					 * the queue to age the other pages
					 * in memory.
					 */
				} else {
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					TAILQ_REMOVE(&object->memq, p, listq);
					TAILQ_INSERT_TAIL(&object->memq, p, listq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						vm_page_unlock_queues();
						vm_object_unlock(object);
						return dcount;
					}
				}
			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				p->flags &= ~PG_REFERENCED;
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				TAILQ_REMOVE(&object->memq, p, listq);
				TAILQ_INSERT_TAIL(&object->memq, p, listq);
			}
		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p),
			    VM_PROT_NONE);
		}
		vm_page_unlock_queues();
		p = next;
	}
	vm_object_unlock(object);
	return dcount;
}


/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */

void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	int (*freeer) (vm_map_t, vm_object_t, int);
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (*count <= 0)
		return;
	vm_map_reference(map);
	if (!lock_try_read(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer, 0);
			tmpe = tmpe->next;
		};
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer, 0);
			tmpe = tmpe->next;
		};
	} else if ((obj = entry->object.vm_object) != 0) {
		*count -= (*freeer) (map, obj, *count);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

void
vm_req_vmdaemon()
{
	extern int ticks;
	static int lastrun = 0;

	if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
		wakeup((caddr_t) &vm_daemon_needed);
		lastrun = ticks;
	}
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder;
	int pages_freed;
	int desired_free;
	vm_page_t next;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;

	/* calculate the total cached size */

	if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_inactive_target + cnt.v_free_min)) {
		vm_req_vmdaemon();
	}
	/*
	 * now swap processes out if we are in low memory conditions
	 */
	if ((cnt.v_free_count <= cnt.v_free_min) &&
	    !swap_pager_full && vm_swap_size && vm_pageout_req_swapout == 0) {
		vm_pageout_req_swapout = 1;
		vm_req_vmdaemon();
	}
	pages_freed = 0;
	desired_free = cnt.v_free_target;

	/*
	 * Start scanning the inactive queue for pages we can free.  We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;

rescan1:
	maxscan = min(cnt.v_inactive_count, MAXSCAN);
	m = vm_page_queue_inactive.tqh_first;
	while (m && (maxscan-- > 0) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < desired_free)) {
		vm_page_t next;

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?\n");
			break;
		}
#endif

		/*
		 * don't mess with busy pages
		 */
		if (m->hold_count || m->busy || (m->flags & PG_BUSY) ||
		    m->bmapped != 0) {
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			m = next;
			continue;
		}
		if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			m->flags |= PG_REFERENCED;
		}
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		}
		if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			if (m->act_count < ACT_MAX)
				m->act_count += ACT_ADVANCE;
			m = next;
			continue;
		}
		vm_page_test_dirty(m);

		if ((m->dirty & m->valid) == 0) {
			if (m->valid == 0) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
				vm_page_free(m);
			} else if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
			    (cnt.v_cache_count < cnt.v_cache_min)) {
				vm_page_cache(m);
			}
		} else if (maxlaunder > 0) {
			int written;

			object = m->object;
			if ((object->flags & OBJ_DEAD) || !vm_object_lock_try(object)) {
				m = next;
				continue;
			}
			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);
			vm_object_unlock(object);

			if (!next) {
				break;
			}
			maxlaunder -= written;
			/*
			 * if the next page has been re-activated, start
			 * scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0) {
				goto rescan1;
			}
		}
		m = next;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			if ((cnt.v_free_count + cnt.v_cache_count) < desired_free) {
				page_shortage =
				    desired_free - (cnt.v_free_count + cnt.v_cache_count);
			}
		}
		if ((page_shortage <= 0) && (cnt.v_free_count < cnt.v_free_min))
			page_shortage = 1;
	}
	maxscan = min(cnt.v_active_count, MAXSCAN);
	m = vm_page_queue_active.tqh_first;
	while (m && (maxscan-- > 0) && (page_shortage > 0)) {

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0) ||
		    (m->bmapped != 0)) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			m = next;
			continue;
		}
		if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
			int s;

			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			if (m->act_count < ACT_MAX) {
				m->act_count += ACT_ADVANCE;
			}
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			s = splhigh();
			TAILQ_REMOVE(&m->object->memq, m, listq);
			TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
			splx(s);
		} else {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count && (page_shortage > 0)) {
				if (m->object->ref_count == 0) {
					vm_page_test_dirty(m);
					--page_shortage;
					if ((m->dirty & m->valid) == 0) {
						m->act_count = 0;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
					}
				} else {
					vm_page_deactivate(m);
					--page_shortage;
				}
			} else if (m->act_count) {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			}
		}
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		m = vm_page_queue_cache.tqh_first;
		if (!m)
			break;
		vm_page_free(m);
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
			psignal(bigproc, SIGKILL);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup((caddr_t) &cnt.v_free_count);
		}
	}
	vm_page_pagesfreed += pages_freed;
	return force_wakeup;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024;
	cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_inactive_target = cnt.v_free_count / 4;
	if (cnt.v_inactive_target > 512)
		cnt.v_inactive_target = 512;
	cnt.v_free_min += cnt.v_free_reserved;
	if (cnt.v_page_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 20;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;


	(void) swap_pager_alloc(0, 0, 0, 0);
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
		cnt.v_pdwakeups++;
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup((caddr_t) &cnt.v_free_count);
		wakeup((caddr_t) kmem_map);
	}
}

void
vm_daemon __P((void))
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep((caddr_t) &vm_daemon_needed, PUSER, "psleep", 0);
		swapout_threads();
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			int overage;
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (will force
			 * a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
			if (limit >= 0 && size >= limit) {
				overage = (size - limit) / NBPG;
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
			}
		}
	}

	/*
	 * we remove cached objects that have no RSS...
	 */
restart:
	vm_object_cache_lock();
	object = vm_object_cached_list.tqh_first;
	while (object) {
		vm_object_cache_unlock();
		/*
		 * if there are no resident pages -- get rid of the object
		 */
		if (object->resident_page_count == 0) {
			if (object != vm_object_lookup(object->pager))
				panic("vm_object_cache_trim: I'm sooo confused.");
			pager_cache(object, FALSE);
			goto restart;
		}
		object = object->cached_list.tqe_next;
		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}