/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.39 1995/02/25 18:39:04 bde Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

extern vm_map_t kmem_map;
int	vm_pages_needed;	/* Event on which pageout daemon sleeps */
int	vm_pagescanner;		/* Event on which pagescanner sleeps */

int	vm_pageout_pages_needed = 0;	/* flag saying that the pageout daemon needs pages */
int	vm_page_pagesfreed;

extern int npendingio;
int	vm_pageout_proc_limit;
int	vm_pageout_req_swapout;
int	vm_daemon_needed;
extern int nswiodone;
extern int swap_pager_full;
extern int vm_swap_size;
extern int swap_pager_ready();

#define MAXSCAN 1024		/* maximum number of pages to scan in queues */

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

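/*
 * Number of pages the clustering code in vm_pageout_clean() will try to
 * push to the pager in a single operation (sizes the ms[] and
 * pageout_status[] arrays below).
 */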
#define VM_PAGEOUT_PAGE_COUNT 8
int	vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
int	vm_pageout_req_do_stats;

int	vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 * vm_pageout_clean:
 *	cleans a vm_page
 */
int
vm_pageout_clean(m, sync)
	register vm_page_t m;
	int sync;
{
	/*
	 * Clean the page and remove it from the laundry.
	 *
	 * We set the busy bit to cause potential page faults on this page to
	 * block.
	 *
	 * And we set pageout-in-progress to keep the object from disappearing
	 * during pageout.  This guarantees that the page won't move from the
	 * inactive queue.  (However, any other page on the inactive queue may
	 * move!)
	 */

	register vm_object_t object;
	register vm_pager_t pager;
	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
	vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int anyok = 0;
	int i;
	vm_offset_t offset = m->offset;

	object = m->object;
	if (!object) {
		printf("pager: object missing\n");
		return 0;
	}
	if (!object->pager && (object->flags & OBJ_INTERNAL) == 0) {
		printf("pager: non internal obj without pager\n");
	}
	/*
	 * Try to collapse the object before making a pager for it.  We must
	 * unlock the page queues first.  We try to defer the creation of a
	 * pager until all shadows are not paging.  This allows
	 * vm_object_collapse to work better and helps control swap space
	 * size. (J. Dyson 11 Nov 93)
	 */

	if (!object->pager &&
	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		return 0;

	if ((!sync && m->bmapped != 0 && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	if (!sync && object->shadow) {
		vm_object_collapse(object);
	}
	pageout_count = 1;
	ms[0] = m;

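	/*
	 * If the object already has a pager, look for additional dirty pages
	 * at consecutive offsets so they can be pushed out in one clustered
	 * operation; otherwise a pager has to be allocated first (below).
	 */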
	pager = object->pager;
	if (pager) {
		for (i = 1; i < vm_pageout_page_count; i++) {
			vm_page_t mt;

			ms[i] = mt = vm_page_lookup(object, offset + i * NBPG);
			if (mt) {
				vm_page_test_dirty(mt);
				/*
				 * We can cluster ONLY if the page is NOT
				 * clean, and is not wired, busy, held, or
				 * mapped into a buffer, and one of the
				 * following holds: 1) the page is inactive,
				 * or a seldom used active page, or 2) we
				 * force the issue.
				 */
				if ((mt->dirty & mt->valid) != 0
				    && (((mt->flags & (PG_BUSY | PG_INACTIVE)) == PG_INACTIVE)
					|| sync == VM_PAGEOUT_FORCE)
				    && (mt->wire_count == 0)
				    && (mt->busy == 0)
				    && (mt->hold_count == 0)
				    && (mt->bmapped == 0))
					pageout_count++;
				else
					break;
			} else
				break;
		}
		/*
		 * we allow reads during pageouts...
		 */
		for (i = 0; i < pageout_count; i++) {
			ms[i]->flags |= PG_BUSY;
			vm_page_protect(ms[i], VM_PROT_READ);
		}
		object->paging_in_progress += pageout_count;
	} else {

		m->flags |= PG_BUSY;

		vm_page_protect(m, VM_PROT_READ);

		object->paging_in_progress++;

		pager = vm_pager_allocate(PG_DFLT, (caddr_t) 0,
		    object->size, VM_PROT_ALL, 0);
		if (pager != NULL) {
			vm_object_setpager(object, pager, 0, FALSE);
		}
	}

	/*
	 * If there is no pager for the page, use the default pager.  If
	 * there's no place to put the page at the moment, leave it in the
	 * laundry and hope that there will be paging space later.
	 */

	if ((pager && pager->pg_type == PG_SWAP) ||
	    (cnt.v_free_count + cnt.v_cache_count) >= cnt.v_pageout_free_min) {
		if (pageout_count == 1) {
			pageout_status[0] = pager ?
			    vm_pager_put(pager, m,
			    ((sync || (object == kernel_object)) ? TRUE : FALSE)) :
			    VM_PAGER_FAIL;
		} else {
			if (!pager) {
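				/*
				 * No pager could be allocated, so every page
				 * in the cluster is marked as failed.
				 */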
				for (i = 0; i < pageout_count; i++)
					pageout_status[i] = VM_PAGER_FAIL;
			} else {
				vm_pager_put_pages(pager, ms, pageout_count,
				    ((sync || (object == kernel_object)) ? TRUE : FALSE),
				    pageout_status);
			}
		}
	} else {
		for (i = 0; i < pageout_count; i++)
			pageout_status[i] = VM_PAGER_FAIL;
	}

	for (i = 0; i < pageout_count; i++) {
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
			ms[i]->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We will
			 * try paging it out again later.)
			 */
			if (ms[i]->flags & PG_INACTIVE)
				vm_page_activate(ms[i]);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			if ((ms[i]->flags & (PG_REFERENCED|PG_WANTED)) ||
			    pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
				ms[i]->flags &= ~PG_REFERENCED;
				if (ms[i]->flags & PG_INACTIVE)
					vm_page_activate(ms[i]);
			}
			PAGE_WAKEUP(ms[i]);
		}
	}
	return anyok;
}

/*
 * vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	shadows.
 *
 *	The object and map must be locked.
 */
int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	int count;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

	if (object->pager && (object->pager->pg_type == PG_DEVICE))
		return 0;

	if (object->shadow) {
		if (object->shadow->ref_count == 1)
			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
		else
			vm_pageout_object_deactivate_pages(map, object->shadow, count, 1);
	}
	if (object->paging_in_progress || !vm_object_lock_try(object))
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		cnt.v_pdpages++;
		vm_page_lock_queues();
		if (p->wire_count != 0 ||
		    p->hold_count != 0 ||
		    p->bmapped != 0 ||
		    p->busy != 0 ||
		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			p = next;
			continue;
		}
		/*
		 * if a page is active, not wired and is in the process's
		 * pmap, then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
			    (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we
				 * deactivate
				 */
				if (!p->act_count) {
					if (!map_remove_only)
						vm_page_deactivate(p);
					vm_page_protect(p, VM_PROT_NONE);
					/*
					 * otherwise, since we will deactivate
					 * the page on the next go-around,
					 * place it at the end of the queue to
					 * age the other pages in memory.
					 */
				} else {
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					TAILQ_REMOVE(&object->memq, p, listq);
					TAILQ_INSERT_TAIL(&object->memq, p, listq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						vm_page_unlock_queues();
						vm_object_unlock(object);
						return dcount;
					}
				}
			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				p->flags &= ~PG_REFERENCED;
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				TAILQ_REMOVE(&object->memq, p, listq);
				TAILQ_INSERT_TAIL(&object->memq, p, listq);
			}
		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
			vm_page_protect(p, VM_PROT_NONE);
		}
		vm_page_unlock_queues();
		p = next;
	}
	vm_object_unlock(object);
	return dcount;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */

void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	int (*freeer) (vm_map_t, vm_object_t, int);
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (*count <= 0)
		return;
	vm_map_reference(map);
	if (!lock_try_read(&map->lock)) {
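		/*
		 * Couldn't get the map read lock; drop our reference and
		 * give up rather than block.
		 */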
		vm_map_deallocate(map);
		return;
	}
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer, 0);
			tmpe = tmpe->next;
		};
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer, 0);
			tmpe = tmpe->next;
		};
	} else if ((obj = entry->object.vm_object) != 0) {
		*count -= (*freeer) (map, obj, *count);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

void
vm_req_vmdaemon()
{
	extern int ticks;
	static int lastrun = 0;

	if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
		wakeup((caddr_t) &vm_daemon_needed);
		lastrun = ticks;
	}
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder;
	int pages_freed;
	vm_page_t next;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;

	/* calculate the total cached size */

	if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_inactive_target + cnt.v_free_min)) {
		vm_req_vmdaemon();
	}
	/*
	 * now swap processes out if we are in low memory conditions
	 */
	if ((cnt.v_free_count <= cnt.v_free_min) &&
	    !swap_pager_full && vm_swap_size && vm_pageout_req_swapout == 0) {
		vm_pageout_req_swapout = 1;
		vm_req_vmdaemon();
	}
	pages_freed = 0;

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or we have
	 * scanned through the entire queue.  If we encounter dirty pages,
	 * we start cleaning them.
	 */

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;

rescan1:
	maxscan = min(cnt.v_inactive_count, MAXSCAN);
	m = vm_page_queue_inactive.tqh_first;
	while (m && (maxscan-- > 0) &&
	    (cnt.v_cache_count < (cnt.v_cache_min + cnt.v_free_target))) {
		vm_page_t next;

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?\n");
			break;
		}
#endif

		/*
		 * don't mess with busy pages
		 */
		if (m->hold_count || m->busy || (m->flags & PG_BUSY) ||
		    m->bmapped != 0) {
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			m = next;
			continue;
		}
		if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			m->flags |= PG_REFERENCED;
		}
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		}
		if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
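			/*
			 * The page was referenced while on the inactive
			 * queue: reactivate it and credit its activity count.
			 */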
			vm_page_activate(m);
			if (m->act_count < ACT_MAX)
				m->act_count += ACT_ADVANCE;
			m = next;
			continue;
		}
		vm_page_test_dirty(m);

		if ((m->dirty & m->valid) == 0) {
			if (m->valid == 0) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
				vm_page_free(m);
			} else {
				vm_page_cache(m);
			}
		} else if (maxlaunder > 0) {
			int written;

			object = m->object;
			if ((object->flags & OBJ_DEAD) || !vm_object_lock_try(object)) {
				m = next;
				continue;
			}
			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);
			vm_object_unlock(object);

			if (!next) {
				break;
			}
			maxlaunder -= written;
			/*
			 * if the next page has been re-activated, start
			 * scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0) {
				goto rescan1;
			}
		}
		m = next;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_inactive_count;
		}
	}
	maxscan = min(cnt.v_active_count, MAXSCAN);
	m = vm_page_queue_active.tqh_first;
	while (m && (maxscan-- > 0) && (page_shortage > 0)) {

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0) ||
		    (m->bmapped != 0)) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			m = next;
			continue;
		}
		if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
			pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
			int s;

			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			if (m->act_count < ACT_MAX) {
				m->act_count += ACT_ADVANCE;
			}
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			s = splhigh();
			TAILQ_REMOVE(&m->object->memq, m, listq);
			TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
			splx(s);
		} else {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count && (page_shortage > 0)) {
				if (m->object->ref_count == 0) {
					vm_page_test_dirty(m);
					--page_shortage;
					if ((m->dirty & m->valid) == 0) {
						m->act_count = 0;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
					}
				} else {
					vm_page_deactivate(m);
					--page_shortage;
				}
			} else if (m->act_count) {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			}
		}
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		m = vm_page_queue_cache.tqh_first;
		if (!m)
			break;
		vm_page_free(m);
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
			psignal(bigproc, SIGKILL);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup((caddr_t) &cnt.v_free_count);
		}
	}
	vm_page_pagesfreed += pages_freed;
	return force_wakeup;
}

/*
 * vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024;
	cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
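	/*
	 * The reserved pages are counted as part of the free-page minimum.
	 */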
	cnt.v_free_min += cnt.v_free_reserved;

	if (cnt.v_page_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2 * cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	cnt.v_interrupt_free_min = 2;

	(void) swap_pager_alloc(0, 0, 0, 0);
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int s = splhigh();

		if (!vm_pages_needed ||
		    ((cnt.v_free_count >= cnt.v_free_reserved) &&
		     (cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
			vm_pages_needed = 0;
			tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
		}
		vm_pages_needed = 0;
		splx(s);
		cnt.v_pdwakeups++;
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup((caddr_t) &cnt.v_free_count);
		wakeup((caddr_t) kmem_map);
	}
}

void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep((caddr_t) &vm_daemon_needed, PUSER, "psleep", 0);
		swapout_threads();
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			int overage;
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
			if (limit >= 0 && size >= limit) {
				overage = (size - limit) / NBPG;
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
			}
		}
	}

	/*
	 * we remove cached objects that have no RSS...
	 */
restart:
	vm_object_cache_lock();
	object = vm_object_cached_list.tqh_first;
	while (object) {
		vm_object_cache_unlock();
		/*
		 * if there are no resident pages -- get rid of the object
		 */
		if (object->resident_page_count == 0) {
			if (object != vm_object_lookup(object->pager))
				panic("vm_object_cache_trim: I'm sooo confused.");
			pager_cache(object, FALSE);
			goto restart;
		}
		object = object->cached_list.tqe_next;
		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}