/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_pmap_collect(void);
static void vm_pageout_scan(int pass);
static int vm_pageout_free_page_calc(vm_size_t count);
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

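/*
 * Sketch of how other subsystems wake the daemon (this follows the
 * convention used elsewhere in the VM code; it is not a definition):
 *
 *	vm_pageout_deficit += wanted;	(pages the caller failed to get)
 *	vm_pages_needed = 1;
 *	wakeup(&vm_pages_needed);
 */
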
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

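/*
 * The read-write knobs above and below are runtime tunables under the
 * "vm" sysctl node.  For example (hypothetical values):
 *
 *	sysctl vm.max_launder=64
 *	sysctl vm.swap_idle_enabled=1
 *
 * biases how aggressively the daemon launders dirty pages and whether
 * idle processes are swapped out.
 */
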
SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
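	 *
	 * (In practice the caller, vm_pageout_scan(), has already decided
	 * that a page shortage exists before we are invoked, so the free
	 * memory check effectively lives there.)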
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
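		 *
		 * Illustrative numbers: with vm_pageout_page_count == 16
		 * and pindex == 21, the reverse scan stops once it has
		 * added page 16, leaving the cluster aligned on a 16-page
		 * boundary; the forward scan below may then extend the run
		 * up toward page 31.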
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */
int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_page_protect(mc[i], VM_PROT_READ);
	}
	object = mc[0]->object;
	vm_page_unlock_queues();
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				pmap_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	GIANT_REQUIRED;
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(map->pmap) <= desired) {
				vm_page_unlock_queues();
				return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			p = next;
		}
		vm_page_unlock_queues();
		object = object->backing_object;
	}
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	GIANT_REQUIRED;
	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired)
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	vm_map_unlock(map);
	return;
}
#endif /* !defined(NO_SWAPPING) */

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */
void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	GIANT_REQUIRED;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	pmap_remove_all(m);
	vm_page_free(m);
	cnt.v_dfree++;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
static void
vm_pageout_pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone;

	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	vm_page_lock_queues();
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	vm_page_unlock_queues();
	pmap_pagedaemon_waken = 0;
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();
	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	vm_pageout_pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
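	 *
	 * (maxlaunder is seeded below from the vm.max_launder sysctl; on
	 * passes after the first it is raised so high that it is
	 * effectively unlimited.)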
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_lock_queues();
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_lock_queues();
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_page_lock_queues();
			vm_pageout_page_free(m);
			vm_page_unlock_queues();
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_lock_queues();
			vm_page_cache(m);
			vm_page_unlock_queues();
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
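			 *
			 * (Whether a swap-backed page may be laundered at
			 * all is decided by the swap_pageouts_ok computation
			 * just below, which honors the defer/disable
			 * sysctls.)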
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) {
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vput(); we don't move the
				 * page back onto the end of the queue
				 * because statistics are more correct if
				 * we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_lock_queues();
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			vm_page_unlock_queues();
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

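	/*
	 * The inactive scan is complete.  Roughly (a reading of the
	 * accounting below, not a definition): vm_paging_target() is
	 * positive when the free and cache page counts are below their
	 * targets, and the v_inactive_target - v_inactive_count term
	 * grows the goal further when the inactive queue itself is short.
	 */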
	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	vm_page_lock_queues();
	/*
	 * Scan the active queue for things we can deactivate. We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					pmap_remove_all(m);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	s = splvm();

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
	}
	splx(s);
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
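	 *
	 * (speedup_syncer() asks the syncer daemon to push dirty vnodes
	 * sooner than its normal schedule; vm_req_vmdaemon() asks the
	 * vmdaemon to begin swapping out whole processes.)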
	/*
	 * If we did not reach our paging target and we skipped a vnode
	 * in a writeable object, wake up the sync daemon; also kick off
	 * swapout if we are still short of free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep
	 * anyone from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for
	 * that child's lock while walking this list.  To avoid this, we
	 * don't block on the process lock but just skip a process if it
	 * is already locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
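		/*
		 * Candidate filter, in the order applied below: skip any
		 * process we cannot lock without blocking, system
		 * processes, init, low-pid (presumably essential)
		 * processes while swap remains, and any process with a
		 * thread in a non-runnable, non-sleeping state.
		 */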
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.  Check all the threads
			 * individually.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace) +
			    vmspace_swap_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}
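
/*
 * The shortage test in the routine below is driven by the configured
 * targets.  With hypothetical values v_inactive_target = 1500,
 * v_cache_max = 2000 and v_free_min = 500, the stats scan only runs
 * while free + inactive + cache < 4000 pages; otherwise the queues are
 * deemed well enough populated that no extra aging is needed.
 */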

/*
 * This routine tries to maintain the pseudo-LRU active queue so that,
 * during long periods with little or no paging, some statistics
 * accumulation still occurs.  It helps the situation where paging is
 * just starting to occur.
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();
	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) /
		    cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
	vm_page_unlock_queues();
	splx(s0);
}
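
/*
 * A worked example of the sizing below, on a hypothetical 32768-page
 * (128MB at 4K pages) machine, assuming vm_pageout_page_count = 16 and
 * PQ_L2_SIZE = 256 (both vary with kernel configuration): v_free_min
 * starts at 4 + (32768 - 1024) / 200 = 162.  With MAXBSIZE = 64K and
 * v_interrupt_free_min = 2, v_pageout_free_min = 32 + 2 = 34, so
 * v_free_reserved = 16 + 34 + (32768 / 768 = 42) + 256 = 348 and the
 * final v_free_min becomes 162 + 348 = 510 pages.
 */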
static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int pass;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
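	/*
	 * Continuing the hypothetical 32768-page example from above
	 * (v_free_min = 510, v_free_reserved = 348): v_free_count would
	 * exceed 6144 at this point, so v_free_target = 4 * 510 + 348 =
	 * 2388, v_cache_min = 2388, v_cache_max = 4776 and
	 * v_inactive_target = 3582, subject to the v_free_count / 3
	 * clamp below.
	 */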
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
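		/*
		 * "pass" encodes how aggressive the coming scan should
		 * be: 0 after an idle interval, 1 for a normal shortage,
		 * and >1 when a previous scan failed to reach the
		 * target, in which case we also nap for hz/2 below,
		 * presumably so that in-flight pageouts can complete.
		 */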
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				tsleep(&vm_pages_needed, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = tsleep(&vm_pages_needed, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan(pass);
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}
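
/*
 * vm_daemon runs the swapout requests posted by the pageout daemon and
 * enforces per-process RSS limits.  The limits below are expressed in
 * pages (OFF_TO_IDX converts the byte-valued rlimit); also note that
 * vm_pindex_t is unsigned, so the "limit >= 0" test below is
 * effectively always true and only the size comparison matters.
 */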
static void
vm_daemon()
{
	struct proc *p;
	int breakout;
	struct thread *td;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes for exceeding their rlimits, or for
		 * being swapped out -- deactivate pages.
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			mtx_unlock_spin(&sched_lock);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif			/* !defined(NO_SWAPPING) */